// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
#include <linux/acpi.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/* Number of initially usable memory pages. */
static unsigned long ini_nr_pages __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
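/*
 * Note: xen_remap_buf above spans exactly one page: REMAP_SIZE is
 * P2M_PER_PAGE - 3 so that the three bookkeeping fields plus the mfns[]
 * array fill P2M_PER_PAGE longs, matching the __aligned(PAGE_SIZE)
 * placement.
 */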

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

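	/*
	 * The parameter may be given bare ("xen_512gb_limit", which enables
	 * the limit) or with an explicit boolean value.
	 */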
	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

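/*
 * Remove n_pfns pages starting at start_pfn from one of the extra memory
 * regions; hitting the middle of a region splits it, with the tail re-added
 * via xen_add_extra_mem().
 */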
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/*
		 * If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

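/*
 * Give a single frame back to the hypervisor; the callers below treat a
 * return value of 1 (one extent released) as success.
 */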
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
						      unsigned long end_pfn)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, ini_nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

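	/* Update the kernel's virtual mapping (direct map) of the page. */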
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
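/*
 * Rough sketch of the list built below (one buffer page per chunk, linked
 * through next_area_mfn and anchored at xen_remap_mfn):
 *
 *   xen_remap_mfn -> [ next_area_mfn | target_pfn | size | mfns[] ]
 *                        |
 *                        +-> [ next_area_mfn | ... ] -> ... -> INVALID_P2M_ENTRY
 */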
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn((void *)buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = ini_nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= ini_nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > ini_nr_pages)
			size = ini_nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
							   cur_pfn + left);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, their VA mappings need to be
	 * zapped.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			native_make_pte(0), 0);

	return remap_pfn;
}

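/*
 * Callback for xen_foreach_remap_area(): count the pages of a non-RAM range
 * that are backed by the initial allocation and will need a remap target.
 */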
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn,
	unsigned long remap_pages)
{
	if (start_pfn >= ini_nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, ret_val);
			start = end;
		}
	}

	return ret_val;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn to remap to which pfn) is contained in
 * the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows remapping the different chunks in
 * arbitrary order while keeping the resulting mapping independent of the
 * order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn((void *)buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
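		/*
		 * Coalesce adjacent target ranges so that xen_del_extra_mem()
		 * is called only once per contiguous range.
		 */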
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);

	xen_do_remap_nonram();
}

static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

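/*
 * Return false only if the whole range [start, start + size) is covered by
 * a single RAM entry of the E820 map; otherwise treat the area as reserved.
 */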
static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * E820 map.
 * Used to relocate pre-allocated areas like initrd or p2m list which are in
 * conflict with the to be used E820 map.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
 * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
 * Adapting the P2M must be deferred until page allocation is possible.
 */
static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
	phys_addr_t swap_addr, swap_size, entry_end;

	swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
	swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		entry_end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
		    entry_end - swap_size >= mem_end) {
			/* Reduce RAM entry by needed space (whole pages). */
			entry->size -= swap_size;

			/* Add new entry at the end of E820 map. */
			entry = xen_e820_table.entries +
				xen_e820_table.nr_entries;
			xen_e820_table.nr_entries++;

			/* Fill new entry (keep size and page offset). */
			entry->type = swap_entry->type;
			entry->addr = entry_end - swap_size +
				      swap_addr - swap_entry->addr;
			entry->size = swap_entry->size;

			/* Convert old entry to RAM, align to pages. */
			swap_entry->type = E820_TYPE_RAM;
			swap_entry->addr = swap_addr;
			swap_entry->size = swap_size;

			/* Remember PFN<->MFN relation for P2M update. */
			xen_add_remap_nonram(swap_addr, entry_end - swap_size,
					     swap_size);

			/* Order E820 table and merge entries. */
			e820__update_table(&xen_e820_table);

			return;
		}

		entry++;
	}

	xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
	BUG();
}

/*
 * Look for non-RAM memory types in a specific guest physical area and move
 * those away if possible (ACPI NVS only for now).
 */
static void __init xen_e820_resolve_conflicts(phys_addr_t start,
					      phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t end;

	if (!size)
		return;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->addr >= end)
			return;

		if (entry->addr + entry->size > start &&
		    entry->type == E820_TYPE_NVS)
			xen_e820_swap_entry_with_ram(entry);

		entry++;
	}
}

/*
 * Check that an area of physical memory is usable for non-movable purposes.
 * An area is considered usable if the used E820 map lists it as RAM or as
 * some other type which can be moved to higher PFNs while keeping the MFNs.
 * In case the area is not usable, crash the system with an error message.
 */
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
				   const char *component)
{
	xen_e820_resolve_conflicts(start, size);

	if (!xen_is_e820_reserved(start, size))
		return;

	xen_raw_console_write("Xen hypervisor allocated ");
	xen_raw_console_write(component);
	xen_raw_console_write(" memory conflicts with E820 map\n");
	BUG();
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
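/*
 * The copy proceeds in chunks because early_memremap() can only map a
 * limited fixmap window (NR_FIX_BTMAPS pages) this early in boot.
 */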
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_phys_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long maxmem_pages;
	int i;
	int op;

	xen_parse_512gb();
	ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
	mem_end = PFN_PHYS(ini_nr_pages);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	if (xen_initial_domain()) {
		/*
		 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
		 * regions, so if we're using the machine memory map leave the
		 * region as RAM as it is in the pseudo-physical map.
		 *
		 * UNUSABLE regions in domUs are not handled and will need
		 * a patch in the future.
		 */
		xen_ignore_unusable();

#ifdef CONFIG_ISCSI_IBFT_FIND
		/* Reserve 0.5 MiB to 1 MiB region so iBFT can be found */
		xen_e820_table.entries[xen_e820_table.nr_entries].addr = IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].type = E820_TYPE_RESERVED;
		xen_e820_table.nr_entries++;
#endif
	}

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	xen_chk_is_e820_usable(__pa_symbol(_text),
			       __pa_symbol(_end) - __pa_symbol(_text),
			       "kernel");

	/*
	 * Check for a conflict of the xen_start_info memory with the target
	 * E820 map.
	 */
	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
			       "xen_start_info");

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(xen_count_remap_pages);

	if (max_pages > ini_nr_pages)
		extra_pages += max_pages - ini_nr_pages;

	/*
	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO times the
	 * base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
	extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
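	/*
	 * Rebuild the kernel's E820 map from the Xen-supplied one: RAM below
	 * mem_end is kept in place, RAM above it either becomes extra memory
	 * (up to extra_pages) or is discarded.
	 */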
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RESERVED)
			xen_pv_pci_possible = true;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_phys_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}

void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}