/*
 * Machine specific setup for xen
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>

#include "xen-ops.h"
#include "vdso.h"
/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
extern const char nmi[];
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
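/*
 * Rough illustration of the ratio (numbers indicative only, assuming a
 * struct page of about 64 bytes on 4096-byte pages): each page of extra
 * memory costs ~64/4096 of a base page in page structures, so at 10x
 * extra the page structures consume roughly 16% of the base allocation,
 * which still leaves a usable system.
 */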
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size  = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);
	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
			pfn, mfn);

		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
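/*
 * Worker shared by the release and populate paths below: walk [start, end)
 * and either hand the backing frames back to Xen (release) or ask Xen for
 * new frames (populate), updating the P2M entry for each PFN as we go.
 * Returns the number of pages actually released or populated.
 */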
static unsigned long __init xen_do_chunk(unsigned long start,
					 unsigned long end, bool release)
{
	struct xen_memory_reservation reservation = {
		.domid = DOMID_SELF	/* address_bits and extent_order stay 0 */
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long frame;
		unsigned long mfn = pfn_to_mfn(pfn);

		if (release) {
			/* Make sure pfn exists to start with */
			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
				continue;
			frame = mfn;
		} else {
			if (mfn != INVALID_P2M_ENTRY)
				continue;
			frame = pfn;
		}
		set_xen_guest_handle(reservation.extent_start, &frame);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
					   &reservation);
		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
		     release ? "release" : "populate", pfn, ret);
		if (ret != 1)
			break;

		if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
			if (release)
				break;
			/* Could not record the new frame; give it back to Xen. */
			set_xen_guest_handle(reservation.extent_start, &frame);
			reservation.nr_extents = 1;
			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						   &reservation);
			break;
		}
		len++;
	}
	if (len)
		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
		       release ? "Freeing" : "Populating",
		       start, end, len,
		       release ? "freed" : "added");

	return len;
}
static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	return xen_do_chunk(start, end, true);
}
static unsigned long __init xen_populate_chunk(
	const struct e820entry *list, size_t map_size,
	unsigned long max_pfn, unsigned long *last_pfn,
	unsigned long credits_left)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;
	unsigned long dest_pfn;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;
		unsigned long pfns;
		long capacity;

		if (credits_left <= 0)
			break;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after the xen_start_info->nr_pages */
		if (e_pfn <= max_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);
		/* If the E820 falls within the nr_pages, we want to start
		 * at the nr_pages PFN.
		 * If that would mean going past the E820 entry, skip it.
		 */
		if (s_pfn <= max_pfn) {
			capacity = e_pfn - max_pfn;
			dest_pfn = max_pfn;
		} else {
			capacity = e_pfn - s_pfn;
			dest_pfn = s_pfn;
		}

		if (credits_left < capacity)
			capacity = credits_left;

		pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
		done += pfns;
		*last_pfn = (dest_pfn + pfns);
		if (pfns < capacity)
			break;
		credits_left -= pfns;
	}
	return done;
}
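/*
 * Mark the PFN range [start_pfn, end_pfn) as identity (1:1) in the P2M,
 * first releasing any pages in the range that lie below nr_pages, i.e.
 * pages backed by real RAM from the initial allocation.
 */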
static void __init xen_set_identity_and_release_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long *released, unsigned long *identity)
{
	unsigned long pfn;

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	if (start_pfn < nr_pages)
		*released += xen_release_chunk(
			start_pfn, min(end_pfn, nr_pages));

	*identity += set_phys_range_identity(start_pfn, end_pfn);
}
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				xen_set_identity_and_release_chunk(
					start_pfn, end_pfn, nr_pages,
					&released, &identity);

			start = end;
		}
	}

	if (released)
		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	if (identity)
		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}
static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Populate back the non-RAM pages and E820 gaps that had been
	 * released.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
			max_pfn, &last_pfn, xen_released_pages);

	xen_released_pages -= populated;
	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
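	/*
	 * Worked example (numbers purely illustrative): a domain booted with
	 * 512 MiB (max_pfn = 0x20000 4 KiB pages) and no lowmem limit can
	 * keep at most 10 * 0x20000 = 0x140000 pages (~5 GiB) of extra
	 * memory; released pages beyond that are simply not added to
	 * xen_extra_mem below.
	 */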
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);
	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nt_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nt_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}
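/*
 * Note: xen_memory_setup() above is installed by the Xen startup code as
 * the x86 memory-setup hook (x86_init.resources.memory_setup), so it runs
 * in place of the normal BIOS e820 probe; the string it returns is the
 * origin name printed alongside the e820 map.
 */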
/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;

	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif
	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
void __cpuinit xen_enable_nmi(void)
{
#ifdef CONFIG_X86_64
	if (register_callback(CALLBACKTYPE_nmi, nmi))
		BUG();
#endif
}
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
	xen_enable_nmi();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}