// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/pagemap.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>
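
/*
 * The host maps a "magic page" (a struct kvm_vcpu_arch_shared) into the
 * guest at the very top of the address space, i.e. at -4096.  magic_var()
 * turns a field name into the guest-virtual address of that field.
 */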
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4
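
/*
 * kvm_tmp is scratch space in .bss that holds the patched-up copies of the
 * emulation templates; kvm_tmp_index tracks how much of it has been handed
 * out by kvm_alloc().  kvm_patching_worked is cleared as soon as any single
 * patch cannot be applied.
 */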
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;
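
/* Replace a single instruction in place and make the change visible to instruction fetch. */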
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */
	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}
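
/*
 * The kvm_emulate_* symbols below are assembly templates (see kvm_emul.S).
 * Each kvm_patch_ins_* helper copies a template into kvm_tmp, fixes up the
 * register it operates on and the branch back to the instruction following
 * the patch site, and then replaces the original privileged instruction
 * with a branch into the copied template.
 */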
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

static void kvm_map_magic_page(void *data)
{
	u32 *features = data;
	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}
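
/*
 * Look at one kernel instruction and, if it is one of the privileged
 * operations we know how to avoid, patch it: loads/stores of SPRs and the
 * MSR become accesses to the magic page, tlbsync becomes a nop, and
 * mtmsr/mtmsrd/wrtee/mtsrin are redirected to emulation code in kvm_tmp.
 */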
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
	}
}
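
/*
 * kvm_template_start/end bracket the emulation template code itself, so the
 * scan below can skip over it instead of patching the patch code.
 */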
extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
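
/*
 * Whatever part of kvm_tmp was not consumed by kvm_alloc() is handed back
 * to the page allocator once patching is done.
 */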
static __init void kvm_free_tmp(void)
{
	/*
	 * Inform kmemleak about the hole in the .bss section since the
	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
	 */
	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);