// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021-2022 Intel Corporation */

#define pr_fmt(fmt)	"tdx: " fmt
#include <linux/cpufeature.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/coco.h>
#include <asm/tdx.h>
#include <asm/vmx.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/pgtable.h>
/* MMIO direction */
#define EPT_READ	0
#define EPT_WRITE	1

/* Port I/O direction */
#define PORT_READ	0
#define PORT_WRITE	1
/* See Exit Qualification for I/O Instructions in VMX documentation */
#define VE_IS_IO_IN(e)		((e) & BIT(3))
#define VE_GET_IO_SIZE(e)	(((e) & GENMASK(2, 0)) + 1)
#define VE_GET_PORT_NUM(e)	((e) >> 16)
#define VE_IS_IO_STRING(e)	((e) & BIT(4))
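
/*
 * Worked example (illustrative, not from the VMX documentation): for a
 * byte-sized "outb %al, $0x3f8", the VMM reports exit_qual = 0x03f80000:
 *
 *	VE_GET_IO_SIZE(0x03f80000)	== 1		(bits 2:0 encode size - 1)
 *	VE_IS_IO_IN(0x03f80000)		== 0		(an OUT, not an IN)
 *	VE_IS_IO_STRING(0x03f80000)	== 0		(not INS/OUTS)
 *	VE_GET_PORT_NUM(0x03f80000)	== 0x3f8	(COM1 UART)
 */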
#define ATTR_DEBUG		BIT(0)
#define ATTR_SEPT_VE_DISABLE	BIT(28)

/* TDX Module call error codes */
#define TDCALL_RETURN_CODE(a)	((a) >> 32)
#define TDCALL_INVALID_OPERAND	0xc0000100
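
/*
 * Example (illustrative): the TDX module reports its status class in the
 * upper 32 bits of the TDCALL return value, so a raw return value of
 * 0xc000010000000000 decodes as TDCALL_RETURN_CODE() == 0xc0000100,
 * i.e. TDCALL_INVALID_OPERAND.
 */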
#define TDREPORT_SUBTYPE_0	0
/* Called from __tdx_hypercall() for unrecoverable failure */
noinstr void __noreturn __tdx_hypercall_failed(void)
{
	instrumentation_begin();
	panic("TDVMCALL failed. TDX module bug?");
}
#ifdef CONFIG_KVM_GUEST
long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
		       unsigned long p3, unsigned long p4)
{
	struct tdx_module_args args = {
		.r10 = nr, .r11 = p1, .r12 = p2, .r13 = p3, .r14 = p4,
	};

	return __tdx_hypercall(&args);
}
EXPORT_SYMBOL_GPL(tdx_kvm_hypercall);
#endif
/*
 * Used for TDX guests to make calls directly to the TD module.  This
 * should only be used for calls that have no legitimate reason to fail
 * or where the kernel can not survive the call failing.
 */
static inline void tdcall(u64 fn, struct tdx_module_args *args)
{
	if (__tdcall_ret(fn, args))
		panic("TDCALL %lld failed (Buggy TDX module!)\n", fn);
}
/**
 * tdx_mcall_get_report0() - Wrapper to get TDREPORT0 (a.k.a. TDREPORT
 *                           subtype 0) using TDG.MR.REPORT TDCALL.
 * @reportdata: Address of the input buffer which contains user-defined
 *              REPORTDATA to be included into TDREPORT.
 * @tdreport: Address of the output buffer to store TDREPORT.
 *
 * Refer to section titled "TDG.MR.REPORT leaf" in the TDX Module
 * v1.0 specification for more information on TDG.MR.REPORT TDCALL.
 * It is used in the TDX guest driver module to get the TDREPORT0.
 *
 * Return: 0 on success, -EINVAL for invalid operands, or -EIO on
 * other TDCALL failures.
 */
int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
{
	struct tdx_module_args args = {
		.rcx = virt_to_phys(tdreport),
		.rdx = virt_to_phys(reportdata),
		.r8 = TDREPORT_SUBTYPE_0,
	};
	u64 ret;

	ret = __tdcall(TDG_MR_REPORT, &args);
	if (ret) {
		if (TDCALL_RETURN_CODE(ret) == TDCALL_INVALID_OPERAND)
			return -EINVAL;
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
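
/*
 * Usage sketch (hypothetical caller; the in-tree consumer is the TDX guest
 * driver). TDX_REPORTDATA_LEN and TDX_REPORT_LEN are assumed to come from
 * <uapi/linux/tdx-guest.h>:
 *
 *	u8 reportdata[TDX_REPORTDATA_LEN];	// 64-byte user-defined data
 *	u8 tdreport[TDX_REPORT_LEN];		// 1024-byte TDREPORT output
 *
 *	// Fill reportdata with a caller-chosen nonce, then:
 *	if (tdx_mcall_get_report0(reportdata, tdreport))
 *		return -EIO;
 */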
/**
 * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
 *                         hypercall.
 * @buf: Address of the directly mapped shared kernel buffer which
 *       contains TDREPORT. The same buffer will be used by VMM to
 *       store the generated TD Quote output.
 * @size: size of the tdquote buffer (4KB-aligned).
 *
 * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
 * v1.0 specification for more information on GetQuote hypercall.
 * It is used in the TDX guest driver module to get the TD Quote.
 *
 * Return: 0 on success or error code on failure.
 */
u64 tdx_hcall_get_quote(u8 *buf, size_t size)
{
	/* Since buf is shared memory, set the shared (decrypted) bits */
	return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
}
EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);
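
/*
 * Usage sketch (hypothetical; assumes the caller has already placed the
 * TDREPORT in a buffer converted to shared memory, e.g. via
 * set_memory_decrypted()):
 *
 *	if (tdx_hcall_get_quote(quote_buf, quote_buf_len))
 *		return -EIO;
 *
 * Per the GHCI, the VMM completes GetQuote asynchronously and signals
 * completion through the status header inside the shared buffer.
 */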
static void __noreturn tdx_panic(const char *msg)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = TDVMCALL_REPORT_FATAL_ERROR,
		.r12 = 0, /* Error code: 0 is Panic */
	};
	union {
		/* Define register order according to the GHCI */
		struct { u64 r14, r15, rbx, rdi, rsi, r8, r9, rdx; };

		char str[64];
	} message;

	/* VMM assumes '\0' in byte 65, if the message took all 64 bytes */
	strtomem_pad(message.str, msg, '\0');

	args.r8  = message.r8;
	args.r9  = message.r9;
	args.r14 = message.r14;
	args.r15 = message.r15;
	args.rdi = message.rdi;
	args.rsi = message.rsi;
	args.rbx = message.rbx;
	args.rdx = message.rdx;

	/*
	 * This hypercall should never return and it is not safe
	 * to keep the guest running. Call it forever if it
	 * happens to return.
	 */
	while (1)
		__tdx_hypercall(&args);
}
static void tdx_parse_tdinfo(u64 *cc_mask)
{
	struct tdx_module_args args = {};
	unsigned int gpa_width;
	u64 td_attr;

	/*
	 * TDINFO TDX module call is used to get the TD execution environment
	 * information like GPA width, number of available vcpus, debug mode
	 * information, etc. More details about the ABI can be found in TDX
	 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL
	 * [TDG.VP.INFO].
	 */
	tdcall(TDG_VP_INFO, &args);

	/*
	 * The highest bit of a guest physical address is the "sharing" bit.
	 * Set it for shared pages and clear it for private pages.
	 *
	 * The GPA width that comes out of this call is critical. TDX guests
	 * can not meaningfully run without it.
	 */
	gpa_width = args.rcx & GENMASK(5, 0);
	*cc_mask = BIT_ULL(gpa_width - 1);

	/*
	 * The kernel can not handle #VE's when accessing normal kernel
	 * memory. Ensure that no #VE will be delivered for accesses to
	 * TD-private memory. Only VMM-shared memory (MMIO) will #VE.
	 */
	td_attr = args.rdx;
	if (!(td_attr & ATTR_SEPT_VE_DISABLE)) {
		const char *msg = "TD misconfiguration: SEPT_VE_DISABLE attribute must be set.";

		/* Relax SEPT_VE_DISABLE check for debug TD. */
		if (td_attr & ATTR_DEBUG)
			pr_warn("%s\n", msg);
		else
			tdx_panic(msg);
	}
}
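
/*
 * Example (illustrative): a TD with the common 52-bit GPA width reports
 * RCX[5:0] == 52 from TDG.VP.INFO, so cc_mask == BIT_ULL(51) and bit 51
 * of every guest physical address acts as the shared/private flag.
 */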
/*
 * The TDX module spec states that #VE may be injected for a limited set of
 * reasons:
 *
 *  - Emulation of the architectural #VE injection on EPT violation;
 *
 *  - As a result of guest TD execution of a disallowed instruction,
 *    a disallowed MSR access, or CPUID virtualization;
 *
 *  - A notification to the guest TD about anomalous behavior;
 *
 * The last one is opt-in and is not used by the kernel.
 *
 * The Intel Software Developer's Manual describes the cases in which the
 * instruction length field can be used in section "Information for VM
 * Exits Due to Instruction Execution".
 *
 * For TDX, it ultimately means GET_VEINFO provides reliable instruction length
 * information if #VE occurred due to instruction execution, but not for EPT
 * violations.
 */
static int ve_instr_len(struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
	case EXIT_REASON_CPUID:
	case EXIT_REASON_IO_INSTRUCTION:
		/* It is safe to use ve->instr_len for #VE due to instructions */
		return ve->instr_len;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * For EPT violations, ve->instr_len is not defined. For those,
		 * the kernel must decode instructions manually and should not
		 * be using this function.
		 */
		WARN_ONCE(1, "ve->instr_len is not defined for EPT violations");
		return 0;
	default:
		WARN_ONCE(1, "Unexpected #VE-type: %lld\n", ve->exit_reason);
		return ve->instr_len;
	}
}
static u64 __cpuidle __halt(const bool irq_disabled)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/*
	 * Emulate HLT operation via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section 3.8 TDG.VP.VMCALL<Instruction.HLT>.
	 *
	 * The VMM uses the "IRQ disabled" param to understand IRQ
	 * enabled status (RFLAGS.IF) of the TD guest and to determine
	 * whether or not it should schedule the halted vCPU if an
	 * IRQ becomes pending. E.g. if IRQs are disabled, the VMM
	 * can keep the vCPU in virtual HLT, even if an IRQ is
	 * pending, without hanging/breaking the guest.
	 */
	return __tdx_hypercall(&args);
}

static int handle_halt(struct ve_info *ve)
{
	const bool irq_disabled = irqs_disabled();

	if (__halt(irq_disabled))
		return -EIO;

	return ve_instr_len(ve);
}

void __cpuidle tdx_safe_halt(void)
{
	const bool irq_disabled = false;

	/*
	 * Use WARN_ONCE() to report the failure.
	 */
	if (__halt(irq_disabled))
		WARN_ONCE(1, "HLT instruction emulation failed\n");
}
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_READ),
		.r12 = regs->cx,
	};

	/*
	 * Emulate the MSR read via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "TDG.VP.VMCALL<Instruction.RDMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	regs->ax = lower_32_bits(args.r11);
	regs->dx = upper_32_bits(args.r11);
	return ve_instr_len(ve);
}
static int write_msr(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_MSR_WRITE),
		.r12 = regs->cx,
		.r13 = (u64)regs->dx << 32 | regs->ax,
	};

	/*
	 * Emulate the MSR write via hypercall. More info about ABI
	 * can be found in TDX Guest-Host-Communication Interface
	 * (GHCI) section titled "TDG.VP.VMCALL<Instruction.WRMSR>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	return ve_instr_len(ve);
}
static int handle_cpuid(struct pt_regs *regs, struct ve_info *ve)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_CPUID),
		.r12 = regs->ax,
		.r13 = regs->cx,
	};

	/*
	 * Only allow VMM to control range reserved for hypervisor
	 * communication.
	 *
	 * Return all-zeros for any CPUID outside the range. It matches CPU
	 * behaviour for non-supported leaf.
	 */
	if (regs->ax < 0x40000000 || regs->ax > 0x4FFFFFFF) {
		regs->ax = regs->bx = regs->cx = regs->dx = 0;
		return ve_instr_len(ve);
	}

	/*
	 * Emulate the CPUID instruction via a hypercall. More info about
	 * ABI can be found in TDX Guest-Host-Communication Interface
	 * (GHCI), section titled "VP.VMCALL<Instruction.CPUID>".
	 */
	if (__tdx_hypercall(&args))
		return -EIO;

	/*
	 * As per TDX GHCI CPUID ABI, r12-r15 registers contain contents of
	 * EAX, EBX, ECX, EDX registers after the CPUID instruction execution.
	 * So copy the register contents back to pt_regs.
	 */
	regs->ax = args.r12;
	regs->bx = args.r13;
	regs->cx = args.r14;
	regs->dx = args.r15;

	return ve_instr_len(ve);
}
static bool mmio_read(int size, unsigned long addr, unsigned long *val)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_EPT_VIOLATION),
		.r12 = size,
		.r13 = EPT_READ,
		.r14 = addr,
	};

	if (__tdx_hypercall(&args))
		return false;

	*val = args.r11;
	return true;
}

static bool mmio_write(int size, unsigned long addr, unsigned long val)
{
	return !_tdx_hypercall(hcall_func(EXIT_REASON_EPT_VIOLATION), size,
			       EPT_WRITE, addr, val);
}
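
/*
 * Example (illustrative): a readl() of an ioremap()'ed, VMM-shared MMIO
 * address in a TDX guest takes an EPT-violation #VE, and handle_mmio()
 * below ends up emulating it as mmio_read(4, ve->gpa, &val).
 */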
static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
{
	unsigned long *reg, val, vaddr;
	char buffer[MAX_INSN_SIZE];
	enum insn_mmio_type mmio;
	struct insn insn = {};
	int size, extend_size;
	u8 extend_val = 0;

	/* Only in-kernel MMIO is supported */
	if (WARN_ON_ONCE(user_mode(regs)))
		return -EFAULT;

	if (copy_from_kernel_nofault(buffer, (void *)regs->ip, MAX_INSN_SIZE))
		return -EFAULT;

	if (insn_decode(&insn, buffer, MAX_INSN_SIZE, INSN_MODE_64))
		return -EINVAL;

	mmio = insn_decode_mmio(&insn, &size);
	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
		return -EINVAL;

	if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
		reg = insn_get_modrm_reg_ptr(&insn, regs);
		if (!reg)
			return -EINVAL;
	}

	/*
	 * Reject EPT violation #VEs that split pages.
	 *
	 * MMIO accesses are supposed to be naturally aligned and therefore
	 * never cross page boundaries. Seeing split page accesses indicates
	 * a bug or a load_unaligned_zeropad() that stepped into an MMIO page.
	 *
	 * load_unaligned_zeropad() will recover using exception fixups.
	 */
	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
		return -EFAULT;

	/* Handle writes first */
	switch (mmio) {
	case INSN_MMIO_WRITE:
		memcpy(&val, reg, size);
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_WRITE_IMM:
		val = insn.immediate.value;
		if (!mmio_write(size, ve->gpa, val))
			return -EIO;
		return insn.length;
	case INSN_MMIO_READ:
	case INSN_MMIO_READ_ZERO_EXTEND:
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Reads are handled below */
		break;
	case INSN_MMIO_MOVS:
	case INSN_MMIO_DECODE_FAILED:
		/*
		 * MMIO was accessed with an instruction that could not be
		 * decoded or handled properly. It was likely not using io.h
		 * helpers or accessed MMIO accidentally.
		 */
		return -EINVAL;
	default:
		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
		return -EINVAL;
	}

	/* Handle reads */
	if (!mmio_read(size, ve->gpa, &val))
		return -EIO;

	switch (mmio) {
	case INSN_MMIO_READ:
		/* Zero-extend for 32-bit operation */
		extend_size = size == 4 ? sizeof(*reg) : 0;
		break;
	case INSN_MMIO_READ_ZERO_EXTEND:
		/* Zero extend based on operand size */
		extend_size = insn.opnd_bytes;
		break;
	case INSN_MMIO_READ_SIGN_EXTEND:
		/* Sign extend based on operand size */
		extend_size = insn.opnd_bytes;
		if (size == 1 && val & BIT(7))
			extend_val = 0xFF;
		else if (size > 1 && val & BIT(15))
			extend_val = 0xFF;
		break;
	default:
		/* All other cases have to be covered with the first switch() */
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (extend_size)
		memset(reg, extend_val, extend_size);
	memcpy(reg, &val, size);
	return insn.length;
}
static bool handle_in(struct pt_regs *regs, int size, int port)
{
	struct tdx_module_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
		.r12 = size,
		.r13 = PORT_READ,
		.r14 = port,
	};
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);
	bool success;

	/*
	 * Emulate the I/O read via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	success = !__tdx_hypercall(&args);

	/* Update part of the register affected by the emulated instruction */
	regs->ax &= ~mask;
	if (success)
		regs->ax |= args.r11 & mask;

	return success;
}

static bool handle_out(struct pt_regs *regs, int size, int port)
{
	u64 mask = GENMASK(BITS_PER_BYTE * size, 0);

	/*
	 * Emulate the I/O write via hypercall. More info about ABI can be found
	 * in TDX Guest-Host-Communication Interface (GHCI) section titled
	 * "TDG.VP.VMCALL<Instruction.IO>".
	 */
	return !_tdx_hypercall(hcall_func(EXIT_REASON_IO_INSTRUCTION), size,
			       PORT_WRITE, port, regs->ax & mask);
}
/*
 * Emulate I/O using hypercall.
 *
 * Assumes the IO instruction was using ax, which is enforced
 * by the standard io.h macros.
 *
 * Return the length of the handled instruction on success or
 * -EIO on failure.
 */
static int handle_io(struct pt_regs *regs, struct ve_info *ve)
{
	u32 exit_qual = ve->exit_qual;
	int size, port;
	bool in, ret;

	if (VE_IS_IO_STRING(exit_qual))
		return -EIO;

	in   = VE_IS_IO_IN(exit_qual);
	size = VE_GET_IO_SIZE(exit_qual);
	port = VE_GET_PORT_NUM(exit_qual);

	if (in)
		ret = handle_in(regs, size, port);
	else
		ret = handle_out(regs, size, port);
	if (!ret)
		return -EIO;

	return ve_instr_len(ve);
}
/*
 * Early #VE exception handler. Only handles a subset of port I/O.
 * Intended only for earlyprintk. Returns false on failure.
 */
__init bool tdx_early_handle_ve(struct pt_regs *regs)
{
	struct ve_info ve;
	int insn_len;

	tdx_get_ve_info(&ve);

	if (ve.exit_reason != EXIT_REASON_IO_INSTRUCTION)
		return false;

	insn_len = handle_io(regs, &ve);
	if (insn_len < 0)
		return false;

	regs->ip += insn_len;
	return true;
}
void tdx_get_ve_info(struct ve_info *ve)
{
	struct tdx_module_args args = {};

	/*
	 * Called during #VE handling to retrieve the #VE info from the
	 * TDX module.
	 *
	 * This has to be called early in #VE handling. A "nested" #VE which
	 * occurs before this will raise a #DF and is not recoverable.
	 *
	 * The call retrieves the #VE info from the TDX module, which also
	 * clears the "#VE valid" flag. This must be done before anything else
	 * because any #VE that occurs while the valid flag is set will lead to
	 * #DF.
	 *
	 * Note, the TDX module treats virtual NMIs as inhibited if the #VE
	 * valid flag is set. It means that NMI=>#VE will not result in a #DF.
	 */
	tdcall(TDG_VP_VEINFO_GET, &args);

	/* Transfer the output parameters */
	ve->exit_reason = args.rcx;
	ve->exit_qual   = args.rdx;
	ve->gla         = args.r8;
	ve->gpa         = args.r9;
	ve->instr_len   = lower_32_bits(args.r10);
	ve->instr_info  = upper_32_bits(args.r10);
}
/*
 * Handle the user initiated #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}
static inline bool is_private_gpa(u64 gpa)
{
	return gpa == cc_mkenc(gpa);
}
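
/*
 * Example (illustrative): with cc_mask == BIT_ULL(51), cc_mkenc() clears
 * bit 51. A GPA of BIT_ULL(51) | 0x1000 is thus shared (the helper returns
 * false), while a GPA of 0x1000 is private (it returns true).
 */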
/*
 * Handle the kernel #VE.
 *
 * On success, returns the number of bytes RIP should be incremented (>=0)
 * or -errno on error.
 */
static int virt_exception_kernel(struct pt_regs *regs, struct ve_info *ve)
{
	switch (ve->exit_reason) {
	case EXIT_REASON_HLT:
		return handle_halt(ve);
	case EXIT_REASON_MSR_READ:
		return read_msr(regs, ve);
	case EXIT_REASON_MSR_WRITE:
		return write_msr(regs, ve);
	case EXIT_REASON_CPUID:
		return handle_cpuid(regs, ve);
	case EXIT_REASON_EPT_VIOLATION:
		if (is_private_gpa(ve->gpa))
			panic("Unexpected EPT-violation on private memory.");
		return handle_mmio(regs, ve);
	case EXIT_REASON_IO_INSTRUCTION:
		return handle_io(regs, ve);
	default:
		pr_warn("Unexpected #VE: %lld\n", ve->exit_reason);
		return -EIO;
	}
}
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
{
	int insn_len;

	if (user_mode(regs))
		insn_len = virt_exception_user(regs, ve);
	else
		insn_len = virt_exception_kernel(regs, ve);
	if (insn_len < 0)
		return false;

	/* After successful #VE handling, move the IP */
	regs->ip += insn_len;

	return true;
}
static bool tdx_tlb_flush_required(bool private)
{
	/*
	 * TDX guest is responsible for flushing TLB on private->shared
	 * transition. VMM is responsible for flushing on shared->private.
	 *
	 * The VMM _can't_ flush private addresses as it can't generate PAs
	 * with the guest's HKID. Shared memory isn't subject to integrity
	 * checking, i.e. the VMM doesn't need to flush for its own protection.
	 *
	 * There's no need to flush when converting from shared to private,
	 * as flushing is the VMM's responsibility in this case, e.g. it must
	 * flush to avoid integrity failures in the face of a buggy or
	 * malicious guest.
	 */
	return !private;
}

static bool tdx_cache_flush_required(void)
{
	/*
	 * AMD SME/SEV can avoid cache flushing if HW enforces cache coherence.
	 * TDX doesn't have such capability.
	 *
	 * Flush cache unconditionally.
	 */
	return true;
}
/*
 * Notify the VMM about page mapping conversion. More info about ABI
 * can be found in TDX Guest-Host-Communication Interface (GHCI),
 * section "TDG.VP.VMCALL<MapGPA>".
 */
static bool tdx_map_gpa(phys_addr_t start, phys_addr_t end, bool enc)
{
	/* Retrying the hypercall a second time should succeed; use 3 just in case */
	const int max_retries_per_page = 3;
	int retry_count = 0;

	if (!enc) {
		/* Set the shared (decrypted) bits: */
		start |= cc_mkdec(0);
		end   |= cc_mkdec(0);
	}

	while (retry_count < max_retries_per_page) {
		struct tdx_module_args args = {
			.r10 = TDX_HYPERCALL_STANDARD,
			.r11 = TDVMCALL_MAP_GPA,
			.r12 = start,
			.r13 = end - start };

		u64 map_fail_paddr;
		u64 ret = __tdx_hypercall(&args);

		if (ret != TDVMCALL_STATUS_RETRY)
			return !ret;

		/*
		 * The guest must retry the operation for the pages in the
		 * region starting at the GPA specified in R11. R11 comes
		 * from the untrusted VMM. Sanity check it.
		 */
		map_fail_paddr = args.r11;
		if (map_fail_paddr < start || map_fail_paddr >= end)
			return false;

		/* "Consume" a retry without forward progress */
		if (map_fail_paddr == start) {
			retry_count++;
			continue;
		}

		start = map_fail_paddr;
		retry_count = 0;
	}

	return false;
}
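
/*
 * Retry walkthrough (illustrative): when converting [start, start + 4M)
 * and the VMM returns TDVMCALL_STATUS_RETRY with R11 = start + 2M, the
 * loop resumes at the failing GPA with a fresh retry budget. Only three
 * consecutive retries with no forward progress fail the conversion.
 */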
/*
 * Inform the VMM of the guest's intent for this physical page: shared with
 * the VMM or private to the guest. The VMM is expected to change its mapping
 * of the page in response.
 */
static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
{
	phys_addr_t start = __pa(vaddr);
	phys_addr_t end = __pa(vaddr + numpages * PAGE_SIZE);

	if (!tdx_map_gpa(start, end, enc))
		return false;

	/* shared->private conversion requires memory to be accepted before use */
	if (enc)
		return tdx_accept_memory(start, end);

	return true;
}
static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
					  bool enc)
{
	/*
	 * Only handle shared->private conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);

	return true;
}

static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
					 bool enc)
{
	/*
	 * Only handle private->shared conversion here.
	 * See the comment in tdx_early_init().
	 */
	if (!enc)
		return tdx_enc_status_changed(vaddr, numpages, enc);

	return true;
}
void __init tdx_early_init(void)
{
	struct tdx_module_args args = {
		.rdx = TDCS_NOTIFY_ENABLES,
		.r9 = -1ULL,
	};
	u64 cc_mask;
	u32 eax, sig[3];

	cpuid_count(TDX_CPUID_LEAF_ID, 0, &eax, &sig[0], &sig[2], &sig[1]);

	if (memcmp(TDX_IDENT, sig, sizeof(sig)))
		return;

	setup_force_cpu_cap(X86_FEATURE_TDX_GUEST);

	/* TSC is the only reliable clock in TDX guest */
	setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);

	cc_vendor = CC_VENDOR_INTEL;
	tdx_parse_tdinfo(&cc_mask);
	cc_set_mask(cc_mask);

	/* Kernel does not use NOTIFY_ENABLES and does not need random #VEs */
	tdcall(TDG_VM_WR, &args);

	/*
	 * All bits above GPA width are reserved and kernel treats shared bit
	 * as flag, not as part of physical address.
	 *
	 * Adjust physical mask to only cover valid GPA bits.
	 */
	physical_mask &= cc_mask - 1;

	/*
	 * The kernel mapping should match the TDX metadata for the page.
	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
	 * owned by the caller and can catch even _momentary_ mismatches. Bad
	 * things happen on mismatch:
	 *
	 *   - Private mapping => Shared Page  == Guest shutdown
	 *   - Shared mapping  => Private Page == Recoverable #VE
	 *
	 * guest.enc_status_change_prepare() converts the page from
	 * shared=>private before the mapping becomes private.
	 *
	 * guest.enc_status_change_finish() converts the page from
	 * private=>shared after the mapping becomes shared.
	 *
	 * In both cases there is a temporary shared mapping to a private page,
	 * which can result in a #VE. But, there is never a private mapping to
	 * a shared page.
	 */
	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;

	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;

	/*
	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
	 * bringup low level code. That raises #VE which cannot be handled
	 * there.
	 *
	 * Intel-TDX has a secure RDMSR hypercall, but that needs to be
	 * implemented separately in the low level startup ASM code.
	 * Until that is in place, disable parallel bringup for TDX.
	 */
	x86_cpuinit.parallel_bringup = false;

	pr_info("Guest detected\n");
}