// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) interface
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>

#include <asm/smp.h>

#include "psp-dev.h"
#include "sev-dev.h"

#define DEVICE_NAME		"sev"
#define SEV_FW_FILE		"amd/sev.fw"
#define SEV_FW_NAME_SIZE	64

static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;

static int psp_cmd_timeout = 100;
module_param(psp_cmd_timeout, int, 0644);
MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");

static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");

static bool psp_dead;
static int psp_timeout;

/* Trusted Memory Region (TMR):
 * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
 * to allocate the memory, which will return aligned memory for the specified
 * allocation order.
 */
#define SEV_ES_TMR_SIZE		(1024 * 1024)
static void *sev_es_tmr;

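/*
 * Compare the cached SEV firmware API version (filled in by
 * sev_get_api_version()) against a required major/minor pair.  This
 * dereferences psp_master->sev_data, so it must only be used once the
 * master PSP device has been probed.
 */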
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
	struct sev_device *sev = psp_master->sev_data;

	if (sev->api_major > maj)
		return true;

	if (sev->api_major == maj && sev->api_minor >= min)
		return true;

	return false;
}

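/*
 * Command completion is interrupt driven: __sev_do_cmd_locked() writes the
 * command with SEV_CMDRESP_IOC set, the PSP raises an interrupt when it is
 * done, and sev_irq_handler() checks PSP_CMDRESP_RESP in the
 * command/response register and wakes any waiter sleeping in
 * sev_wait_cmd_ioc().
 */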
static void sev_irq_handler(int irq, void *data, unsigned int status)
{
	struct sev_device *sev = data;
	int reg;

	/* Check if it is command completion: */
	if (!(status & SEV_CMD_COMPLETE))
		return;

	/* Check if it is SEV command completion: */
	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
	if (reg & PSP_CMDRESP_RESP) {
		sev->int_rcvd = 1;
		wake_up(&sev->int_queue);
	}
}

static int sev_wait_cmd_ioc(struct sev_device *sev,
			    unsigned int *reg, unsigned int timeout)
{
	int ret;

	ret = wait_event_timeout(sev->int_queue,
				 sev->int_rcvd, timeout * HZ);
	if (!ret)
		return -ETIMEDOUT;

	*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);

	return 0;
}

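/*
 * Map a SEV command id to the size of its command buffer.  Commands that
 * take no data buffer return 0, which also keeps the debug hex dumps in
 * __sev_do_cmd_locked() from printing anything for them.
 */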
static int sev_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_INIT:			return sizeof(struct sev_data_init);
	case SEV_CMD_PLATFORM_STATUS:		return sizeof(struct sev_user_data_status);
	case SEV_CMD_PEK_CSR:			return sizeof(struct sev_data_pek_csr);
	case SEV_CMD_PEK_CERT_IMPORT:		return sizeof(struct sev_data_pek_cert_import);
	case SEV_CMD_PDH_CERT_EXPORT:		return sizeof(struct sev_data_pdh_cert_export);
	case SEV_CMD_LAUNCH_START:		return sizeof(struct sev_data_launch_start);
	case SEV_CMD_LAUNCH_UPDATE_DATA:	return sizeof(struct sev_data_launch_update_data);
	case SEV_CMD_LAUNCH_UPDATE_VMSA:	return sizeof(struct sev_data_launch_update_vmsa);
	case SEV_CMD_LAUNCH_FINISH:		return sizeof(struct sev_data_launch_finish);
	case SEV_CMD_LAUNCH_MEASURE:		return sizeof(struct sev_data_launch_measure);
	case SEV_CMD_ACTIVATE:			return sizeof(struct sev_data_activate);
	case SEV_CMD_DEACTIVATE:		return sizeof(struct sev_data_deactivate);
	case SEV_CMD_DECOMMISSION:		return sizeof(struct sev_data_decommission);
	case SEV_CMD_GUEST_STATUS:		return sizeof(struct sev_data_guest_status);
	case SEV_CMD_DBG_DECRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_DBG_ENCRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_SEND_START:		return sizeof(struct sev_data_send_start);
	case SEV_CMD_SEND_UPDATE_DATA:		return sizeof(struct sev_data_send_update_data);
	case SEV_CMD_SEND_UPDATE_VMSA:		return sizeof(struct sev_data_send_update_vmsa);
	case SEV_CMD_SEND_FINISH:		return sizeof(struct sev_data_send_finish);
	case SEV_CMD_RECEIVE_START:		return sizeof(struct sev_data_receive_start);
	case SEV_CMD_RECEIVE_FINISH:		return sizeof(struct sev_data_receive_finish);
	case SEV_CMD_RECEIVE_UPDATE_DATA:	return sizeof(struct sev_data_receive_update_data);
	case SEV_CMD_RECEIVE_UPDATE_VMSA:	return sizeof(struct sev_data_receive_update_vmsa);
	case SEV_CMD_LAUNCH_UPDATE_SECRET:	return sizeof(struct sev_data_launch_secret);
	case SEV_CMD_DOWNLOAD_FIRMWARE:		return sizeof(struct sev_data_download_firmware);
	case SEV_CMD_GET_ID:			return sizeof(struct sev_data_get_id);
	default:				return 0;
	}
}

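/*
 * Issue a command to the SEV firmware.  Callers must hold sev_cmd_mutex;
 * sev_do_cmd() below is the locking wrapper.  The sequence is: program the
 * command buffer address registers, write the command id plus
 * SEV_CMDRESP_IOC to the command/response register, wait for the
 * completion interrupt, then decode the firmware status from
 * PSP_CMDRESP_ERR_MASK.
 */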
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	unsigned int phys_lsb, phys_msb;
	unsigned int reg, ret = 0;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	if (psp_dead)
		return -EBUSY;

	sev = psp->sev_data;

	/* Get the physical address of the command buffer */
	phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
	phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;

	dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
		cmd, phys_msb, phys_lsb, psp_timeout);

	print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     sev_cmd_buffer_len(cmd), false);

	iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
	iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);

	sev->int_rcvd = 0;

	reg = cmd;
	reg <<= SEV_CMDRESP_CMD_SHIFT;
	reg |= SEV_CMDRESP_IOC;
	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);

	/* wait for command completion */
	ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
	if (ret) {
		if (psp_ret)
			*psp_ret = 0;

		dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
		psp_dead = true;

		return ret;
	}

	psp_timeout = psp_cmd_timeout;

	if (psp_ret)
		*psp_ret = reg & PSP_CMDRESP_ERR_MASK;

	if (reg & PSP_CMDRESP_ERR_MASK) {
		dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
			cmd, reg & PSP_CMDRESP_ERR_MASK);
		ret = -EIO;
	}

	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     sev_cmd_buffer_len(cmd), false);

	return ret;
}

static int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

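/*
 * Transition the platform to the INIT state.  If a Trusted Memory Region
 * was allocated at probe time, it is handed to the firmware here via
 * SEV_INIT_FLAGS_SEV_ES, and a DF_FLUSH is issued afterwards to prepare
 * for the first guest launch.
 */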
static int __sev_platform_init_locked(int *error)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	int rc = 0;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	sev = psp->sev_data;

	if (sev->state == SEV_STATE_INIT)
		return 0;

	if (sev_es_tmr) {
		u64 tmr_pa;

		/*
		 * Do not include the encryption mask on the physical
		 * address of the TMR (firmware should clear it anyway).
		 */
		tmr_pa = __pa(sev_es_tmr);

		sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
		sev->init_cmd_buf.tmr_address = tmr_pa;
		sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
	}

	rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
	if (rc)
		return rc;

	sev->state = SEV_STATE_INIT;

	/* Prepare for first SEV guest launch after INIT */
	wbinvd_on_all_cpus();
	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
	if (rc)
		return rc;

	dev_dbg(sev->dev, "SEV firmware initialized\n");

	return rc;
}

int sev_platform_init(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_init_locked(error);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(sev_platform_init);

static int __sev_platform_shutdown_locked(int *error)
{
	struct sev_device *sev = psp_master->sev_data;
	int ret;

	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
	if (ret)
		return ret;

	sev->state = SEV_STATE_UNINIT;
	dev_dbg(sev->dev, "SEV firmware shutdown\n");

	return ret;
}

static int sev_platform_shutdown(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_shutdown_locked(NULL);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

static int sev_get_platform_state(int *state, int *error)
{
	struct sev_device *sev = psp_master->sev_data;
	int rc;

	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
				 &sev->status_cmd_buf, error);
	if (rc)
		return rc;

	*state = sev->status_cmd_buf.state;

	return rc;
}

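/*
 * The sev_ioctl_do_*() helpers below are called from sev_ioctl() with
 * sev_cmd_mutex already held, hence the __sev_*_locked() calls.  The
 * 'writable' argument reflects whether /dev/sev was opened with write
 * permission; state-changing commands require it.
 */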
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
	int state, rc;

	if (!writable)
		return -EPERM;

	/*
	 * The SEV spec requires that FACTORY_RESET be issued in the UNINIT
	 * state, so first check whether any guest is active.
	 *
	 * If the FW is in the WORKING state then deny the request; otherwise
	 * issue a SHUTDOWN command to transition INIT -> UNINIT before
	 * issuing FACTORY_RESET.
	 */
	rc = sev_get_platform_state(&state, &argp->error);
	if (rc)
		return rc;

	if (state == SEV_STATE_WORKING)
		return -EBUSY;

	if (state == SEV_STATE_INIT) {
		rc = __sev_platform_shutdown_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
}

static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_status *data = &sev->status_cmd_buf;
	int ret;

	ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
	if (ret)
		return ret;

	if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
		ret = -EFAULT;

	return ret;
}

static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	int rc;

	if (!writable)
		return -EPERM;

	if (sev->state == SEV_STATE_UNINIT) {
		rc = __sev_platform_init_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}

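/*
 * PEK_CSR (like PDH_CERT_EXPORT and GET_ID2 further down) follows a
 * two-call convention: if userspace passes a NULL address or zero length,
 * only the command structure is sent so the firmware can report the
 * required blob length, which is copied back for the caller to allocate
 * and retry with.
 */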
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_csr input;
	struct sev_data_pek_csr *data;
	void __user *input_address;
	void *blob = NULL;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* userspace wants to query CSR length */
	if (!input.address || !input.length)
		goto cmd;

	/* allocate a physically contiguous buffer to store the CSR blob */
	input_address = (void __user *)input.address;
	if (input.length > SEV_FW_BLOB_MAX_SIZE) {
		ret = -EFAULT;
		goto e_free;
	}

	blob = kmalloc(input.length, GFP_KERNEL);
	if (!blob) {
		ret = -ENOMEM;
		goto e_free;
	}

	data->address = __psp_pa(blob);
	data->len = input.length;

cmd:
	if (sev->state == SEV_STATE_UNINIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_blob;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);

	/* If we queried the CSR length, the FW responded with the expected length. */
	input.length = data->len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_blob;
	}

	if (blob) {
		if (copy_to_user(input_address, blob, input.length))
			ret = -EFAULT;
	}

e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}

void *psp_copy_user_blob(u64 uaddr, u32 len)
{
	if (!uaddr || !len)
		return ERR_PTR(-EINVAL);

	/* verify that blob length does not exceed our limit */
	if (len > SEV_FW_BLOB_MAX_SIZE)
		return ERR_PTR(-EINVAL);

	return memdup_user((void __user *)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);

static int sev_get_api_version(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_status *status;
	int error = 0, ret;

	status = &sev->status_cmd_buf;
	ret = sev_platform_status(status, &error);
	if (ret) {
		dev_err(sev->dev,
			"SEV: failed to get status. Error: %#x\n", error);
		return 1;
	}

	sev->api_major = status->api_major;
	sev->api_minor = status->api_minor;
	sev->build = status->build;
	sev->state = status->state;

	return 0;
}

static int sev_get_firmware(struct device *dev,
			    const struct firmware **firmware)
{
	char fw_name_specific[SEV_FW_NAME_SIZE];
	char fw_name_subset[SEV_FW_NAME_SIZE];

	snprintf(fw_name_specific, sizeof(fw_name_specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
		 boot_cpu_data.x86, boot_cpu_data.x86_model);

	snprintf(fw_name_subset, sizeof(fw_name_subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);

	/* Check for SEV FW for a particular model.
	 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
	 *
	 * or
	 *
	 * Check for SEV FW common to a subset of models.
	 * Ex. amd_sev_fam17h_model0xh.sbin for
	 *     Family 17h Model 00h -- Family 17h Model 0Fh
	 *
	 * or
	 *
	 * Fall back to the generic name: sev.fw
	 */
	if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
		return 0;

	return -ENOENT;
}

/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
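/*
 * The downloaded image is staged in one physically contiguous allocation:
 *
 *   | struct sev_data_download_firmware | pad to 32 bytes | firmware image |
 *
 * and data->address points at the 32-byte-aligned firmware portion, as the
 * firmware requires.
 */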
static int sev_update_firmware(struct device *dev)
{
	struct sev_data_download_firmware *data;
	const struct firmware *firmware;
	int ret, error, order;
	struct page *p;
	u64 data_size;

	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
		dev_dbg(dev, "No SEV firmware file present\n");
		return -1;
	}

	/*
	 * SEV FW expects the physical address given to it to be 32
	 * byte aligned. Memory allocated has the structure placed at the
	 * beginning followed by the firmware being passed to the SEV
	 * FW. Allocate enough memory for data structure + alignment
	 * padding + SEV FW.
	 */
	data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);

	order = get_order(firmware->size + data_size);
	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		ret = -1;
		goto fw_err;
	}

	/*
	 * Copy firmware data to a kernel allocated contiguous
	 * memory region.
	 */
	data = page_address(p);
	memcpy(page_address(p) + data_size, firmware->data, firmware->size);

	data->address = __psp_pa(page_address(p) + data_size);
	data->len = firmware->size;

	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
	if (ret)
		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
	else
		dev_info(dev, "SEV firmware update successful\n");

	__free_pages(p, order);

fw_err:
	release_firmware(firmware);

	return ret;
}

static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_cert_import input;
	struct sev_data_pek_cert_import *data;
	void *pek_blob, *oca_blob;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* copy PEK certificate blob from userspace */
	pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
	if (IS_ERR(pek_blob)) {
		ret = PTR_ERR(pek_blob);
		goto e_free;
	}

	data->pek_cert_address = __psp_pa(pek_blob);
	data->pek_cert_len = input.pek_cert_len;

	/* copy OCA certificate blob from userspace */
	oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
	if (IS_ERR(oca_blob)) {
		ret = PTR_ERR(oca_blob);
		goto e_free_pek;
	}

	data->oca_cert_address = __psp_pa(oca_blob);
	data->oca_cert_len = input.oca_cert_len;

	/* If platform is not in INIT state then transition it to INIT */
	if (sev->state != SEV_STATE_INIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_oca;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);

e_free_oca:
	kfree(oca_blob);
e_free_pek:
	kfree(pek_blob);
e_free:
	kfree(data);
	return ret;
}

static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
	struct sev_user_data_get_id2 input;
	struct sev_data_get_id *data;
	void __user *input_address;
	void *id_blob = NULL;
	int ret;

	/* SEV GET_ID is available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	input_address = (void __user *)input.address;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (input.address && input.length) {
		id_blob = kmalloc(input.length, GFP_KERNEL);
		if (!id_blob) {
			ret = -ENOMEM;
			goto e_free;
		}

		data->address = __psp_pa(id_blob);
		data->len = input.length;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);

	/*
	 * Firmware will return the length of the ID value (either the minimum
	 * required length or the actual length written), return it to the user.
	 */
	input.length = data->len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free;
	}

	if (id_blob) {
		if (copy_to_user(input_address, id_blob, data->len)) {
			ret = -EFAULT;
			goto e_free;
		}
	}

e_free:
	kfree(id_blob);
	kfree(data);

	return ret;
}

static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
	struct sev_data_get_id *data;
	u64 data_size, user_size;
	void *id_blob, *mem;
	int ret;

	/* SEV GET_ID available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	/* SEV FW expects the buffer it fills with the ID to be
	 * 8-byte aligned. Memory allocated should be enough to
	 * hold data structure + alignment padding + memory
	 * where SEV FW writes the ID.
	 */
	data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
	user_size = sizeof(struct sev_user_data_get_id);

	mem = kzalloc(data_size + user_size, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	data = mem;
	id_blob = mem + data_size;

	data->address = __psp_pa(id_blob);
	data->len = user_size;

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
	if (!ret) {
		if (copy_to_user((void __user *)argp->data, id_blob, data->len))
			ret = -EFAULT;
	}

	kfree(mem);

	return ret;
}

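/*
 * PDH_CERT_EXPORT returns two blobs, the PDH certificate and its
 * certificate chain, and uses the same query-length convention as PEK_CSR
 * above: omit the buffers to learn the required lengths first.
 */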
static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pdh_cert_export input;
	void *pdh_blob = NULL, *cert_blob = NULL;
	struct sev_data_pdh_cert_export *data;
	void __user *input_cert_chain_address;
	void __user *input_pdh_cert_address;
	int ret;

	/* If platform is not in INIT state then transition it to INIT. */
	if (sev->state != SEV_STATE_INIT) {
		if (!writable)
			return -EPERM;

		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			return ret;
	}

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Userspace wants to query the certificate length. */
	if (!input.pdh_cert_address ||
	    !input.pdh_cert_len ||
	    !input.cert_chain_address)
		goto cmd;

	input_pdh_cert_address = (void __user *)input.pdh_cert_address;
	input_cert_chain_address = (void __user *)input.cert_chain_address;

	/* Allocate a physically contiguous buffer to store the PDH blob. */
	if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
		ret = -EFAULT;
		goto e_free;
	}

	/* Allocate a physically contiguous buffer to store the cert chain blob. */
	if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) {
		ret = -EFAULT;
		goto e_free;
	}

	pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
	if (!pdh_blob) {
		ret = -ENOMEM;
		goto e_free;
	}

	data->pdh_cert_address = __psp_pa(pdh_blob);
	data->pdh_cert_len = input.pdh_cert_len;

	cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
	if (!cert_blob) {
		ret = -ENOMEM;
		goto e_free_pdh;
	}

	data->cert_chain_address = __psp_pa(cert_blob);
	data->cert_chain_len = input.cert_chain_len;

cmd:
	ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);

	/* If we queried the lengths, the FW responded with the expected data. */
	input.cert_chain_len = data->cert_chain_len;
	input.pdh_cert_len = data->pdh_cert_len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_cert;
	}

	if (pdh_blob) {
		if (copy_to_user(input_pdh_cert_address,
				 pdh_blob, input.pdh_cert_len)) {
			ret = -EFAULT;
			goto e_free_cert;
		}
	}

	if (cert_blob) {
		if (copy_to_user(input_cert_chain_address,
				 cert_blob, input.cert_chain_len))
			ret = -EFAULT;
	}

e_free_cert:
	kfree(cert_blob);
e_free_pdh:
	kfree(pdh_blob);
e_free:
	kfree(data);
	return ret;
}

static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sev_issue_cmd input;
	int ret = -EFAULT;
	bool writable = file->f_mode & FMODE_WRITE;

	if (!psp_master || !psp_master->sev_data)
		return -ENODEV;

	if (ioctl != SEV_ISSUE_CMD)
		return -EINVAL;

	if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
		return -EFAULT;

	if (input.cmd > SEV_MAX)
		return -EINVAL;

	mutex_lock(&sev_cmd_mutex);

	switch (input.cmd) {
	case SEV_FACTORY_RESET:
		ret = sev_ioctl_do_reset(&input, writable);
		break;
	case SEV_PLATFORM_STATUS:
		ret = sev_ioctl_do_platform_status(&input);
		break;
	case SEV_PEK_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
		break;
	case SEV_PDH_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
		break;
	case SEV_PEK_CSR:
		ret = sev_ioctl_do_pek_csr(&input, writable);
		break;
	case SEV_PEK_CERT_IMPORT:
		ret = sev_ioctl_do_pek_import(&input, writable);
		break;
	case SEV_PDH_CERT_EXPORT:
		ret = sev_ioctl_do_pdh_export(&input, writable);
		break;
	case SEV_GET_ID:
		pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
		ret = sev_ioctl_do_get_id(&input);
		break;
	case SEV_GET_ID2:
		ret = sev_ioctl_do_get_id2(&input);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
		ret = -EFAULT;
out:
	mutex_unlock(&sev_cmd_mutex);

	return ret;
}

static const struct file_operations sev_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = sev_ioctl,
};

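/*
 * The exported helpers below are thin sev_do_cmd() wrappers for in-kernel
 * users (e.g. KVM's SEV support) that need to drive platform and guest
 * lifecycle commands without going through /dev/sev.
 */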
int sev_platform_status(struct sev_user_data_status *data, int *error)
{
	return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
}
EXPORT_SYMBOL_GPL(sev_platform_status);

int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_deactivate);

int sev_guest_activate(struct sev_data_activate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_activate);

int sev_guest_decommission(struct sev_data_decommission *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_decommission);

int sev_guest_df_flush(int *error)
{
	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
}
EXPORT_SYMBOL_GPL(sev_guest_df_flush);

static void sev_exit(struct kref *ref)
{
	misc_deregister(&misc_dev->misc);
}

static int sev_misc_init(struct sev_device *sev)
{
	struct device *dev = sev->dev;
	int ret;

	/*
	 * SEV feature support can be detected on multiple devices but the SEV
	 * FW commands must be issued on the master. During probe, we do not
	 * know the master, hence we create /dev/sev on the first device probe.
	 * sev_do_cmd() always issues the firmware command on the master
	 * device (psp_master).
	 */
	if (!misc_dev) {
		struct miscdevice *misc;

		misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
		if (!misc_dev)
			return -ENOMEM;

		misc = &misc_dev->misc;
		misc->minor = MISC_DYNAMIC_MINOR;
		misc->name = DEVICE_NAME;
		misc->fops = &sev_fops;

		ret = misc_register(misc);
		if (ret)
			return ret;

		kref_init(&misc_dev->refcount);
	} else {
		kref_get(&misc_dev->refcount);
	}

	init_waitqueue_head(&sev->int_queue);
	sev->misc = misc_dev;
	dev_dbg(dev, "registered SEV device\n");

	return 0;
}

int sev_dev_init(struct psp_device *psp)
{
	struct device *dev = psp->dev;
	struct sev_device *sev;
	int ret = -ENOMEM;

	sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
	if (!sev)
		goto e_err;

	psp->sev_data = sev;

	sev->dev = dev;
	sev->psp = psp;
	sev->io_regs = psp->io_regs;

	sev->vdata = (struct sev_vdata *)psp->vdata->sev;
	if (!sev->vdata) {
		ret = -ENODEV;
		dev_err(dev, "sev: missing driver data\n");
		goto e_err;
	}

	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);

	ret = sev_misc_init(sev);
	if (ret)
		goto e_irq;

	dev_notice(dev, "sev enabled\n");

	return 0;

e_irq:
	psp_clear_sev_irq_handler(psp);
e_err:
	psp->sev_data = NULL;

	dev_notice(dev, "sev initialization failed\n");

	return ret;
}

void sev_dev_destroy(struct psp_device *psp)
{
	struct sev_device *sev = psp->sev_data;

	if (!sev)
		return;

	if (sev->misc)
		kref_put(&misc_dev->refcount, sev_exit);

	psp_clear_sev_irq_handler(psp);
}

int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
				void *data, int *error)
{
	if (!filep || filep->f_op != &sev_fops)
		return -EBADF;

	return sev_do_cmd(cmd, data, error);
}
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);

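/*
 * sev_pci_init() runs once the master PSP device is known: it caches the
 * firmware API version, shuts the firmware down if a previous kernel (e.g.
 * a kexec source) left it initialized, optionally updates the firmware,
 * allocates the SEV-ES TMR and finally issues the platform INIT command.
 */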
void sev_pci_init(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct page *tmr_page;
	int error, rc;

	if (!sev)
		return;

	psp_timeout = psp_probe_timeout;

	if (sev_get_api_version())
		goto err;

	/*
	 * If the platform is not in the UNINIT state then firmware upgrade
	 * and/or the platform INIT command will fail; these commands require
	 * the UNINIT state.
	 *
	 * In a normal boot we should never run into a case where the firmware
	 * is not in the UNINIT state on boot. But in case of kexec boot, a
	 * reboot may not go through a typical shutdown sequence and may leave
	 * the firmware in the INIT or WORKING state.
	 */
	if (sev->state != SEV_STATE_UNINIT) {
		sev_platform_shutdown(NULL);
		sev->state = SEV_STATE_UNINIT;
	}

	if (sev_version_greater_or_equal(0, 15) &&
	    sev_update_firmware(sev->dev) == 0)
		sev_get_api_version();

	/* Obtain the TMR memory area for SEV-ES use */
	tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
	if (tmr_page) {
		sev_es_tmr = page_address(tmr_page);
	} else {
		sev_es_tmr = NULL;
		dev_warn(sev->dev,
			 "SEV: TMR allocation failed, SEV-ES support unavailable\n");
	}

	/* Initialize the platform */
	rc = sev_platform_init(&error);
	if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
		/*
		 * The INIT command returned an integrity check failure
		 * status code, meaning that firmware load and validation
		 * of SEV related persistent data has failed and persistent
		 * state has been erased.  Retrying INIT here should succeed.
		 */
		dev_dbg(sev->dev, "SEV: retrying INIT command\n");
		rc = sev_platform_init(&error);
	}

	if (rc) {
		dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
		return;
	}

	dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
		 sev->api_minor, sev->build);

	return;

err:
	psp_master->sev_data = NULL;
}

void sev_pci_exit(void)
{
	if (!psp_master->sev_data)
		return;

	sev_platform_shutdown(NULL);

	if (sev_es_tmr) {
		/* The TMR area was encrypted, flush it from the cache */
		wbinvd_on_all_cpus();

		free_pages((unsigned long)sev_es_tmr,
			   get_order(SEV_ES_TMR_SIZE));
		sev_es_tmr = NULL;
	}
}