// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/hyperv.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL  1

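/*
 * Layout of the GHCB page when it carries a Hyper-V hypercall
 * (ghcb_usage == GHCB_USAGE_HYPERV_CALL): the hypercall input data,
 * output GPA, and control/status words overlay the page in place of
 * the usual GHCB save area. The union must be exactly one Hyper-V
 * page in size (see the BUILD_BUG_ON in hv_ghcb_msr_read()).
 */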
union hv_ghcb {
        struct ghcb ghcb;
        struct {
                u64 hypercalldata[509];
                u64 outputgpa;
                union {
                        union {
                                struct {
                                        u32 callcode        : 16;
                                        u32 isfast          : 1;
                                        u32 reserved1       : 14;
                                        u32 isnested        : 1;
                                        u32 countofelements : 12;
                                        u32 reserved2       : 4;
                                        u32 repstartindex   : 12;
                                        u32 reserved3       : 4;
                                };
                                u64 asuint64;
                        } hypercallinput;
                        union {
                                struct {
                                        u16 callstatus;
                                        u16 reserved1;
                                        u32 elementsprocessed : 12;
                                        u32 reserved2         : 20;
                                };
                                u64 asuint64;
                        } hypercalloutput;
                };
                u64 reserved2;
        } hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

static u16 hv_ghcb_version __ro_after_init;

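/*
 * hv_ghcb_hypercall - Issue a Hyper-V hypercall through the GHCB page.
 *
 * The per-CPU GHCB page is shared with the hypervisor: the input data
 * is copied into it, the hypercall is triggered with VMGEXIT(), and
 * the completion status is read back from the hypercall output area.
 * Requires a valid per-CPU GHCB page and is not NMI safe.
 */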
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;
        u64 status;

        if (!hv_ghcb_pg)
                return -EFAULT;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return -EFAULT;
        }

        hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
        hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

        hv_ghcb->hypercall.outputgpa = (u64)output;
        hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
        hv_ghcb->hypercall.hypercallinput.callcode = control;

        if (input_size)
                memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

        VMGEXIT();

        hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
        memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
               sizeof(hv_ghcb->ghcb.save.valid_bitmap));

        status = hv_ghcb->hypercall.hypercalloutput.callstatus;

        local_irq_restore(flags);

        return status;
}

static inline u64 rd_ghcb_msr(void)
{
        return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
        native_wrmsrl(MSR_AMD64_SEV_ES_GHCB, val);
}

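/*
 * hv_ghcb_hv_call - Perform a single VMGEXIT-based GHCB call.
 *
 * The low 32 bits of sw_exit_info_1 carry the VMM's error code on
 * return; any non-zero value means the call failed.
 */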
static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
                                   u64 exit_info_1, u64 exit_info_2)
{
        /* Fill in protocol and format specifiers */
        ghcb->protocol_version = hv_ghcb_version;
        ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

        ghcb_set_sw_exit_code(ghcb, exit_code);
        ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
        ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

        VMGEXIT();

        if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
                return ES_VMM_ERROR;
        else
                return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
        u64 val = GHCB_MSR_TERM_REQ;

        /* Tell the hypervisor what went wrong. */
        val |= GHCB_SEV_TERM_REASON(set, reason);

        /* Request Guest Termination from Hypervisor */
        wr_ghcb_msr(val);
        VMGEXIT();

        while (true)
                asm volatile("hlt\n" : : : "memory");
}

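/*
 * hv_ghcb_negotiate_protocol - Negotiate the GHCB protocol version
 * with the hypervisor via the GHCB MSR protocol, then restore the
 * saved GHCB page GPA in the MSR. Returns false if no mutually
 * supported protocol version exists.
 */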
bool hv_ghcb_negotiate_protocol(void)
{
        u64 ghcb_gpa;
        u64 val;

        /* Save ghcb page gpa. */
        ghcb_gpa = rd_ghcb_msr();

        /* Do the GHCB protocol version negotiation */
        wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
        VMGEXIT();
        val = rd_ghcb_msr();

        if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
                return false;

        if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
            GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
                return false;

        hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
                             GHCB_PROTOCOL_MAX);

        /* Write ghcb page back after negotiating protocol. */
        wr_ghcb_msr(ghcb_gpa);
        VMGEXIT();

        return true;
}

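/*
 * hv_ghcb_msr_write - Write an MSR through the GHCB page.
 *
 * In an SEV-ES/SNP guest, MSR accesses exit to the hypervisor, so the
 * MSR is passed through the GHCB save area (RCX = MSR index, RAX/RDX
 * = low/high 32 bits of the value) and a SVM_EXIT_MSR call is issued;
 * exit_info_1 == 1 selects a write.
 */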
void hv_ghcb_msr_write(u64 msr, u64 value)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;

        if (!hv_ghcb_pg)
                return;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return;
        }

        ghcb_set_rcx(&hv_ghcb->ghcb, msr);
        ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
        ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

        if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
                pr_warn("Failed to write MSR %llx via GHCB.\n", msr);

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);

void hv_ghcb_msr_read(u64 msr, u64 *value)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;

        /* Check size of union hv_ghcb here. */
        BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

        if (!hv_ghcb_pg)
                return;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return;
        }

        ghcb_set_rcx(&hv_ghcb->ghcb, msr);
        if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
                pr_warn("Failed to read MSR %llx via GHCB.\n", msr);
        else
                *value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
                        | ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
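
/*
 * Example (illustrative only, not part of this file): reading an MSR
 * through the GHCB path. HV_X64_MSR_GUEST_OS_ID is just an assumed
 * example index; any MSR would do.
 *
 *	u64 guest_id = 0;
 *
 *	hv_ghcb_msr_read(HV_X64_MSR_GUEST_OS_ID, &guest_id);
 *	pr_info("guest os id: 0x%llx\n", guest_id);
 */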

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hvcall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host,
 * and the guest must set memory visible to the host via hvcall
 * before sharing that memory with the host.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
                           enum hv_mem_host_visibility visibility)
{
        struct hv_gpa_range_for_visibility **input_pcpu, *input;
        u16 pages_processed;
        u64 hv_status;
        unsigned long flags;

        /* no-op if partition isolation is not enabled */
        if (!hv_is_isolation_supported())
                return 0;

        if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
                pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
                        HV_MAX_MODIFY_GPA_REP_COUNT);
                return -EINVAL;
        }

        local_irq_save(flags);
        input_pcpu = (struct hv_gpa_range_for_visibility **)
                        this_cpu_ptr(hyperv_pcpu_input_arg);
        input = *input_pcpu;
        if (unlikely(!input)) {
                local_irq_restore(flags);
                return -EINVAL;
        }

        input->partition_id = HV_PARTITION_ID_SELF;
        input->host_visibility = visibility;
        input->reserved0 = 0;
        input->reserved1 = 0;
        memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
        hv_status = hv_do_rep_hypercall(
                        HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
                        0, input, &pages_processed);
        local_irq_restore(flags);

        if (hv_result_success(hv_status))
                return 0;
        else
                return -EFAULT;
}

/*
 * hv_vtom_set_host_visibility - Set the specified memory visible to
 * the host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host,
 * and the guest must set memory visible to the host via hvcall
 * before sharing that memory with the host. This function is a
 * wrapper around hv_mark_gpa_visibility() that takes a memory base
 * and size.
 */
static bool hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
        enum hv_mem_host_visibility visibility = enc ?
                        VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
        u64 *pfn_array;
        int ret = 0;
        bool result = true;
        int i, pfn;

        pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
        if (!pfn_array)
                return false;

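        /*
         * Batch the PFNs into hypercall-sized chunks: fill pfn_array
         * and issue hv_mark_gpa_visibility() whenever the array holds
         * HV_MAX_MODIFY_GPA_REP_COUNT entries or the last page of the
         * buffer has been reached.
         */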
        for (i = 0, pfn = 0; i < pagecount; i++) {
                pfn_array[pfn] = virt_to_hvpfn((void *)kbuffer + i * HV_HYP_PAGE_SIZE);
                pfn++;

                if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
                        ret = hv_mark_gpa_visibility(pfn, pfn_array,
                                                     visibility);
                        if (ret) {
                                result = false;
                                goto err_free_pfn_array;
                        }
                        pfn = 0;
                }
        }

 err_free_pfn_array:
        kfree(pfn_array);
        return result;
}

static bool hv_vtom_tlb_flush_required(bool private)
{
        return true;
}

static bool hv_vtom_cache_flush_required(void)
{
        return false;
}

static bool hv_is_private_mmio(u64 addr)
{
        /*
         * Hyper-V always provides a single IO-APIC in a guest VM.
         * When a paravisor is used, it is emulated by the paravisor
         * in the guest context and must be mapped private.
         */
        if (addr >= HV_IOAPIC_BASE_ADDRESS &&
            addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
                return true;

        /* Same with a vTPM */
        if (addr >= VTPM_BASE_ADDRESS &&
            addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
                return true;

        return false;
}

void __init hv_vtom_init(void)
{
        /*
         * By design, a VM using vTOM doesn't see the SEV setting,
         * so SEV initialization is bypassed and sev_status isn't set.
         * Set it here to indicate a vTOM VM.
         */
        sev_status = MSR_AMD64_SNP_VTOM;
        cc_vendor = CC_VENDOR_AMD;
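
        /*
         * ms_hyperv.shared_gpa_boundary is the vTOM boundary: a guest
         * physical address with the boundary bit set is shared with
         * the host. Use that bit as the confidential computing mask
         * and exclude it from the usable physical address bits.
         */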
        cc_set_mask(ms_hyperv.shared_gpa_boundary);
        physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

        x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
        x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
        x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
        x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;

        /* Set WB as the default cache mode. */
        mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* CONFIG_AMD_MEM_ENCRYPT */

enum hv_isolation_type hv_get_isolation_type(void)
{
        if (!(ms_hyperv.priv_high & HV_ISOLATION))
                return HV_ISOLATION_TYPE_NONE;
        return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check whether the system runs in a
 * Hyper-V isolation VM.
 */
bool hv_is_isolation_supported(void)
{
        if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
                return false;

        if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
                return false;

        return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check whether the system runs in an AMD
 * SEV-SNP based isolation VM.
 */
bool hv_isolation_type_snp(void)
{
        return static_branch_unlikely(&isolation_type_snp);
}