/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <[email protected]>
 *    Zhenyu Wang <[email protected]>
 *    Xiao Zheng <[email protected]>
 *
 * Contributors:
 *    Min He <[email protected]>
 *    Bing Niu <[email protected]>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
        if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
                        && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
                gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
                                vgpu->id, addr, size);
                return false;
        }
        return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
        if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
                        "invalid guest gmadr %llx\n", g_addr))
                return -EACCES;

        if (vgpu_gmadr_is_aperture(vgpu, g_addr))
                *h_addr = vgpu_aperture_gmadr_base(vgpu)
                          + (g_addr - vgpu_aperture_offset(vgpu));
        else
                *h_addr = vgpu_hidden_gmadr_base(vgpu)
                          + (g_addr - vgpu_hidden_offset(vgpu));
        return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
        if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
                        "invalid host gmadr %llx\n", h_addr))
                return -EACCES;

        if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
                *g_addr = vgpu_aperture_gmadr_base(vgpu)
                          + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
        else
                *g_addr = vgpu_hidden_gmadr_base(vgpu)
                          + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
        return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
                             unsigned long *h_index)
{
        u64 h_addr;
        int ret;

        ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
                                       &h_addr);
        if (ret)
                return ret;

        *h_index = h_addr >> GTT_PAGE_SHIFT;
        return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                             unsigned long *g_index)
{
        u64 g_addr;
        int ret;

        ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
                                       &g_addr);
        if (ret)
                return ret;

        *g_index = g_addr >> GTT_PAGE_SHIFT;
        return 0;
}

#define gtt_type_is_entry(type) \
        (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
         && type != GTT_TYPE_PPGTT_PTE_ENTRY \
         && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
        (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
        (type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
        (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
        (e)->type = t; \
        (e)->pdev = p; \
        memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when its PSE bit is set
 *
 * If the given type doesn't carry a particular kind of information,
 * GTT_TYPE_INVALID is returned. For example, an L4 root entry has no PSE
 * bit, so asking for its PSE type yields GTT_TYPE_INVALID; likewise, a PTE
 * page table has no next-level page table, so asking for its next level
 * type also yields GTT_TYPE_INVALID. This is useful when traversing a
 * page table.
 */

struct gtt_type_table_entry {
        int entry_type;
        int next_pt_type;
        int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
        [type] = { \
                .entry_type = e_type, \
                .next_pt_type = npt_type, \
                .pse_entry_type = pse_type, \
        }

static struct gtt_type_table_entry gtt_type_table[] = {
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
                        GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
                        GTT_TYPE_PPGTT_PML4_PT,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
                        GTT_TYPE_PPGTT_PML4_ENTRY,
                        GTT_TYPE_PPGTT_PDP_PT,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
                        GTT_TYPE_PPGTT_PML4_ENTRY,
                        GTT_TYPE_PPGTT_PDP_PT,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
                        GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
                        GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_INVALID),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
                        GTT_TYPE_PPGTT_PDP_ENTRY,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_PPGTT_PTE_1G_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
                        GTT_TYPE_GGTT_PTE,
                        GTT_TYPE_INVALID,
                        GTT_TYPE_INVALID),
};
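
/*
 * Worked example (values taken straight from gtt_type_table above): walking
 * down from a PML4 page table, get_entry_type(GTT_TYPE_PPGTT_PML4_PT) gives
 * GTT_TYPE_PPGTT_PML4_ENTRY, and get_next_pt_type() of that entry type gives
 * GTT_TYPE_PPGTT_PDP_PT. A PDE entry with the PSE bit set maps a 2M page:
 * get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY) == GTT_TYPE_PPGTT_PTE_2M_ENTRY.
 * The walk terminates at a PTE page table, whose next_pt_type is
 * GTT_TYPE_INVALID.
 */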

static inline int get_next_pt_type(int type)
{
        return gtt_type_table[type].next_pt_type;
}

static inline int get_entry_type(int type)
{
        return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
        return gtt_type_table[type].pse_entry_type;
}

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
        u64 pte;

#ifdef readq
        pte = readq(addr);
#else
        pte = ioread32(addr);
        pte |= (u64)ioread32(addr + 4) << 32;
#endif
        return pte;
}

static void write_pte64(struct drm_i915_private *dev_priv,
                unsigned long index, u64 pte)
{
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

#ifdef writeq
        writeq(pte, addr);
#else
        iowrite32((u32)pte, addr);
        iowrite32(pte >> 32, addr + 4);
#endif
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
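
/*
 * Worked example for the 32-bit fallback path above: a PTE value of
 * 0x0000001234567003 is written as low dword 0x34567003 at addr and high
 * dword 0x00000012 at addr + 4 (the GSM is little-endian). Note that the two
 * halves are not written atomically; readq/writeq are used whenever the
 * architecture provides them.
 */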
268
269static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
270 struct intel_gvt_gtt_entry *e,
271 unsigned long index, bool hypervisor_access, unsigned long gpa,
272 struct intel_vgpu *vgpu)
273{
274 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
275 int ret;
276
277 if (WARN_ON(info->gtt_entry_size != 8))
278 return e;
279
280 if (hypervisor_access) {
281 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
282 (index << info->gtt_entry_size_shift),
283 &e->val64, 8);
284 WARN_ON(ret);
285 } else if (!pt) {
286 e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
287 } else {
288 e->val64 = *((u64 *)pt + index);
289 }
290 return e;
291}
292
293static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
294 struct intel_gvt_gtt_entry *e,
295 unsigned long index, bool hypervisor_access, unsigned long gpa,
296 struct intel_vgpu *vgpu)
297{
298 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
299 int ret;
300
301 if (WARN_ON(info->gtt_entry_size != 8))
302 return e;
303
304 if (hypervisor_access) {
305 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
306 (index << info->gtt_entry_size_shift),
307 &e->val64, 8);
308 WARN_ON(ret);
309 } else if (!pt) {
310 write_pte64(vgpu->gvt->dev_priv, index, e->val64);
311 } else {
312 *((u64 *)pt + index) = e->val64;
313 }
314 return e;
315}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)

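/*
 * Worked example: with GTT_HAW == 46, ADDR_4K_MASK covers address bits
 * [46:12], ADDR_2M_MASK bits [46:21] and ADDR_1G_MASK bits [46:30]. So for
 * a 4K PTE with val64 == 0x0000001234567003, (val64 & ADDR_4K_MASK) >> 12
 * yields pfn 0x1234567; the low flag bits (present/RW, 0x003 here) are
 * untouched. Note that get_pfn()/set_pfn() below always shift by 12, i.e.
 * the pfn is expressed in 4K units even for 2M/1G entries.
 */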
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
        unsigned long pfn;

        if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
                pfn = (e->val64 & ADDR_1G_MASK) >> 12;
        else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
                pfn = (e->val64 & ADDR_2M_MASK) >> 12;
        else
                pfn = (e->val64 & ADDR_4K_MASK) >> 12;
        return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
        if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
                e->val64 &= ~ADDR_1G_MASK;
                pfn &= (ADDR_1G_MASK >> 12);
        } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
                e->val64 &= ~ADDR_2M_MASK;
                pfn &= (ADDR_2M_MASK >> 12);
        } else {
                e->val64 &= ~ADDR_4K_MASK;
                pfn &= (ADDR_4K_MASK >> 12);
        }

        e->val64 |= (pfn << 12);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
        /* Entry doesn't have PSE bit. */
        if (get_pse_type(e->type) == GTT_TYPE_INVALID)
                return false;

        e->type = get_entry_type(e->type);
        if (!(e->val64 & (1 << 7)))
                return false;

        e->type = get_pse_type(e->type);
        return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
        /*
         * i915 writes PDP root pointer registers without the present bit
         * set, and that still works, so root pointer entries need to be
         * treated specially.
         */
        if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
                        || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
                return (e->val64 != 0);
        else
                return (e->val64 & (1 << 0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
        e->val64 &= ~(1 << 0);
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
        unsigned long x = (gma >> GTT_PAGE_SHIFT);

        trace_gma_index(__func__, gma, x);
        return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
        unsigned long x = (exp); \
        trace_gma_index(__func__, gma, x); \
        return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

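/*
 * Worked example of the gen8 4-level GMA layout decoded by the helpers
 * above: for gma == 0x100c0805678,
 *   pml4 index = gma >> 39 & 0x1ff = 2
 *   pdp  index = gma >> 30 & 0x1ff = 3
 *   pde  index = gma >> 21 & 0x1ff = 4
 *   pte  index = gma >> 12 & 0x1ff = 5
 * and the low 12 bits (0x678) are the offset inside the 4K page.
 */
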
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
        .get_entry = gtt_get_entry64,
        .set_entry = gtt_set_entry64,
        .clear_present = gtt_entry_clear_present,
        .test_present = gen8_gtt_test_present,
        .test_pse = gen8_gtt_test_pse,
        .get_pfn = gen8_gtt_get_pfn,
        .set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
        .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
        .gma_to_pte_index = gen8_gma_to_pte_index,
        .gma_to_pde_index = gen8_gma_to_pde_index,
        .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
        .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
        .gma_to_pml4_index = gen8_gma_to_pml4_index,
};

static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
                struct intel_gvt_gtt_entry *m)
{
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        unsigned long gfn, mfn;

        *m = *p;

        if (!ops->test_present(p))
                return 0;

        gfn = ops->get_pfn(p);

        mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
                gvt_err("fail to translate gfn: 0x%lx\n", gfn);
                return -ENXIO;
        }

        ops->set_pfn(m, mfn);
        return 0;
}

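/*
 * Illustration with made-up numbers: if the guest entry carries gfn 0x1234
 * and the hypervisor maps that gfn to mfn 0xabcd, the machine entry *m ends
 * up identical to *p except that its pfn field now holds 0xabcd; the flag
 * bits (present, RW, cache type, ...) are preserved.
 */
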
/*
 * MM helpers.
 */
struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
                void *page_table, struct intel_gvt_gtt_entry *e,
                unsigned long index)
{
        struct intel_gvt *gvt = mm->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

        e->type = mm->page_table_entry_type;

        ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
        ops->test_pse(e);
        return e;
}

struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
                void *page_table, struct intel_gvt_gtt_entry *e,
                unsigned long index)
{
        struct intel_gvt *gvt = mm->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

        return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
                struct intel_vgpu_ppgtt_spt *spt,
                void *page_table, int type,
                struct intel_gvt_gtt_entry *e, unsigned long index,
                bool guest)
{
        struct intel_gvt *gvt = spt->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

        e->type = get_entry_type(type);

        if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
                return e;

        ops->get_entry(page_table, e, index, guest,
                        spt->guest_page.gfn << GTT_PAGE_SHIFT,
                        spt->vgpu);
        ops->test_pse(e);
        return e;
}

static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
                struct intel_vgpu_ppgtt_spt *spt,
                void *page_table, int type,
                struct intel_gvt_gtt_entry *e, unsigned long index,
                bool guest)
{
        struct intel_gvt *gvt = spt->vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

        if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
                return e;

        return ops->set_entry(page_table, e, index, guest,
                        spt->guest_page.gfn << GTT_PAGE_SHIFT,
                        spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
        ppgtt_spt_get_entry(spt, NULL, \
                spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
        ppgtt_spt_set_entry(spt, NULL, \
                spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
        ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
                spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
        ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
                spt->shadow_page.type, e, index, false)

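/*
 * Note how the macro arguments select the access path in gtt_get_entry64()
 * above: the guest variants pass page_table == NULL and guest == true, so
 * the entry is read from/written to guest memory through the hypervisor at
 * GPA (gfn << GTT_PAGE_SHIFT) + (index << 3) (entries are 8 bytes), while
 * the shadow variants dereference the shadow page's kernel mapping
 * (shadow_page.vaddr) directly.
 */
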
/**
 * intel_vgpu_init_guest_page - init a guest page data structure
 * @vgpu: a vGPU
 * @p: a guest page data structure
 * @gfn: guest memory page frame number
 * @handler: function to be called when the target guest memory page has
 * been modified.
 * @data: opaque data passed to @handler
 *
 * This function is called when a user wants to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *p,
                unsigned long gfn,
                int (*handler)(void *, u64, void *, int),
                void *data)
{
        INIT_HLIST_NODE(&p->node);

        p->writeprotection = false;
        p->gfn = gfn;
        p->handler = handler;
        p->data = data;
        p->oos_page = NULL;
        p->write_cnt = 0;

        hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
        return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page);

/**
 * intel_vgpu_clean_guest_page - release the resource owned by guest page data
 * structure
 * @vgpu: a vGPU
 * @p: a tracked guest page
 *
 * This function is called when a user stops tracking a guest memory page.
 */
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *p)
{
        if (!hlist_unhashed(&p->node))
                hash_del(&p->node);

        if (p->oos_page)
                detach_oos_page(vgpu, p->oos_page);

        if (p->writeprotection)
                intel_gvt_hypervisor_unset_wp_page(vgpu, p);
}

/**
 * intel_vgpu_find_guest_page - find a guest page data structure by GFN.
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when emulation logic wants to know if a trapped GFN
 * is a tracked guest page.
 *
 * Returns:
 * Pointer to guest page data structure, NULL if failed.
 */
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
                struct intel_vgpu *vgpu, unsigned long gfn)
{
        struct intel_vgpu_guest_page *p;

        hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
                p, node, gfn) {
                if (p->gfn == gfn)
                        return p;
        }
        return NULL;
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_shadow_page *p, int type)
{
        p->vaddr = page_address(p->page);
        p->type = type;

        INIT_HLIST_NODE(&p->node);

        p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
        if (p->mfn == INTEL_GVT_INVALID_ADDR)
                return -EFAULT;

        hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
        return 0;
}

static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
{
        if (!hlist_unhashed(&p->node))
                hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
                struct intel_vgpu *vgpu, unsigned long mfn)
{
        struct intel_vgpu_shadow_page *p;

        hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
                p, node, mfn) {
                if (p->mfn == mfn)
                        return p;
        }
        return NULL;
}

#define guest_page_to_ppgtt_spt(ptr) \
        container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
        container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
        struct intel_vgpu_ppgtt_spt *spt;

        spt = kzalloc(sizeof(*spt), gfp_mask);
        if (!spt)
                return NULL;

        spt->shadow_page.page = alloc_page(gfp_mask);
        if (!spt->shadow_page.page) {
                kfree(spt);
                return NULL;
        }
        return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
        __free_page(spt->shadow_page.page);
        kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

        clean_shadow_page(&spt->shadow_page);
        intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
        list_del_init(&spt->post_shadow_list);

        free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
        struct hlist_node *n;
        struct intel_vgpu_shadow_page *sp;
        int i;

        hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
                ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
                u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *gp, u64 pa,
                void *p_data, int bytes)
{
        struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        if (!gpt->writeprotection)
                return -EINVAL;

        return ppgtt_handle_guest_write_page_table_bytes(gp,
                pa, p_data, bytes);
}

static int reclaim_one_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
                struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
        struct intel_vgpu_ppgtt_spt *spt = NULL;
        int ret;

retry:
        spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
        if (!spt) {
                if (reclaim_one_mm(vgpu->gvt))
                        goto retry;

                gvt_err("fail to allocate ppgtt shadow page\n");
                return ERR_PTR(-ENOMEM);
        }

        spt->vgpu = vgpu;
        spt->guest_page_type = type;
        atomic_set(&spt->refcount, 1);
        INIT_LIST_HEAD(&spt->post_shadow_list);

        /*
         * TODO: the guest page type may differ from the shadow page type,
         * once PSE pages are supported.
         */
        ret = init_shadow_page(vgpu, &spt->shadow_page, type);
        if (ret) {
                gvt_err("fail to initialize shadow page for spt\n");
                goto err;
        }

        ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
                        gfn, ppgtt_write_protection_handler, NULL);
        if (ret) {
                gvt_err("fail to initialize guest page for spt\n");
                goto err;
        }

        trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
        return spt;
err:
        ppgtt_free_shadow_page(spt);
        return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
                struct intel_vgpu *vgpu, unsigned long mfn)
{
        struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

        if (p)
                return shadow_page_to_ppgtt_spt(p);

        gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
                        vgpu->id, mfn);
        return NULL;
}

#define pt_entry_size_shift(spt) \
        ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
        (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
        for (i = 0; i < pt_entries(spt); i++) \
                if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
                        ppgtt_get_guest_entry(spt, e, i)))

#define for_each_present_shadow_entry(spt, e, i) \
        for (i = 0; i < pt_entries(spt); i++) \
                if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
                        ppgtt_get_shadow_entry(spt, e, i)))

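/*
 * Usage sketch (the same pattern appears in ppgtt_populate_shadow_page()
 * below): with 4K pages and 8-byte entries, pt_entries(spt) is 512, so
 *
 *      struct intel_gvt_gtt_entry e;
 *      unsigned long i;
 *
 *      for_each_present_guest_entry(spt, &e, i) {
 *              // reached only for entries whose present bit is set
 *      }
 *
 * Beware the bare for/if construction: these macros must not be followed
 * by an else clause.
 */
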
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        int v = atomic_read(&spt->refcount);

        trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

        atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
                struct intel_gvt_gtt_entry *e)
{
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s;
        intel_gvt_gtt_type_t cur_pt_type;

        if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
                return -EINVAL;

        if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
                && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                cur_pt_type = get_next_pt_type(e->type) + 1;
                if (ops->get_pfn(e) ==
                        vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
                        return 0;
        }
        s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
        if (!s) {
                gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
                                vgpu->id, ops->get_pfn(e));
                return -ENXIO;
        }
        return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        struct intel_gvt_gtt_entry e;
        unsigned long index;
        int ret;
        int v = atomic_read(&spt->refcount);

        trace_spt_change(spt->vgpu->id, "die", spt,
                        spt->guest_page.gfn, spt->shadow_page.type);

        trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

        if (atomic_dec_return(&spt->refcount) > 0)
                return 0;

        if (gtt_type_is_pte_pt(spt->shadow_page.type))
                goto release;

        for_each_present_shadow_entry(spt, &e, index) {
                if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
                        gvt_err("GVT doesn't support pse bit for now\n");
                        return -EINVAL;
                }
                ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
                                spt->vgpu, &e);
                if (ret)
                        goto fail;
        }
release:
        trace_spt_change(spt->vgpu->id, "release", spt,
                        spt->guest_page.gfn, spt->shadow_page.type);
        ppgtt_free_shadow_page(spt);
        return 0;
fail:
        gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
                        spt->vgpu->id, spt, e.val64, e.type);
        return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
                struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s = NULL;
        struct intel_vgpu_guest_page *g;
        int ret;

        if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
                ret = -EINVAL;
                goto fail;
        }

        g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
        if (g) {
                s = guest_page_to_ppgtt_spt(g);
                ppgtt_get_shadow_page(s);
        } else {
                int type = get_next_pt_type(we->type);

                s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
                if (IS_ERR(s)) {
                        ret = PTR_ERR(s);
                        goto fail;
                }

                ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
                if (ret)
                        goto fail;

                ret = ppgtt_populate_shadow_page(s);
                if (ret)
                        goto fail;

                trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
                        s->shadow_page.type);
        }
        return s;
fail:
        gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
                        vgpu->id, s, we->val64, we->type);
        return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
                struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
        struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

        se->type = ge->type;
        se->val64 = ge->val64;

        ops->set_pfn(se, s->shadow_page.mfn);
}
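
/*
 * Illustration with made-up numbers: if the guest PDE is
 * (0x5000 << 12) | 0x3 (pointing at the guest page table at gfn 0x5000)
 * and the corresponding shadow page lives at mfn 0x8000, the generated
 * shadow entry becomes (0x8000 << 12) | 0x3: same type and flag bits, but
 * the hardware now walks the shadow page table instead of the guest's.
 */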

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_vgpu_ppgtt_spt *s;
        struct intel_gvt_gtt_entry se, ge;
        unsigned long i;
        int ret;

        trace_spt_change(spt->vgpu->id, "born", spt,
                        spt->guest_page.gfn, spt->shadow_page.type);

        if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
                for_each_present_guest_entry(spt, &ge, i) {
                        ret = gtt_entry_p2m(vgpu, &ge, &se);
                        if (ret)
                                goto fail;
                        ppgtt_set_shadow_entry(spt, &se, i);
                }
                return 0;
        }

        for_each_present_guest_entry(spt, &ge, i) {
                if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
                        gvt_err("GVT doesn't support pse bit now\n");
                        ret = -EINVAL;
                        goto fail;
                }

                s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
                if (IS_ERR(s)) {
                        ret = PTR_ERR(s);
                        goto fail;
                }
                ppgtt_get_shadow_entry(spt, &se, i);
                ppgtt_generate_shadow_entry(&se, s, &ge);
                ppgtt_set_shadow_entry(spt, &se, i);
        }
        return 0;
fail:
        gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
                        vgpu->id, spt, ge.val64, ge.type);
        return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
                struct intel_gvt_gtt_entry *we, unsigned long index)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry e;
        int ret;

        trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type,
                        we->val64, index);

        ppgtt_get_shadow_entry(spt, &e, index);
        if (!ops->test_present(&e))
                return 0;

        if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
                return 0;

        if (gtt_type_is_pt(get_next_pt_type(we->type))) {
                struct intel_vgpu_guest_page *g =
                        intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
                if (!g) {
                        gvt_err("fail to find guest page\n");
                        ret = -ENXIO;
                        goto fail;
                }
                ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g));
                if (ret)
                        goto fail;
        }
        ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
        ppgtt_set_shadow_entry(spt, &e, index);
        return 0;
fail:
        gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
                        vgpu->id, spt, we->val64, we->type);
        return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
                struct intel_gvt_gtt_entry *we, unsigned long index)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_entry m;
        struct intel_vgpu_ppgtt_spt *s;
        int ret;

        trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
                        we->val64, index);

        if (gtt_type_is_pt(get_next_pt_type(we->type))) {
                s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
                if (IS_ERR(s)) {
                        ret = PTR_ERR(s);
                        goto fail;
                }
                ppgtt_get_shadow_entry(spt, &m, index);
                ppgtt_generate_shadow_entry(&m, s, we);
                ppgtt_set_shadow_entry(spt, &m, index);
        } else {
                ret = gtt_entry_p2m(vgpu, we, &m);
                if (ret)
                        goto fail;
                ppgtt_set_shadow_entry(spt, &m, index);
        }
        return 0;
fail:
        gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
                spt, we->val64, we->type);
        return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *spt =
                guest_page_to_ppgtt_spt(oos_page->guest_page);
        struct intel_gvt_gtt_entry old, new, m;
        int index;
        int ret;

        trace_oos_change(vgpu->id, "sync", oos_page->id,
                        oos_page->guest_page, spt->guest_page_type);

        old.type = new.type = get_entry_type(spt->guest_page_type);
        old.val64 = new.val64 = 0;

        for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
                index++) {
                ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
                ops->get_entry(NULL, &new, index, true,
                        oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);

                if (old.val64 == new.val64
                        && !test_and_clear_bit(index, spt->post_shadow_bitmap))
                        continue;

                trace_oos_sync(vgpu->id, oos_page->id,
                                oos_page->guest_page, spt->guest_page_type,
                                new.val64, index);

                ret = gtt_entry_p2m(vgpu, &new, &m);
                if (ret)
                        return ret;

                ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
                ppgtt_set_shadow_entry(spt, &m, index);
        }

        oos_page->guest_page->write_cnt = 0;
        list_del_init(&spt->post_shadow_list);
        return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_vgpu_ppgtt_spt *spt =
                guest_page_to_ppgtt_spt(oos_page->guest_page);

        trace_oos_change(vgpu->id, "detach", oos_page->id,
                        oos_page->guest_page, spt->guest_page_type);

        oos_page->guest_page->write_cnt = 0;
        oos_page->guest_page->oos_page = NULL;
        oos_page->guest_page = NULL;

        list_del_init(&oos_page->vm_list);
        list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

        return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page,
                struct intel_vgpu_guest_page *gpt)
{
        struct intel_gvt *gvt = vgpu->gvt;
        int ret;

        ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
                oos_page->mem, GTT_PAGE_SIZE);
        if (ret)
                return ret;

        oos_page->guest_page = gpt;
        gpt->oos_page = oos_page;

        list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

        trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
                        gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
        return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *gpt)
{
        int ret;

        ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
        if (ret)
                return ret;

        trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
                        gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

        list_del_init(&gpt->oos_page->vm_list);
        return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *gpt)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
        int ret;

        WARN(oos_page, "shadow PPGTT page already has an oos page\n");

        if (list_empty(&gtt->oos_page_free_list_head)) {
                oos_page = container_of(gtt->oos_page_use_list_head.next,
                        struct intel_vgpu_oos_page, list);
                ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
                if (ret)
                        return ret;
                ret = detach_oos_page(vgpu, oos_page);
                if (ret)
                        return ret;
        } else
                oos_page = container_of(gtt->oos_page_free_list_head.next,
                        struct intel_vgpu_oos_page, list);
        return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
                struct intel_vgpu_guest_page *gpt)
{
        struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

        if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
                return -EINVAL;

        trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
                        gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

        list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
        return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow page tables of the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_oos_page *oos_page;
        int ret;

        if (!enable_out_of_sync)
                return 0;

        list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
                oos_page = container_of(pos,
                                struct intel_vgpu_oos_page, vm_list);
                ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * The heart of the PPGTT shadow page table support.
 */
static int ppgtt_handle_guest_write_page_table(
                struct intel_vgpu_guest_page *gpt,
                struct intel_gvt_gtt_entry *we, unsigned long index)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry ge;

        int old_present, new_present;
        int ret;

        ppgtt_get_guest_entry(spt, &ge, index);

        old_present = ops->test_present(&ge);
        new_present = ops->test_present(we);

        ppgtt_set_guest_entry(spt, we, index);

        if (old_present) {
                ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index);
                if (ret)
                        goto fail;
        }
        if (new_present) {
                ret = ppgtt_handle_guest_entry_add(gpt, we, index);
                if (ret)
                        goto fail;
        }
        return 0;
fail:
        gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
                        vgpu->id, spt, we->val64, we->type);
        return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
        return enable_out_of_sync
                && gtt_type_is_pte_pt(
                        guest_page_to_ppgtt_spt(gpt)->guest_page_type)
                && gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
                unsigned long index)
{
        set_bit(index, spt->post_shadow_bitmap);
        if (!list_empty(&spt->post_shadow_list))
                return;

        list_add_tail(&spt->post_shadow_list,
                        &spt->vgpu->gtt.post_shadow_list_head);
}
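
/*
 * The out-of-sync heuristic in can_do_out_of_sync() above: only leaf (PTE)
 * page tables qualify, and only once a guest page has been written at least
 * twice. The idea is that a guest busily rewriting a PTE page is cheaper to
 * un-write-protect and batch-resync later (sync_oos_page()) than to trap
 * and shadow every single write.
 */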

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_ppgtt_spt *spt;
        struct intel_gvt_gtt_entry ge, e;
        unsigned long index;
        int ret;

        list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
                spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
                                post_shadow_list);

                for_each_set_bit(index, spt->post_shadow_bitmap,
                                GTT_ENTRY_NUM_IN_ONE_PAGE) {
                        ppgtt_get_guest_entry(spt, &ge, index);
                        e = ge;
                        e.val64 = 0;
                        ppgtt_set_guest_entry(spt, &e, index);

                        ret = ppgtt_handle_guest_write_page_table(
                                        &spt->guest_page, &ge, index);
                        if (ret)
                                return ret;
                        clear_bit(index, spt->post_shadow_bitmap);
                }
                list_del_init(&spt->post_shadow_list);
        }
        return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
                u64 pa, void *p_data, int bytes)
{
        struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        struct intel_gvt_gtt_entry we;
        unsigned long index;
        int ret;

        index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

        ppgtt_get_guest_entry(spt, &we, index);
        memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)),
                p_data, bytes);

        ops->test_pse(&we);

        if (bytes == info->gtt_entry_size) {
                ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
                if (ret)
                        return ret;
        } else {
                struct intel_gvt_gtt_entry ge;

                ppgtt_get_guest_entry(spt, &ge, index);

                if (!test_bit(index, spt->post_shadow_bitmap)) {
                        ret = ppgtt_handle_guest_entry_removal(gpt,
                                        &ge, index);
                        if (ret)
                                return ret;
                }

                ppgtt_set_post_shadow(spt, index);
                ppgtt_set_guest_entry(spt, &we, index);
        }

        if (!enable_out_of_sync)
                return 0;

        gpt->write_cnt++;

        if (gpt->oos_page)
                ops->set_entry(gpt->oos_page->mem, &we, index,
                                false, 0, vgpu);

        if (can_do_out_of_sync(gpt)) {
                if (!gpt->oos_page)
                        ppgtt_allocate_oos_page(vgpu, gpt);

                ret = ppgtt_set_guest_page_oos(vgpu, gpt);
                if (ret < 0)
                        return ret;
        }
        return 0;
}
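
/*
 * Worked example of the partial-write path above: with 8-byte entries, a
 * 4-byte guest write at page offset 0x14 hits index (0x14 >> 3) == 2 and
 * patches bytes 4..7 of that entry (pa & 7 == 4). Since only half of the
 * entry has arrived, the update is recorded in post_shadow_bitmap and the
 * real shadowing is deferred to intel_vgpu_flush_post_shadow(), by which
 * time the guest has normally completed the entry with a second write.
 */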

/*
 * mm page table allocation policy for bdw+
 *  - for ggtt, only virtual page table will be allocated.
 *  - for ppgtt, dedicated virtual/shadow page table will be allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        void *mem;

        if (mm->type == INTEL_GVT_MM_PPGTT) {
                mm->page_table_entry_cnt = 4;
                mm->page_table_entry_size = mm->page_table_entry_cnt *
                        info->gtt_entry_size;
                mem = kzalloc(mm->has_shadow_page_table ?
                        mm->page_table_entry_size * 2
                                : mm->page_table_entry_size,
                        GFP_ATOMIC);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
                if (!mm->has_shadow_page_table)
                        return 0;
                mm->shadow_page_table = mem + mm->page_table_entry_size;
        } else if (mm->type == INTEL_GVT_MM_GGTT) {
                mm->page_table_entry_cnt =
                        (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
                mm->page_table_entry_size = mm->page_table_entry_cnt *
                        info->gtt_entry_size;
                mem = vzalloc(mm->page_table_entry_size);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
        }
        return 0;
}
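
/*
 * Layout example for the PPGTT case above: with 4 root entries of 8 bytes
 * each and a shadow page table present, a single 64-byte kzalloc() holds
 * both tables back to back: virtual_page_table at mem and shadow_page_table
 * at mem + 32. The GGTT case instead vzalloc()s one 8-byte virtual entry
 * per GM page, e.g. 2M entries (16MB) for an 8GB GM space.
 */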

static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
        if (mm->type == INTEL_GVT_MM_PPGTT) {
                kfree(mm->virtual_page_table);
        } else if (mm->type == INTEL_GVT_MM_GGTT) {
                if (mm->virtual_page_table)
                        vfree(mm->virtual_page_table);
        }
        mm->virtual_page_table = mm->shadow_page_table = NULL;
}

static void invalidate_mm(struct intel_vgpu_mm *mm)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
        struct intel_gvt_gtt_entry se;
        int i;

        if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
                return;

        for (i = 0; i < mm->page_table_entry_cnt; i++) {
                ppgtt_get_shadow_root_entry(mm, &se, i);
                if (!ops->test_present(&se))
                        continue;
                ppgtt_invalidate_shadow_page_by_shadow_entry(
                                vgpu, &se);
                se.val64 = 0;
                ppgtt_set_shadow_root_entry(mm, &se, i);

                trace_gpt_change(vgpu->id, "destroy root pointer",
                                NULL, se.type, se.val64, i);
        }
        mm->shadowed = false;
}

/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for a vGPU.
 *
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
        struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;

        if (!mm->initialized)
                goto out;

        list_del(&mm->list);
        list_del(&mm->lru_list);

        if (mm->has_shadow_page_table)
                invalidate_mm(mm);

        gtt->mm_free_page_table(mm);
out:
        kfree(mm);
}

static int shadow_mm(struct intel_vgpu_mm *mm)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
        struct intel_vgpu_ppgtt_spt *spt;
        struct intel_gvt_gtt_entry ge, se;
        int i;
        int ret;

        if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
                return 0;

        mm->shadowed = true;

        for (i = 0; i < mm->page_table_entry_cnt; i++) {
                ppgtt_get_guest_root_entry(mm, &ge, i);
                if (!ops->test_present(&ge))
                        continue;

                trace_gpt_change(vgpu->id, __func__, NULL,
                                ge.type, ge.val64, i);

                spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
                if (IS_ERR(spt)) {
                        gvt_err("fail to populate guest root pointer\n");
                        ret = PTR_ERR(spt);
                        goto fail;
                }
                ppgtt_generate_shadow_entry(&se, spt, &ge);
                ppgtt_set_shadow_root_entry(mm, &se, i);

                trace_gpt_change(vgpu->id, "populate root pointer",
                                NULL, se.type, se.val64, i);
        }
        return 0;
fail:
        invalidate_mm(mm);
        return ret;
}

/**
 * intel_vgpu_create_mm - create a mm object for a vGPU
 * @vgpu: a vGPU
 * @mm_type: mm object type, should be PPGTT or GGTT
 * @virtual_page_table: page table root pointers. Could be NULL if the user
 * wants to populate the shadow later.
 * @page_table_level: describe the page table level of the mm object
 * @pde_base_index: pde root pointer base in GGTT MMIO.
 *
 * This function is used to create a mm object for a vGPU.
 *
 * Returns:
 * The created mm object on success, ERR_PTR() with a negative error code
 * if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
                int mm_type, void *virtual_page_table, int page_table_level,
                u32 pde_base_index)
{
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_vgpu_mm *mm;
        int ret;

        mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
        if (!mm) {
                ret = -ENOMEM;
                goto fail;
        }

        mm->type = mm_type;

        if (page_table_level == 1)
                mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
        else if (page_table_level == 3)
                mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
        else if (page_table_level == 4)
                mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
        else {
                WARN_ON(1);
                ret = -EINVAL;
                goto fail;
        }

        mm->page_table_level = page_table_level;
        mm->pde_base_index = pde_base_index;

        mm->vgpu = vgpu;
        mm->has_shadow_page_table = (mm_type == INTEL_GVT_MM_PPGTT);

        kref_init(&mm->ref);
        atomic_set(&mm->pincount, 0);
        INIT_LIST_HEAD(&mm->list);
        INIT_LIST_HEAD(&mm->lru_list);
        list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

        ret = gtt->mm_alloc_page_table(mm);
        if (ret) {
                gvt_err("fail to allocate page table for mm\n");
                goto fail;
        }

        mm->initialized = true;

        if (virtual_page_table)
                memcpy(mm->virtual_page_table, virtual_page_table,
                                mm->page_table_entry_size);

        if (mm->has_shadow_page_table) {
                ret = shadow_mm(mm);
                if (ret)
                        goto fail;
                list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
        }
        return mm;
fail:
        gvt_err("fail to create mm\n");
        if (mm)
                intel_gvt_mm_unreference(mm);
        return ERR_PTR(ret);
}

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user is done using a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
        if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
                return;

        atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
        int ret;

        if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
                return 0;

        atomic_inc(&mm->pincount);

        if (!mm->shadowed) {
                ret = shadow_mm(mm);
                if (ret)
                        return ret;
        }

        list_del_init(&mm->lru_list);
        list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
        return 0;
}

static int reclaim_one_mm(struct intel_gvt *gvt)
{
        struct intel_vgpu_mm *mm;
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, lru_list);

                if (mm->type != INTEL_GVT_MM_PPGTT)
                        continue;
                if (atomic_read(&mm->pincount))
                        continue;

                list_del_init(&mm->lru_list);
                invalidate_mm(mm);
                return 1;
        }
        return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
                struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s;

        if (WARN_ON(!mm->has_shadow_page_table))
                return -EINVAL;

        s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
        if (!s)
                return -ENXIO;

        if (!guest)
                ppgtt_get_shadow_entry(s, e, index);
        else
                ppgtt_get_guest_entry(s, e, index);
        return 0;
}

/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. Could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
        struct intel_vgpu *vgpu = mm->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
        struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
        unsigned long gpa = INTEL_GVT_INVALID_ADDR;
        unsigned long gma_index[4];
        struct intel_gvt_gtt_entry e;
        int i, index;
        int ret;

        if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
                return INTEL_GVT_INVALID_ADDR;

        if (mm->type == INTEL_GVT_MM_GGTT) {
                if (!vgpu_gmadr_is_valid(vgpu, gma))
                        goto err;

                ggtt_get_guest_entry(mm, &e,
                        gma_ops->gma_to_ggtt_pte_index(gma));
                gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
                        + (gma & ~GTT_PAGE_MASK);

                trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
                return gpa;
        }

        switch (mm->page_table_level) {
        case 4:
                ppgtt_get_shadow_root_entry(mm, &e, 0);
                gma_index[0] = gma_ops->gma_to_pml4_index(gma);
                gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
                gma_index[2] = gma_ops->gma_to_pde_index(gma);
                gma_index[3] = gma_ops->gma_to_pte_index(gma);
                index = 4;
                break;
        case 3:
                ppgtt_get_shadow_root_entry(mm, &e,
                                gma_ops->gma_to_l3_pdp_index(gma));
                gma_index[0] = gma_ops->gma_to_pde_index(gma);
                gma_index[1] = gma_ops->gma_to_pte_index(gma);
                index = 2;
                break;
        case 2:
                ppgtt_get_shadow_root_entry(mm, &e,
                                gma_ops->gma_to_pde_index(gma));
                gma_index[0] = gma_ops->gma_to_pte_index(gma);
                index = 1;
                break;
        default:
                WARN_ON(1);
                goto err;
        }

        /* walk into the shadow page table and get gpa from guest entry */
        for (i = 0; i < index; i++) {
                ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
                        (i == index - 1));
                if (ret)
                        goto err;
        }

        gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
                + (gma & ~GTT_PAGE_MASK);

        trace_gma_translate(vgpu->id, "ppgtt", 0,
                        mm->page_table_level, gma, gpa);
        return gpa;
err:
        gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
        return INTEL_GVT_INVALID_ADDR;
}
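
/*
 * Worked example for the 4-level walk above, reusing gma == 0x100c0805678:
 * the walk starts at shadow root entry 0, descends via pml4 index 2,
 * pdp index 3 and pde index 4, then reads the *guest* entry at pte index 5
 * on the last step (the "i == index - 1" flag), so the returned address is
 * a GPA, not a machine address: gpa = (pfn_of_that_pte << 12) + 0x678.
 */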

static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
        unsigned int off, void *p_data, unsigned int bytes)
{
        struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        unsigned long index = off >> info->gtt_entry_size_shift;
        struct intel_gvt_gtt_entry e;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        ggtt_get_guest_entry(ggtt_mm, &e, index);
        memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
                bytes);
        return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
        void *p_data, unsigned int bytes)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        int ret;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        off -= info->gtt_start_offset;
        ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
        return ret;
}

static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        void *p_data, unsigned int bytes)
{
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
        struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
        unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
        unsigned long gma;
        struct intel_gvt_gtt_entry e, m;
        int ret;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        gma = g_gtt_index << GTT_PAGE_SHIFT;

        /* the VM may configure the whole GM space when ballooning is used */
        if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
                        "vgpu%d: found oob ggtt write, offset %x\n",
                        vgpu->id, off)) {
                return 0;
        }

        ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);

        if (ops->test_present(&e)) {
                ret = gtt_entry_p2m(vgpu, &e, &m);
                if (ret) {
                        gvt_err("vgpu%d: fail to translate guest gtt entry\n",
                                vgpu->id);
                        return ret;
                }
        } else {
                m = e;
                m.val64 = 0;
        }

        ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
        ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
        return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        void *p_data, unsigned int bytes)
{
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        int ret;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

        off -= info->gtt_start_offset;
        ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
        return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                intel_gvt_gtt_type_t type)
{
        struct intel_vgpu_gtt *gtt = &vgpu->gtt;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int page_entry_num = GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
        struct page *scratch_pt;
        unsigned long mfn;
        int i;
        void *p;

        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                return -EINVAL;

        scratch_pt = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!scratch_pt) {
                gvt_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }

        p = kmap_atomic(scratch_pt);
        mfn = intel_gvt_hypervisor_virt_to_mfn(p);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
                gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
                kunmap_atomic(p);
                __free_page(scratch_pt);
                return -EFAULT;
        }
        gtt->scratch_pt[type].page_mfn = mfn;
        gtt->scratch_pt[type].page = scratch_pt;
        gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
                        vgpu->id, type, mfn);

        /*
         * Build the tree by filling the scratch pt with entries which
         * point to the next level scratch pt or scratch page. The
         * scratch_pt[type] indicates the scratch pt/scratch page used by
         * the 'type' pt.
         * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
         * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
         * is of type GTT_TYPE_PPGTT_PTE_PT, and is filled with scratch page
         * mfns.
         */
        if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
                struct intel_gvt_gtt_entry se;

                memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
                se.type = get_entry_type(type - 1);
                ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

                /*
                 * The entry parameters (present/writeable/cache type)
                 * are set the same as in i915's scratch page tree.
                 */
                se.val64 |= _PAGE_PRESENT | _PAGE_RW;
                if (type == GTT_TYPE_PPGTT_PDE_PT)
                        se.val64 |= PPAT_CACHED_INDEX;

                for (i = 0; i < page_entry_num; i++)
                        ops->set_entry(p, &se, i, false, 0, vgpu);
        }

        kunmap_atomic(p);

        return 0;
}
2707e444 1963
3b6411c2
PG
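/*
 * Illustrative sketch, not part of the original file: the scratch
 * tables built above chain downward, so a page-table walk that enters
 * any level's scratch table always terminates at the scratch page, and
 * a stray guest access hits harmless zeroed memory. This hypothetical
 * helper just walks scratch_pt[] and reports each level's backing mfn.
 */
static void __maybe_unused example_dump_scratch_chain(struct intel_vgpu *vgpu)
{
        int type;

        for (type = GTT_TYPE_MAX - 1; type >= GTT_TYPE_PPGTT_PTE_PT; type--)
                gvt_dbg_mm("vgpu%d: scratch level %d backed by mfn 0x%lx\n",
                                vgpu->id, type,
                                vgpu->gtt.scratch_pt[type].page_mfn);
}
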
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
        int i;

        for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
                if (vgpu->gtt.scratch_pt[i].page != NULL) {
                        __free_page(vgpu->gtt.scratch_pt[i].page);
                        vgpu->gtt.scratch_pt[i].page = NULL;
                        vgpu->gtt.scratch_pt[i].page_mfn = 0;
                }
        }

        return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
        int i, ret;

        for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
                ret = alloc_scratch_pages(vgpu, i);
                if (ret)
                        goto err;
        }

        return 0;

err:
        release_scratch_page_tree(vgpu);
        return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
        struct intel_vgpu_gtt *gtt = &vgpu->gtt;
        struct intel_vgpu_mm *ggtt_mm;

        hash_init(gtt->guest_page_hash_table);
        hash_init(gtt->shadow_page_hash_table);

        INIT_LIST_HEAD(&gtt->mm_list_head);
        INIT_LIST_HEAD(&gtt->oos_page_list_head);
        INIT_LIST_HEAD(&gtt->post_shadow_list_head);

        ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
                        NULL, 1, 0);
        if (IS_ERR(ggtt_mm)) {
                gvt_err("fail to create mm for ggtt.\n");
                return PTR_ERR(ggtt_mm);
        }

        gtt->ggtt_mm = ggtt_mm;

        return create_scratch_page_tree(vgpu);
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_mm *mm;

        ppgtt_free_all_shadow_page(vgpu);
        release_scratch_page_tree(vgpu);

        list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, list);
                vgpu->gvt->gtt.mm_free_page_table(mm);
                list_del(&mm->list);
                list_del(&mm->lru_list);
                kfree(mm);
        }
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct list_head *pos, *n;
        struct intel_vgpu_oos_page *oos_page;

        WARN(!list_empty(&gtt->oos_page_use_list_head),
                "someone is still using oos page\n");

        list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
                oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
                list_del(&oos_page->list);
                kfree(oos_page);
        }
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_vgpu_oos_page *oos_page;
        int i;
        int ret;

        INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
        INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

        for (i = 0; i < preallocated_oos_pages; i++) {
                oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
                if (!oos_page) {
                        gvt_err("fail to pre-allocate oos page\n");
                        ret = -ENOMEM;
                        goto fail;
                }

                INIT_LIST_HEAD(&oos_page->list);
                INIT_LIST_HEAD(&oos_page->vm_list);
                oos_page->id = i;
                list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
        }

        gvt_dbg_mm("%d oos pages preallocated\n", i);

        return 0;
fail:
        clean_spt_oos(gvt);
        return ret;
}

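/*
 * Illustrative sketch, not part of the original file: the OOS pages
 * preallocated above sit on a free list and migrate to the use list
 * while they shadow a guest page table. This hypothetical helper shows
 * only the take side of that pattern; the real attach/detach logic
 * also synchronizes the page contents with the guest.
 */
static __maybe_unused struct intel_vgpu_oos_page *
example_take_oos_page(struct intel_gvt *gvt)
{
        struct intel_gvt_gtt *gtt = &gvt->gtt;
        struct intel_vgpu_oos_page *oos_page;

        if (list_empty(&gtt->oos_page_free_list_head))
                return NULL;

        oos_page = container_of(gtt->oos_page_free_list_head.next,
                        struct intel_vgpu_oos_page, list);
        /* move from the free list to the in-use list */
        list_move_tail(&oos_page->list, &gtt->oos_page_use_list_head);
        return oos_page;
}
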
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level, void *root_entry)
{
        struct list_head *pos;
        struct intel_vgpu_mm *mm;
        u64 *src, *dst;

        list_for_each(pos, &vgpu->gtt.mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, list);
                if (mm->type != INTEL_GVT_MM_PPGTT)
                        continue;

                if (mm->page_table_level != page_table_level)
                        continue;

                src = root_entry;
                dst = mm->virtual_page_table;

                if (page_table_level == 3) {
                        if (src[0] == dst[0]
                                        && src[1] == dst[1]
                                        && src[2] == dst[2]
                                        && src[3] == dst[3])
                                return mm;
                } else {
                        if (src[0] == dst[0])
                                return mm;
                }
        }
        return NULL;
}

/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level)
{
        u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
        struct intel_vgpu_mm *mm;

        if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
                return -EINVAL;

        mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
        if (mm) {
                intel_gvt_mm_reference(mm);
        } else {
                mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
                                pdp, page_table_level, 0);
                if (IS_ERR(mm)) {
                        gvt_err("fail to create mm\n");
                        return PTR_ERR(mm);
                }
        }
        return 0;
}

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level)
{
        u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
        struct intel_vgpu_mm *mm;

        if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
                return -EINVAL;

        mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
        if (!mm) {
                gvt_err("fail to find ppgtt instance.\n");
                return -EINVAL;
        }
        intel_gvt_mm_unreference(mm);
        return 0;
}

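/*
 * Illustrative sketch, not part of the original file: the g2v entry
 * points above are driven by the guest loading its root pointers into
 * vgtif_reg(pdp[]) and then writing a notification code to
 * vgtif_reg(g2v_notify). A handler might dispatch as below; this
 * helper is hypothetical, and the real dispatch lives in the PVINFO
 * MMIO write handler.
 */
static int __maybe_unused example_handle_g2v_notify(struct intel_vgpu *vgpu,
                u32 notification)
{
        switch (notification) {
        case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
                return intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
        case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
                return intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
        case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
                return intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
        case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
                return intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
        default:
                return 0;
        }
}
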
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
        int ret;

        gvt_dbg_core("init gtt\n");

        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
                gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
                gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
                gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
                gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
        } else {
                return -ENODEV;
        }

        if (enable_out_of_sync) {
                ret = setup_spt_oos(gvt);
                if (ret) {
                        gvt_err("fail to initialize SPT oos\n");
                        return ret;
                }
        }
        INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
        return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
        if (enable_out_of_sync)
                clean_spt_oos(gvt);
}