drivers/gpu/drm/i915/gvt/gtt.c

/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <[email protected]>
 *    Zhenyu Wang <[email protected]>
 *    Xiao Zheng <[email protected]>
 *
 * Contributors:
 *    Min He <[email protected]>
 *    Bing Niu <[email protected]>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * Validate a gm address and its range size, and translate it to a
 * host gm address.
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
			"invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
			"invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when the PSE bit is set
 *
 * If the given type doesn't carry a certain piece of information,
 * e.g. asking an L4 root entry for its PSE type, or a PTE page table
 * for its next-level page table type (an L4 root entry has no PSE
 * bit, and a PTE page table has no next level), GTT_TYPE_INVALID is
 * returned. This is useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

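/*
 * Illustrative note (not part of the original source): for a 4-level
 * PPGTT, chaining get_next_pt_type() through gtt_type_table above
 * yields the expected walk:
 *
 *   GTT_TYPE_PPGTT_ROOT_L4_ENTRY -> GTT_TYPE_PPGTT_PML4_PT
 *   GTT_TYPE_PPGTT_PML4_PT       -> GTT_TYPE_PPGTT_PDP_PT
 *   GTT_TYPE_PPGTT_PDP_PT        -> GTT_TYPE_PPGTT_PDE_PT
 *   GTT_TYPE_PPGTT_PDE_PT        -> GTT_TYPE_PPGTT_PTE_PT
 *   GTT_TYPE_PPGTT_PTE_PT        -> GTT_TYPE_INVALID (leaf level)
 */
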
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void gtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)

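/*
 * Worked example (not part of the original source): with GTT_HAW == 46,
 * ADDR_4K_MASK selects the address bits [46:12] of an entry:
 *   (1UL << (46 - 12 + 1)) - 1  ->  35 consecutive one-bits
 *   << 12                       ->  0x00007ffffffff000
 * ADDR_2M_MASK and ADDR_1G_MASK do the same starting at bits 21 and 30.
 */
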
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> 12);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> 12);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> 12);
	}

	e->val64 |= (pfn << 12);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & (1 << 7)))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

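/*
 * Illustrative note (not part of the original source): bit 7 of a
 * gen8 PDE/PDPE is the PSE (page size) bit.  gen8_gtt_test_pse()
 * first normalizes e->type to the plain entry type of its level and
 * then, if bit 7 is set, rewrites it to the large-page type from
 * gtt_type_table, e.g.:
 *   GTT_TYPE_PPGTT_PDE_ENTRY + PSE -> GTT_TYPE_PPGTT_PTE_2M_ENTRY
 *   GTT_TYPE_PPGTT_PDP_ENTRY + PSE -> GTT_TYPE_PPGTT_PTE_1G_ENTRY
 */
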
static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit
	 * set, and that still works, so root pointer entries need to be
	 * treated specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & (1 << 0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~(1 << 0);
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

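/*
 * Illustrative example (not part of the original source): for a
 * 4-level PPGTT, a canonical 48-bit GMA is decomposed by the helpers
 * above as
 *   pml4 index = (gma >> 39) & 0x1ff
 *   pdp index  = (gma >> 30) & 0x1ff
 *   pde index  = (gma >> 21) & 0x1ff
 *   pte index  = (gma >> 12) & 0x1ff
 * i.e. four 9-bit indexes over 512-entry tables, with the low 12 bits
 * being the offset inside a 4K page.
 */
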
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
		struct intel_gvt_gtt_entry *m)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long gfn, mfn;

	*m = *p;

	if (!ops->test_present(p))
		return 0;

	gfn = ops->get_pfn(p);

	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
		return -ENXIO;
	}

	ops->set_pfn(m, mfn);
	return 0;
}

/*
 * MM helpers.
 */
int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = mm->page_table_entry_type;

	ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);
	return 0;
}

int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

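/*
 * Illustrative usage (not part of the original source): the four
 * wrappers above select guest vs. shadow page-table access.  A typical
 * read-modify-write of one shadow entry looks like:
 *
 *	struct intel_gvt_gtt_entry e;
 *
 *	ppgtt_get_shadow_entry(spt, &e, index);
 *	ops->set_pfn(&e, new_mfn);	// new_mfn: hypothetical value
 *	ppgtt_set_shadow_entry(spt, &e, index);
 */
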
/**
 * intel_vgpu_init_guest_page - init a guest page data structure
 * @vgpu: a vGPU
 * @p: a guest page data structure
 * @gfn: guest memory page frame number
 * @handler: the function to be called when the tracked guest memory
 *	page is modified
 * @data: opaque data passed to @handler
 *
 * This function is called when a user wants to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	INIT_HLIST_NODE(&p->node);

	p->writeprotection = false;
	p->gfn = gfn;
	p->handler = handler;
	p->data = data;
	p->oos_page = NULL;
	p->write_cnt = 0;

	hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

/**
 * intel_vgpu_clean_guest_page - release the resources owned by a guest
 * page data structure
 * @vgpu: a vGPU
 * @p: a tracked guest page
 *
 * This function is called when a user stops tracking a guest memory page.
 */
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p)
{
	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);

	if (p->oos_page)
		detach_oos_page(vgpu, p->oos_page);

	if (p->writeprotection)
		intel_gvt_hypervisor_unset_wp_page(vgpu, p);
}

/**
 * intel_vgpu_find_guest_page - find a guest page data structure by GFN
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when emulation logic wants to know if a trapped GFN
 * belongs to a tracked guest page.
 *
 * Returns:
 * Pointer to the guest page data structure, NULL if not found.
 */
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_guest_page *p;

	hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
		p, node, gfn) {
		if (p->gfn == gfn)
			return p;
	}
	return NULL;
}

static inline int init_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p, int type)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		return -EINVAL;
	}

	p->vaddr = page_address(p->page);
	p->type = type;

	INIT_HLIST_NODE(&p->node);

	p->mfn = daddr >> GTT_PAGE_SHIFT;
	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
	return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
			PCI_DMA_BIDIRECTIONAL);

	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
		p, node, mfn) {
		if (p->mfn == mfn)
			return p;
	}
	return NULL;
}

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *gp, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!gpt->writeprotection)
		return -EINVAL;

	return ppgtt_handle_guest_write_page_table_bytes(gp,
			pa, p_data, bytes);
}

static int reclaim_one_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: the guest page type may differ from the shadow page type
	 * once PSE pages are supported.
	 */
	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
	if (ret) {
		gvt_vgpu_err("fail to initialize shadow page for spt\n");
		goto err;
	}

	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
			gfn, ppgtt_write_protection_handler, NULL);
	if (ret) {
		gvt_vgpu_err("fail to initialize guest page for spt\n");
		goto err;
	}

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
err:
	ppgtt_free_shadow_page(spt);
	return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

	if (p)
		return shadow_page_to_ppgtt_spt(p);

	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
	return NULL;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
		return -EINVAL;

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
			return -EINVAL;
		}
		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
				spt->vgpu, &e);
		if (ret)
			goto fail;
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	int ret;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
		ret = -EINVAL;
		goto fail;
	}

	g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
	if (g) {
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
			s->shadow_page.type);
	}
	return s;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			s, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

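/*
 * Illustrative note (not part of the original source): this helper is
 * the core of shadow paging.  The shadow entry inherits every
 * attribute bit of the guest entry (present, rw, cache type, ...) but
 * its address field is redirected from the guest page to the mfn of
 * the shadow page table backing it, so the hardware only ever walks
 * host-valid addresses.
 */
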
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
		for_each_present_guest_entry(spt, &ge, i) {
			ret = gtt_entry_p2m(vgpu, &ge, &se);
			if (ret)
				goto fail;
			ppgtt_set_shadow_entry(spt, &se, i);
		}
		return 0;
	}

	for_each_present_guest_entry(spt, &ge, i) {
		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
			gvt_vgpu_err("GVT doesn't support pse bit now\n");
			ret = -EINVAL;
			goto fail;
		}

		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &se, i);
		ppgtt_generate_shadow_entry(&se, s, &ge);
		ppgtt_set_shadow_entry(spt, &se, i);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry e;
	int ret;

	ppgtt_get_shadow_entry(spt, &e, index);

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
			 index);

	if (!ops->test_present(&e))
		return 0;

	if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(e.type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
	ppgtt_set_shadow_entry(spt, &e, index);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
			we->val64, index);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = gtt_entry_p2m(vgpu, we, &m);
		if (ret)
			goto fail;
		ppgtt_set_shadow_entry(spt, &m, index);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new, m;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
		index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = gtt_entry_p2m(vgpu, &new, &m);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
		ppgtt_set_shadow_entry(spt, &m, index);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
		oos_page->mem, GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
}

/**
 * intel_vgpu_sync_oos_pages - synchronize all out-of-sync shadow pages
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to synchronize all the out-of-sync shadow page tables of the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
	}
	return 0;
}

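/*
 * Illustrative summary of the out-of-sync (OOS) scheme (not part of
 * the original source): a PTE-level guest page that keeps being
 * written (write_cnt >= 2, see can_do_out_of_sync() below) has its
 * write protection dropped, with a snapshot of its contents kept in
 * oos_page->mem.  Before a guest workload is submitted, each OOS page
 * is re-protected and sync_oos_page() diffs the snapshot against the
 * current guest content, re-shadowing only the entries that changed.
 * This trades a trap on every write for one batched sync.
 */
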
/*
 * The heart of the PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;
	int new_present;

	new_present = ops->test_present(we);

	ret = ppgtt_handle_guest_entry_removal(gpt, index);
	if (ret)
		goto fail;

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(
			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
		&& gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(
					&spt->guest_page, &ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	ops->test_pse(&we);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			ret = ppgtt_handle_guest_entry_removal(gpt, index);
			if (ret)
				return ret;
		}

		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	gpt->write_cnt++;

	if (gpt->oos_page)
		ops->set_entry(gpt->oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(gpt)) {
		if (!gpt->oos_page)
			ppgtt_allocate_oos_page(vgpu, gpt);

		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/*
 * MM page table allocation policy for BDW+:
 * - for GGTT, only a virtual page table is allocated.
 * - for PPGTT, dedicated virtual and shadow page tables are allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mem;

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		mm->page_table_entry_cnt = 4;
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = kzalloc(mm->has_shadow_page_table ?
			mm->page_table_entry_size * 2
				: mm->page_table_entry_size, GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
		if (!mm->has_shadow_page_table)
			return 0;
		mm->shadow_page_table = mem + mm->page_table_entry_size;
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		mm->page_table_entry_cnt =
			(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = vzalloc(mm->page_table_entry_size);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
	}
	return 0;
}

static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
	if (mm->type == INTEL_GVT_MM_PPGTT) {
		kfree(mm->virtual_page_table);
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		if (mm->virtual_page_table)
			vfree(mm->virtual_page_table);
	}
	mm->virtual_page_table = mm->shadow_page_table = NULL;
}

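/*
 * Illustrative sizing (not part of the original source), assuming
 * 8-byte entries (gtt_entry_size == 8):
 * - PPGTT: 4 root entries -> 32 bytes of virtual table, doubled to 64
 *   bytes when a shadow table is kept alongside, hence kzalloc().
 * - GGTT: one PTE per 4K page of the whole GM space, e.g. a 4GB GM
 *   needs (4GB >> 12) * 8 = 8MB of virtual table, hence vzalloc().
 */
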
static void invalidate_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int i;

	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
		return;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_shadow_root_entry(mm, &se, i);
		if (!ops->test_present(&se))
			continue;
		ppgtt_invalidate_shadow_page_by_shadow_entry(
				vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "destroy root pointer",
				NULL, se.type, se.val64, i);
	}
	mm->shadowed = false;
}

/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: the kref embedded in a vGPU mm object
 *
 * This function is used to destroy a mm object for a vGPU.
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;

	if (!mm->initialized)
		goto out;

	list_del(&mm->list);
	list_del(&mm->lru_list);

	if (mm->has_shadow_page_table)
		invalidate_mm(mm);

	gtt->mm_free_page_table(mm);
out:
	kfree(mm);
}

static int shadow_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int i;
	int ret;

	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
		return 0;

	mm->shadowed = true;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_guest_root_entry(mm, &ge, i);
		if (!ops->test_present(&ge))
			continue;

		trace_gpt_change(vgpu->id, __func__, NULL,
				ge.type, ge.val64, i);

		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "populate root pointer",
				NULL, se.type, se.val64, i);
	}
	return 0;
fail:
	invalidate_mm(mm);
	return ret;
}

/**
 * intel_vgpu_create_mm - create a mm object for a vGPU
 * @vgpu: a vGPU
 * @mm_type: mm object type, should be PPGTT or GGTT
 * @virtual_page_table: page table root pointers. Could be NULL if the user
 *	wants to populate the shadow later.
 * @page_table_level: the page table level of the mm object
 * @pde_base_index: pde root pointer base in GGTT MMIO
 *
 * This function is used to create a mm object for a vGPU.
 *
 * Returns:
 * The mm object on success, an ERR_PTR carrying a negative error code
 * if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
		int mm_type, void *virtual_page_table, int page_table_level,
		u32 pde_base_index)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm) {
		ret = -ENOMEM;
		goto fail;
	}

	mm->type = mm_type;

	if (page_table_level == 1)
		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
	else if (page_table_level == 3)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
	else if (page_table_level == 4)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	else {
		WARN_ON(1);
		ret = -EINVAL;
		goto fail;
	}

	mm->page_table_level = page_table_level;
	mm->pde_base_index = pde_base_index;

	mm->vgpu = vgpu;
	mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);

	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);
	INIT_LIST_HEAD(&mm->list);
	INIT_LIST_HEAD(&mm->lru_list);
	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

	ret = gtt->mm_alloc_page_table(mm);
	if (ret) {
		gvt_vgpu_err("fail to allocate page table for mm\n");
		goto fail;
	}

	mm->initialized = true;

	if (virtual_page_table)
		memcpy(mm->virtual_page_table, virtual_page_table,
				mm->page_table_entry_size);

	if (mm->has_shadow_page_table) {
		ret = shadow_mm(mm);
		if (ret)
			goto fail;
		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
	}
	return mm;
fail:
	gvt_vgpu_err("fail to create mm\n");
	if (mm)
		intel_gvt_mm_unreference(mm);
	return ERR_PTR(ret);
}

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user is done using a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return;

	atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object.
 * If the mm object hasn't been shadowed yet, the shadow is populated
 * at this time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return 0;

	atomic_inc(&mm->pincount);

	if (!mm->shadowed) {
		ret = shadow_mm(mm);
		if (ret)
			return ret;
	}

	list_del_init(&mm->lru_list);
	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
	return 0;
}

static int reclaim_one_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, lru_list);

		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;
		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->lru_list);
		invalidate_mm(mm);
		return 1;
	}
	return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	if (WARN_ON(!mm->has_shadow_page_table))
		return -EINVAL;

	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}

/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object, which could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a
 * specific graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, index;
	int ret;

	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
		return INTEL_GVT_INVALID_ADDR;

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ret = ggtt_get_guest_entry(mm, &e,
				gma_ops->gma_to_ggtt_pte_index(gma));
		if (ret)
			goto err;
		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
			+ (gma & ~GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
		return gpa;
	}

	switch (mm->page_table_level) {
	case 4:
		ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
		gma_index[2] = gma_ops->gma_to_pde_index(gma);
		gma_index[3] = gma_ops->gma_to_pte_index(gma);
		index = 4;
		break;
	case 3:
		ret = ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_l3_pdp_index(gma));
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pde_index(gma);
		gma_index[1] = gma_ops->gma_to_pte_index(gma);
		index = 2;
		break;
	case 2:
		ret = ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_pde_index(gma));
		if (ret)
			goto err;
		gma_index[0] = gma_ops->gma_to_pte_index(gma);
		index = 1;
		break;
	default:
		WARN_ON(1);
		goto err;
	}

	/* walk the shadow page table and get the gpa from the guest entry */
	for (i = 0; i < index; i++) {
		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
			(i == index - 1));
		if (ret)
			goto err;

		if (!pte_ops->test_present(&e)) {
			gvt_dbg_core("GMA 0x%lx is not present\n", gma);
			goto err;
		}
	}

	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
		+ (gma & ~GTT_PAGE_MASK);

	trace_gma_translate(vgpu->id, "ppgtt", 0,
			mm->page_table_level, gma, gpa);
	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}

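/*
 * Illustrative walk (not part of the original source): translating a
 * GMA through a 4-level PPGTT above visits, in order,
 *   shadow root entry  - indexed by gma_to_pml4_index()
 *   PDP page           - indexed by gma_to_l4_pdp_index()
 *   PDE page           - indexed by gma_to_pde_index()
 *   PTE page           - indexed by gma_to_pte_index()
 * Only the final level is read from the guest table (the last
 * ppgtt_get_next_level_entry() call passes guest == true), so the
 * address returned is a guest physical address rather than a host one.
 */
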
static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data to be returned to the guest
 * @bytes: data length
 *
 * This function is used to emulate a GTT MMIO register read.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}

static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e, m;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	if (ops->test_present(&e)) {
		ret = gtt_entry_p2m(vgpu, &e, &m);
		if (ret) {
			gvt_vgpu_err("fail to translate guest gtt entry\n");
			/* The guest may be updating the GTT entry with
			 * partial writes; p2m translation can fail in that
			 * window, so point the shadow entry at a scratch
			 * page until the update completes.
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
		}
	} else {
		m = e;
		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
	}

	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
	gtt_invalidate(gvt->dev_priv);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}

 1884/**
1885 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
1886 * @vgpu: a vGPU
1887 * @off: register offset
1888 * @p_data: data from guest write
1889 * @bytes: data length
1890 *
1891 * This function is used to emulate the GTT MMIO register write
1892 *
1893 * Returns:
1894 * Zero on success, error code if failed.
1895 */
1896int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
1897 void *p_data, unsigned int bytes)
1898{
1899 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1900 int ret;
1901
1902 if (bytes != 4 && bytes != 8)
1903 return -EINVAL;
1904
1905 off -= info->gtt_start_offset;
1906 ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
1907 return ret;
1908}
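/*
 * A standalone sketch of the partial-update case the write path above
 * guards against: a guest using 32-bit MMIO writes updates one 64-bit
 * PTE in two steps, so between the writes the emulator sees a mixed
 * old/new entry that cannot be translated. Assumes a little-endian
 * host (as on x86); the values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t entry = 0x00000000deadb001ULL;	/* old PTE, present bit set */
	uint32_t lo = 0x0000c001;		/* first (low dword) write */
	uint32_t hi = 0x00000001;		/* second (high dword) write */

	memcpy((char *)&entry + 0, &lo, 4);	/* entry is now half updated */
	printf("partial: 0x%016llx\n", (unsigned long long)entry);

	memcpy((char *)&entry + 4, &hi, 4);	/* update is now complete */
	printf("final:   0x%016llx\n", (unsigned long long)entry);
	return 0;
}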
1909
3b6411c2
PG
1910static int alloc_scratch_pages(struct intel_vgpu *vgpu,
1911 intel_gvt_gtt_type_t type)
2707e444
ZW
1912{
1913 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
3b6411c2
PG
1914 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1915 int page_entry_num = GTT_PAGE_SIZE >>
1916 vgpu->gvt->device_info.gtt_entry_size_shift;
9631739f 1917 void *scratch_pt;
3b6411c2 1918 int i;
5de6bd4c
CD
1919 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
1920 dma_addr_t daddr;
2707e444 1921
3b6411c2
PG
1922 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
1923 return -EINVAL;
1924
9631739f 1925 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
3b6411c2 1926 if (!scratch_pt) {
695fbc08 1927 gvt_vgpu_err("fail to allocate scratch page\n");
2707e444
ZW
1928 return -ENOMEM;
1929 }
1930
5de6bd4c
CD
1931 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
1932 4096, PCI_DMA_BIDIRECTIONAL);
1933 if (dma_mapping_error(dev, daddr)) {
695fbc08 1934 gvt_vgpu_err("fail to dmamap scratch_pt\n");
5de6bd4c
CD
1935 __free_page(virt_to_page(scratch_pt));
1936 return -ENOMEM;
3b6411c2 1937 }
5de6bd4c
CD
1938 gtt->scratch_pt[type].page_mfn =
1939 (unsigned long)(daddr >> GTT_PAGE_SHIFT);
9631739f 1940 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
3b6411c2 1941 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
5de6bd4c 1942 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
3b6411c2
PG
1943
 1944 /* Build the tree by fully filling the scratch pt with entries which
 1945 * point to the next level scratch pt or scratch page. The
 1946 * scratch_pt[type] indicates the scratch pt/scratch page used by the
 1947 * 'type' pt.
 1948 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
9631739f 1949 * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
3b6411c2
PG
 1950 * is of type GTT_TYPE_PPGTT_PTE_PT, fully filled with the scratch page mfn.
 1951 */
1952 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
1953 struct intel_gvt_gtt_entry se;
1954
1955 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
1956 se.type = get_entry_type(type - 1);
1957 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
1958
 1959 /* The entry parameters like present/writeable/cache type are
 1960 * set to the same values as in i915's scratch page tree.
 1961 */
1962 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
1963 if (type == GTT_TYPE_PPGTT_PDE_PT)
1964 se.val64 |= PPAT_CACHED_INDEX;
1965
1966 for (i = 0; i < page_entry_num; i++)
9631739f 1967 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
3b6411c2
PG
1968 }
1969
3b6411c2
PG
1970 return 0;
1971}
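/*
 * A toy model of the scratch-table chain built above: each level's
 * scratch table is filled with entries that point at the next lower
 * level's scratch table, terminating at the scratch data page. The
 * enum names and mfn values below are hypothetical stand-ins for the
 * GTT_TYPE_* constants.
 */
#include <stdio.h>

enum ex_pt_type { EX_PTE_PT, EX_PDE_PT, EX_PDP_PT, EX_PML4_PT, EX_MAX };

int main(void)
{
	unsigned long mfn[EX_MAX] = { 0x100, 0x101, 0x102, 0x103 };
	int t;

	/* entries of table 't' all carry the mfn of table 't - 1' */
	for (t = EX_PDE_PT; t < EX_MAX; t++)
		printf("level %d scratch entries -> mfn 0x%lx\n",
		       t, mfn[t - 1]);
	return 0;
}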
2707e444 1972
3b6411c2
PG
1973static int release_scratch_page_tree(struct intel_vgpu *vgpu)
1974{
1975 int i;
5de6bd4c
CD
1976 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
1977 dma_addr_t daddr;
3b6411c2
PG
1978
1979 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
1980 if (vgpu->gtt.scratch_pt[i].page != NULL) {
5de6bd4c
CD
1981 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
1982 GTT_PAGE_SHIFT);
1983 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
3b6411c2
PG
1984 __free_page(vgpu->gtt.scratch_pt[i].page);
1985 vgpu->gtt.scratch_pt[i].page = NULL;
1986 vgpu->gtt.scratch_pt[i].page_mfn = 0;
1987 }
2707e444
ZW
1988 }
1989
2707e444
ZW
1990 return 0;
1991}
1992
3b6411c2 1993static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2707e444 1994{
3b6411c2
PG
1995 int i, ret;
1996
1997 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
1998 ret = alloc_scratch_pages(vgpu, i);
1999 if (ret)
2000 goto err;
2707e444 2001 }
3b6411c2
PG
2002
2003 return 0;
2004
2005err:
2006 release_scratch_page_tree(vgpu);
2007 return ret;
2707e444
ZW
2008}
2009
2010/**
 2011 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2012 * @vgpu: a vGPU
2013 *
2014 * This function is used to initialize per-vGPU graphics memory virtualization
2015 * components.
2016 *
2017 * Returns:
2018 * Zero on success, error code if failed.
2019 */
2020int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2021{
2022 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2023 struct intel_vgpu_mm *ggtt_mm;
2024
2025 hash_init(gtt->guest_page_hash_table);
2026 hash_init(gtt->shadow_page_hash_table);
2027
2028 INIT_LIST_HEAD(&gtt->mm_list_head);
2029 INIT_LIST_HEAD(&gtt->oos_page_list_head);
2030 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2031
d650ac06
PG
2032 intel_vgpu_reset_ggtt(vgpu);
2033
2707e444
ZW
2034 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
2035 NULL, 1, 0);
2036 if (IS_ERR(ggtt_mm)) {
695fbc08 2037 gvt_vgpu_err("fail to create mm for ggtt.\n");
2707e444
ZW
2038 return PTR_ERR(ggtt_mm);
2039 }
2040
2041 gtt->ggtt_mm = ggtt_mm;
2042
3b6411c2 2043 return create_scratch_page_tree(vgpu);
2707e444
ZW
2044}
2045
da9cc8de
PG
2046static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
2047{
2048 struct list_head *pos, *n;
2049 struct intel_vgpu_mm *mm;
2050
2051 list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
2052 mm = container_of(pos, struct intel_vgpu_mm, list);
2053 if (mm->type == type) {
2054 vgpu->gvt->gtt.mm_free_page_table(mm);
2055 list_del(&mm->list);
2056 list_del(&mm->lru_list);
2057 kfree(mm);
2058 }
2059 }
2060}
2061
2707e444
ZW
2062/**
 2063 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2064 * @vgpu: a vGPU
2065 *
2066 * This function is used to clean up per-vGPU graphics memory virtualization
2067 * components.
 2068 */
2072void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2073{
2707e444 2074 ppgtt_free_all_shadow_page(vgpu);
3b6411c2 2075 release_scratch_page_tree(vgpu);
2707e444 2076
da9cc8de
PG
2077 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
2078 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
2707e444
ZW
2079}
2080
2081static void clean_spt_oos(struct intel_gvt *gvt)
2082{
2083 struct intel_gvt_gtt *gtt = &gvt->gtt;
2084 struct list_head *pos, *n;
2085 struct intel_vgpu_oos_page *oos_page;
2086
2087 WARN(!list_empty(&gtt->oos_page_use_list_head),
2088 "someone is still using oos page\n");
2089
2090 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2091 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2092 list_del(&oos_page->list);
2093 kfree(oos_page);
2094 }
2095}
2096
2097static int setup_spt_oos(struct intel_gvt *gvt)
2098{
2099 struct intel_gvt_gtt *gtt = &gvt->gtt;
2100 struct intel_vgpu_oos_page *oos_page;
2101 int i;
2102 int ret;
2103
2104 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2105 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2106
2107 for (i = 0; i < preallocated_oos_pages; i++) {
2108 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2109 if (!oos_page) {
2707e444
ZW
2110 ret = -ENOMEM;
2111 goto fail;
2112 }
2113
2114 INIT_LIST_HEAD(&oos_page->list);
2115 INIT_LIST_HEAD(&oos_page->vm_list);
2116 oos_page->id = i;
2117 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2118 }
2119
2120 gvt_dbg_mm("%d oos pages preallocated\n", i);
2121
2122 return 0;
2123fail:
2124 clean_spt_oos(gvt);
2125 return ret;
2126}
2127
2128/**
2129 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2130 * @vgpu: a vGPU
2131 * @page_table_level: PPGTT page table level
2132 * @root_entry: PPGTT page table root pointers
2133 *
2134 * This function is used to find a PPGTT mm object from mm object pool
2135 *
2136 * Returns:
2137 * pointer to mm object on success, NULL if failed.
2138 */
2139struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2140 int page_table_level, void *root_entry)
2141{
2142 struct list_head *pos;
2143 struct intel_vgpu_mm *mm;
2144 u64 *src, *dst;
2145
2146 list_for_each(pos, &vgpu->gtt.mm_list_head) {
2147 mm = container_of(pos, struct intel_vgpu_mm, list);
2148 if (mm->type != INTEL_GVT_MM_PPGTT)
2149 continue;
2150
2151 if (mm->page_table_level != page_table_level)
2152 continue;
2153
2154 src = root_entry;
2155 dst = mm->virtual_page_table;
2156
2157 if (page_table_level == 3) {
2158 if (src[0] == dst[0]
2159 && src[1] == dst[1]
2160 && src[2] == dst[2]
2161 && src[3] == dst[3])
2162 return mm;
2163 } else {
2164 if (src[0] == dst[0])
2165 return mm;
2166 }
2167 }
2168 return NULL;
2169}
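/*
 * A standalone sketch of the root-pointer match above: a 3-level PPGTT
 * is identified by its four PDP entries, a 4-level one by a single
 * PML4 pointer. The helper and values are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool roots_match(const unsigned long long *a,
			const unsigned long long *b, int page_table_level)
{
	int n = (page_table_level == 3) ? 4 : 1;

	return memcmp(a, b, n * sizeof(*a)) == 0;
}

int main(void)
{
	unsigned long long guest[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	unsigned long long mm[4]    = { 0x1000, 0x2000, 0x3000, 0x4000 };

	printf("match=%d\n", roots_match(guest, mm, 3));
	return 0;
}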
2170
2171/**
2172 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
2173 * g2v notification
2174 * @vgpu: a vGPU
2175 * @page_table_level: PPGTT page table level
2176 *
2177 * This function is used to create a PPGTT mm object from a guest to GVT-g
2178 * notification.
2179 *
2180 * Returns:
2181 * Zero on success, negative error code if failed.
2182 */
2183int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
2184 int page_table_level)
2185{
2186 u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
2187 struct intel_vgpu_mm *mm;
2188
2189 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2190 return -EINVAL;
2191
2192 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2193 if (mm) {
2194 intel_gvt_mm_reference(mm);
2195 } else {
2196 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
2197 pdp, page_table_level, 0);
2198 if (IS_ERR(mm)) {
695fbc08 2199 gvt_vgpu_err("fail to create mm\n");
2707e444
ZW
2200 return PTR_ERR(mm);
2201 }
2202 }
2203 return 0;
2204}
2205
2206/**
2207 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
2208 * g2v notification
2209 * @vgpu: a vGPU
2210 * @page_table_level: PPGTT page table level
2211 *
 2212 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
2213 * notification.
2214 *
2215 * Returns:
2216 * Zero on success, negative error code if failed.
2217 */
2218int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
2219 int page_table_level)
2220{
2221 u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
2222 struct intel_vgpu_mm *mm;
2223
2224 if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
2225 return -EINVAL;
2226
2227 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
2228 if (!mm) {
695fbc08 2229 gvt_vgpu_err("fail to find ppgtt instance.\n");
2707e444
ZW
2230 return -EINVAL;
2231 }
2232 intel_gvt_mm_unreference(mm);
2233 return 0;
2234}
2235
2236/**
2237 * intel_gvt_init_gtt - initialize mm components of a GVT device
2238 * @gvt: GVT device
2239 *
2240 * This function is called at the initialization stage, to initialize
2241 * the mm components of a GVT device.
2242 *
2243 * Returns:
2244 * zero on success, negative error code if failed.
2245 */
2246int intel_gvt_init_gtt(struct intel_gvt *gvt)
2247{
2248 int ret;
9631739f 2249 void *page;
5de6bd4c
CD
2250 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
2251 dma_addr_t daddr;
2707e444
ZW
2252
2253 gvt_dbg_core("init gtt\n");
2254
e3476c00
XH
2255 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
2256 || IS_KABYLAKE(gvt->dev_priv)) {
2707e444
ZW
2257 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2258 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2259 gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
2260 gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
2261 } else {
2262 return -ENODEV;
2263 }
2264
9631739f
JS
2265 page = (void *)get_zeroed_page(GFP_KERNEL);
2266 if (!page) {
d650ac06
PG
2267 gvt_err("fail to allocate scratch ggtt page\n");
2268 return -ENOMEM;
2269 }
2270
5de6bd4c
CD
2271 daddr = dma_map_page(dev, virt_to_page(page), 0,
2272 4096, PCI_DMA_BIDIRECTIONAL);
2273 if (dma_mapping_error(dev, daddr)) {
2274 gvt_err("fail to dmamap scratch ggtt page\n");
2275 __free_page(virt_to_page(page));
2276 return -ENOMEM;
d650ac06 2277 }
5de6bd4c
CD
2278 gvt->gtt.scratch_ggtt_page = virt_to_page(page);
2279 gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
d650ac06 2280
2707e444
ZW
2281 if (enable_out_of_sync) {
2282 ret = setup_spt_oos(gvt);
2283 if (ret) {
2284 gvt_err("fail to initialize SPT oos\n");
0de98709
ZW
2285 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2286 __free_page(gvt->gtt.scratch_ggtt_page);
2707e444
ZW
2287 return ret;
2288 }
2289 }
2290 INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
2291 return 0;
2292}
2293
2294/**
2295 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2296 * @gvt: GVT device
2297 *
 2298 * This function is called at the driver unloading stage, to clean up
 2299 * the mm components of a GVT device.
2300 *
2301 */
2302void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2303{
5de6bd4c
CD
2304 struct device *dev = &gvt->dev_priv->drm.pdev->dev;
2305 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
2306 GTT_PAGE_SHIFT);
2307
2308 dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2309
d650ac06
PG
2310 __free_page(gvt->gtt.scratch_ggtt_page);
2311
2707e444
ZW
2312 if (enable_out_of_sync)
2313 clean_spt_oos(gvt);
2314}
d650ac06
PG
2315
2316/**
 2317 * intel_vgpu_reset_ggtt - reset the GGTT entries
2318 * @vgpu: a vGPU
2319 *
2320 * This function is called at the vGPU create stage
2321 * to reset all the GGTT entries.
2322 *
2323 */
2324void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
2325{
2326 struct intel_gvt *gvt = vgpu->gvt;
5ad59bf0 2327 struct drm_i915_private *dev_priv = gvt->dev_priv;
d650ac06
PG
2328 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2329 u32 index;
2330 u32 offset;
2331 u32 num_entries;
2332 struct intel_gvt_gtt_entry e;
2333
2334 memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
2335 e.type = GTT_TYPE_GGTT_PTE;
2336 ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
2337 e.val64 |= _PAGE_PRESENT;
2338
2339 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2340 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2341 for (offset = 0; offset < num_entries; offset++)
2342 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
2343
2344 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2345 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2346 for (offset = 0; offset < num_entries; offset++)
2347 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
5ad59bf0 2348
af2c6399 2349 gtt_invalidate(dev_priv);
d650ac06 2350}
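/*
 * A small sketch of the range arithmetic used by the reset above: each
 * ballooned range is converted to a first GGTT index and an entry
 * count, and every entry in it is pointed at the scratch page. The
 * base/size values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;			/* 4KB GGTT pages */
	unsigned long aperture_base = 0x10000000;	/* base at 256MB */
	unsigned long aperture_sz = 0x08000000;		/* 128MB range */

	unsigned long index = aperture_base >> page_shift;
	unsigned long num = aperture_sz >> page_shift;

	printf("reset GGTT entries [%lu, %lu) to scratch\n",
	       index, index + num);
	return 0;
}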
b611581b
CD
2351
2352/**
2353 * intel_vgpu_reset_gtt - reset the all GTT related status
2354 * @vgpu: a vGPU
2355 * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
2356 *
 2357 * This function is called from the vfio core to reset all
 2358 * GTT related state, including GGTT, PPGTT and scratch pages.
2359 *
2360 */
2361void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
2362{
2363 int i;
2364
2365 ppgtt_free_all_shadow_page(vgpu);
da9cc8de
PG
2366
2367 /* Shadow pages are only created when there is no page
2368 * table tracking data, so remove page tracking data after
2369 * removing the shadow pages.
2370 */
2371 intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
2372
b611581b
CD
2373 if (!dmlr)
2374 return;
2375
2376 intel_vgpu_reset_ggtt(vgpu);
2377
2378 /* clear scratch page for security */
2379 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2380 if (vgpu->gtt.scratch_pt[i].page != NULL)
2381 memset(page_address(vgpu->gtt.scratch_pt[i].page),
2382 0, PAGE_SIZE);
2383 }
2384}