/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <[email protected]>
 *    Zhenyu Wang <[email protected]>
 *    Xiao Zheng <[email protected]>
 *
 * Contributors:
 *    Min He <[email protected]>
 *    Bing Niu <[email protected]>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * Validate a gm address and its related range size.
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
			     addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * For a given type, the table below provides:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when the PSE bit is set
 *
 * If the given type doesn't carry a particular piece of information,
 * GTT_TYPE_INVALID is returned: for example, an l4 root entry has no
 * PSE bit, so asking for its PSE type yields GTT_TYPE_INVALID, and a
 * PTE page table has no next-level page table, so asking for its next
 * level page table type yields GTT_TYPE_INVALID as well. This is
 * useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
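
/*
 * Illustrative walk, read straight from the table above: starting from a
 * GTT_TYPE_PPGTT_ROOT_L4_ENTRY, repeated get_next_pt_type() calls yield
 * PML4_PT -> PDP_PT -> PDE_PT -> PTE_PT and finally GTT_TYPE_INVALID,
 * which is the termination condition for a page table walker.
 */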

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)
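
/*
 * With GTT_HAW == 46, the address field of a 4K entry spans bits [45:12],
 * a 2M entry bits [45:21] and a 1G entry bits [45:30] -- an illustrative
 * reading of the GENMASK_ULL() ranges above.
 */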

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & _PAGE_PSE))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}
387static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
388{
389 /*
390 * i915 writes PDP root pointer registers without present bit,
391 * it also works, so we need to treat root pointer entry
392 * specifically.
393 */
394 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
395 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
396 return (e->val64 != 0);
397 else
d861ca23 398 return (e->val64 & _PAGE_PRESENT);
2707e444
ZW
399}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
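
/*
 * Gen8 GMA bit layout implied by the shifts and masks above (illustrative):
 * bits 12..20 select the PTE, 21..29 the PDE, 30..38 the PDP entry (only
 * bits 30..31 for the 2-level l3 case) and 39..47 the PML4 entry.
 */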

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);

	pte_ops->test_pse(entry);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
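
/*
 * Note (illustrative): the guest variants access the tracked guest page
 * through the hypervisor at gpa = gfn << I915_GTT_PAGE_SHIFT, while the
 * shadow variants operate directly on the host-side shadow page through
 * shadow_page.vaddr.
 */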

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.oos_page)
		detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

	intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt;
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		ppgtt_free_spt(spt);
	}
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	return ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data,
							 bytes);
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	/*
	 * Init guest_page.
	 */
	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;

	ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
					     ppgtt_write_protection_handler, spt);
	if (ret)
		goto err_unmap_dma;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unreg_page_track;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;

err_unreg_page_track:
	intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}
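
/*
 * Illustrative summary: an spt has two identities -- a host shadow page
 * (dma-mapped, indexed by mfn in spt_tree) and a tracked guest page
 * (write-protected, indexed by gfn through the page-track API). Both are
 * set up above and both are torn down again in ppgtt_free_spt().
 */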

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
	    && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
		    vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
			     ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}

static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 2M/1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
		     spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt)
		ppgtt_get_spt(spt);
	else {
		int type = get_next_pt_type(we->type);

		spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto fail;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto fail;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}
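
/*
 * The shadow entry keeps the guest entry's attribute bits but repoints the
 * address at the shadow page's host mfn instead of the guest pfn; that
 * substitution is what makes the shadow page table walkable by hardware
 * (illustrative note).
 */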

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
		struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else
		ppgtt_invalidate_pte(spt, se);

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				 info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
		    && !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
			       spt, spt->guest_page.type,
			       new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}
2707e444 1211{
44b46733 1212 struct intel_gvt *gvt = spt->vgpu->gvt;
2707e444 1213 struct intel_gvt_gtt *gtt = &gvt->gtt;
44b46733 1214 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
2707e444
ZW
1215 int ret;
1216
1217 WARN(oos_page, "shadow PPGTT page has already has a oos page\n");
1218
1219 if (list_empty(&gtt->oos_page_free_list_head)) {
1220 oos_page = container_of(gtt->oos_page_use_list_head.next,
1221 struct intel_vgpu_oos_page, list);
44b46733 1222 ret = ppgtt_set_guest_page_sync(oos_page->spt);
2707e444
ZW
1223 if (ret)
1224 return ret;
44b46733 1225 ret = detach_oos_page(spt->vgpu, oos_page);
2707e444
ZW
1226 if (ret)
1227 return ret;
1228 } else
1229 oos_page = container_of(gtt->oos_page_free_list_head.next,
1230 struct intel_vgpu_oos_page, list);
44b46733 1231 return attach_oos_page(oos_page, spt);
2707e444
ZW
1232}
1233
44b46733 1234static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
2707e444 1235{
44b46733 1236 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
2707e444
ZW
1237
1238 if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n"))
1239 return -EINVAL;
1240
44b46733
CD
1241 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1242 spt, spt->guest_page.type);
2707e444 1243
44b46733 1244 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
e502a2af 1245 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
2707e444
ZW
1246}
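
/*
 * Out-of-sync lifecycle (illustrative): attach_oos_page() snapshots the
 * guest page table into oos_page->mem, ppgtt_set_guest_page_oos() then
 * drops write protection so further guest writes go untracked, and
 * sync_oos_page() later diffs the snapshot against the live guest page
 * and re-shadows only the entries that changed.
 */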

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of the PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one; this
	 * guarantees the ppgtt table stays valid during the window
	 * between the two operations.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &old_se, index);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
		     spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
	       && gtt_type_is_pte_pt(spt->guest_page.type)
	       && spt->guest_page.write_cnt >= 2;
}
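
/*
 * Heuristic, as coded above: only leaf PTE pages become out-of-sync
 * candidates, and only after at least two guest writes have been seen,
 * so sporadic single updates stay on the cheaper write-protected path.
 */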

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
		      &spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				   post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				 GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	ops->test_pse(&we);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
			       false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret < 0)
			return ret;
	}
	return 0;
}
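
/*
 * Illustrative flow for a partial (e.g. 4-byte) write to an 8-byte entry:
 * the old shadow entry is torn down, the slot is pointed at the scratch
 * page, and the index is queued in post_shadow_bitmap; the real shadow
 * entry is rebuilt by intel_vgpu_flush_post_shadow() before the next
 * workload is submitted.
 */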

static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int index;

	if (!mm->ppgtt_mm.shadowed)
		return;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
		ppgtt_get_shadow_root_entry(mm, &se, index);

		if (!ops->test_present(&se))
			continue;

		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "destroy root pointer",
				       NULL, se.type, se.val64, index);
	}

	mm->ppgtt_mm.shadowed = false;
}

static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_spt_guest_change(vgpu->id, __func__, NULL,
				       ge.type, ge.val64, index);

		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "populate root pointer",
				       NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}

static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}

/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * The ppgtt mm object on success, an ERR_PTR-encoded negative error code
 * if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
	return mm;
}

static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
					   vgpu->gvt->device_info.gtt_entry_size);
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	return mm;
}
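
/*
 * Note (illustrative): the GGTT mm keeps a vzalloc'ed software copy of the
 * guest's view of the whole GGTT in virtual_ggtt; guest-entry reads and
 * writes are served from this copy, while host entries are accessed
 * directly in the hardware GGTT (see ggtt_get/set_host_entry above).
 */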

/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for a vGPU.
 *
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
		gvt_err("vgpu mm pin count bug detected\n");

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		list_del(&mm->ppgtt_mm.list);
		list_del(&mm->ppgtt_mm.lru_list);
		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
	}

	vgpu_free_mm(mm);
}

/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user is done using a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
	}

	return 0;
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->ppgtt_mm.lru_list);
		invalidate_ppgtt_mm(mm);
		return 1;
	}
	return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}
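
/*
 * Worked example (illustrative, 4-level PPGTT): a gma is decomposed into
 * [pml4, l4_pdp, pde, pte] indices; the walk below looks up shadow entries
 * to find each next-level page table, and only the last level reads the
 * guest entry, so the value returned is a guest physical address.
 */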

/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in specific
 * graphics memory space to guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, levels = 0;
	int ret;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
		   mm->type != INTEL_GVT_MM_PPGTT);

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
	} else {
		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e, 0);

			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
			gma_index[2] = gma_ops->gma_to_pde_index(gma);
			gma_index[3] = gma_ops->gma_to_pte_index(gma);
			levels = 4;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e,
					gma_ops->gma_to_l3_pdp_index(gma));

			gma_index[0] = gma_ops->gma_to_pde_index(gma);
			gma_index[1] = gma_ops->gma_to_pte_index(gma);
			levels = 2;
			break;
		default:
			GEM_BUG_ON(1);
		}

		/* walk the shadow page table and get gpa from guest entry */
		for (i = 0; i < levels; i++) {
			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == levels - 1));
			if (ret)
				goto err;

			if (!pte_ops->test_present(&e)) {
				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
				goto err;
			}
		}

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
			(gma & ~I915_GTT_PAGE_MASK);
		trace_gma_translate(vgpu->id, "ppgtt", 0,
				    mm->ppgtt_mm.root_entry_type, gma, gpa);
	}

	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
1787
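/*
 * For reference, with a 4-level PPGTT a 48-bit GMA decomposes as
 * follows (gen8 layout):
 *
 *   bits 47..39  PML4 index
 *   bits 38..30  PDP index
 *   bits 29..21  PDE index
 *   bits 20..12  PTE index
 *   bits 11..0   page offset
 *
 * e.g. gma 0x7f8040003000 walks pml4[255], pdp[1], pde[0], pte[3],
 * and the low 12 bits are added to the translated page address.
 */
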
static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
		bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}

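/*
 * Sketch of the offset math, assuming the usual 8-byte GTT entry
 * size: a guest access at (gtt_start_offset + n) addresses GGTT
 * entry n >> 3, while (n & 7) selects which bytes of the 64-bit PTE
 * are touched, so two 4-byte reads at n and n + 4 reassemble one PTE.
 */
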
static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
						pfn << PAGE_SHIFT);
}

static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma, gfn;
	struct intel_gvt_gtt_entry e, m;
	dma_addr_t dma_addr;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	if (ops->test_present(&e)) {
		gfn = ops->get_pfn(&e);
		m = e;

		/* one PTE update may be issued in multiple writes and the
		 * first write may not construct a valid gfn
		 */
		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			goto out;
		}

		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
			&dma_addr);
		if (ret) {
			gvt_vgpu_err("fail to populate guest ggtt entry\n");
			/* the guest driver may read/write the entry while a
			 * partial update is in flight; if p2m fails here,
			 * point the shadow entry at a scratch page instead
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		} else
			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
	} else {
		ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
		ggtt_invalidate_pte(vgpu, &m);
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		ops->clear_present(&m);
	}

out:
	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_invalidate(gvt->dev_priv);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}

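/*
 * Worked example of a split PTE update, assuming 8-byte entries: the
 * guest first writes the low dword (off = 8 * i, bytes = 4) and then
 * the high dword (off = 8 * i + 4, bytes = 4).  Each write lands here
 * separately; after the first one the merged e.val64 may hold a
 * half-formed gfn, which fails the intel_gvt_hypervisor_is_valid_gfn()
 * check above and parks the shadow entry on the scratch page until the
 * second write completes the PTE.
 */
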
/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}

static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by fully filling the scratch pt with entries
	 * which point to the next level scratch pt or scratch page. The
	 * scratch_pt[type] indicates the scratch pt/scratch page used by
	 * the 'type' pt.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
	 * GTT_TYPE_PPGTT_PDE_PT level pt; that means this scratch_pt itself
	 * is of type GTT_TYPE_PPGTT_PTE_PT, and is fully filled with the
	 * scratch page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set to the same as i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}

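/*
 * The resulting per-vGPU scratch hierarchy (sketch):
 *
 *   scratch_pt[PML4_PT] -> scratch_pt[PDP_PT]
 *     -> scratch_pt[PDE_PT] -> scratch_pt[PTE_PT] -> scratch page
 *
 * Every entry of a level-N scratch table points at the level N-1
 * scratch table, so a torn-down guest entry of any level can be
 * redirected to a same-shaped scratch structure.
 */
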
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("why do we still have spt not freed?\n");
		ppgtt_free_all_spt(vgpu);
	}
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

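/*
 * A minimal sketch of how the pool is consumed (the real attach path
 * lives elsewhere in this file; the pattern below is assumed): an oos
 * page is claimed from the free list and parked on the use list while
 * it shadows a guest page table.
 *
 *	oos_page = container_of(gtt->oos_page_free_list_head.next,
 *				struct intel_vgpu_oos_page, list);
 *	list_move_tail(&oos_page->list, &gtt->oos_page_use_list_head);
 */
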
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from the mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}

/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * pointer to mm object on success, ERR_PTR() if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}

/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and destroy it.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}

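/*
 * Typical pairing from a guest PPGTT notification path (sketch; in the
 * real caller the pdps array would be read out of the guest's shared
 * PVINFO page):
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 *				     pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	... guest uses the PPGTT ...
 *	intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 */
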
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
		|| IS_KABYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
	} else {
		return -ENODEV;
	}

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			list_del_init(&mm->ppgtt_mm.lru_list);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}

/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(dev_priv);
}

/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}