/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
38 #include "i915_pvinfo.h"
41 #include "gt/intel_gt_regs.h"
42 #include <linux/vmalloc.h>
#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif
50 static bool enable_out_of_sync = false;
51 static int preallocated_oos_pages = 8192;
/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
57 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
60 return vgpu_gmadr_is_valid(vgpu, addr);
62 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
63 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
65 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
66 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
69 gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
74 /* translate a guest gmadr to host gmadr */
75 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
77 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
79 if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
80 "invalid guest gmadr %llx\n", g_addr))
83 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
84 *h_addr = vgpu_aperture_gmadr_base(vgpu)
85 + (g_addr - vgpu_aperture_offset(vgpu));
87 *h_addr = vgpu_hidden_gmadr_base(vgpu)
88 + (g_addr - vgpu_hidden_offset(vgpu));
92 /* translate a host gmadr to guest gmadr */
93 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
95 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
97 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
98 "invalid host gmadr %llx\n", h_addr))
101 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
102 *g_addr = vgpu_aperture_gmadr_base(vgpu)
103 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
105 *g_addr = vgpu_hidden_gmadr_base(vgpu)
106 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
110 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
111 unsigned long *h_index)
116 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
121 *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
125 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
126 unsigned long *g_index)
131 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
136 *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
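/*
 * Illustrative sketch (not part of the driver), assuming
 * I915_GTT_PAGE_SHIFT == 12: a GGTT index is simply the gmadr divided by
 * the 4K GTT page size, so for a guest address inside the vGPU aperture,
 *
 *   g_addr  = 0x00403000
 *   g_index = g_addr >> I915_GTT_PAGE_SHIFT = 0x403
 *   h_addr  = vgpu_aperture_gmadr_base(vgpu) + (g_addr - vgpu_aperture_offset(vgpu))
 *   h_index = h_addr >> I915_GTT_PAGE_SHIFT
 *
 * which is exactly what intel_gvt_ggtt_index_g2h() does by delegating to
 * intel_gvt_ggtt_gmadr_g2h().
 */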
140 #define gtt_type_is_entry(type) \
141 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
142 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
143 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
145 #define gtt_type_is_pt(type) \
146 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
148 #define gtt_type_is_pte_pt(type) \
149 (type == GTT_TYPE_PPGTT_PTE_PT)
151 #define gtt_type_is_root_pointer(type) \
152 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
154 #define gtt_init_entry(e, t, p, v) do { \
157 memcpy(&(e)->val64, &v, sizeof(v)); \
/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when its PSE bit is set
 *
 * If the given type doesn't carry that kind of information -- for example,
 * asking for the PSE type of an L4 root entry (an L4 root entry has no PSE
 * bit), or for the next-level page table type of a PTE page table (a PTE
 * page table has no next level) -- GTT_TYPE_INVALID is returned. This is
 * useful when traversing a page table graph.
 */
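/*
 * Illustrative sketch (not part of the driver): a few lookups this table
 * answers, matching the entries defined below and the helpers
 * get_next_pt_type()/get_entry_type()/get_pse_type().
 *
 *   get_next_pt_type(GTT_TYPE_PPGTT_PML4_PT)   -> GTT_TYPE_PPGTT_PDP_PT
 *   get_next_pt_type(GTT_TYPE_PPGTT_PDE_PT)    -> GTT_TYPE_PPGTT_PTE_PT
 *   get_entry_type(GTT_TYPE_PPGTT_PDP_PT)      -> GTT_TYPE_PPGTT_PDP_ENTRY
 *   get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY)     -> GTT_TYPE_PPGTT_PTE_2M_ENTRY
 *   get_pse_type(GTT_TYPE_PPGTT_ROOT_L4_ENTRY) -> GTT_TYPE_INVALID
 */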
176 struct gtt_type_table_entry {
183 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
185 .entry_type = e_type, \
186 .pt_type = cpt_type, \
187 .next_pt_type = npt_type, \
188 .pse_entry_type = pse_type, \
191 static const struct gtt_type_table_entry gtt_type_table[] = {
192 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
193 GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
195 GTT_TYPE_PPGTT_PML4_PT,
197 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
198 GTT_TYPE_PPGTT_PML4_ENTRY,
199 GTT_TYPE_PPGTT_PML4_PT,
200 GTT_TYPE_PPGTT_PDP_PT,
202 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
203 GTT_TYPE_PPGTT_PML4_ENTRY,
204 GTT_TYPE_PPGTT_PML4_PT,
205 GTT_TYPE_PPGTT_PDP_PT,
207 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
208 GTT_TYPE_PPGTT_PDP_ENTRY,
209 GTT_TYPE_PPGTT_PDP_PT,
210 GTT_TYPE_PPGTT_PDE_PT,
211 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
212 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
213 GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
215 GTT_TYPE_PPGTT_PDE_PT,
216 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
217 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
218 GTT_TYPE_PPGTT_PDP_ENTRY,
219 GTT_TYPE_PPGTT_PDP_PT,
220 GTT_TYPE_PPGTT_PDE_PT,
221 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
222 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
223 GTT_TYPE_PPGTT_PDE_ENTRY,
224 GTT_TYPE_PPGTT_PDE_PT,
225 GTT_TYPE_PPGTT_PTE_PT,
226 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
227 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
228 GTT_TYPE_PPGTT_PDE_ENTRY,
229 GTT_TYPE_PPGTT_PDE_PT,
230 GTT_TYPE_PPGTT_PTE_PT,
231 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
232 /* We take IPS bit as 'PSE' for PTE level. */
233 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
234 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
235 GTT_TYPE_PPGTT_PTE_PT,
237 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
238 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
239 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
240 GTT_TYPE_PPGTT_PTE_PT,
242 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
243 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
244 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
245 GTT_TYPE_PPGTT_PTE_PT,
247 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
248 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
249 GTT_TYPE_PPGTT_PDE_ENTRY,
250 GTT_TYPE_PPGTT_PDE_PT,
252 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
253 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
254 GTT_TYPE_PPGTT_PDP_ENTRY,
255 GTT_TYPE_PPGTT_PDP_PT,
257 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
258 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
265 static inline int get_next_pt_type(int type)
267 return gtt_type_table[type].next_pt_type;
270 static inline int get_entry_type(int type)
272 return gtt_type_table[type].entry_type;
275 static inline int get_pse_type(int type)
277 return gtt_type_table[type].pse_entry_type;
280 static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
282 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
287 static void ggtt_invalidate(struct intel_gt *gt)
289 mmio_hw_access_pre(gt);
290 intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
291 mmio_hw_access_post(gt);
294 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
296 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
301 static inline int gtt_get_entry64(void *pt,
302 struct intel_gvt_gtt_entry *e,
303 unsigned long index, bool hypervisor_access, unsigned long gpa,
304 struct intel_vgpu *vgpu)
306 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
309 if (WARN_ON(info->gtt_entry_size != 8))
312 if (hypervisor_access) {
313 ret = intel_gvt_read_gpa(vgpu, gpa +
314 (index << info->gtt_entry_size_shift),
319 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
321 e->val64 = *((u64 *)pt + index);
326 static inline int gtt_set_entry64(void *pt,
327 struct intel_gvt_gtt_entry *e,
328 unsigned long index, bool hypervisor_access, unsigned long gpa,
329 struct intel_vgpu *vgpu)
331 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
334 if (WARN_ON(info->gtt_entry_size != 8))
337 if (hypervisor_access) {
338 ret = intel_gvt_write_gpa(vgpu, gpa +
339 (index << info->gtt_entry_size_shift),
344 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
346 *((u64 *)pt + index) = e->val64;
353 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
354 #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
355 #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
356 #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
358 #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
361 #define GTT_64K_PTE_STRIDE 16
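/*
 * Illustrative sketch (not part of the driver; GTT_HAW assumed to be 46):
 * for a 2M entry whose address field is 0x45600000 (2M aligned),
 *
 *   pfn = (val64 & ADDR_2M_MASK) >> PAGE_SHIFT = 0x45600000 >> 12 = 0x45600
 *
 * i.e. the pfn is always expressed in 4K units regardless of the entry
 * size; only the mask that strips the low address/flag bits differs.
 */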
363 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
367 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
368 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
369 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
370 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
371 else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
372 pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
374 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
378 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
380 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
381 e->val64 &= ~ADDR_1G_MASK;
382 pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
383 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
384 e->val64 &= ~ADDR_2M_MASK;
385 pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
386 } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
387 e->val64 &= ~ADDR_64K_MASK;
388 pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
390 e->val64 &= ~ADDR_4K_MASK;
391 pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
394 e->val64 |= (pfn << PAGE_SHIFT);
397 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
399 return !!(e->val64 & _PAGE_PSE);
402 static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
404 if (gen8_gtt_test_pse(e)) {
406 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
407 e->val64 &= ~_PAGE_PSE;
408 e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
410 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
411 e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
412 e->val64 &= ~_PAGE_PSE;
420 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
422 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
425 return !!(e->val64 & GEN8_PDE_IPS_64K);
428 static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
430 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
433 e->val64 &= ~GEN8_PDE_IPS_64K;
436 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
/*
 * i915 writes PDP root pointer registers without the present bit set,
 * and that still works, so root pointer entries need to be treated
 * specially.
 */
443 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
444 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
445 return (e->val64 != 0);
447 return (e->val64 & GEN8_PAGE_PRESENT);
450 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
452 e->val64 &= ~GEN8_PAGE_PRESENT;
455 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
457 e->val64 |= GEN8_PAGE_PRESENT;
460 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
462 return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
465 static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
467 e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
470 static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
472 e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
/*
 * Per-platform GMA routines.
 */
478 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
480 unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
482 trace_gma_index(__func__, gma, x);
486 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
487 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
489 unsigned long x = (exp); \
490 trace_gma_index(__func__, gma, x); \
494 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
495 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
496 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
497 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
498 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
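/*
 * Illustrative sketch (not part of the driver): how a gen8 PPGTT GMA is
 * decomposed by the helpers above. For gma = 0x8040201000 (bits 39, 30,
 * 21 and 12 set):
 *
 *   gen8_gma_to_pml4_index(gma)   = (gma >> 39) & 0x1ff = 1
 *   gen8_gma_to_l4_pdp_index(gma) = (gma >> 30) & 0x1ff = 1
 *   gen8_gma_to_pde_index(gma)    = (gma >> 21) & 0x1ff = 1
 *   gen8_gma_to_pte_index(gma)    = (gma >> 12) & 0x1ff = 1
 *
 * intel_vgpu_gma_to_gpa() walks the shadow page table with exactly these
 * indexes, one level at a time.
 */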
500 static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
501 .get_entry = gtt_get_entry64,
502 .set_entry = gtt_set_entry64,
503 .clear_present = gtt_entry_clear_present,
504 .set_present = gtt_entry_set_present,
505 .test_present = gen8_gtt_test_present,
506 .test_pse = gen8_gtt_test_pse,
507 .clear_pse = gen8_gtt_clear_pse,
508 .clear_ips = gen8_gtt_clear_ips,
509 .test_ips = gen8_gtt_test_ips,
510 .clear_64k_splited = gen8_gtt_clear_64k_splited,
511 .set_64k_splited = gen8_gtt_set_64k_splited,
512 .test_64k_splited = gen8_gtt_test_64k_splited,
513 .get_pfn = gen8_gtt_get_pfn,
514 .set_pfn = gen8_gtt_set_pfn,
517 static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
518 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
519 .gma_to_pte_index = gen8_gma_to_pte_index,
520 .gma_to_pde_index = gen8_gma_to_pde_index,
521 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
522 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
523 .gma_to_pml4_index = gen8_gma_to_pml4_index,
/* Update the entry type according to the PSE and IPS bits. */
527 static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
528 struct intel_gvt_gtt_entry *entry, bool ips)
530 switch (entry->type) {
531 case GTT_TYPE_PPGTT_PDE_ENTRY:
532 case GTT_TYPE_PPGTT_PDP_ENTRY:
533 if (pte_ops->test_pse(entry))
534 entry->type = get_pse_type(entry->type);
536 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
538 entry->type = get_pse_type(entry->type);
541 GEM_BUG_ON(!gtt_type_is_entry(entry->type));
544 GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
550 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
551 struct intel_gvt_gtt_entry *entry, unsigned long index,
554 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
556 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
558 entry->type = mm->ppgtt_mm.root_entry_type;
559 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
560 mm->ppgtt_mm.shadow_pdps,
561 entry, index, false, 0, mm->vgpu);
562 update_entry_type_for_real(pte_ops, entry, false);
565 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
566 struct intel_gvt_gtt_entry *entry, unsigned long index)
568 _ppgtt_get_root_entry(mm, entry, index, true);
571 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
572 struct intel_gvt_gtt_entry *entry, unsigned long index)
574 _ppgtt_get_root_entry(mm, entry, index, false);
577 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
578 struct intel_gvt_gtt_entry *entry, unsigned long index,
581 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
583 pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
584 mm->ppgtt_mm.shadow_pdps,
585 entry, index, false, 0, mm->vgpu);
588 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
589 struct intel_gvt_gtt_entry *entry, unsigned long index)
591 _ppgtt_set_root_entry(mm, entry, index, false);
594 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
595 struct intel_gvt_gtt_entry *entry, unsigned long index)
597 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
599 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
601 entry->type = GTT_TYPE_GGTT_PTE;
602 pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
606 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
607 struct intel_gvt_gtt_entry *entry, unsigned long index)
609 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
611 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
613 pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
617 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
618 struct intel_gvt_gtt_entry *entry, unsigned long index)
620 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
622 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
624 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
627 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
628 struct intel_gvt_gtt_entry *entry, unsigned long index)
630 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
631 unsigned long offset = index;
633 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
635 if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
636 offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
637 mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
638 } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
639 offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
640 mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
643 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
/*
 * PPGTT shadow page table helpers.
 */
649 static inline int ppgtt_spt_get_entry(
650 struct intel_vgpu_ppgtt_spt *spt,
651 void *page_table, int type,
652 struct intel_gvt_gtt_entry *e, unsigned long index,
655 struct intel_gvt *gvt = spt->vgpu->gvt;
656 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
659 e->type = get_entry_type(type);
661 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
664 ret = ops->get_entry(page_table, e, index, guest,
665 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
670 update_entry_type_for_real(ops, e, guest ?
671 spt->guest_page.pde_ips : false);
673 gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
674 type, e->type, index, e->val64);
678 static inline int ppgtt_spt_set_entry(
679 struct intel_vgpu_ppgtt_spt *spt,
680 void *page_table, int type,
681 struct intel_gvt_gtt_entry *e, unsigned long index,
684 struct intel_gvt *gvt = spt->vgpu->gvt;
685 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
687 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
690 gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
691 type, e->type, index, e->val64);
693 return ops->set_entry(page_table, e, index, guest,
694 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
698 #define ppgtt_get_guest_entry(spt, e, index) \
699 ppgtt_spt_get_entry(spt, NULL, \
700 spt->guest_page.type, e, index, true)
702 #define ppgtt_set_guest_entry(spt, e, index) \
703 ppgtt_spt_set_entry(spt, NULL, \
704 spt->guest_page.type, e, index, true)
706 #define ppgtt_get_shadow_entry(spt, e, index) \
707 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
708 spt->shadow_page.type, e, index, false)
710 #define ppgtt_set_shadow_entry(spt, e, index) \
711 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
712 spt->shadow_page.type, e, index, false)
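/*
 * Illustrative sketch (not part of the driver): the guest variants above
 * read/write the entry through intel_gvt_read_gpa()/intel_gvt_write_gpa()
 * at the write-protected guest page (gfn), while the shadow variants
 * operate directly on the host-allocated shadow page, e.g.:
 *
 *   struct intel_gvt_gtt_entry ge, se;
 *
 *   ppgtt_get_guest_entry(spt, &ge, index);   // guest view (GPA access)
 *   ppgtt_get_shadow_entry(spt, &se, index);  // shadow view (host vaddr)
 */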
714 static void *alloc_spt(gfp_t gfp_mask)
716 struct intel_vgpu_ppgtt_spt *spt;
718 spt = kzalloc(sizeof(*spt), gfp_mask);
722 spt->shadow_page.page = alloc_page(gfp_mask);
723 if (!spt->shadow_page.page) {
730 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
732 __free_page(spt->shadow_page.page);
736 static int detach_oos_page(struct intel_vgpu *vgpu,
737 struct intel_vgpu_oos_page *oos_page);
739 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
741 struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
743 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
745 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
748 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
750 if (spt->guest_page.gfn) {
751 if (spt->guest_page.oos_page)
752 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
754 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
757 list_del_init(&spt->post_shadow_list);
761 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
763 struct intel_vgpu_ppgtt_spt *spt, *spn;
764 struct radix_tree_iter iter;
769 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
770 spt = radix_tree_deref_slot(slot);
771 list_move(&spt->post_shadow_list, &all_spt);
775 list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
779 static int ppgtt_handle_guest_write_page_table_bytes(
780 struct intel_vgpu_ppgtt_spt *spt,
781 u64 pa, void *p_data, int bytes);
783 static int ppgtt_write_protection_handler(
784 struct intel_vgpu_page_track *page_track,
785 u64 gpa, void *data, int bytes)
787 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
791 if (bytes != 4 && bytes != 8)
794 ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
800 /* Find a spt by guest gfn. */
801 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
802 struct intel_vgpu *vgpu, unsigned long gfn)
804 struct intel_vgpu_page_track *track;
806 track = intel_vgpu_find_page_track(vgpu, gfn);
807 if (track && track->handler == ppgtt_write_protection_handler)
808 return track->priv_data;
813 /* Find the spt by shadow page mfn. */
814 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
815 struct intel_vgpu *vgpu, unsigned long mfn)
817 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
820 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
822 /* Allocate shadow page table without guest page. */
823 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
824 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
826 struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
827 struct intel_vgpu_ppgtt_spt *spt = NULL;
832 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
834 if (reclaim_one_ppgtt_mm(vgpu->gvt))
837 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
838 return ERR_PTR(-ENOMEM);
842 atomic_set(&spt->refcount, 1);
843 INIT_LIST_HEAD(&spt->post_shadow_list);
848 spt->shadow_page.type = type;
849 daddr = dma_map_page(kdev, spt->shadow_page.page,
850 0, 4096, DMA_BIDIRECTIONAL);
851 if (dma_mapping_error(kdev, daddr)) {
852 gvt_vgpu_err("fail to map dma addr\n");
856 spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
857 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
859 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
866 dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
872 /* Allocate shadow page table associated with specific gfn. */
873 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
874 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
875 unsigned long gfn, bool guest_pde_ips)
877 struct intel_vgpu_ppgtt_spt *spt;
880 spt = ppgtt_alloc_spt(vgpu, type);
887 ret = intel_vgpu_register_page_track(vgpu, gfn,
888 ppgtt_write_protection_handler, spt);
894 spt->guest_page.type = type;
895 spt->guest_page.gfn = gfn;
896 spt->guest_page.pde_ips = guest_pde_ips;
898 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
903 #define pt_entry_size_shift(spt) \
904 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
906 #define pt_entries(spt) \
907 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
909 #define for_each_present_guest_entry(spt, e, i) \
910 for (i = 0; i < pt_entries(spt); \
911 i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
912 if (!ppgtt_get_guest_entry(spt, e, i) && \
913 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
915 #define for_each_present_shadow_entry(spt, e, i) \
916 for (i = 0; i < pt_entries(spt); \
917 i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
918 if (!ppgtt_get_shadow_entry(spt, e, i) && \
919 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
921 #define for_each_shadow_entry(spt, e, i) \
922 for (i = 0; i < pt_entries(spt); \
923 i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
924 if (!ppgtt_get_shadow_entry(spt, e, i))
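/*
 * Illustrative sketch (not part of the driver): these iterators expand to
 * a for loop with a trailing if filter, so they take a body directly,
 * e.g.:
 *
 *   struct intel_gvt_gtt_entry e;
 *   unsigned long i;
 *
 *   for_each_present_guest_entry(spt, &e, i)
 *           handle_entry(&e, i);  // hypothetical callback; runs only for
 *                                 // entries whose present bit is set
 *
 * When the guest PDE has IPS (64K pages) enabled, the stride is
 * GTT_64K_PTE_STRIDE, so only PTE#0, #16, #32, ... are visited.
 */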
926 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
928 int v = atomic_read(&spt->refcount);
930 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
931 atomic_inc(&spt->refcount);
934 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
936 int v = atomic_read(&spt->refcount);
938 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
939 return atomic_dec_return(&spt->refcount);
942 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
944 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
945 struct intel_gvt_gtt_entry *e)
947 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
948 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
949 struct intel_vgpu_ppgtt_spt *s;
950 enum intel_gvt_gtt_type cur_pt_type;
952 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
954 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
955 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
956 cur_pt_type = get_next_pt_type(e->type);
958 if (!gtt_type_is_pt(cur_pt_type) ||
959 !gtt_type_is_pt(cur_pt_type + 1)) {
960 drm_WARN(&i915->drm, 1,
961 "Invalid page table type, cur_pt_type is: %d\n",
968 if (ops->get_pfn(e) ==
969 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
972 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
974 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
978 return ppgtt_invalidate_spt(s);
981 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
982 struct intel_gvt_gtt_entry *entry)
984 struct intel_vgpu *vgpu = spt->vgpu;
985 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
989 pfn = ops->get_pfn(entry);
990 type = spt->shadow_page.type;
992 /* Uninitialized spte or unshadowed spte. */
993 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
996 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
999 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
1001 struct intel_vgpu *vgpu = spt->vgpu;
1002 struct intel_gvt_gtt_entry e;
1003 unsigned long index;
1006 trace_spt_change(spt->vgpu->id, "die", spt,
1007 spt->guest_page.gfn, spt->shadow_page.type);
1009 if (ppgtt_put_spt(spt) > 0)
1012 for_each_present_shadow_entry(spt, &e, index) {
1014 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1015 gvt_vdbg_mm("invalidate 4K entry\n");
1016 ppgtt_invalidate_pte(spt, &e);
1018 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
/* We don't set up 64K shadow entries so far. */
1020 WARN(1, "suspicious 64K gtt entry\n");
1022 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1023 gvt_vdbg_mm("invalidate 2M entry\n");
1025 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1026 WARN(1, "GVT doesn't support 1GB page\n");
1028 case GTT_TYPE_PPGTT_PML4_ENTRY:
1029 case GTT_TYPE_PPGTT_PDP_ENTRY:
1030 case GTT_TYPE_PPGTT_PDE_ENTRY:
gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
1032 ret = ppgtt_invalidate_spt_by_shadow_entry(
1042 trace_spt_change(spt->vgpu->id, "release", spt,
1043 spt->guest_page.gfn, spt->shadow_page.type);
1044 ppgtt_free_spt(spt);
1047 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
1048 spt, e.val64, e.type);
1052 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1054 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1056 if (GRAPHICS_VER(dev_priv) == 9) {
1057 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1058 GAMW_ECO_ENABLE_64K_IPS_FIELD;
1060 return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
1061 } else if (GRAPHICS_VER(dev_priv) >= 11) {
/* 64K paging is only controlled by the IPS bit in the PTE now. */
1068 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1070 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1071 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1073 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1074 struct intel_vgpu_ppgtt_spt *spt = NULL;
1078 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
1080 if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1081 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1083 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1087 if (ips != spt->guest_page.pde_ips) {
1088 spt->guest_page.pde_ips = ips;
1090 gvt_dbg_mm("reshadow PDE since ips changed\n");
1091 clear_page(spt->shadow_page.vaddr);
1092 ret = ppgtt_populate_spt(spt);
1099 int type = get_next_pt_type(we->type);
1101 if (!gtt_type_is_pt(type)) {
1106 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1112 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1116 ret = ppgtt_populate_spt(spt);
1120 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1121 spt->shadow_page.type);
1126 ppgtt_free_spt(spt);
1129 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1130 spt, we->val64, we->type);
1131 return ERR_PTR(ret);
1134 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
1135 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
1137 const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1139 se->type = ge->type;
1140 se->val64 = ge->val64;
/* Because we always split 64K pages, clear the IPS bit in the shadow PDE. */
1143 if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1146 ops->set_pfn(se, s->shadow_page.mfn);
1149 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1150 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1151 struct intel_gvt_gtt_entry *se)
1153 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1154 struct intel_vgpu_ppgtt_spt *sub_spt;
1155 struct intel_gvt_gtt_entry sub_se;
1156 unsigned long start_gfn;
1157 dma_addr_t dma_addr;
1158 unsigned long sub_index;
1161 gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1163 start_gfn = ops->get_pfn(se);
1165 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1166 if (IS_ERR(sub_spt))
1167 return PTR_ERR(sub_spt);
1169 for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
1170 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
1171 PAGE_SIZE, &dma_addr);
1174 sub_se.val64 = se->val64;
1176 /* Copy the PAT field from PDE. */
1177 sub_se.val64 &= ~_PAGE_PAT;
1178 sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
1180 ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
1181 ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
1184 /* Clear dirty field. */
1185 se->val64 &= ~_PAGE_DIRTY;
1189 ops->set_pfn(se, sub_spt->shadow_page.mfn);
1190 ppgtt_set_shadow_entry(spt, se, index);
/* Undo the DMA mappings that were already set up for this split entry. */
1194 for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
1195 gvt_vdbg_mm("invalidate 4K entry\n");
1196 ppgtt_invalidate_pte(sub_spt, &sub_se);
/* Release the newly allocated spt. */
1199 trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1200 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
1201 ppgtt_free_spt(sub_spt);
1205 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1206 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1207 struct intel_gvt_gtt_entry *se)
1209 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1210 struct intel_gvt_gtt_entry entry = *se;
1211 unsigned long start_gfn;
1212 dma_addr_t dma_addr;
1215 gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1217 GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
1219 start_gfn = ops->get_pfn(se);
1221 entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
1222 ops->set_64k_splited(&entry);
1224 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1225 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
1226 PAGE_SIZE, &dma_addr);
1230 ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
1231 ppgtt_set_shadow_entry(spt, &entry, index + i);
1236 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1237 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1238 struct intel_gvt_gtt_entry *ge)
1240 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1241 struct intel_gvt_gtt_entry se = *ge;
1243 dma_addr_t dma_addr;
1246 if (!pte_ops->test_present(ge))
1249 gfn = pte_ops->get_pfn(ge);
1252 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1253 gvt_vdbg_mm("shadow 4K gtt entry\n");
1254 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
1258 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1259 gvt_vdbg_mm("shadow 64K gtt entry\n");
/*
 * The layout of a 64K page is special: the page size is controlled
 * by the upper PDE. To keep things simple, we always split a 64K
 * page into smaller 4K pages in the shadow PT.
 */
1265 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1266 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1267 gvt_vdbg_mm("shadow 2M gtt entry\n");
1268 if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
1269 intel_gvt_dma_map_guest_page(vgpu, gfn,
1270 I915_GTT_PAGE_SIZE_2M, &dma_addr))
1271 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1273 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1274 gvt_vgpu_err("GVT doesn't support 1GB entry\n");
1281 /* Successfully shadowed a 4K or 2M page (without splitting). */
1282 pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
1283 ppgtt_set_shadow_entry(spt, &se, index);
1287 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1289 struct intel_vgpu *vgpu = spt->vgpu;
1290 struct intel_vgpu_ppgtt_spt *s;
1291 struct intel_gvt_gtt_entry se, ge;
1295 trace_spt_change(spt->vgpu->id, "born", spt,
1296 spt->guest_page.gfn, spt->shadow_page.type);
1298 for_each_present_guest_entry(spt, &ge, i) {
1299 if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
1300 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1305 ppgtt_get_shadow_entry(spt, &se, i);
1306 ppgtt_generate_shadow_entry(&se, s, &ge);
1307 ppgtt_set_shadow_entry(spt, &se, i);
1309 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1316 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1317 spt, ge.val64, ge.type);
1321 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1322 struct intel_gvt_gtt_entry *se, unsigned long index)
1324 struct intel_vgpu *vgpu = spt->vgpu;
1325 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1328 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1329 spt->shadow_page.type, se->val64, index);
1331 gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
1332 se->type, index, se->val64);
1334 if (!ops->test_present(se))
1337 if (ops->get_pfn(se) ==
1338 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1341 if (gtt_type_is_pt(get_next_pt_type(se->type))) {
1342 struct intel_vgpu_ppgtt_spt *s =
1343 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1345 gvt_vgpu_err("fail to find guest page\n");
1349 ret = ppgtt_invalidate_spt(s);
/* We don't set up 64K shadow entries so far. */
1354 WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
1355 "suspicious 64K entry\n");
1356 ppgtt_invalidate_pte(spt, se);
1361 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1362 spt, se->val64, se->type);
1366 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1367 struct intel_gvt_gtt_entry *we, unsigned long index)
1369 struct intel_vgpu *vgpu = spt->vgpu;
1370 struct intel_gvt_gtt_entry m;
1371 struct intel_vgpu_ppgtt_spt *s;
1374 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1377 gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
1378 we->type, index, we->val64);
1380 if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1381 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1386 ppgtt_get_shadow_entry(spt, &m, index);
1387 ppgtt_generate_shadow_entry(&m, s, we);
1388 ppgtt_set_shadow_entry(spt, &m, index);
1390 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1396 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1397 spt, we->val64, we->type);
1401 static int sync_oos_page(struct intel_vgpu *vgpu,
1402 struct intel_vgpu_oos_page *oos_page)
1404 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1405 struct intel_gvt *gvt = vgpu->gvt;
1406 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1407 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1408 struct intel_gvt_gtt_entry old, new;
1412 trace_oos_change(vgpu->id, "sync", oos_page->id,
1413 spt, spt->guest_page.type);
1415 old.type = new.type = get_entry_type(spt->guest_page.type);
1416 old.val64 = new.val64 = 0;
1418 for (index = 0; index < (I915_GTT_PAGE_SIZE >>
1419 info->gtt_entry_size_shift); index++) {
1420 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1421 ops->get_entry(NULL, &new, index, true,
1422 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1424 if (old.val64 == new.val64
1425 && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1428 trace_oos_sync(vgpu->id, oos_page->id,
1429 spt, spt->guest_page.type,
1432 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1436 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1439 spt->guest_page.write_cnt = 0;
1440 list_del_init(&spt->post_shadow_list);
1444 static int detach_oos_page(struct intel_vgpu *vgpu,
1445 struct intel_vgpu_oos_page *oos_page)
1447 struct intel_gvt *gvt = vgpu->gvt;
1448 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1450 trace_oos_change(vgpu->id, "detach", oos_page->id,
1451 spt, spt->guest_page.type);
1453 spt->guest_page.write_cnt = 0;
1454 spt->guest_page.oos_page = NULL;
1455 oos_page->spt = NULL;
1457 list_del_init(&oos_page->vm_list);
1458 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1463 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
1464 struct intel_vgpu_ppgtt_spt *spt)
1466 struct intel_gvt *gvt = spt->vgpu->gvt;
1469 ret = intel_gvt_read_gpa(spt->vgpu,
1470 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1471 oos_page->mem, I915_GTT_PAGE_SIZE);
1475 oos_page->spt = spt;
1476 spt->guest_page.oos_page = oos_page;
1478 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1480 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1481 spt, spt->guest_page.type);
1485 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1487 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1490 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1494 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1495 spt, spt->guest_page.type);
1497 list_del_init(&oos_page->vm_list);
1498 return sync_oos_page(spt->vgpu, oos_page);
1501 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1503 struct intel_gvt *gvt = spt->vgpu->gvt;
1504 struct intel_gvt_gtt *gtt = &gvt->gtt;
1505 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1510 if (list_empty(>t->oos_page_free_list_head)) {
1511 oos_page = container_of(gtt->oos_page_use_list_head.next,
1512 struct intel_vgpu_oos_page, list);
1513 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1516 ret = detach_oos_page(spt->vgpu, oos_page);
1520 oos_page = container_of(gtt->oos_page_free_list_head.next,
1521 struct intel_vgpu_oos_page, list);
1522 return attach_oos_page(oos_page, spt);
1525 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1527 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1532 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1533 spt, spt->guest_page.type);
1535 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1536 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
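/*
 * Illustrative sketch (not part of the driver): lifecycle of an out-of-sync
 * (oos) guest page table page, assuming enable_out_of_sync is set.
 *
 *   1. ppgtt_handle_guest_write_page_table_bytes() sees repeated writes
 *      (write_cnt >= 2) to a PTE page -> ppgtt_allocate_oos_page() +
 *      ppgtt_set_guest_page_oos(): write protection is dropped and a copy
 *      of the guest page is kept in oos_page->mem.
 *   2. The guest keeps writing the page without trapping into GVT.
 *   3. Before a workload is submitted, intel_vgpu_sync_oos_pages() calls
 *      ppgtt_set_guest_page_sync(): write protection is re-enabled and
 *      sync_oos_page() re-shadows only the entries that changed relative
 *      to the saved copy.
 */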
/**
 * intel_vgpu_sync_oos_pages - sync all out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
1549 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1551 struct list_head *pos, *n;
1552 struct intel_vgpu_oos_page *oos_page;
1555 if (!enable_out_of_sync)
1558 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1559 oos_page = container_of(pos,
1560 struct intel_vgpu_oos_page, vm_list);
1561 ret = ppgtt_set_guest_page_sync(oos_page->spt);
/*
 * The heart of the PPGTT shadow page table.
 */
1571 static int ppgtt_handle_guest_write_page_table(
1572 struct intel_vgpu_ppgtt_spt *spt,
1573 struct intel_gvt_gtt_entry *we, unsigned long index)
1575 struct intel_vgpu *vgpu = spt->vgpu;
1576 int type = spt->shadow_page.type;
1577 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1578 struct intel_gvt_gtt_entry old_se;
1582 new_present = ops->test_present(we);
/*
 * Add the new entry first and then remove the old one, which
 * guarantees the ppgtt table stays valid during the window between
 * addition and removal.
 */
1589 ppgtt_get_shadow_entry(spt, &old_se, index);
1592 ret = ppgtt_handle_guest_entry_add(spt, we, index);
1597 ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
/* For split 64K entries, we need to clear them all. */
1603 if (ops->test_64k_splited(&old_se) &&
1604 !(index % GTT_64K_PTE_STRIDE)) {
1605 gvt_vdbg_mm("remove splited 64K shadow entries\n");
1606 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1607 ops->clear_64k_splited(&old_se);
1608 ops->set_pfn(&old_se,
1609 vgpu->gtt.scratch_pt[type].page_mfn);
1610 ppgtt_set_shadow_entry(spt, &old_se, index + i);
1612 } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
1613 old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
1614 ops->clear_pse(&old_se);
1615 ops->set_pfn(&old_se,
1616 vgpu->gtt.scratch_pt[type].page_mfn);
1617 ppgtt_set_shadow_entry(spt, &old_se, index);
1619 ops->set_pfn(&old_se,
1620 vgpu->gtt.scratch_pt[type].page_mfn);
1621 ppgtt_set_shadow_entry(spt, &old_se, index);
1627 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1628 spt, we->val64, we->type);
1634 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1636 return enable_out_of_sync
1637 && gtt_type_is_pte_pt(spt->guest_page.type)
1638 && spt->guest_page.write_cnt >= 2;
1641 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1642 unsigned long index)
1644 set_bit(index, spt->post_shadow_bitmap);
1645 if (!list_empty(&spt->post_shadow_list))
1648 list_add_tail(&spt->post_shadow_list,
1649 &spt->vgpu->gtt.post_shadow_list_head);
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadow transactions for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
1662 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1664 struct list_head *pos, *n;
1665 struct intel_vgpu_ppgtt_spt *spt;
1666 struct intel_gvt_gtt_entry ge;
1667 unsigned long index;
1670 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1671 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1674 for_each_set_bit(index, spt->post_shadow_bitmap,
1675 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1676 ppgtt_get_guest_entry(spt, &ge, index);
1678 ret = ppgtt_handle_guest_write_page_table(spt,
1682 clear_bit(index, spt->post_shadow_bitmap);
1684 list_del_init(&spt->post_shadow_list);
1689 static int ppgtt_handle_guest_write_page_table_bytes(
1690 struct intel_vgpu_ppgtt_spt *spt,
1691 u64 pa, void *p_data, int bytes)
1693 struct intel_vgpu *vgpu = spt->vgpu;
1694 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1695 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1696 struct intel_gvt_gtt_entry we, se;
1697 unsigned long index;
1700 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1702 ppgtt_get_guest_entry(spt, &we, index);
/*
 * For a page table holding 64K gtt entries, only PTE#0, PTE#16,
 * PTE#32, ... PTE#496 are used. Updates to the unused PTEs should be
 * ignored.
 */
1709 if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
1710 (index % GTT_64K_PTE_STRIDE)) {
1711 gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
1716 if (bytes == info->gtt_entry_size) {
1717 ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1721 if (!test_bit(index, spt->post_shadow_bitmap)) {
1722 int type = spt->shadow_page.type;
1724 ppgtt_get_shadow_entry(spt, &se, index);
1725 ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1728 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1729 ppgtt_set_shadow_entry(spt, &se, index);
1731 ppgtt_set_post_shadow(spt, index);
1734 if (!enable_out_of_sync)
1737 spt->guest_page.write_cnt++;
1739 if (spt->guest_page.oos_page)
1740 ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1743 if (can_do_out_of_sync(spt)) {
1744 if (!spt->guest_page.oos_page)
1745 ppgtt_allocate_oos_page(spt);
1747 ret = ppgtt_set_guest_page_oos(spt);
1754 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1756 struct intel_vgpu *vgpu = mm->vgpu;
1757 struct intel_gvt *gvt = vgpu->gvt;
1758 struct intel_gvt_gtt *gtt = &gvt->gtt;
1759 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1760 struct intel_gvt_gtt_entry se;
1763 if (!mm->ppgtt_mm.shadowed)
1766 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1767 ppgtt_get_shadow_root_entry(mm, &se, index);
1769 if (!ops->test_present(&se))
1772 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1774 ppgtt_set_shadow_root_entry(mm, &se, index);
1776 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1777 NULL, se.type, se.val64, index);
1780 mm->ppgtt_mm.shadowed = false;
1784 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1786 struct intel_vgpu *vgpu = mm->vgpu;
1787 struct intel_gvt *gvt = vgpu->gvt;
1788 struct intel_gvt_gtt *gtt = &gvt->gtt;
1789 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1790 struct intel_vgpu_ppgtt_spt *spt;
1791 struct intel_gvt_gtt_entry ge, se;
1794 if (mm->ppgtt_mm.shadowed)
1797 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1800 mm->ppgtt_mm.shadowed = true;
1802 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1803 ppgtt_get_guest_root_entry(mm, &ge, index);
1805 if (!ops->test_present(&ge))
1808 trace_spt_guest_change(vgpu->id, __func__, NULL,
1809 ge.type, ge.val64, index);
1811 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1813 gvt_vgpu_err("fail to populate guest root pointer\n");
1817 ppgtt_generate_shadow_entry(&se, spt, &ge);
1818 ppgtt_set_shadow_root_entry(mm, &se, index);
1820 trace_spt_guest_change(vgpu->id, "populate root pointer",
1821 NULL, se.type, se.val64, index);
1826 invalidate_ppgtt_mm(mm);
1830 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1832 struct intel_vgpu_mm *mm;
1834 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1839 kref_init(&mm->ref);
1840 atomic_set(&mm->pincount, 0);
1845 static void vgpu_free_mm(struct intel_vgpu_mm *mm)
/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * The ppgtt mm object on success, ERR_PTR() encoded error code if failed.
 */
1861 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1862 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
1864 struct intel_gvt *gvt = vgpu->gvt;
1865 struct intel_vgpu_mm *mm;
1868 mm = vgpu_alloc_mm(vgpu);
1870 return ERR_PTR(-ENOMEM);
1872 mm->type = INTEL_GVT_MM_PPGTT;
1874 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1875 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1876 mm->ppgtt_mm.root_entry_type = root_entry_type;
1878 INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1879 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
1880 INIT_LIST_HEAD(&mm->ppgtt_mm.link);
1882 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1883 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1885 memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1886 sizeof(mm->ppgtt_mm.guest_pdps));
1888 ret = shadow_ppgtt_mm(mm);
1890 gvt_vgpu_err("failed to shadow ppgtt mm\n");
1892 return ERR_PTR(ret);
1895 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1897 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1898 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1899 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1904 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1906 struct intel_vgpu_mm *mm;
1907 unsigned long nr_entries;
1909 mm = vgpu_alloc_mm(vgpu);
1911 return ERR_PTR(-ENOMEM);
1913 mm->type = INTEL_GVT_MM_GGTT;
1915 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1916 mm->ggtt_mm.virtual_ggtt =
1917 vzalloc(array_size(nr_entries,
1918 vgpu->gvt->device_info.gtt_entry_size));
1919 if (!mm->ggtt_mm.virtual_ggtt) {
1921 return ERR_PTR(-ENOMEM);
1924 mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1925 if (!mm->ggtt_mm.host_ggtt_aperture) {
1926 vfree(mm->ggtt_mm.virtual_ggtt);
1928 return ERR_PTR(-ENOMEM);
1931 mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1932 if (!mm->ggtt_mm.host_ggtt_hidden) {
1933 vfree(mm->ggtt_mm.host_ggtt_aperture);
1934 vfree(mm->ggtt_mm.virtual_ggtt);
1936 return ERR_PTR(-ENOMEM);
/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for a vGPU.
 */
1949 void _intel_vgpu_mm_release(struct kref *mm_ref)
1951 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
1953 if (GEM_WARN_ON(atomic_read(&mm->pincount)))
1954 gvt_err("vgpu mm pin count bug detected\n");
1956 if (mm->type == INTEL_GVT_MM_PPGTT) {
1957 list_del(&mm->ppgtt_mm.list);
1959 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1960 list_del(&mm->ppgtt_mm.lru_list);
1961 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1963 invalidate_ppgtt_mm(mm);
1965 vfree(mm->ggtt_mm.virtual_ggtt);
1966 vfree(mm->ggtt_mm.host_ggtt_aperture);
1967 vfree(mm->ggtt_mm.host_ggtt_hidden);
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user no longer needs a vGPU mm object.
 */
1979 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1981 atomic_dec_if_positive(&mm->pincount);
/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: target vgpu mm
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
1995 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1999 atomic_inc(&mm->pincount);
2001 if (mm->type == INTEL_GVT_MM_PPGTT) {
2002 ret = shadow_ppgtt_mm(mm);
2006 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2007 list_move_tail(&mm->ppgtt_mm.lru_list,
2008 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2009 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2015 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
2017 struct intel_vgpu_mm *mm;
2018 struct list_head *pos, *n;
2020 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2022 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2023 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
2025 if (atomic_read(&mm->pincount))
2028 list_del_init(&mm->ppgtt_mm.lru_list);
2029 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2030 invalidate_ppgtt_mm(mm);
2033 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
/*
 * GMA translation APIs.
 */
2040 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2041 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2043 struct intel_vgpu *vgpu = mm->vgpu;
2044 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2045 struct intel_vgpu_ppgtt_spt *s;
2047 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2052 ppgtt_get_shadow_entry(s, e, index);
2054 ppgtt_get_guest_entry(s, e, index);
/**
 * intel_vgpu_gma_to_gpa - translate a gma to a GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space into a guest physical address.
 *
 * Returns:
 * The guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
2069 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
2071 struct intel_vgpu *vgpu = mm->vgpu;
2072 struct intel_gvt *gvt = vgpu->gvt;
2073 const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2074 const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2075 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
2076 unsigned long gma_index[4];
2077 struct intel_gvt_gtt_entry e;
2081 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
2082 mm->type != INTEL_GVT_MM_PPGTT);
2084 if (mm->type == INTEL_GVT_MM_GGTT) {
2085 if (!vgpu_gmadr_is_valid(vgpu, gma))
2088 ggtt_get_guest_entry(mm, &e,
2089 gma_ops->gma_to_ggtt_pte_index(gma));
2091 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
2092 + (gma & ~I915_GTT_PAGE_MASK);
2094 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2096 switch (mm->ppgtt_mm.root_entry_type) {
2097 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2098 ppgtt_get_shadow_root_entry(mm, &e, 0);
2100 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
2101 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
2102 gma_index[2] = gma_ops->gma_to_pde_index(gma);
2103 gma_index[3] = gma_ops->gma_to_pte_index(gma);
2106 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2107 ppgtt_get_shadow_root_entry(mm, &e,
2108 gma_ops->gma_to_l3_pdp_index(gma));
2110 gma_index[0] = gma_ops->gma_to_pde_index(gma);
2111 gma_index[1] = gma_ops->gma_to_pte_index(gma);
2118 /* walk the shadow page table and get gpa from guest entry */
2119 for (i = 0; i < levels; i++) {
2120 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
2125 if (!pte_ops->test_present(&e)) {
2126 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
2131 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
2132 (gma & ~I915_GTT_PAGE_MASK);
2133 trace_gma_translate(vgpu->id, "ppgtt", 0,
2134 mm->ppgtt_mm.root_entry_type, gma, gpa);
2139 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
2140 return INTEL_GVT_INVALID_ADDR;
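/*
 * Illustrative sketch (not part of the driver): a caller pins the mm so it
 * stays shadowed, translates a GMA, then unpins it.
 *
 *   unsigned long gpa;
 *
 *   if (intel_vgpu_pin_mm(mm))
 *           return -EFAULT;                    // hypothetical error path
 *   gpa = intel_vgpu_gma_to_gpa(mm, gma);
 *   if (gpa == INTEL_GVT_INVALID_ADDR)
 *           gvt_vgpu_err("invalid gma %lx\n", gma);
 *   intel_vgpu_unpin_mm(mm);
 */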
2143 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2144 unsigned int off, void *p_data, unsigned int bytes)
2146 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2147 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2148 unsigned long index = off >> info->gtt_entry_size_shift;
2150 struct intel_gvt_gtt_entry e;
2152 if (bytes != 4 && bytes != 8)
2155 gma = index << I915_GTT_PAGE_SHIFT;
2156 if (!intel_gvt_ggtt_validate_range(vgpu,
2157 gma, 1 << I915_GTT_PAGE_SHIFT)) {
2158 gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2159 memset(p_data, 0, bytes);
2163 ggtt_get_guest_entry(ggtt_mm, &e, index);
2164 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data to be returned to the guest
 * @bytes: data length
 *
 * This function is used to emulate a GTT MMIO register read.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
2181 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2182 void *p_data, unsigned int bytes)
2184 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2187 if (bytes != 4 && bytes != 8)
2190 off -= info->gtt_start_offset;
2191 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2195 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2196 struct intel_gvt_gtt_entry *entry)
2198 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2201 pfn = pte_ops->get_pfn(entry);
2202 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2203 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
2206 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2207 void *p_data, unsigned int bytes)
2209 struct intel_gvt *gvt = vgpu->gvt;
2210 const struct intel_gvt_device_info *info = &gvt->device_info;
2211 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2212 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2213 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2214 unsigned long gma, gfn;
2215 struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2216 struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2217 dma_addr_t dma_addr;
2219 struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2220 bool partial_update = false;
2222 if (bytes != 4 && bytes != 8)
2225 gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
2227 /* the VM may configure the whole GM space when ballooning is used */
2228 if (!vgpu_gmadr_is_valid(vgpu, gma))
2231 e.type = GTT_TYPE_GGTT_PTE;
2232 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
/* If the ggtt entry size is 8 bytes and the write is split into two
 * 4-byte writes, save the first 4 bytes in a list and update the virtual
 * PTE. Only update the shadow PTE when the second 4 bytes arrive.
 */
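/*
 * Illustrative sketch (not part of the driver): a guest writing the 8-byte
 * PTE at GGTT offset 0x1000 as two 4-byte MMIO writes.
 *
 *   write(off = 0x1000, bytes = 4, data = 0x45600003)
 *       -> no earlier partial part is pending: record {offset = 0x1000,
 *          data} in partial_pte_list and update only the virtual PTE.
 *   write(off = 0x1004, bytes = 4, data = 0x00000000)
 *       -> same g_gtt_index, different offset: merge the saved low dword
 *          with the new high dword, drop the list entry, then shadow the
 *          complete 64-bit entry as usual.
 */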
2239 if (bytes < info->gtt_entry_size) {
2242 list_for_each_entry_safe(pos, n,
2243 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
2244 if (g_gtt_index == pos->offset >>
2245 info->gtt_entry_size_shift) {
2246 if (off != pos->offset) {
2247 /* the second partial part */
2248 int last_off = pos->offset &
2249 (info->gtt_entry_size - 1);
2251 memcpy((void *)&e.val64 + last_off,
2252 (void *)&pos->data + last_off,
2255 list_del(&pos->list);
2261 /* update of the first partial part */
2262 pos->data = e.val64;
2263 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2269 /* the first partial part */
2270 partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2273 partial_pte->offset = off;
2274 partial_pte->data = e.val64;
2275 list_add_tail(&partial_pte->list,
2276 &ggtt_mm->ggtt_mm.partial_pte_list);
2277 partial_update = true;
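/* while only half of the 8-byte PTE has been written, do not touch
 * the shadow entry; wait for the second half to arrive
 */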
2281 if (!partial_update && (ops->test_present(&e))) {
2282 gfn = ops->get_pfn(&e);
2286 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
2289 gvt_vgpu_err("fail to populate guest ggtt entry\n");
2290 /* The guest driver may read/write the entry while a partial update
2291  * is pending; in that case the p2m mapping can fail, so set the
2292  * shadow entry to point to a scratch page instead.
2294 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2296 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2298 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2299 ops->clear_present(&m);
2302 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
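/* unmap whatever the old shadow entry pointed at before installing
 * the new mapping, then flush the GGTT so hardware sees it
 */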
2304 ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2305 ggtt_invalidate_pte(vgpu, &e);
2307 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2308 ggtt_invalidate(gvt->gt);
2313 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2315 * @off: register offset
2316 * @p_data: data from guest write
2317 * @bytes: data length
2319 * This function is used to emulate the GTT MMIO register write
2322 * Zero on success, error code if failed.
2324 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2325 unsigned int off, void *p_data, unsigned int bytes)
2327 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2329 struct intel_vgpu_submission *s = &vgpu->submission;
2330 struct intel_engine_cs *engine;
2333 if (bytes != 4 && bytes != 8)
2336 off -= info->gtt_start_offset;
2337 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2339 /* If the ggtt entry of the last submitted context is written,
2340  * that context has probably been unpinned.
2341  * Set the last shadowed ctx to invalid.
2343 for_each_engine(engine, vgpu->gvt->gt, i) {
2344 if (!s->last_ctx[i].valid)
2347 if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
2348 s->last_ctx[i].valid = false;
2353 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2354 enum intel_gvt_gtt_type type)
2356 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2357 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2358 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2359 int page_entry_num = I915_GTT_PAGE_SIZE >>
2360 vgpu->gvt->device_info.gtt_entry_size_shift;
2363 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2366 if (drm_WARN_ON(&i915->drm,
2367 type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2370 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
2372 gvt_vgpu_err("fail to allocate scratch page\n");
2376 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
2377 if (dma_mapping_error(dev, daddr)) {
2378 gvt_vgpu_err("fail to dmamap scratch_pt\n");
2379 __free_page(virt_to_page(scratch_pt));
2382 gtt->scratch_pt[type].page_mfn =
2383 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2384 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2385 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2386 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2388 /* Build the tree by filling the scratch pt with entries that point
2389  * to the next level scratch pt or scratch page. scratch_pt[type]
2390  * indicates the scratch pt/scratch page used by the shadow page table
2392  * of that level. e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
2393  * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
2394  * is of type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
2396 if (type > GTT_TYPE_PPGTT_PTE_PT) {
2397 struct intel_gvt_gtt_entry se;
2399 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2400 se.type = get_entry_type(type - 1);
2401 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2403 /* Entry parameters such as present/writeable/cache type are set to
2404  * the same values as in i915's scratch page tree.
2406 se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
2407 if (type == GTT_TYPE_PPGTT_PDE_PT)
2408 se.val64 |= PPAT_CACHED;
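/* point every slot of this scratch table at the lower-level scratch
 * page so any stray lookup resolves harmlessly
 */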
2410 for (i = 0; i < page_entry_num; i++)
2411 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2417 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2420 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2423 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2424 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2425 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2426 I915_GTT_PAGE_SHIFT);
2427 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2428 __free_page(vgpu->gtt.scratch_pt[i].page);
2429 vgpu->gtt.scratch_pt[i].page = NULL;
2430 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2437 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2441 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2442 ret = alloc_scratch_pages(vgpu, i);
2450 release_scratch_page_tree(vgpu);
2455 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2458 * This function is used to initialize per-vGPU graphics memory virtualization
2462 * Zero on success, error code if failed.
2464 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2466 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2468 INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
2470 INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
2471 INIT_LIST_HEAD(&gtt->oos_page_list_head);
2472 INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2474 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2475 if (IS_ERR(gtt->ggtt_mm)) {
2476 gvt_vgpu_err("fail to create mm for ggtt.\n");
2477 return PTR_ERR(gtt->ggtt_mm);
2480 intel_vgpu_reset_ggtt(vgpu, false);
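/* tracks the pending first half of any GGTT PTE written as two
 * 4-byte MMIO accesses
 */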
2482 INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
2484 return create_scratch_page_tree(vgpu);
2487 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2489 struct list_head *pos, *n;
2490 struct intel_vgpu_mm *mm;
2492 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2493 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2494 intel_vgpu_destroy_mm(mm);
2497 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2498 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2500 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2501 gvt_err("Why do we still have spt not freed?\n");
2502 ppgtt_free_all_spt(vgpu);
2506 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2508 struct intel_gvt_partial_pte *pos, *next;
2510 list_for_each_entry_safe(pos, next,
2511 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2513 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2514 pos->offset, pos->data);
2517 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2518 vgpu->gtt.ggtt_mm = NULL;
2522 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2525 * This function is used to clean up per-vGPU graphics memory virtualization
2531 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2533 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2534 intel_vgpu_destroy_ggtt_mm(vgpu);
2535 release_scratch_page_tree(vgpu);
2538 static void clean_spt_oos(struct intel_gvt *gvt)
2540 struct intel_gvt_gtt *gtt = &gvt->gtt;
2541 struct list_head *pos, *n;
2542 struct intel_vgpu_oos_page *oos_page;
2544 WARN(!list_empty(&gtt->oos_page_use_list_head),
2545 "someone is still using oos page\n");
2547 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2548 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2549 list_del(&oos_page->list);
2550 free_page((unsigned long)oos_page->mem);
2555 static int setup_spt_oos(struct intel_gvt *gvt)
2557 struct intel_gvt_gtt *gtt = &gvt->gtt;
2558 struct intel_vgpu_oos_page *oos_page;
2562 INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2563 INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2565 for (i = 0; i < preallocated_oos_pages; i++) {
2566 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2571 oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
2572 if (!oos_page->mem) {
2578 INIT_LIST_HEAD(&oos_page->list);
2579 INIT_LIST_HEAD(&oos_page->vm_list);
2581 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2584 gvt_dbg_mm("%d oos pages preallocated\n", i);
2593 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2595 * @pdps: pdp root array
2597 * This function is used to find a PPGTT mm object from the mm object pool
2600 * pointer to mm object on success, NULL if failed.
2602 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2605 struct intel_vgpu_mm *mm;
2606 struct list_head *pos;
2608 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2609 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2611 switch (mm->ppgtt_mm.root_entry_type) {
2612 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
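/* a 4-level PPGTT has a single root (PML4), so comparing pdps[0]
 * is sufficient
 */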
2613 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
2616 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2617 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2618 sizeof(mm->ppgtt_mm.guest_pdps)))
2629 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2631 * @root_entry_type: ppgtt root entry type
2634 * This function is used to find or create a PPGTT mm object from a guest.
2637 * Zero on success, negative error code if failed.
2639 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2640 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
2642 struct intel_vgpu_mm *mm;
2644 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2646 intel_vgpu_mm_get(mm);
2648 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2650 gvt_vgpu_err("fail to create mm\n");
2656 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2660 * This function is used to find a PPGTT mm object from a guest and destroy it.
2663 * Zero on success, negative error code if failed.
2665 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2667 struct intel_vgpu_mm *mm;
2669 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2671 gvt_vgpu_err("fail to find ppgtt instance.\n");
2674 intel_vgpu_mm_put(mm);
2679 * intel_gvt_init_gtt - initialize mm components of a GVT device
2682 * This function is called at the initialization stage, to initialize
2683 * the mm components of a GVT device.
2686 * zero on success, negative error code if failed.
2688 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2692 struct device *dev = gvt->gt->i915->drm.dev;
2695 gvt_dbg_core("init gtt\n");
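/* all platforms supported by GVT-g (Gen8/Gen9) share the gen8 PTE and
 * GMA layout, so the gen8 ops are used unconditionally
 */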
2697 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2698 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2700 page = (void *)get_zeroed_page(GFP_KERNEL);
2702 gvt_err("fail to allocate scratch ggtt page\n");
2706 daddr = dma_map_page(dev, virt_to_page(page), 0,
2707 4096, DMA_BIDIRECTIONAL);
2708 if (dma_mapping_error(dev, daddr)) {
2709 gvt_err("fail to dmamap scratch ggtt page\n");
2710 __free_page(virt_to_page(page));
2714 gvt->gtt.scratch_page = virt_to_page(page);
2715 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2717 if (enable_out_of_sync) {
2718 ret = setup_spt_oos(gvt);
2720 gvt_err("fail to initialize SPT oos\n");
2721 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2722 __free_page(gvt->gtt.scratch_page);
2726 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2727 mutex_init(&gvt->gtt.ppgtt_mm_lock);
2732 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2735 * This function is called at the driver unloading stage, to clean up
2736 * the mm components of a GVT device.
2739 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2741 struct device *dev = gvt->gt->i915->drm.dev;
2742 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2743 I915_GTT_PAGE_SHIFT);
2745 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2747 __free_page(gvt->gtt.scratch_page);
2749 if (enable_out_of_sync)
2754 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2757 * This function is called to invalidate all PPGTT instances of a vGPU.
2760 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2762 struct list_head *pos, *n;
2763 struct intel_vgpu_mm *mm;
2765 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2766 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2767 if (mm->type == INTEL_GVT_MM_PPGTT) {
2768 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2769 list_del_init(&mm->ppgtt_mm.lru_list);
2770 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2771 if (mm->ppgtt_mm.shadowed)
2772 invalidate_ppgtt_mm(mm);
2778 * intel_vgpu_reset_ggtt - reset the GGTT entry
2780 * @invalidate_old: invalidate old entries
2782 * This function is called at the vGPU create stage
2783 * to reset all the GGTT entries.
2786 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2788 struct intel_gvt *gvt = vgpu->gvt;
2789 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2790 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2791 struct intel_gvt_gtt_entry old_entry;
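/* build a present PTE that points at the shared scratch page; every
 * GGTT slot owned by this vGPU is reset to it below
 */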
2795 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2796 pte_ops->set_present(&entry);
2798 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2799 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2800 while (num_entries--) {
2801 if (invalidate_old) {
2802 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2803 ggtt_invalidate_pte(vgpu, &old_entry);
2805 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2808 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2809 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2810 while (num_entries--) {
2811 if (invalidate_old) {
2812 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2813 ggtt_invalidate_pte(vgpu, &old_entry);
2815 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2818 ggtt_invalidate(gvt->gt);
2822 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
2823 * @gvt: intel gvt device
2825 * This function is called at driver resume stage to restore
2826 * GGTT entries of every vGPU.
2829 void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
2831 struct intel_vgpu *vgpu;
2832 struct intel_vgpu_mm *mm;
2835 u32 idx, num_low, num_hi, offset;
2837 /* Restore dirty host ggtt for all vGPUs */
2838 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2839 mm = vgpu->gtt.ggtt_mm;
2841 num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2842 offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
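/* write back only the saved host PTEs that were present, restoring
 * this vGPU's aperture and hidden GGTT mappings after resume
 */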
2843 for (idx = 0; idx < num_low; idx++) {
2844 pte = mm->ggtt_mm.host_ggtt_aperture[idx];
2845 if (pte & GEN8_PAGE_PRESENT)
2846 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2849 num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2850 offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2851 for (idx = 0; idx < num_hi; idx++) {
2852 pte = mm->ggtt_mm.host_ggtt_hidden[idx];
2853 if (pte & GEN8_PAGE_PRESENT)
2854 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);