/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>

#include "gt/intel_gtt.h"

struct intel_gvt;
struct intel_vgpu;
#define I915_GTT_PAGE_SHIFT 12

#define INTEL_GVT_INVALID_ADDR (~0UL)
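/* A single GTT/PPGTT entry: the raw 64-bit value plus its decoded GTT_TYPE_*. */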
struct intel_gvt_gtt_entry {
	u64 val64;
	int type;	/* a GTT_TYPE_* value from enum intel_gvt_gtt_type */
};
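/*
 * Generation-specific accessors for page-table entries. They hide the bit
 * layout of GGTT/PPGTT entries (present bit, PSE/IPS/64K-split flags, pfn
 * field) behind a common interface. Hypothetical read-modify-write usage:
 *
 *	ops->get_entry(pt, &e, index, false, 0, vgpu);
 *	ops->set_pfn(&e, new_pfn);
 *	ops->set_entry(pt, &e, index, false, 0, vgpu);
 */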
struct intel_gvt_gtt_pte_ops {
	int (*get_entry)(void *pt,
			 struct intel_gvt_gtt_entry *e,
			 unsigned long index,
			 bool hypervisor_access,
			 unsigned long gpa,
			 struct intel_vgpu *vgpu);
	int (*set_entry)(void *pt,
			 struct intel_gvt_gtt_entry *e,
			 unsigned long index,
			 bool hypervisor_access,
			 unsigned long gpa,
			 struct intel_vgpu *vgpu);
	bool (*test_present)(struct intel_gvt_gtt_entry *e);
	void (*clear_present)(struct intel_gvt_gtt_entry *e);
	void (*set_present)(struct intel_gvt_gtt_entry *e);
	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
	void (*clear_pse)(struct intel_gvt_gtt_entry *e);
	bool (*test_ips)(struct intel_gvt_gtt_entry *e);
	void (*clear_ips)(struct intel_gvt_gtt_entry *e);
	bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
	void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
	void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};
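/*
 * Helpers that extract the per-level page-table index (PTE/PDE/PDP/PML4)
 * from a graphics memory address (GMA) during a table walk.
 */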
struct intel_gvt_gtt_gma_ops {
	unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pde_index)(unsigned long gma);
	unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_pml4_index)(unsigned long gma);
};
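/*
 * Device-wide GTT state shared by all vGPUs: the PTE/GMA ops tables for this
 * hardware generation, the out-of-sync page pools, the PPGTT mm LRU, and the
 * global scratch page.
 */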
struct intel_gvt_gtt {
	const struct intel_gvt_gtt_pte_ops *pte_ops;
	const struct intel_gvt_gtt_gma_ops *gma_ops;
	struct list_head oos_page_use_list_head;
	struct list_head oos_page_free_list_head;
	struct mutex ppgtt_mm_lock;
	struct list_head ppgtt_mm_lru_list_head;

	struct page *scratch_page;
	unsigned long scratch_mfn;
};
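/*
 * Entry and page-table types: leaf PTE sizes (4K/64K/2M/1G), the intermediate
 * PDE/PDP/PML4 levels, the root entry flavours for 3-level and 4-level
 * paging, and the corresponding page-table page types.
 */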
enum intel_gvt_gtt_type {
	GTT_TYPE_INVALID = 0,

	GTT_TYPE_GGTT_PTE,

	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
	GTT_TYPE_PPGTT_PTE_64K_ENTRY,
	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
	GTT_TYPE_PPGTT_PTE_1G_ENTRY,

	GTT_TYPE_PPGTT_PTE_ENTRY,

	GTT_TYPE_PPGTT_PDE_ENTRY,
	GTT_TYPE_PPGTT_PDP_ENTRY,
	GTT_TYPE_PPGTT_PML4_ENTRY,

	GTT_TYPE_PPGTT_ROOT_ENTRY,

	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,

	GTT_TYPE_PPGTT_ENTRY,

	GTT_TYPE_PPGTT_PTE_PT,
	GTT_TYPE_PPGTT_PDE_PT,
	GTT_TYPE_PPGTT_PDP_PT,
	GTT_TYPE_PPGTT_PML4_PT,

	GTT_TYPE_MAX,
};
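/* An mm models one translation table set: the global GTT or a per-context PPGTT. */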
enum intel_gvt_mm_type {
	INTEL_GVT_MM_GGTT,
	INTEL_GVT_MM_PPGTT,
};
#define GVT_RING_CTX_NR_PDPS	GEN8_3LVL_PDPES
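/*
 * Records the first half of a 64-bit GGTT PTE that the guest updates with two
 * 32-bit MMIO writes, so the entry can be reassembled once both halves arrive.
 */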
struct intel_gvt_partial_pte {
	unsigned long offset;
	u64 data;
	struct list_head list;
};
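/*
 * A guest translation table set (GGTT or PPGTT) together with its shadow
 * state: the root pointers as seen by the vGPU and the shadow roots that are
 * actually programmed into hardware.
 */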
struct intel_vgpu_mm {
	enum intel_gvt_mm_type type;
	struct intel_vgpu *vgpu;

	struct kref ref;
	atomic_t pincount;

	union {
		struct {
			enum intel_gvt_gtt_type root_entry_type;
			/*
			 * The 4 PDPs in ring context. For 48-bit addressing,
			 * only PDP0 is valid and points to PML4. For 32-bit
			 * addressing, all 4 are used as true PDPs.
			 */
			u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
			u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
			bool shadowed;

			struct list_head list;
			struct list_head lru_list;
			struct list_head link; /* possible LRI shadow mm list */
		} ppgtt_mm;
		struct {
			void *virtual_ggtt;
			/* Save/restore for PM */
			u64 *host_ggtt_aperture;
			u64 *host_ggtt_hidden;
			struct list_head partial_pte_list;
		} ggtt_mm;
	};
};
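/*
 * Creates (and shadows) a PPGTT mm for the guest page-table roots given in
 * pdps[]. The returned mm is reference counted; use the get/put helpers
 * below to manage its lifetime.
 */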
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
	kref_get(&mm->ref);
}
void _intel_vgpu_mm_release(struct kref *mm_ref);
static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
{
	kref_put(&mm->ref, _intel_vgpu_mm_release);
}
static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
{
	intel_vgpu_mm_put(mm);
}
struct intel_vgpu_guest_page;
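/*
 * A scratch page table for one page-table level: every entry of the scratch
 * PT points at the next-level scratch page, so unmapped guest ranges resolve
 * to harmless scratch memory instead of faulting.
 */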
struct intel_vgpu_scratch_pt {
	struct page *page;
	unsigned long page_mfn;
};
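/*
 * Per-vGPU GTT state: the vGPU's GGTT mm, its PPGTT mm list, the radix tree
 * of shadow page tables (SPTs), out-of-sync and post-shadow work lists, and
 * one scratch page table per level.
 */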
struct intel_vgpu_gtt {
	struct intel_vgpu_mm *ggtt_mm;
	struct list_head ppgtt_mm_list_head;
	struct radix_tree_root spt_tree;
	struct list_head oos_page_list_head;
	struct list_head post_shadow_list_head;
	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_gvt_clean_gtt(struct intel_gvt *gvt);
struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry);
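/*
 * An out-of-sync (OOS) page: a guest page-table page whose write protection
 * has been dropped, so its contents must be re-synced into the shadow table
 * before the shadow is used again.
 */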
struct intel_vgpu_oos_page {
	struct intel_vgpu_ppgtt_spt *spt;
	struct list_head list;
	struct list_head vm_list;
};
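/* A 4 KiB page-table page holds 4096 / 8 = 512 64-bit entries. */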
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
/* Represent a vgpu shadow page table. */
struct intel_vgpu_ppgtt_spt {
	struct intel_vgpu *vgpu;

	struct {
		enum intel_gvt_gtt_type type;
		bool pde_ips; /* for 64KB PTEs */
		void *vaddr;
		struct page *page;
		unsigned long mfn;
	} shadow_page;

	struct {
		enum intel_gvt_gtt_type type;
		bool pde_ips; /* for 64KB PTEs */
		unsigned long gfn;
		unsigned long write_cnt;
		struct intel_vgpu_oos_page *oos_page;
	} guest_page;

	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
	struct list_head post_shadow_list;
};
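/*
 * Bring all out-of-sync guest page-table pages back in sync with their
 * shadow copies, and flush guest entries queued on the post-shadow lists.
 */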
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);

int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);

int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);

void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
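/*
 * Walks the mm's page tables to translate a guest graphics memory address
 * (GMA) into a guest physical address; returns INTEL_GVT_INVALID_ADDR on
 * failure.
 */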
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
		unsigned long gma);

struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[]);
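/*
 * Find-or-create: returns an existing PPGTT mm matching the guest PDPs, or
 * creates and shadows a new one. The matching put drops that reference.
 */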
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);

int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
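/*
 * MMIO handlers for guest reads and writes to the GGTT range of the MMIO
 * BAR; writes update the vGPU's view and the corresponding shadow GGTT
 * entries.
 */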
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);

int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);
void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
void intel_gvt_restore_ggtt(struct intel_gvt *gvt);

#endif /* _GVT_GTT_H_ */