// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

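/*
 * alloc_pt() allocates a single page-table node together with its
 * backing DMA object via the vm's alloc_pt_dma() hook. Allocations
 * here may fail (I915_GFP_ALLOW_FAIL); callers must handle the
 * resulting ERR_PTR(-ENOMEM).
 */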
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	pt->base = vm->alloc_pt_dma(vm, sz);
	if (IS_ERR(pt->base)) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->is_compact = false;
	atomic_set(&pt->used, 0);
	return pt;
}

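/*
 * __alloc_pd() builds only the CPU-side directory structure, with room
 * for @count child pointers; the backing DMA page is attached by
 * alloc_pd() afterwards.
 */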
struct i915_page_directory *__alloc_pd(int count)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd->entry)) {
		kfree(pd);
		return NULL;
	}

	spin_lock_init(&pd->lock);
	return pd;
}

struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		kfree(pd->entry);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

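/*
 * free_px() releases a node at level @lvl of the page-table tree. The
 * BUILD_BUG_ON pins struct i915_page_directory to start with its
 * embedded struct i915_page_table, so directory levels (lvl > 0) can
 * free their entry array through the same pointer.
 */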
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
	BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

	if (lvl) {
		struct i915_page_directory *pd =
			container_of(pt, typeof(*pd), pt);

		kfree(pd->entry);
	}

	if (pt->base)
		i915_gem_object_put(pt->base);

	kfree(pt);
}

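/*
 * write_dma_entry() updates a single 64b descriptor through the CPU
 * mapping of a page-table page, then clflushes it, as the GPU reads
 * these pages over DMA and may not snoop the CPU caches.
 */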
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = __px_vaddr(pdma);

	vaddr[idx] = encoded_entry;
	drm_clflush_virt_range(&vaddr[idx], sizeof(u64));
}

void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

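/*
 * release_pd_entry() drops one reference on @pt. The lockless
 * atomic_add_unless() fast path bails out while other references
 * remain; only the final release takes pd->lock and redirects the
 * entry back to scratch. Returns true if the caller may now free @pt.
 */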
bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}

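/*
 * Note that gen8+ needs no global enabling here: full-ppgtt is
 * programmed per context through its page-directory pointers, so only
 * the legacy gen6/gen7 paths have enable hooks.
 */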
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (GRAPHICS_VER(i915) == 6)
		gen6_ppgtt_enable(gt);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ppgtt_enable(gt);

	return 0;
}

static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
	if (GRAPHICS_VER(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

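/*
 * ppgtt_bind_vma() fills the page tables for @vma_res from the
 * pre-allocated stash, so the allocation step itself cannot fail with
 * -ENOMEM here, and then writes the PTEs with the requested PAT index
 * and protection flags.
 */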
void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma_resource *vma_res,
		    unsigned int pat_index,
		    u32 flags)
{
	u32 pte_flags;

	if (!vma_res->allocated) {
		vm->allocate_va_range(vm, stash, vma_res->start,
				      vma_res->vma_size);
		vma_res->allocated = true;
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, pat_index, pte_flags);
	wmb();
}

void ppgtt_unbind_vma(struct i915_address_space *vm,
		      struct i915_vma_resource *vma_res)
{
	if (!vma_res->allocated)
		return;

	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
	vma_invalidate_tlb(vm, vma_res->tlb);
}

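/*
 * pd_count() deliberately over-estimates: @size is not yet tied to an
 * aligned start address, so the range may straddle an extra entry at
 * this level. The 2 * (BIT_ULL(shift) - 1) slack covers that
 * worst-case misalignment.
 */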
static unsigned long pd_count(u64 size, int shift)
{
	/* Beware later misalignment */
	return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

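/*
 * Usage sketch (hypothetical caller, simplified from the vma bind
 * path): the stash is filled and mapped up front, where failure is
 * still allowed, and whatever allocate_va_range() did not consume is
 * returned afterwards.
 *
 *	struct i915_vm_pt_stash stash = {};
 *	int err;
 *
 *	err = i915_vm_alloc_pt_stash(vm, &stash, size);
 *	if (!err)
 *		err = i915_vm_map_pt_stash(vm, &stash);
 *	if (!err)
 *		vm->allocate_va_range(vm, &stash, start, size);
 *	i915_vm_free_pt_stash(vm, &stash);
 */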
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size)
{
	unsigned long count;
	int shift, n, pt_sz;

	shift = vm->pd_shift;
	if (!shift)
		return 0;

	pt_sz = stash->pt_sz;
	if (!pt_sz)
		pt_sz = I915_GTT_PAGE_SIZE_4K;
	else
		GEM_BUG_ON(!IS_DGFX(vm->i915));

	GEM_BUG_ON(!is_power_of_2(pt_sz));

	count = pd_count(size, shift);
	while (count--) {
		struct i915_page_table *pt;

		pt = alloc_pt(vm, pt_sz);
		if (IS_ERR(pt)) {
			i915_vm_free_pt_stash(vm, stash);
			return PTR_ERR(pt);
		}

		pt->stash = stash->pt[0];
		stash->pt[0] = pt;
	}

	for (n = 1; n < vm->top; n++) {
		shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
		count = pd_count(size, shift);
		while (count--) {
			struct i915_page_directory *pd;

			pd = alloc_pd(vm);
			if (IS_ERR(pd)) {
				i915_vm_free_pt_stash(vm, stash);
				return PTR_ERR(pd);
			}

			pd->pt.stash = stash->pt[1];
			stash->pt[1] = &pd->pt;
		}
	}

	return 0;
}

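/*
 * i915_vm_map_pt_stash() pins and maps every stashed node for CPU
 * access up front, so the later PTE writes under allocate_va_range()
 * have no failure paths left.
 */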
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n, err;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		for (pt = stash->pt[n]; pt; pt = pt->stash) {
			err = map_pt_dma_locked(vm, pt->base);
			if (err)
				return err;
		}
	}

	return 0;
}

void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		while ((pt = stash->pt[n])) {
			stash->pt[n] = pt->stash;
			free_px(vm, pt, n);
		}
	}
}

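/*
 * ppgtt_init() fills in the fields of the address space common to all
 * ppgtt flavours; the gen6/gen8 constructors then layer their
 * page-table callbacks on top.
 */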
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = i915->drm.dev;
	ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
	ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;

	dma_resv_init(&ppgtt->vm._resv);
	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
}