// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"

static u64 gen8_pde_encode(const dma_addr_t addr,
                           const enum i915_cache_level level)
{
        u64 pde = addr | _PAGE_PRESENT | _PAGE_RW;

        if (level != I915_CACHE_NONE)
                pde |= PPAT_CACHED_PDE;
        else
                pde |= PPAT_UNCACHED;

        return pde;
}
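
/*
 * For example (illustrative values only): gen8_pde_encode(0x1000,
 * I915_CACHE_LLC) yields 0x1000 | _PAGE_PRESENT | _PAGE_RW |
 * PPAT_CACHED_PDE; only I915_CACHE_NONE selects PPAT_UNCACHED.
 */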

static u64 gen8_pte_encode(dma_addr_t addr,
                           enum i915_cache_level level,
                           u32 flags)
{
        gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

        if (unlikely(flags & PTE_READ_ONLY))
                pte &= ~_PAGE_RW;

        switch (level) {
        case I915_CACHE_NONE:
                pte |= PPAT_UNCACHED;
                break;
        case I915_CACHE_WT:
                pte |= PPAT_DISPLAY_ELLC;
                break;
        default:
                pte |= PPAT_CACHED;
                break;
        }

        return pte;
}
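
/*
 * For example (illustrative values only): gen8_pte_encode(0x1000,
 * I915_CACHE_NONE, PTE_READ_ONLY) yields 0x1000 | _PAGE_PRESENT |
 * PPAT_UNCACHED -- present, uncached, with the read/write bit stripped.
 * I915_CACHE_WT instead selects the display-friendly PPAT_DISPLAY_ELLC.
 */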

/*
 * When running as a vGPU guest (GVT-g), tell the host about creation or
 * destruction of a ppgtt by writing the dma addresses of its top-level
 * directories into the PV info registers and raising a g2v notification.
 */
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
        struct drm_i915_private *i915 = ppgtt->vm.i915;
        struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
        enum vgt_g2v_type msg;
        int i;

        if (create)
                atomic_inc(px_used(ppgtt->pd)); /* never remove */
        else
                atomic_dec(px_used(ppgtt->pd));

        mutex_lock(&i915->vgpu.lock);

        if (i915_vm_is_4lvl(&ppgtt->vm)) {
                const u64 daddr = px_dma(ppgtt->pd);

                intel_uncore_write(uncore,
                                   vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
                intel_uncore_write(uncore,
                                   vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

                msg = create ?
                        VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
                        VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY;
        } else {
                for (i = 0; i < GEN8_3LVL_PDPES; i++) {
                        const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

                        intel_uncore_write(uncore,
                                           vgtif_reg(pdp[i].lo),
                                           lower_32_bits(daddr));
                        intel_uncore_write(uncore,
                                           vgtif_reg(pdp[i].hi),
                                           upper_32_bits(daddr));
                }

                msg = create ?
                        VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
                        VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY;
        }

        /* g2v_notify atomically (via hv trap) consumes the message packet. */
        intel_uncore_write(uncore, vgtif_reg(g2v_notify), msg);

        mutex_unlock(&i915->vgpu.lock);
}

/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
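
/*
 * Each 4K page holds GEN8_PDES = 512 eight-byte entries, so every level
 * of the tree resolves ilog2(512) = 9 bits of index:
 *
 *   __gen8_pte_shift(0) = 12   (4K page offset)
 *   __gen8_pte_shift(1) = 21   (one PDE maps 2M)
 *   __gen8_pte_shift(2) = 30   (one PDPE maps 1G)
 *   __gen8_pte_shift(3) = 39   (one PML4E maps 512G)
 */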

#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)

/*
 * Return the number of entries within the level-lvl directory spanned by
 * [start, end), clamped at the directory boundary, and the first entry's
 * index via *idx.
 */
static inline unsigned int
gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
        const int shift = gen8_pd_shift(lvl);
        const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

        GEM_BUG_ON(start >= end);
        end += ~mask >> gen8_pd_shift(1);

        *idx = i915_pde_index(start, shift);
        if ((start ^ end) & mask)
                return GEN8_PDES - *idx;
        else
                return i915_pde_index(end, shift) - *idx;
}
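
/*
 * For example, at the page-table level, gen8_pd_range(510, 520, 0, &idx)
 * sets idx = 510 and returns 2: the span crosses a 512-entry boundary,
 * so it is clamped to the two entries remaining in this table.
 */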

static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
        const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);

        GEM_BUG_ON(start >= end);
        return (start ^ end) & mask && (start & ~mask) == 0;
}

static inline unsigned int gen8_pt_count(u64 start, u64 end)
{
        GEM_BUG_ON(start >= end);
        if ((start ^ end) >> gen8_pd_shift(1))
                return GEN8_PDES - (start & (GEN8_PDES - 1));
        else
                return end - start;
}
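
/*
 * gen8_pt_count() is the level-0 special case: gen8_pt_count(0, 32)
 * returns 32, everything within one page table, while
 * gen8_pt_count(510, 520) returns 2, clamped at the 512-entry boundary
 * just like gen8_pd_range() above.
 */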

static inline unsigned int
gen8_pd_top_count(const struct i915_address_space *vm)
{
        unsigned int shift = __gen8_pte_shift(vm->top);
        return (vm->total + (1ull << shift) - 1) >> shift;
}
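
/*
 * For example, a 4-level vm (top == 3) with a 48-bit address space
 * (vm->total = 1ull << 48) has 2^48 >> 39 = 512 top-level entries,
 * while a 3-level vm (top == 2) covering 4GB has 2^32 >> 30 = 4,
 * i.e. GEN8_3LVL_PDPES.
 */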

static inline struct i915_page_directory *
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
{
        struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);

        if (vm->top == 2)
                return ppgtt->pd;
        else
                return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
}

static inline struct i915_page_directory *
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
{
        return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
}

/* Recursively free every populated child of pd, then pd itself. */
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
                                 struct i915_page_directory *pd,
                                 int count, int lvl)
{
        if (lvl) {
                void **pde = pd->entry;

                do {
                        if (!*pde)
                                continue;

                        __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
                } while (pde++, --count);
        }

        free_px(vm, pd);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
        struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

        if (intel_vgpu_active(vm->i915))
                gen8_ppgtt_notify_vgt(ppgtt, false);

        __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
        free_scratch(vm);
}

/*
 * Point the PTEs in [start, end) (in 4K page-frame units) back at
 * scratch, freeing any page table or directory whose last real entry
 * is removed. Returns the index it advanced to.
 */
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
                              struct i915_page_directory * const pd,
                              u64 start, const u64 end, int lvl)
{
        const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
        unsigned int idx, len;

        GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

        len = gen8_pd_range(start, end, lvl--, &idx);
        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
            __func__, vm, lvl + 1, start, end,
            idx, len, atomic_read(px_used(pd)));
        GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));

        do {
                struct i915_page_table *pt = pd->entry[idx];

                if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
                    gen8_pd_contains(start, end, lvl)) {
                        DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
                            __func__, vm, lvl + 1, idx, start, end);
                        clear_pd_entry(pd, idx, scratch);
                        __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
                        start += (u64)I915_PDES << gen8_pd_shift(lvl);
                        continue;
                }

                if (lvl) {
                        start = __gen8_ppgtt_clear(vm, as_pd(pt),
                                                   start, end, lvl);
                } else {
                        unsigned int count;
                        u64 *vaddr;

                        count = gen8_pt_count(start, end);
                        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } removing pte\n",
                            __func__, vm, lvl, start, end,
                            gen8_pd_index(start, 0), count,
                            atomic_read(&pt->used));
                        GEM_BUG_ON(!count || count >= atomic_read(&pt->used));

                        vaddr = kmap_atomic_px(pt);
                        memset64(vaddr + gen8_pd_index(start, 0),
                                 vm->scratch[0].encode,
                                 count);
                        kunmap_atomic(vaddr);

                        atomic_sub(count, &pt->used);
                        start += count;
                }

                if (release_pd_entry(pd, idx, pt, scratch))
                        free_px(vm, pt);
        } while (idx++, --len);

        return start;
}

static void gen8_ppgtt_clear(struct i915_address_space *vm,
                             u64 start, u64 length)
{
        GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(range_overflows(start, length, vm->total));

        start >>= GEN8_PTE_SHIFT;
        length >>= GEN8_PTE_SHIFT;
        GEM_BUG_ON(length == 0);

        __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
                           start, start + length, vm->top);
}

/*
 * Allocate any missing page directories and page tables needed to cover
 * [*start, end), in 4K page-frame units, pinning each level into the
 * tree as it goes. *start is advanced as page tables are accounted, so
 * that on error the caller can unwind exactly the range populated so far.
 */
static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
                              struct i915_page_directory * const pd,
                              u64 * const start, const u64 end, int lvl)
{
        const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
        struct i915_page_table *alloc = NULL;
        unsigned int idx, len;
        int ret = 0;

        GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);

        len = gen8_pd_range(*start, end, lvl--, &idx);
        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
            __func__, vm, lvl + 1, *start, end,
            idx, len, atomic_read(px_used(pd)));
        GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));

        spin_lock(&pd->lock);
        GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
        do {
                struct i915_page_table *pt = pd->entry[idx];

                if (!pt) {
                        spin_unlock(&pd->lock);

                        DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
                            __func__, vm, lvl + 1, idx);

                        pt = fetch_and_zero(&alloc);
                        if (lvl) {
                                if (!pt) {
                                        pt = &alloc_pd(vm)->pt;
                                        if (IS_ERR(pt)) {
                                                ret = PTR_ERR(pt);
                                                goto out;
                                        }
                                }

                                fill_px(pt, vm->scratch[lvl].encode);
                        } else {
                                if (!pt) {
                                        pt = alloc_pt(vm);
                                        if (IS_ERR(pt)) {
                                                ret = PTR_ERR(pt);
                                                goto out;
                                        }
                                }

                                if (intel_vgpu_active(vm->i915) ||
                                    gen8_pt_count(*start, end) < I915_PDES)
                                        fill_px(pt, vm->scratch[lvl].encode);
                        }

                        spin_lock(&pd->lock);
                        if (likely(!pd->entry[idx]))
                                set_pd_entry(pd, idx, pt);
                        else
                                alloc = pt, pt = pd->entry[idx];
                }

                if (lvl) {
                        atomic_inc(&pt->used);
                        spin_unlock(&pd->lock);

                        ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
                                                 start, end, lvl);
                        if (unlikely(ret)) {
                                if (release_pd_entry(pd, idx, pt, scratch))
                                        free_px(vm, pt);
                                goto out;
                        }

                        spin_lock(&pd->lock);
                        atomic_dec(&pt->used);
                        GEM_BUG_ON(!atomic_read(&pt->used));
                } else {
                        unsigned int count = gen8_pt_count(*start, end);

                        DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d } inserting pte\n",
                            __func__, vm, lvl, *start, end,
                            gen8_pd_index(*start, 0), count,
                            atomic_read(&pt->used));

                        atomic_add(count, &pt->used);
                        /* All other pdes may be simultaneously removed */
                        GEM_BUG_ON(atomic_read(&pt->used) > NALLOC * I915_PDES);
                        *start += count;
                }
        } while (idx++, --len);
        spin_unlock(&pd->lock);
out:
        if (alloc)
                free_px(vm, alloc);
        return ret;
}

static int gen8_ppgtt_alloc(struct i915_address_space *vm,
                            u64 start, u64 length)
{
        u64 from;
        int err;

        GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
        GEM_BUG_ON(range_overflows(start, length, vm->total));

        start >>= GEN8_PTE_SHIFT;
        length >>= GEN8_PTE_SHIFT;
        GEM_BUG_ON(length == 0);
        from = start;

        err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
                                 &start, start + length, vm->top);
        if (unlikely(err && from != start))
                __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
                                   from, start, vm->top);

        return err;
}

static __always_inline void
write_pte(gen8_pte_t *pte, const gen8_pte_t val)
{
        /* Magic delays? Or can we refine these to flush all in one pass? */
        *pte = val;
        wmb(); /* cpu to cache */
        clflush(pte); /* cache to memory */
        wmb(); /* visible to all */
}

static __always_inline u64
gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
                      struct i915_page_directory *pdp,
                      struct sgt_dma *iter,
                      u64 idx,
                      enum i915_cache_level cache_level,
                      u32 flags)
{
        struct i915_page_directory *pd;
        const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        gen8_pte_t *vaddr;

        pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
        do {
                GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE);
                write_pte(&vaddr[gen8_pd_index(idx, 0)],
                          pte_encode | iter->dma);

                iter->dma += I915_GTT_PAGE_SIZE;
                if (iter->dma >= iter->max) {
                        iter->sg = __sg_next(iter->sg);
                        if (!iter->sg) {
                                idx = 0;
                                break;
                        }

                        iter->dma = sg_dma_address(iter->sg);
                        iter->max = iter->dma + iter->sg->length;
                }

                if (gen8_pd_index(++idx, 0) == 0) {
                        if (gen8_pd_index(idx, 1) == 0) {
                                /* Limited by sg length for 3lvl */
                                if (gen8_pd_index(idx, 2) == 0)
                                        break;

                                pd = pdp->entry[gen8_pd_index(idx, 2)];
                        }

                        kunmap_atomic(vaddr);
                        vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
                }
        } while (1);
        kunmap_atomic(vaddr);

        return idx;
}

/*
 * Insert the range using the largest page size the backing store allows:
 * a 2M PDE when the dma address, remaining length and GTT offset all
 * align, otherwise 4K PTEs, with a fully populated page table
 * opportunistically promoted to 64K pages via GEN8_PDE_IPS_64K.
 */
static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
                                   struct sgt_dma *iter,
                                   enum i915_cache_level cache_level,
                                   u32 flags)
{
        const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
        u64 start = vma->node.start;
        dma_addr_t rem = iter->sg->length;

        GEM_BUG_ON(!i915_vm_is_4lvl(vma->vm));

        do {
                struct i915_page_directory * const pdp =
                        gen8_pdp_for_page_address(vma->vm, start);
                struct i915_page_directory * const pd =
                        i915_pd_entry(pdp, __gen8_pte_index(start, 2));
                gen8_pte_t encode = pte_encode;
                unsigned int maybe_64K = -1;
                unsigned int page_size;
                gen8_pte_t *vaddr;
                u16 index;

                if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
                    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
                    rem >= I915_GTT_PAGE_SIZE_2M &&
                    !__gen8_pte_index(start, 0)) {
                        index = __gen8_pte_index(start, 1);
                        encode |= GEN8_PDE_PS_2M;
                        page_size = I915_GTT_PAGE_SIZE_2M;

                        vaddr = kmap_atomic_px(pd);
                } else {
                        struct i915_page_table *pt =
                                i915_pt_entry(pd, __gen8_pte_index(start, 1));

                        index = __gen8_pte_index(start, 0);
                        page_size = I915_GTT_PAGE_SIZE;

                        if (!index &&
                            vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
                            IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
                            (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
                             rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
                                maybe_64K = __gen8_pte_index(start, 1);

                        vaddr = kmap_atomic_px(pt);
                }

                do {
                        GEM_BUG_ON(iter->sg->length < page_size);
                        write_pte(&vaddr[index++], encode | iter->dma);

                        start += page_size;
                        iter->dma += page_size;
                        rem -= page_size;
                        if (iter->dma >= iter->max) {
                                iter->sg = __sg_next(iter->sg);
                                if (!iter->sg)
                                        break;

                                rem = iter->sg->length;
                                iter->dma = sg_dma_address(iter->sg);
                                iter->max = iter->dma + rem;

                                if (maybe_64K != -1 && index < I915_PDES &&
                                    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
                                      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
                                       rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
                                        maybe_64K = -1;

                                if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
                                        break;
                        }
                } while (rem >= page_size && index < I915_PDES);

                kunmap_atomic(vaddr);

                /*
                 * Is it safe to mark the 2M block as 64K? -- Either we have
                 * filled the whole page-table with 64K entries, or filled
                 * part of it and have reached the end of the sg table and we
                 * have enough padding.
                 */
                if (maybe_64K != -1 &&
                    (index == I915_PDES ||
                     (i915_vm_has_scratch_64K(vma->vm) &&
                      !iter->sg && IS_ALIGNED(vma->node.start +
                                              vma->node.size,
                                              I915_GTT_PAGE_SIZE_2M)))) {
                        vaddr = kmap_atomic_px(pd);
                        vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
                        kunmap_atomic(vaddr);
                        page_size = I915_GTT_PAGE_SIZE_64K;

                        /*
                         * We write all 4K page entries, even when using 64K
                         * pages. In order to verify that the HW isn't cheating
                         * by using the 4K PTE instead of the 64K PTE, we want
                         * to remove all the surplus entries. If the HW skipped
                         * the 64K PTE, it will read/write into the scratch page
                         * instead - which we detect as missing results during
                         * selftests.
                         */
                        if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
                                u16 i;

                                encode = vma->vm->scratch[0].encode;
                                vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));

                                for (i = 1; i < index; i += 16)
                                        memset64(vaddr + i, encode, 15);

                                kunmap_atomic(vaddr);
                        }
                }

                vma->page_sizes.gtt |= page_size;
        } while (iter->sg);
}

static void gen8_ppgtt_insert(struct i915_address_space *vm,
                              struct i915_vma *vma,
                              enum i915_cache_level cache_level,
                              u32 flags)
{
        struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
        struct sgt_dma iter = sgt_dma(vma);

        if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                gen8_ppgtt_insert_huge(vma, &iter, cache_level, flags);
        } else {
                u64 idx = vma->node.start >> GEN8_PTE_SHIFT;

                do {
                        struct i915_page_directory * const pdp =
                                gen8_pdp_for_page_index(vm, idx);

                        idx = gen8_ppgtt_insert_pte(ppgtt, pdp, &iter, idx,
                                                    cache_level, flags);
                } while (idx);

                vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
        }
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
        int ret;
        int i;

        /*
         * If everybody agrees not to write into the scratch page,
         * we can reuse it for all vm, keeping contexts and processes
         * separate.
         */
        if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
                struct i915_address_space *clone = vm->gt->vm;

                GEM_BUG_ON(!clone->has_read_only);

                vm->scratch_order = clone->scratch_order;
                memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
                px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
                return 0;
        }

        ret = setup_scratch_page(vm, __GFP_HIGHMEM);
        if (ret)
                return ret;

        vm->scratch[0].encode =
                gen8_pte_encode(px_dma(&vm->scratch[0]),
                                I915_CACHE_LLC, vm->has_read_only);

        for (i = 1; i <= vm->top; i++) {
                if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
                        goto free_scratch;

                fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
                vm->scratch[i].encode =
                        gen8_pde_encode(px_dma(&vm->scratch[i]),
                                        I915_CACHE_LLC);
        }

        return 0;

free_scratch:
        free_scratch(vm);
        return -ENOMEM;
}

static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
        struct i915_address_space *vm = &ppgtt->vm;
        struct i915_page_directory *pd = ppgtt->pd;
        unsigned int idx;

        GEM_BUG_ON(vm->top != 2);
        GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);

        for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
                struct i915_page_directory *pde;

                pde = alloc_pd(vm);
                if (IS_ERR(pde))
                        return PTR_ERR(pde);

                fill_px(pde, vm->scratch[1].encode);
                set_pd_entry(pd, idx, pde);
                atomic_inc(px_used(pde)); /* keep pinned */
        }
        wmb();

        return 0;
}

static struct i915_page_directory *
gen8_alloc_top_pd(struct i915_address_space *vm)
{
        const unsigned int count = gen8_pd_top_count(vm);
        struct i915_page_directory *pd;

        GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));

        pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);

        if (unlikely(setup_page_dma(vm, px_base(pd)))) {
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
        atomic_inc(px_used(pd)); /* mark as pinned */
        return pd;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal
 * x86 terms. Each PDP represents 1GB of memory:
 * 4 * 512 * 512 * 4096 = 4GB legacy 32b address space.
 */
struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
{
        struct i915_ppgtt *ppgtt;
        int err;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return ERR_PTR(-ENOMEM);

        ppgtt_init(ppgtt, gt);
        ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;

        /*
         * From bdw, there is hw support for read-only pages in the PPGTT.
         *
         * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
         * for now.
         *
         * Gen12 has inherited the same read-only fault issue from gen11.
         */
        ppgtt->vm.has_read_only = !IS_GEN_RANGE(gt->i915, 11, 12);

        /*
         * There are only a few exceptions for gen >= 6: chv and bxt.
         * And we are not sure about the latter, so play safe for now.
         */
        if (IS_CHERRYVIEW(gt->i915) || IS_BROXTON(gt->i915))
                ppgtt->vm.pt_kmap_wc = true;

        err = gen8_init_scratch(&ppgtt->vm);
        if (err)
                goto err_free;

        ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
        if (IS_ERR(ppgtt->pd)) {
                err = PTR_ERR(ppgtt->pd);
                goto err_free_scratch;
        }

        if (!i915_vm_is_4lvl(&ppgtt->vm)) {
                err = gen8_preallocate_top_level_pdp(ppgtt);
                if (err)
                        goto err_free_pd;
        }

        ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
        ppgtt->vm.insert_entries = gen8_ppgtt_insert;
        ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
        ppgtt->vm.clear_range = gen8_ppgtt_clear;

        ppgtt->vm.pte_encode = gen8_pte_encode;

        if (intel_vgpu_active(gt->i915))
                gen8_ppgtt_notify_vgt(ppgtt, true);

        ppgtt->vm.cleanup = gen8_ppgtt_cleanup;

        return ppgtt;

err_free_pd:
        __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
                             gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
        free_scratch(&ppgtt->vm);
err_free:
        kfree(ppgtt);
        return ERR_PTR(err);
}