[linux.git] drivers/gpu/drm/i915/gt/intel_ggtt.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5
6 #include <asm/set_memory.h>
7 #include <asm/smp.h>
8 #include <linux/types.h>
9 #include <linux/stop_machine.h>
10
11 #include <drm/drm_managed.h>
12 #include <drm/i915_drm.h>
13 #include <drm/intel-gtt.h>
14
15 #include "display/intel_display.h"
16 #include "gem/i915_gem_lmem.h"
17
18 #include "intel_context.h"
19 #include "intel_ggtt_gmch.h"
20 #include "intel_gpu_commands.h"
21 #include "intel_gt.h"
22 #include "intel_gt_regs.h"
23 #include "intel_pci_config.h"
24 #include "intel_ring.h"
25 #include "i915_drv.h"
26 #include "i915_pci.h"
27 #include "i915_reg.h"
28 #include "i915_request.h"
29 #include "i915_scatterlist.h"
30 #include "i915_utils.h"
31 #include "i915_vgpu.h"
32
33 #include "intel_gtt.h"
34 #include "gen8_ppgtt.h"
35 #include "intel_engine_pm.h"
36
37 static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
38                                    unsigned long color,
39                                    u64 *start,
40                                    u64 *end)
41 {
42         if (i915_node_color_differs(node, color))
43                 *start += I915_GTT_PAGE_SIZE;
44
45         /*
46          * Also leave a space between the unallocated reserved node after the
47          * GTT and any objects within the GTT, i.e. we use the color adjustment
48          * to insert a guard page to prevent prefetches crossing over the
49          * GTT boundary.
50          */
51         node = list_next_entry(node, node_list);
52         if (node->color != color)
53                 *end -= I915_GTT_PAGE_SIZE;
54 }
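/*
 * Editorial illustration (derived from the callback above, not from bspec):
 * when drm_mm offers a hole for an object of colour C, a differing colour on
 * the node before the hole bumps *start by one 4 KiB GTT page, and a
 * differing colour on the node after the hole pulls *end in by one page, so
 * differently-coloured neighbours, including the reserved node at the very
 * top of the GTT, are always separated by at least one scratch guard page.
 */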
55
56 static int ggtt_init_hw(struct i915_ggtt *ggtt)
57 {
58         struct drm_i915_private *i915 = ggtt->vm.i915;
59
60         i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
61
62         ggtt->vm.is_ggtt = true;
63
64         /* Only VLV supports read-only GGTT mappings */
65         ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
66
67         if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
68                 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
69
70         if (ggtt->mappable_end) {
71                 if (!io_mapping_init_wc(&ggtt->iomap,
72                                         ggtt->gmadr.start,
73                                         ggtt->mappable_end)) {
74                         ggtt->vm.cleanup(&ggtt->vm);
75                         return -EIO;
76                 }
77
78                 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
79                                               ggtt->mappable_end);
80         }
81
82         intel_ggtt_init_fences(ggtt);
83
84         return 0;
85 }
86
87 /**
88  * i915_ggtt_init_hw - Initialize GGTT hardware
89  * @i915: i915 device
90  */
91 int i915_ggtt_init_hw(struct drm_i915_private *i915)
92 {
93         int ret;
94
95         /*
96          * Note that we use page colouring to enforce a guard page at the
97          * end of the address space. This is required as the CS may prefetch
98          * beyond the end of the batch buffer, across the page boundary,
99          * and beyond the end of the GTT if we do not provide a guard.
100          */
101         ret = ggtt_init_hw(to_gt(i915)->ggtt);
102         if (ret)
103                 return ret;
104
105         return 0;
106 }
107
108 /**
109  * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
110  * @vm: The VM to suspend the mappings for
111  *
112  * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
113  * DPT page table.
114  */
115 void i915_ggtt_suspend_vm(struct i915_address_space *vm)
116 {
117         struct i915_vma *vma, *vn;
118         int save_skip_rewrite;
119
120         drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
121
122 retry:
123         i915_gem_drain_freed_objects(vm->i915);
124
125         mutex_lock(&vm->mutex);
126
127         /*
128          * Skip rewriting PTE on VMA unbind.
129          * FIXME: Use an argument to i915_vma_unbind() instead?
130          */
131         save_skip_rewrite = vm->skip_pte_rewrite;
132         vm->skip_pte_rewrite = true;
133
134         list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
135                 struct drm_i915_gem_object *obj = vma->obj;
136
137                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
138
139                 if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
140                         continue;
141
142                 /* unlikely to race when the GPU is idle, so no need to worry about the slow path */
143                 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
144                         /*
145                          * No dead objects should appear here, GPU should be
146                          * completely idle, and userspace suspended
147                          */
148                         i915_gem_object_get(obj);
149
150                         mutex_unlock(&vm->mutex);
151
152                         i915_gem_object_lock(obj, NULL);
153                         GEM_WARN_ON(i915_vma_unbind(vma));
154                         i915_gem_object_unlock(obj);
155                         i915_gem_object_put(obj);
156
157                         vm->skip_pte_rewrite = save_skip_rewrite;
158                         goto retry;
159                 }
160
161                 if (!i915_vma_is_pinned(vma)) {
162                         i915_vma_wait_for_bind(vma);
163
164                         __i915_vma_evict(vma, false);
165                         drm_mm_remove_node(&vma->node);
166                 }
167
168                 i915_gem_object_unlock(obj);
169         }
170
171         vm->clear_range(vm, 0, vm->total);
172
173         vm->skip_pte_rewrite = save_skip_rewrite;
174
175         mutex_unlock(&vm->mutex);
176 }
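/*
 * Editorial note on the retry protocol above: if the object lock cannot be
 * taken while vm->mutex is held, the mutex is dropped, the vma is unbound
 * under the object lock alone, and the whole walk restarts from "retry",
 * because dropping vm->mutex invalidates the bound_list iteration.
 */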
177
178 void i915_ggtt_suspend(struct i915_ggtt *ggtt)
179 {
180         struct intel_gt *gt;
181
182         i915_ggtt_suspend_vm(&ggtt->vm);
183         ggtt->invalidate(ggtt);
184
185         list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
186                 intel_gt_check_and_clear_faults(gt);
187 }
188
189 void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
190 {
191         struct intel_uncore *uncore = ggtt->vm.gt->uncore;
192
193         spin_lock_irq(&uncore->lock);
194         intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
195         intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
196         spin_unlock_irq(&uncore->lock);
197 }
198
199 static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
200 {
201         /*
202          * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
203          * will be dropped. For WC mappings in general we have 64 byte burst
204          * writes when the WC buffer is flushed, so we can't use it, but have to
205          * resort to an uncached mapping. The WC issue is easily caught by the
206          * readback check when writing GTT PTE entries.
207          */
208         if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
209                 return true;
210
211         return false;
212 }
213
214 static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
215 {
216         struct intel_uncore *uncore = ggtt->vm.gt->uncore;
217
218         /*
219          * Note that as an uncached mmio write, this will flush the
220          * WCB of the writes into the GGTT before it triggers the invalidate.
221          *
222          * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
223          */
224         if (needs_wc_ggtt_mapping(ggtt->vm.i915))
225                 intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
226                                       GFX_FLSH_CNTL_EN);
227 }
228
229 static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
230 {
231         struct intel_uncore *uncore = gt->uncore;
232         intel_wakeref_t wakeref;
233
234         with_intel_runtime_pm_if_active(uncore->rpm, wakeref)
235                 intel_guc_invalidate_tlb_guc(gt_to_guc(gt));
236 }
237
238 static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
239 {
240         struct drm_i915_private *i915 = ggtt->vm.i915;
241         struct intel_gt *gt;
242
243         gen8_ggtt_invalidate(ggtt);
244
245         list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
246                 if (intel_guc_tlb_invalidation_is_available(gt_to_guc(gt)))
247                         guc_ggtt_ct_invalidate(gt);
248                 else if (GRAPHICS_VER(i915) >= 12)
249                         intel_uncore_write_fw(gt->uncore,
250                                               GEN12_GUC_TLB_INV_CR,
251                                               GEN12_GUC_TLB_INV_CR_INVALIDATE);
252                 else
253                         intel_uncore_write_fw(gt->uncore,
254                                               GEN8_GTCR, GEN8_GTCR_INVALIDATE);
255         }
256 }
257
258 static u64 mtl_ggtt_pte_encode(dma_addr_t addr,
259                                unsigned int pat_index,
260                                u32 flags)
261 {
262         gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
263
264         WARN_ON_ONCE(addr & ~GEN12_GGTT_PTE_ADDR_MASK);
265
266         if (flags & PTE_LM)
267                 pte |= GEN12_GGTT_PTE_LM;
268
269         if (pat_index & BIT(0))
270                 pte |= MTL_GGTT_PTE_PAT0;
271
272         if (pat_index & BIT(1))
273                 pte |= MTL_GGTT_PTE_PAT1;
274
275         return pte;
276 }
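/*
 * Worked example (illustrative, following the encoding above): for
 * addr = 0x1_0000_0000, pat_index = 3 and flags = PTE_LM the resulting PTE is
 *
 *   addr | GEN8_PAGE_PRESENT | GEN12_GGTT_PTE_LM |
 *   MTL_GGTT_PTE_PAT0 | MTL_GGTT_PTE_PAT1
 *
 * i.e. both PAT bits are taken straight from bits 0 and 1 of pat_index.
 */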
277
278 u64 gen8_ggtt_pte_encode(dma_addr_t addr,
279                          unsigned int pat_index,
280                          u32 flags)
281 {
282         gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
283
284         if (flags & PTE_LM)
285                 pte |= GEN12_GGTT_PTE_LM;
286
287         return pte;
288 }
289
290 static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
291 {
292         struct intel_gt *gt = ggtt->vm.gt;
293
294         return intel_gt_is_bind_context_ready(gt);
295 }
296
297 static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref)
298 {
299         struct intel_context *ce;
300         struct intel_gt *gt = ggtt->vm.gt;
301
302         if (intel_gt_is_wedged(gt))
303                 return NULL;
304
305         ce = gt->engine[BCS0]->bind_context;
306         GEM_BUG_ON(!ce);
307
308         /*
309          * If the GT is not already awake at this stage, fall back to the
310          * PCI-based GGTT update; otherwise __intel_wakeref_get_first()
311          * would conflict with fs_reclaim trying to allocate memory while
312          * doing rpm_resume().
313          */
314         *wakeref = intel_gt_pm_get_if_awake(gt);
315         if (!*wakeref)
316                 return NULL;
317
318         intel_engine_pm_get(ce->engine);
319
320         return ce;
321 }
322
323 static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
324 {
325         intel_engine_pm_put(ce->engine);
326         intel_gt_pm_put(ce->engine->gt, wakeref);
327 }
328
329 static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
330                                 struct sg_table *pages, u32 num_entries,
331                                 const gen8_pte_t pte)
332 {
333         struct i915_sched_attr attr = {};
334         struct intel_gt *gt = ggtt->vm.gt;
335         const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
336         struct sgt_iter iter;
337         struct i915_request *rq;
338         struct intel_context *ce;
339         intel_wakeref_t wakeref;
340         u32 *cs;
341
342         if (!num_entries)
343                 return true;
344
345         ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
346         if (!ce)
347                 return false;
348
349         if (pages)
350                 iter = __sgt_iter(pages->sgl, true);
351
352         while (num_entries) {
353                 int count = 0;
354                 dma_addr_t addr;
355                 /*
356                  * MI_UPDATE_GTT can update 512 entries in a single command, but that
357                  * ends up triggering an engine reset; 511 works (see the sketch below).
358                  */
359                 u32 n_ptes = min_t(u32, 511, num_entries);
360
361                 if (mutex_lock_interruptible(&ce->timeline->mutex))
362                         goto put_ce;
363
364                 intel_context_enter(ce);
365                 rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC);
366                 intel_context_exit(ce);
367                 if (IS_ERR(rq)) {
368                         GT_TRACE(gt, "Failed to get bind request\n");
369                         mutex_unlock(&ce->timeline->mutex);
370                         goto put_ce;
371                 }
372
373                 cs = intel_ring_begin(rq, 2 * n_ptes + 2);
374                 if (IS_ERR(cs)) {
375                         GT_TRACE(gt, "Failed to get ring space for GGTT bind\n");
376                         i915_request_set_error_once(rq, PTR_ERR(cs));
377                         /* once a request is created, it must be queued */
378                         goto queue_err_rq;
379                 }
380
381                 *cs++ = MI_UPDATE_GTT | (2 * n_ptes);
382                 *cs++ = offset << 12;
383
384                 if (pages) {
385                         for_each_sgt_daddr_next(addr, iter) {
386                                 if (count == n_ptes)
387                                         break;
388                                 *cs++ = lower_32_bits(pte | addr);
389                                 *cs++ = upper_32_bits(pte | addr);
390                                 count++;
391                         }
392                         /* fill remaining with scratch pte, if any */
393                         if (count < n_ptes) {
394                                 memset64((u64 *)cs, scratch_pte,
395                                          n_ptes - count);
396                                 cs += (n_ptes - count) * 2;
397                         }
398                 } else {
399                         memset64((u64 *)cs, pte, n_ptes);
400                         cs += n_ptes * 2;
401                 }
402
403                 intel_ring_advance(rq, cs);
404 queue_err_rq:
405                 i915_request_get(rq);
406                 __i915_request_commit(rq);
407                 __i915_request_queue(rq, &attr);
408
409                 mutex_unlock(&ce->timeline->mutex);
410                 /* This will break if the request is complete or after engine reset */
411                 i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
412                 if (rq->fence.error)
413                         goto err_rq;
414
415                 i915_request_put(rq);
416
417                 num_entries -= n_ptes;
418                 offset += n_ptes;
419         }
420
421         gen8_ggtt_bind_put_ce(ce, wakeref);
422         return true;
423
424 err_rq:
425         i915_request_put(rq);
426 put_ce:
427         gen8_ggtt_bind_put_ce(ce, wakeref);
428         return false;
429 }
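/*
 * For reference, one bind batch emitted by gen8_ggtt_bind_ptes() looks like
 * this (editorial sketch derived from the code above):
 *
 *   MI_UPDATE_GTT | (2 * n_ptes)
 *   offset << 12                     GGTT byte offset of the first PTE
 *   lo/hi dword of PTE[0]
 *   ...
 *   lo/hi dword of PTE[n_ptes - 1]   tail padded with scratch PTEs
 *
 * with n_ptes capped at 511 per request.
 */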
430
431 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
432 {
433         writeq(pte, addr);
434 }
435
436 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
437                                   dma_addr_t addr,
438                                   u64 offset,
439                                   unsigned int pat_index,
440                                   u32 flags)
441 {
442         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
443         gen8_pte_t __iomem *pte =
444                 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
445
446         gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags));
447
448         ggtt->invalidate(ggtt);
449 }
450
451 static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
452                                        dma_addr_t addr, u64 offset,
453                                        unsigned int pat_index, u32 flags)
454 {
455         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
456         gen8_pte_t pte;
457
458         pte = ggtt->vm.pte_encode(addr, pat_index, flags);
459         if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
460             gen8_ggtt_bind_ptes(ggtt, offset, NULL, 1, pte))
461                 return ggtt->invalidate(ggtt);
462
463         gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
464 }
465
466 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
467                                      struct i915_vma_resource *vma_res,
468                                      unsigned int pat_index,
469                                      u32 flags)
470 {
471         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
472         const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
473         gen8_pte_t __iomem *gte;
474         gen8_pte_t __iomem *end;
475         struct sgt_iter iter;
476         dma_addr_t addr;
477
478         /*
479          * Note that we ignore PTE_READ_ONLY here. The caller must be careful
480          * not to allow the user to override access to a read only page.
481          */
482
483         gte = (gen8_pte_t __iomem *)ggtt->gsm;
484         gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
485         end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
486         while (gte < end)
487                 gen8_set_pte(gte++, vm->scratch[0]->encode);
488         end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
489
490         for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
491                 gen8_set_pte(gte++, pte_encode | addr);
492         GEM_BUG_ON(gte > end);
493
494         /* Fill the allocated but "unused" space beyond the end of the buffer */
495         while (gte < end)
496                 gen8_set_pte(gte++, vm->scratch[0]->encode);
497
498         /*
499          * We want to flush the TLBs only after we're certain all the PTE
500          * updates have finished.
501          */
502         ggtt->invalidate(ggtt);
503 }
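/*
 * PTE layout written above (editorial sketch): scratch entries for the
 * leading guard, one PTE per backing page of the object, then scratch
 * entries for any remaining space in the node including the trailing guard.
 */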
504
505 static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
506                                             struct i915_vma_resource *vma_res,
507                                             unsigned int pat_index, u32 flags)
508 {
509         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
510         gen8_pte_t scratch_pte = vm->scratch[0]->encode;
511         gen8_pte_t pte_encode;
512         u64 start, end;
513
514         pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
515         start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
516         end = start + vma_res->guard / I915_GTT_PAGE_SIZE;
517         if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
518                 goto err;
519
520         start = end;
521         end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
522         if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages,
523               vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode))
524                 goto err;
525
526         start += vma_res->node_size / I915_GTT_PAGE_SIZE;
527         if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
528                 goto err;
529
530         return true;
531
532 err:
533         return false;
534 }
535
536 static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
537                                           struct i915_vma_resource *vma_res,
538                                           unsigned int pat_index, u32 flags)
539 {
540         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
541
542         if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
543             __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
544                 return ggtt->invalidate(ggtt);
545
546         gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
547 }
548
549 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
550                                   u64 start, u64 length)
551 {
552         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
553         unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
554         unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
555         const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
556         gen8_pte_t __iomem *gtt_base =
557                 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
558         const int max_entries = ggtt_total_entries(ggtt) - first_entry;
559         int i;
560
561         if (WARN(num_entries > max_entries,
562                  "First entry = %d; Num entries = %d (max=%d)\n",
563                  first_entry, num_entries, max_entries))
564                 num_entries = max_entries;
565
566         for (i = 0; i < num_entries; i++)
567                 gen8_set_pte(&gtt_base[i], scratch_pte);
568 }
569
570 static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
571                                          u64 start, u64 length)
572 {
573         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
574         unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
575         unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
576         const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
577         const int max_entries = ggtt_total_entries(ggtt) - first_entry;
578
579         if (WARN(num_entries > max_entries,
580                  "First entry = %d; Num entries = %d (max=%d)\n",
581                  first_entry, num_entries, max_entries))
582                 num_entries = max_entries;
583
584         if (should_update_ggtt_with_bind(ggtt) && gen8_ggtt_bind_ptes(ggtt, first_entry,
585              NULL, num_entries, scratch_pte))
586                 return ggtt->invalidate(ggtt);
587
588         gen8_ggtt_clear_range(vm, start, length);
589 }
590
591 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
592                                   dma_addr_t addr,
593                                   u64 offset,
594                                   unsigned int pat_index,
595                                   u32 flags)
596 {
597         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
598         gen6_pte_t __iomem *pte =
599                 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
600
601         iowrite32(vm->pte_encode(addr, pat_index, flags), pte);
602
603         ggtt->invalidate(ggtt);
604 }
605
606 /*
607  * Binds an object into the global gtt with the specified cache level.
608  * The object will be accessible to the GPU via commands whose operands
609  * reference offsets within the global GTT as well as accessible by the CPU
610  * through the GMADR mapped BAR (i915->mm.gtt->gtt).
611  */
612 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
613                                      struct i915_vma_resource *vma_res,
614                                      unsigned int pat_index,
615                                      u32 flags)
616 {
617         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
618         gen6_pte_t __iomem *gte;
619         gen6_pte_t __iomem *end;
620         struct sgt_iter iter;
621         dma_addr_t addr;
622
623         gte = (gen6_pte_t __iomem *)ggtt->gsm;
624         gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
625
626         end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
627         while (gte < end)
628                 iowrite32(vm->scratch[0]->encode, gte++);
629         end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
630         for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
631                 iowrite32(vm->pte_encode(addr, pat_index, flags), gte++);
632         GEM_BUG_ON(gte > end);
633
634         /* Fill the allocated but "unused" space beyond the end of the buffer */
635         while (gte < end)
636                 iowrite32(vm->scratch[0]->encode, gte++);
637
638         /*
639          * We want to flush the TLBs only after we're certain all the PTE
640          * updates have finished.
641          */
642         ggtt->invalidate(ggtt);
643 }
644
645 static void nop_clear_range(struct i915_address_space *vm,
646                             u64 start, u64 length)
647 {
648 }
649
650 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
651 {
652         /*
653          * Make sure the internal GAM fifo has been cleared of all GTT
654          * writes before exiting stop_machine(). This guarantees that
655          * any aperture accesses waiting to start in another process
656          * cannot back up behind the GTT writes causing a hang.
657          * The register can be any arbitrary GAM register.
658          */
659         intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
660 }
661
662 struct insert_page {
663         struct i915_address_space *vm;
664         dma_addr_t addr;
665         u64 offset;
666         unsigned int pat_index;
667 };
668
669 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
670 {
671         struct insert_page *arg = _arg;
672
673         gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
674                               arg->pat_index, 0);
675         bxt_vtd_ggtt_wa(arg->vm);
676
677         return 0;
678 }
679
680 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
681                                           dma_addr_t addr,
682                                           u64 offset,
683                                           unsigned int pat_index,
684                                           u32 unused)
685 {
686         struct insert_page arg = { vm, addr, offset, pat_index };
687
688         stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
689 }
690
691 struct insert_entries {
692         struct i915_address_space *vm;
693         struct i915_vma_resource *vma_res;
694         unsigned int pat_index;
695         u32 flags;
696 };
697
698 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
699 {
700         struct insert_entries *arg = _arg;
701
702         gen8_ggtt_insert_entries(arg->vm, arg->vma_res,
703                                  arg->pat_index, arg->flags);
704         bxt_vtd_ggtt_wa(arg->vm);
705
706         return 0;
707 }
708
709 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
710                                              struct i915_vma_resource *vma_res,
711                                              unsigned int pat_index,
712                                              u32 flags)
713 {
714         struct insert_entries arg = { vm, vma_res, pat_index, flags };
715
716         stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
717 }
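/*
 * Editorial note: the __BKL suffix is presumably a nod to the old Big Kernel
 * Lock; stop_machine() runs the update with every other CPU held in a known
 * state, serializing GGTT PTE writes against concurrent aperture accesses as
 * required by the VT-d workaround (see bxt_vtd_ggtt_wa() above and the
 * comment in gen8_gmch_probe()).
 */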
718
719 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
720                                   u64 start, u64 length)
721 {
722         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
723         unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
724         unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
725         gen6_pte_t scratch_pte, __iomem *gtt_base =
726                 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
727         const int max_entries = ggtt_total_entries(ggtt) - first_entry;
728         int i;
729
730         if (WARN(num_entries > max_entries,
731                  "First entry = %d; Num entries = %d (max=%d)\n",
732                  first_entry, num_entries, max_entries))
733                 num_entries = max_entries;
734
735         scratch_pte = vm->scratch[0]->encode;
736         for (i = 0; i < num_entries; i++)
737                 iowrite32(scratch_pte, &gtt_base[i]);
738 }
739
740 void intel_ggtt_bind_vma(struct i915_address_space *vm,
741                          struct i915_vm_pt_stash *stash,
742                          struct i915_vma_resource *vma_res,
743                          unsigned int pat_index,
744                          u32 flags)
745 {
746         u32 pte_flags;
747
748         if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
749                 return;
750
751         vma_res->bound_flags |= flags;
752
753         /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
754         pte_flags = 0;
755         if (vma_res->bi.readonly)
756                 pte_flags |= PTE_READ_ONLY;
757         if (vma_res->bi.lmem)
758                 pte_flags |= PTE_LM;
759
760         vm->insert_entries(vm, vma_res, pat_index, pte_flags);
761         vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
762 }
763
764 void intel_ggtt_unbind_vma(struct i915_address_space *vm,
765                            struct i915_vma_resource *vma_res)
766 {
767         vm->clear_range(vm, vma_res->start, vma_res->vma_size);
768 }
769
770 /*
771  * Reserve the top of the GuC address space for firmware images. Addresses
772  * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
773  * which makes for a suitable range to hold GuC/HuC firmware images if the
774  * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
775  * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
776  * of the same size anyway, which is far more than needed, to keep the logic
777  * in uc_fw_ggtt_offset() simple.
778  */
779 #define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
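/*
 * Editorial example: with GUC_GGTT_TOP defined as 0xFEE00000 (its value at
 * the time of writing), GUC_TOP_RESERVE_SIZE = 4 GiB - 0xFEE00000 =
 * 0x1200000 bytes, i.e. 18 MiB reserved at the top of the GGTT.
 */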
780
781 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
782 {
783         u64 offset;
784         int ret;
785
786         if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
787                 return 0;
788
789         GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
790         offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
791
792         ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
793                                    GUC_TOP_RESERVE_SIZE, offset,
794                                    I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
795         if (ret)
796                 drm_dbg(&ggtt->vm.i915->drm,
797                         "Failed to reserve top of GGTT for GuC\n");
798
799         return ret;
800 }
801
802 static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
803 {
804         if (drm_mm_node_allocated(&ggtt->uc_fw))
805                 drm_mm_remove_node(&ggtt->uc_fw);
806 }
807
808 static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
809 {
810         ggtt_release_guc_top(ggtt);
811         if (drm_mm_node_allocated(&ggtt->error_capture))
812                 drm_mm_remove_node(&ggtt->error_capture);
813         mutex_destroy(&ggtt->error_mutex);
814 }
815
816 static int init_ggtt(struct i915_ggtt *ggtt)
817 {
818         /*
819          * Let GEM Manage all of the aperture.
820          *
821          * However, leave one page at the end still bound to the scratch page.
822          * There are a number of places where the hardware apparently prefetches
823          * past the end of the object, and we've seen multiple hangs with the
824          * GPU head pointer stuck in a batchbuffer bound at the last page of the
825          * aperture.  One page should be enough to keep any prefetching inside
826          * of the aperture.
827          */
828         unsigned long hole_start, hole_end;
829         struct drm_mm_node *entry;
830         int ret;
831
832         /*
833          * GuC requires all resources that we're sharing with it to be placed in
834          * non-WOPCM memory. If GuC is not present or not in use we still need a
835          * small bias as ring wraparound at offset 0 sometimes hangs. No idea
836          * why.
837          */
838         ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
839                                intel_wopcm_guc_size(&ggtt->vm.gt->wopcm));
840
841         ret = intel_vgt_balloon(ggtt);
842         if (ret)
843                 return ret;
844
845         mutex_init(&ggtt->error_mutex);
846         if (ggtt->mappable_end) {
847                 /*
848                  * Reserve a mappable slot for our lockless error capture.
849                  *
850                  * We strongly prefer taking address 0x0 in order to protect
851                  * other critical buffers against accidental overwrites,
852                  * as writing to address 0 is a very common mistake.
853                  *
854                  * Since 0 may already be in use by the system (e.g. the BIOS
855                  * framebuffer), we let the reservation fail quietly and hope
856                  * 0 remains reserved always.
857                  *
858                  * If we fail to reserve 0, and then fail to find any space
859                  * for an error-capture, remain silent. We can afford not
860                  * to reserve an error_capture node as we have fallback
861                  * paths, and we trust that 0 will remain reserved. However,
862                  * the only likely reason for failure to insert is a driver
863                  * bug, which we expect to cause other failures...
864                  *
865                  * Since CPU can perform speculative reads on error capture
866                  * (write-combining allows it) add scratch page after error
867                  * capture to avoid DMAR errors.
868                  */
869                 ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE;
870                 ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
871                 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
872                         drm_mm_insert_node_in_range(&ggtt->vm.mm,
873                                                     &ggtt->error_capture,
874                                                     ggtt->error_capture.size, 0,
875                                                     ggtt->error_capture.color,
876                                                     0, ggtt->mappable_end,
877                                                     DRM_MM_INSERT_LOW);
878         }
879         if (drm_mm_node_allocated(&ggtt->error_capture)) {
880                 u64 start = ggtt->error_capture.start;
881                 u64 size = ggtt->error_capture.size;
882
883                 ggtt->vm.scratch_range(&ggtt->vm, start, size);
884                 drm_dbg(&ggtt->vm.i915->drm,
885                         "Reserved GGTT:[%llx, %llx] for use by error capture\n",
886                         start, start + size);
887         }
888
889         /*
890          * The upper portion of the GuC address space has a sizeable hole
891          * (several MB) that is inaccessible by GuC. Reserve this range within
892          * GGTT as it can comfortably hold GuC/HuC firmware images.
893          */
894         ret = ggtt_reserve_guc_top(ggtt);
895         if (ret)
896                 goto err;
897
898         /* Clear any non-preallocated blocks */
899         drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
900                 drm_dbg(&ggtt->vm.i915->drm,
901                         "clearing unused GTT space: [%lx, %lx]\n",
902                         hole_start, hole_end);
903                 ggtt->vm.clear_range(&ggtt->vm, hole_start,
904                                      hole_end - hole_start);
905         }
906
907         /* And finally clear the reserved guard page */
908         ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
909
910         return 0;
911
912 err:
913         cleanup_init_ggtt(ggtt);
914         return ret;
915 }
916
917 static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
918                                   struct i915_vm_pt_stash *stash,
919                                   struct i915_vma_resource *vma_res,
920                                   unsigned int pat_index,
921                                   u32 flags)
922 {
923         u32 pte_flags;
924
925         /* Currently applicable only to VLV */
926         pte_flags = 0;
927         if (vma_res->bi.readonly)
928                 pte_flags |= PTE_READ_ONLY;
929
930         if (flags & I915_VMA_LOCAL_BIND)
931                 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
932                                stash, vma_res, pat_index, flags);
933
934         if (flags & I915_VMA_GLOBAL_BIND)
935                 vm->insert_entries(vm, vma_res, pat_index, pte_flags);
936
937         vma_res->bound_flags |= flags;
938 }
939
940 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
941                                     struct i915_vma_resource *vma_res)
942 {
943         if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
944                 vm->clear_range(vm, vma_res->start, vma_res->vma_size);
945
946         if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
947                 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
948 }
949
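/*
 * Editorial note: with INTEL_PPGTT_ALIASING a single ppgtt is created whose
 * layout mirrors ("aliases") the GGTT, and it is pre-allocated over the whole
 * GGTT range below, so I915_VMA_LOCAL_BIND mappings resolve to the same
 * offsets as their GGTT bindings (see aliasing_gtt_bind_vma() above).
 */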
950 static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
951 {
952         struct i915_vm_pt_stash stash = {};
953         struct i915_ppgtt *ppgtt;
954         int err;
955
956         ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
957         if (IS_ERR(ppgtt))
958                 return PTR_ERR(ppgtt);
959
960         if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
961                 err = -ENODEV;
962                 goto err_ppgtt;
963         }
964
965         err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
966         if (err)
967                 goto err_ppgtt;
968
969         i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
970         err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
971         i915_gem_object_unlock(ppgtt->vm.scratch[0]);
972         if (err)
973                 goto err_stash;
974
975         /*
976          * Note we only pre-allocate as far as the end of the global
977          * GTT. On 48b / 4-level page-tables, the difference is very,
978          * very significant! We have to preallocate as GVT/vgpu does
979          * not like the page directory disappearing.
980          */
981         ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
982
983         ggtt->alias = ppgtt;
984         ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
985
986         GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
987         ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
988
989         GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
990         ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
991
992         i915_vm_free_pt_stash(&ppgtt->vm, &stash);
993         return 0;
994
995 err_stash:
996         i915_vm_free_pt_stash(&ppgtt->vm, &stash);
997 err_ppgtt:
998         i915_vm_put(&ppgtt->vm);
999         return err;
1000 }
1001
1002 static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
1003 {
1004         struct i915_ppgtt *ppgtt;
1005
1006         ppgtt = fetch_and_zero(&ggtt->alias);
1007         if (!ppgtt)
1008                 return;
1009
1010         i915_vm_put(&ppgtt->vm);
1011
1012         ggtt->vm.vma_ops.bind_vma   = intel_ggtt_bind_vma;
1013         ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
1014 }
1015
1016 int i915_init_ggtt(struct drm_i915_private *i915)
1017 {
1018         int ret;
1019
1020         ret = init_ggtt(to_gt(i915)->ggtt);
1021         if (ret)
1022                 return ret;
1023
1024         if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
1025                 ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
1026                 if (ret)
1027                         cleanup_init_ggtt(to_gt(i915)->ggtt);
1028         }
1029
1030         return 0;
1031 }
1032
1033 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
1034 {
1035         struct i915_vma *vma, *vn;
1036
1037         flush_workqueue(ggtt->vm.i915->wq);
1038         i915_gem_drain_freed_objects(ggtt->vm.i915);
1039
1040         mutex_lock(&ggtt->vm.mutex);
1041
1042         ggtt->vm.skip_pte_rewrite = true;
1043
1044         list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
1045                 struct drm_i915_gem_object *obj = vma->obj;
1046                 bool trylock;
1047
1048                 trylock = i915_gem_object_trylock(obj, NULL);
1049                 WARN_ON(!trylock);
1050
1051                 WARN_ON(__i915_vma_unbind(vma));
1052                 if (trylock)
1053                         i915_gem_object_unlock(obj);
1054         }
1055
1056         if (drm_mm_node_allocated(&ggtt->error_capture))
1057                 drm_mm_remove_node(&ggtt->error_capture);
1058         mutex_destroy(&ggtt->error_mutex);
1059
1060         ggtt_release_guc_top(ggtt);
1061         intel_vgt_deballoon(ggtt);
1062
1063         ggtt->vm.cleanup(&ggtt->vm);
1064
1065         mutex_unlock(&ggtt->vm.mutex);
1066         i915_address_space_fini(&ggtt->vm);
1067
1068         arch_phys_wc_del(ggtt->mtrr);
1069
1070         if (ggtt->iomap.size)
1071                 io_mapping_fini(&ggtt->iomap);
1072 }
1073
1074 /**
1075  * i915_ggtt_driver_release - Clean up GGTT hardware initialization
1076  * @i915: i915 device
1077  */
1078 void i915_ggtt_driver_release(struct drm_i915_private *i915)
1079 {
1080         struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1081
1082         fini_aliasing_ppgtt(ggtt);
1083
1084         intel_ggtt_fini_fences(ggtt);
1085         ggtt_cleanup_hw(ggtt);
1086 }
1087
1088 /**
1089  * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
1090  * all free objects have been drained.
1091  * @i915: i915 device
1092  */
1093 void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
1094 {
1095         struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1096
1097         GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
1098         dma_resv_fini(&ggtt->vm._resv);
1099 }
1100
1101 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
1102 {
1103         snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
1104         snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
1105         return snb_gmch_ctl << 20;
1106 }
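/*
 * Editorial example: on SNB the GGMS field gives the GTT size in MiB, so a
 * field value of 2 yields 2 MiB of PTEs, which at 4 bytes per gen6 PTE and
 * 4 KiB per page maps 2 GiB of GGTT address space (see gen6_gmch_probe()).
 */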
1107
1108 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1109 {
1110         bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
1111         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1112         if (bdw_gmch_ctl)
1113                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1114
1115 #ifdef CONFIG_X86_32
1116         /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
1117         if (bdw_gmch_ctl > 4)
1118                 bdw_gmch_ctl = 4;
1119 #endif
1120
1121         return bdw_gmch_ctl << 20;
1122 }
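/*
 * Editorial example: on BDW+ the GGMS field is an exponent, so a value of 3
 * yields 1 << 3 = 8 MiB of PTEs, which at 8 bytes per gen8 PTE and 4 KiB per
 * page maps 4 GiB of GGTT; the CONFIG_X86_32 clamp above caps this at 4 MiB
 * of PTEs, i.e. a 2 GiB GGTT.
 */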
1123
1124 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
1125 {
1126         gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
1127         gmch_ctrl &= SNB_GMCH_GGMS_MASK;
1128
1129         if (gmch_ctrl)
1130                 return 1 << (20 + gmch_ctrl);
1131
1132         return 0;
1133 }
1134
1135 static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
1136 {
1137         /*
1138          * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
1139          * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
1140          */
1141         GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
1142         return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
1143 }
1144
1145 static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
1146 {
1147         return gen6_gttmmadr_size(i915) / 2;
1148 }
1149
1150 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
1151 {
1152         struct drm_i915_private *i915 = ggtt->vm.i915;
1153         struct intel_uncore *uncore = ggtt->vm.gt->uncore;
1154         struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
1155         phys_addr_t phys_addr;
1156         u32 pte_flags;
1157         int ret;
1158
1159         GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
1160
1161         if (i915_direct_stolen_access(i915)) {
1162                 drm_dbg(&i915->drm, "Using direct GSM access\n");
1163                 phys_addr = intel_uncore_read64(uncore, GEN6_GSMBASE) & GEN11_BDSM_MASK;
1164         } else {
1165                 phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
1166         }
1167
1168         if (needs_wc_ggtt_mapping(i915))
1169                 ggtt->gsm = ioremap_wc(phys_addr, size);
1170         else
1171                 ggtt->gsm = ioremap(phys_addr, size);
1172
1173         if (!ggtt->gsm) {
1174                 drm_err(&i915->drm, "Failed to map the ggtt page table\n");
1175                 return -ENOMEM;
1176         }
1177
1178         kref_init(&ggtt->vm.resv_ref);
1179         ret = setup_scratch_page(&ggtt->vm);
1180         if (ret) {
1181                 drm_err(&i915->drm, "Scratch setup failed\n");
1182                 /* iounmap will also get called at remove, but meh */
1183                 iounmap(ggtt->gsm);
1184                 return ret;
1185         }
1186
1187         pte_flags = 0;
1188         if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
1189                 pte_flags |= PTE_LM;
1190
1191         ggtt->vm.scratch[0]->encode =
1192                 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
1193                                     i915_gem_get_pat_index(i915,
1194                                                            I915_CACHE_NONE),
1195                                     pte_flags);
1196
1197         return 0;
1198 }
1199
1200 static void gen6_gmch_remove(struct i915_address_space *vm)
1201 {
1202         struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
1203
1204         iounmap(ggtt->gsm);
1205         free_scratch(vm);
1206 }
1207
1208 static struct resource pci_resource(struct pci_dev *pdev, int bar)
1209 {
1210         return DEFINE_RES_MEM(pci_resource_start(pdev, bar),
1211                               pci_resource_len(pdev, bar));
1212 }
1213
1214 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
1215 {
1216         struct drm_i915_private *i915 = ggtt->vm.i915;
1217         struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
1218         unsigned int size;
1219         u16 snb_gmch_ctl;
1220
1221         if (!HAS_LMEM(i915) && !HAS_LMEMBAR_SMEM_STOLEN(i915)) {
1222                 if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR))
1223                         return -ENXIO;
1224
1225                 ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR);
1226                 ggtt->mappable_end = resource_size(&ggtt->gmadr);
1227         }
1228
1229         pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1230         if (IS_CHERRYVIEW(i915))
1231                 size = chv_get_total_gtt_size(snb_gmch_ctl);
1232         else
1233                 size = gen8_get_total_gtt_size(snb_gmch_ctl);
1234
1235         ggtt->vm.alloc_pt_dma = alloc_pt_dma;
1236         ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
1237         ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
1238
1239         ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
1240         ggtt->vm.cleanup = gen6_gmch_remove;
1241         ggtt->vm.insert_page = gen8_ggtt_insert_page;
1242         ggtt->vm.clear_range = nop_clear_range;
1243         ggtt->vm.scratch_range = gen8_ggtt_clear_range;
1244
1245         ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
1246
1247         /*
1248          * Serialize GTT updates with aperture access on BXT if VT-d is on,
1249          * and always on CHV.
1250          */
1251         if (intel_vm_no_concurrent_access_wa(i915)) {
1252                 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
1253                 ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
1254
1255                 /*
1256                  * Calling the stop_machine() version of the GGTT update
1257                  * function on the error capture/reset path would raise a
1258                  * lockdep warning. Allow calling gen8_ggtt_insert_* directly
1259                  * on the reset path, which is safe from parallel GGTT updates.
1260                  */
1261                 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
1262                 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
1263
1264                 ggtt->vm.bind_async_flags =
1265                         I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
1266         }
1267
1268         if (i915_ggtt_require_binder(i915)) {
1269                 ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
1270                 ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
1271                 ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
1272                 /*
1273                  * When the GPU is hung, we might bind VMAs for error capture.
1274                  * Fall back to CPU GGTT updates in that case.
1275                  */
1276                 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
1277         }
1278
1279         if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
1280                 ggtt->invalidate = guc_ggtt_invalidate;
1281         else
1282                 ggtt->invalidate = gen8_ggtt_invalidate;
1283
1284         ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
1285         ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;
1286
1287         if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
1288                 ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
1289         else
1290                 ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
1291
1292         return ggtt_probe_common(ggtt, size);
1293 }
1294
1295 /*
1296  * For pre-gen8 platforms pat_index is the same as enum i915_cache_level,
1297  * so the switch-case statements in these PTE encode functions are still valid.
1298  * See translation table LEGACY_CACHELEVEL.
1299  */
1300 static u64 snb_pte_encode(dma_addr_t addr,
1301                           unsigned int pat_index,
1302                           u32 flags)
1303 {
1304         gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1305
1306         switch (pat_index) {
1307         case I915_CACHE_L3_LLC:
1308         case I915_CACHE_LLC:
1309                 pte |= GEN6_PTE_CACHE_LLC;
1310                 break;
1311         case I915_CACHE_NONE:
1312                 pte |= GEN6_PTE_UNCACHED;
1313                 break;
1314         default:
1315                 MISSING_CASE(pat_index);
1316         }
1317
1318         return pte;
1319 }
1320
1321 static u64 ivb_pte_encode(dma_addr_t addr,
1322                           unsigned int pat_index,
1323                           u32 flags)
1324 {
1325         gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1326
1327         switch (pat_index) {
1328         case I915_CACHE_L3_LLC:
1329                 pte |= GEN7_PTE_CACHE_L3_LLC;
1330                 break;
1331         case I915_CACHE_LLC:
1332                 pte |= GEN6_PTE_CACHE_LLC;
1333                 break;
1334         case I915_CACHE_NONE:
1335                 pte |= GEN6_PTE_UNCACHED;
1336                 break;
1337         default:
1338                 MISSING_CASE(pat_index);
1339         }
1340
1341         return pte;
1342 }
1343
1344 static u64 byt_pte_encode(dma_addr_t addr,
1345                           unsigned int pat_index,
1346                           u32 flags)
1347 {
1348         gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1349
1350         if (!(flags & PTE_READ_ONLY))
1351                 pte |= BYT_PTE_WRITEABLE;
1352
1353         if (pat_index != I915_CACHE_NONE)
1354                 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
1355
1356         return pte;
1357 }
1358
1359 static u64 hsw_pte_encode(dma_addr_t addr,
1360                           unsigned int pat_index,
1361                           u32 flags)
1362 {
1363         gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1364
1365         if (pat_index != I915_CACHE_NONE)
1366                 pte |= HSW_WB_LLC_AGE3;
1367
1368         return pte;
1369 }
1370
1371 static u64 iris_pte_encode(dma_addr_t addr,
1372                            unsigned int pat_index,
1373                            u32 flags)
1374 {
1375         gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1376
1377         switch (pat_index) {
1378         case I915_CACHE_NONE:
1379                 break;
1380         case I915_CACHE_WT:
1381                 pte |= HSW_WT_ELLC_LLC_AGE3;
1382                 break;
1383         default:
1384                 pte |= HSW_WB_ELLC_LLC_AGE3;
1385                 break;
1386         }
1387
1388         return pte;
1389 }
1390
1391 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
1392 {
1393         struct drm_i915_private *i915 = ggtt->vm.i915;
1394         struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
1395         unsigned int size;
1396         u16 snb_gmch_ctl;
1397
1398         if (!i915_pci_resource_valid(pdev, GEN4_GMADR_BAR))
1399                 return -ENXIO;
1400
1401         ggtt->gmadr = pci_resource(pdev, GEN4_GMADR_BAR);
1402         ggtt->mappable_end = resource_size(&ggtt->gmadr);
1403
1404         /*
1405          * 64/512MB is the current min/max we actually know of, but this is
1406          * just a coarse sanity check.
1407          */
1408         if (ggtt->mappable_end < (64 << 20) ||
1409             ggtt->mappable_end > (512 << 20)) {
1410                 drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
1411                         &ggtt->mappable_end);
1412                 return -ENXIO;
1413         }
1414
1415         pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1416
1417         size = gen6_get_total_gtt_size(snb_gmch_ctl);
1418         ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
1419
1420         ggtt->vm.alloc_pt_dma = alloc_pt_dma;
1421         ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
1422
1423         ggtt->vm.clear_range = nop_clear_range;
1424         if (!HAS_FULL_PPGTT(i915))
1425                 ggtt->vm.clear_range = gen6_ggtt_clear_range;
1426         ggtt->vm.scratch_range = gen6_ggtt_clear_range;
1427         ggtt->vm.insert_page = gen6_ggtt_insert_page;
1428         ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
1429         ggtt->vm.cleanup = gen6_gmch_remove;
1430
1431         ggtt->invalidate = gen6_ggtt_invalidate;
1432
1433         if (HAS_EDRAM(i915))
1434                 ggtt->vm.pte_encode = iris_pte_encode;
1435         else if (IS_HASWELL(i915))
1436                 ggtt->vm.pte_encode = hsw_pte_encode;
1437         else if (IS_VALLEYVIEW(i915))
1438                 ggtt->vm.pte_encode = byt_pte_encode;
1439         else if (GRAPHICS_VER(i915) >= 7)
1440                 ggtt->vm.pte_encode = ivb_pte_encode;
1441         else
1442                 ggtt->vm.pte_encode = snb_pte_encode;
1443
1444         ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
1445         ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;
1446
1447         return ggtt_probe_common(ggtt, size);
1448 }
1449
1450 static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
1451 {
1452         struct drm_i915_private *i915 = gt->i915;
1453         int ret;
1454
1455         ggtt->vm.gt = gt;
1456         ggtt->vm.i915 = i915;
1457         ggtt->vm.dma = i915->drm.dev;
1458         dma_resv_init(&ggtt->vm._resv);
1459
1460         if (GRAPHICS_VER(i915) >= 8)
1461                 ret = gen8_gmch_probe(ggtt);
1462         else if (GRAPHICS_VER(i915) >= 6)
1463                 ret = gen6_gmch_probe(ggtt);
1464         else
1465                 ret = intel_ggtt_gmch_probe(ggtt);
1466
1467         if (ret) {
1468                 dma_resv_fini(&ggtt->vm._resv);
1469                 return ret;
1470         }
1471
1472         if ((ggtt->vm.total - 1) >> 32) {
1473                 drm_err(&i915->drm,
1474                         "We never expected a Global GTT with more than 32bits"
1475                         " of address space! Found %lldM!\n",
1476                         ggtt->vm.total >> 20);
1477                 ggtt->vm.total = 1ULL << 32;
1478                 ggtt->mappable_end =
1479                         min_t(u64, ggtt->mappable_end, ggtt->vm.total);
1480         }
1481
1482         if (ggtt->mappable_end > ggtt->vm.total) {
1483                 drm_err(&i915->drm,
1484                         "mappable aperture extends past end of GGTT,"
1485                         " aperture=%pa, total=%llx\n",
1486                         &ggtt->mappable_end, ggtt->vm.total);
1487                 ggtt->mappable_end = ggtt->vm.total;
1488         }
1489
1490         /* GMADR is the PCI mmio aperture into the global GTT. */
1491         drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
1492         drm_dbg(&i915->drm, "GMADR size = %lluM\n",
1493                 (u64)ggtt->mappable_end >> 20);
1494         drm_dbg(&i915->drm, "DSM size = %lluM\n",
1495                 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
1496
1497         return 0;
1498 }
1499
1500 /**
1501  * i915_ggtt_probe_hw - Probe GGTT hardware location
1502  * @i915: i915 device
1503  */
1504 int i915_ggtt_probe_hw(struct drm_i915_private *i915)
1505 {
1506         struct intel_gt *gt;
1507         int ret, i;
1508
1509         for_each_gt(gt, i915, i) {
1510                 ret = intel_gt_assign_ggtt(gt);
1511                 if (ret)
1512                         return ret;
1513         }
1514
1515         ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
1516         if (ret)
1517                 return ret;
1518
1519         if (i915_vtd_active(i915))
1520                 drm_info(&i915->drm, "VT-d active for gfx access\n");
1521
1522         return 0;
1523 }
1524
1525 struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915)
1526 {
1527         struct i915_ggtt *ggtt;
1528
1529         ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL);
1530         if (!ggtt)
1531                 return ERR_PTR(-ENOMEM);
1532
1533         INIT_LIST_HEAD(&ggtt->gt_list);
1534
1535         return ggtt;
1536 }
1537
1538 int i915_ggtt_enable_hw(struct drm_i915_private *i915)
1539 {
1540         if (GRAPHICS_VER(i915) < 6)
1541                 return intel_ggtt_gmch_enable_hw(i915);
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
1548  * @vm: The VM to restore the mappings for
1549  *
1550  * Restore the memory mappings for all objects mapped to HW via the GGTT or a
1551  * DPT page table.
1552  *
1553  * Returns %true if restoring the mapping for any object that was in a write
1554  * domain before suspend.
1555  */
1556 bool i915_ggtt_resume_vm(struct i915_address_space *vm)
1557 {
1558         struct i915_vma *vma;
1559         bool write_domain_objs = false;
1560
1561         drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
1562
1563         /* First fill our portion of the GTT with scratch pages */
1564         vm->clear_range(vm, 0, vm->total);
1565
1566         /* clflush objects bound into the GGTT and rebind them. */
1567         list_for_each_entry(vma, &vm->bound_list, vm_link) {
1568                 struct drm_i915_gem_object *obj = vma->obj;
1569                 unsigned int was_bound =
1570                         atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
1571
1572                 GEM_BUG_ON(!was_bound);
1573
1574                 /*
1575                  * Clear the bound flags of the vma resource to allow
1576                  * ptes to be repopulated.
1577                  */
1578                 vma->resource->bound_flags = 0;
1579                 vma->ops->bind_vma(vm, NULL, vma->resource,
1580                                    obj ? obj->pat_index :
1581                                          i915_gem_get_pat_index(vm->i915,
1582                                                                 I915_CACHE_NONE),
1583                                    was_bound);
1584
1585                 if (obj) { /* only used during resume => exclusive access */
1586                         write_domain_objs |= fetch_and_zero(&obj->write_domain);
1587                         obj->read_domains |= I915_GEM_DOMAIN_GTT;
1588                 }
1589         }
1590
1591         return write_domain_objs;
1592 }
1593
1594 void i915_ggtt_resume(struct i915_ggtt *ggtt)
1595 {
1596         struct intel_gt *gt;
1597         bool flush;
1598
1599         list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
1600                 intel_gt_check_and_clear_faults(gt);
1601
1602         flush = i915_ggtt_resume_vm(&ggtt->vm);
1603
1604         if (drm_mm_node_allocated(&ggtt->error_capture))
1605                 ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
1606                                        ggtt->error_capture.size);
1607
1608         list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
1609                 intel_uc_resume_mappings(&gt->uc);
1610
1611         ggtt->invalidate(ggtt);
1612
1613         if (flush)
1614                 wbinvd_on_all_cpus();
1615
1616         intel_ggtt_restore_fences(ggtt);
1617 }