linux.git: drivers/gpu/drm/i915/i915_vma.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include "i915_vma.h"
26
27 #include "i915_drv.h"
28 #include "intel_ringbuffer.h"
29 #include "intel_frontbuffer.h"
30
31 #include <drm/drm_gem.h>
32
33 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
34
35 #include <linux/stackdepot.h>
36
37 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
38 {
39         unsigned long entries[12];
40         struct stack_trace trace = {
41                 .entries = entries,
42                 .max_entries = ARRAY_SIZE(entries),
43         };
44         char buf[512];
45
46         if (!vma->node.stack) {
47                 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
48                                  vma->node.start, vma->node.size, reason);
49                 return;
50         }
51
52         depot_fetch_stack(vma->node.stack, &trace);
53         snprint_stack_trace(buf, sizeof(buf), &trace, 0);
54         DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
55                          vma->node.start, vma->node.size, reason, buf);
56 }
57
58 #else
59
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
61 {
62 }
63
64 #endif
65
66 struct i915_vma_active {
67         struct i915_gem_active base;
68         struct i915_vma *vma;
69         struct rb_node node;
70         u64 timeline;
71 };
72
73 static void
74 __i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
75 {
76         struct drm_i915_gem_object *obj = vma->obj;
77
78         GEM_BUG_ON(!i915_vma_is_active(vma));
79         if (--vma->active_count)
80                 return;
81
82         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
83         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
84
85         GEM_BUG_ON(!i915_gem_object_is_active(obj));
86         if (--obj->active_count)
87                 return;
88
89         /* Prune the shared fence arrays iff completely idle (inc. external) */
90         if (reservation_object_trylock(obj->resv)) {
91                 if (reservation_object_test_signaled_rcu(obj->resv, true))
92                         reservation_object_add_excl_fence(obj->resv, NULL);
93                 reservation_object_unlock(obj->resv);
94         }
95
96         /* Bump our place on the bound list to keep it roughly in LRU order
97          * so that we don't steal from recently used but inactive objects
98          * (unless we are forced to ofc!)
99          */
100         spin_lock(&rq->i915->mm.obj_lock);
101         if (obj->bind_count)
102                 list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
103         spin_unlock(&rq->i915->mm.obj_lock);
104
105         obj->mm.dirty = true; /* be paranoid  */
106
107         if (i915_gem_object_has_active_reference(obj)) {
108                 i915_gem_object_clear_active_reference(obj);
109                 i915_gem_object_put(obj);
110         }
111 }
112
113 static void
114 i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
115 {
116         struct i915_vma_active *active =
117                 container_of(base, typeof(*active), base);
118
119         __i915_vma_retire(active->vma, rq);
120 }
121
122 static void
123 i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
124 {
125         __i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
126 }
127
128 static struct i915_vma *
129 vma_create(struct drm_i915_gem_object *obj,
130            struct i915_address_space *vm,
131            const struct i915_ggtt_view *view)
132 {
133         struct i915_vma *vma;
134         struct rb_node *rb, **p;
135
136         /* The aliasing_ppgtt should never be used directly! */
137         GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
138
139         vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
140         if (vma == NULL)
141                 return ERR_PTR(-ENOMEM);
142
143         vma->active = RB_ROOT;
144
145         init_request_active(&vma->last_active, i915_vma_last_retire);
146         init_request_active(&vma->last_fence, NULL);
147         vma->vm = vm;
148         vma->ops = &vm->vma_ops;
149         vma->obj = obj;
150         vma->resv = obj->resv;
151         vma->size = obj->base.size;
152         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
153
154         if (view && view->type != I915_GGTT_VIEW_NORMAL) {
155                 vma->ggtt_view = *view;
156                 if (view->type == I915_GGTT_VIEW_PARTIAL) {
157                         GEM_BUG_ON(range_overflows_t(u64,
158                                                      view->partial.offset,
159                                                      view->partial.size,
160                                                      obj->base.size >> PAGE_SHIFT));
161                         vma->size = view->partial.size;
162                         vma->size <<= PAGE_SHIFT;
163                         GEM_BUG_ON(vma->size > obj->base.size);
164                 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
165                         vma->size = intel_rotation_info_size(&view->rotated);
166                         vma->size <<= PAGE_SHIFT;
167                 }
168         }
169
170         if (unlikely(vma->size > vm->total))
171                 goto err_vma;
172
173         GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
174
175         if (i915_is_ggtt(vm)) {
176                 if (unlikely(overflows_type(vma->size, u32)))
177                         goto err_vma;
178
179                 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
180                                                       i915_gem_object_get_tiling(obj),
181                                                       i915_gem_object_get_stride(obj));
182                 if (unlikely(vma->fence_size < vma->size || /* overflow */
183                              vma->fence_size > vm->total))
184                         goto err_vma;
185
186                 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
187
188                 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
189                                                                 i915_gem_object_get_tiling(obj),
190                                                                 i915_gem_object_get_stride(obj));
191                 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
192
193                 /*
194                  * We put the GGTT vma at the start of the vma-list, followed
195                  * by the ppGGTT vma. This allows us to break early when
196                  * iterating over only the GGTT vma for an object, see
197                  * for_each_ggtt_vma()
198                  */
199                 vma->flags |= I915_VMA_GGTT;
200                 list_add(&vma->obj_link, &obj->vma_list);
201         } else {
202                 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
203                 list_add_tail(&vma->obj_link, &obj->vma_list);
204         }
205
206         rb = NULL;
207         p = &obj->vma_tree.rb_node;
208         while (*p) {
209                 struct i915_vma *pos;
210
211                 rb = *p;
212                 pos = rb_entry(rb, struct i915_vma, obj_node);
213                 if (i915_vma_compare(pos, vm, view) < 0)
214                         p = &rb->rb_right;
215                 else
216                         p = &rb->rb_left;
217         }
218         rb_link_node(&vma->obj_node, rb, p);
219         rb_insert_color(&vma->obj_node, &obj->vma_tree);
220         list_add(&vma->vm_link, &vm->unbound_list);
221
222         return vma;
223
224 err_vma:
225         kmem_cache_free(vm->i915->vmas, vma);
226         return ERR_PTR(-E2BIG);
227 }
228
229 static struct i915_vma *
230 vma_lookup(struct drm_i915_gem_object *obj,
231            struct i915_address_space *vm,
232            const struct i915_ggtt_view *view)
233 {
234         struct rb_node *rb;
235
236         rb = obj->vma_tree.rb_node;
237         while (rb) {
238                 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
239                 long cmp;
240
241                 cmp = i915_vma_compare(vma, vm, view);
242                 if (cmp == 0)
243                         return vma;
244
245                 if (cmp < 0)
246                         rb = rb->rb_right;
247                 else
248                         rb = rb->rb_left;
249         }
250
251         return NULL;
252 }
253
254 /**
255  * i915_vma_instance - return the singleton instance of the VMA
256  * @obj: parent &struct drm_i915_gem_object to be mapped
257  * @vm: address space in which the mapping is located
258  * @view: additional mapping requirements
259  *
260  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
261  * the same @view characteristics. If a match is not found, one is created.
262  * Once created, the VMA is kept until either the object is freed, or the
263  * address space is closed.
264  *
265  * Must be called with struct_mutex held.
266  *
267  * Returns the vma, or an error pointer.
268  */
269 struct i915_vma *
270 i915_vma_instance(struct drm_i915_gem_object *obj,
271                   struct i915_address_space *vm,
272                   const struct i915_ggtt_view *view)
273 {
274         struct i915_vma *vma;
275
276         lockdep_assert_held(&obj->base.dev->struct_mutex);
277         GEM_BUG_ON(view && !i915_is_ggtt(vm));
278         GEM_BUG_ON(vm->closed);
279
280         vma = vma_lookup(obj, vm, view);
281         if (!vma)
282                 vma = vma_create(obj, vm, view);
283
284         GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
285         GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
286         return vma;
287 }
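/*
 * Editorial example, not part of the original source: a minimal sketch of the
 * usual lookup-then-pin pattern built on i915_vma_instance(). It assumes the
 * i915_vma_pin()/i915_vma_unpin() helpers, the PIN_GLOBAL flag and
 * i915_ggtt_offset() declared in the driver headers, and a GGTT address
 * space reachable as &i915->ggtt.vm.
 *
 *	struct i915_vma *vma;
 *	int err;
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *
 *	... use i915_ggtt_offset(vma) for the bound GGTT address ...
 *
 *	i915_vma_unpin(vma);
 */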
288
289 /**
290  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
291  * @vma: VMA to map
292  * @cache_level: mapping cache level
293  * @flags: flags like global or local mapping
294  *
295  * DMA addresses are taken from the scatter-gather table of this object (or of
296  * this VMA in case of non-default GGTT views) and PTE entries set up.
297  * Note that DMA addresses are also the only part of the SG table we care about.
298  */
299 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
300                   u32 flags)
301 {
302         u32 bind_flags;
303         u32 vma_flags;
304         int ret;
305
306         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
307         GEM_BUG_ON(vma->size > vma->node.size);
308
309         if (GEM_WARN_ON(range_overflows(vma->node.start,
310                                         vma->node.size,
311                                         vma->vm->total)))
312                 return -ENODEV;
313
314         if (GEM_WARN_ON(!flags))
315                 return -EINVAL;
316
317         bind_flags = 0;
318         if (flags & PIN_GLOBAL)
319                 bind_flags |= I915_VMA_GLOBAL_BIND;
320         if (flags & PIN_USER)
321                 bind_flags |= I915_VMA_LOCAL_BIND;
322
323         vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
324         if (flags & PIN_UPDATE)
325                 bind_flags |= vma_flags;
326         else
327                 bind_flags &= ~vma_flags;
328         if (bind_flags == 0)
329                 return 0;
330
331         GEM_BUG_ON(!vma->pages);
332
333         trace_i915_vma_bind(vma, bind_flags);
334         ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
335         if (ret)
336                 return ret;
337
338         vma->flags |= bind_flags;
339         return 0;
340 }
341
342 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
343 {
344         void __iomem *ptr;
345         int err;
346
347         /* Access through the GTT requires the device to be awake. */
348         assert_rpm_wakelock_held(vma->vm->i915);
349
350         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
351         if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
352                 err = -ENODEV;
353                 goto err;
354         }
355
356         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
357         GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
358
359         ptr = vma->iomap;
360         if (ptr == NULL) {
361                 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
362                                         vma->node.start,
363                                         vma->node.size);
364                 if (ptr == NULL) {
365                         err = -ENOMEM;
366                         goto err;
367                 }
368
369                 vma->iomap = ptr;
370         }
371
372         __i915_vma_pin(vma);
373
374         err = i915_vma_pin_fence(vma);
375         if (err)
376                 goto err_unpin;
377
378         i915_vma_set_ggtt_write(vma);
379         return ptr;
380
381 err_unpin:
382         __i915_vma_unpin(vma);
383 err:
384         return IO_ERR_PTR(err);
385 }
386
387 void i915_vma_flush_writes(struct i915_vma *vma)
388 {
389         if (!i915_vma_has_ggtt_write(vma))
390                 return;
391
392         i915_gem_flush_ggtt_writes(vma->vm->i915);
393
394         i915_vma_unset_ggtt_write(vma);
395 }
396
397 void i915_vma_unpin_iomap(struct i915_vma *vma)
398 {
399         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
400
401         GEM_BUG_ON(vma->iomap == NULL);
402
403         i915_vma_flush_writes(vma);
404
405         i915_vma_unpin_fence(vma);
406         i915_vma_unpin(vma);
407 }
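/*
 * Editorial sketch of the expected pairing for the iomap helpers above,
 * assuming the vma has already been pinned and bound into the GGTT (e.g. via
 * i915_vma_pin() with PIN_GLOBAL); the writel() value and offset are
 * illustrative only.
 *
 *	void __iomem *map;
 *
 *	map = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	writel(value, map + offset);
 *
 *	i915_vma_unpin_iomap(vma); (also releases the fence taken by pin_iomap)
 */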
408
409 void i915_vma_unpin_and_release(struct i915_vma **p_vma)
410 {
411         struct i915_vma *vma;
412         struct drm_i915_gem_object *obj;
413
414         vma = fetch_and_zero(p_vma);
415         if (!vma)
416                 return;
417
418         obj = vma->obj;
419         GEM_BUG_ON(!obj);
420
421         i915_vma_unpin(vma);
422         i915_vma_close(vma);
423
424         __i915_gem_object_release_unless_active(obj);
425 }
426
427 bool i915_vma_misplaced(const struct i915_vma *vma,
428                         u64 size, u64 alignment, u64 flags)
429 {
430         if (!drm_mm_node_allocated(&vma->node))
431                 return false;
432
433         if (vma->node.size < size)
434                 return true;
435
436         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
437         if (alignment && !IS_ALIGNED(vma->node.start, alignment))
438                 return true;
439
440         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
441                 return true;
442
443         if (flags & PIN_OFFSET_BIAS &&
444             vma->node.start < (flags & PIN_OFFSET_MASK))
445                 return true;
446
447         if (flags & PIN_OFFSET_FIXED &&
448             vma->node.start != (flags & PIN_OFFSET_MASK))
449                 return true;
450
451         return false;
452 }
453
454 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
455 {
456         bool mappable, fenceable;
457
458         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
459         GEM_BUG_ON(!vma->fence_size);
460
461         /*
462          * Explicitly disable for rotated VMA since the display does not
463          * need the fence and the VMA is not accessible to other users.
464          */
465         if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
466                 return;
467
468         fenceable = (vma->node.size >= vma->fence_size &&
469                      IS_ALIGNED(vma->node.start, vma->fence_alignment));
470
471         mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
472
473         if (mappable && fenceable)
474                 vma->flags |= I915_VMA_CAN_FENCE;
475         else
476                 vma->flags &= ~I915_VMA_CAN_FENCE;
477 }
478
479 static bool color_differs(struct drm_mm_node *node, unsigned long color)
480 {
481         return node->allocated && node->color != color;
482 }
483
484 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
485 {
486         struct drm_mm_node *node = &vma->node;
487         struct drm_mm_node *other;
488
489         /*
490          * On some machines we have to be careful when putting differing types
491          * of snoopable memory together to avoid the prefetcher crossing memory
492          * domains and dying. During vm initialisation, we decide whether or not
493          * these constraints apply and set the drm_mm.color_adjust
494          * appropriately.
495          */
496         if (vma->vm->mm.color_adjust == NULL)
497                 return true;
498
499         /* Only valid to be called on an already inserted vma */
500         GEM_BUG_ON(!drm_mm_node_allocated(node));
501         GEM_BUG_ON(list_empty(&node->node_list));
502
503         other = list_prev_entry(node, node_list);
504         if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
505                 return false;
506
507         other = list_next_entry(node, node_list);
508         if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
509                 return false;
510
511         return true;
512 }
513
514 static void assert_bind_count(const struct drm_i915_gem_object *obj)
515 {
516         /*
517          * Combine the assertion that the object is bound and that we have
518          * pinned its pages. But we should never have bound the object
519          * more than we have pinned its pages. (For complete accuracy, we
520          * assume that no one else is pinning the pages, but as a rough assertion
521          * that we will not run into problems later, this will do!)
522          */
523         GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
524 }
525
526 /**
527  * i915_vma_insert - finds a slot for the vma in its address space
528  * @vma: the vma
529  * @size: requested size in bytes (can be larger than the VMA)
530  * @alignment: required alignment
531  * @flags: mask of PIN_* flags to use
532  *
533  * First we try to allocate some free space that meets the requirements for
534  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
535  * preferably the oldest idle entry to make room for the new VMA.
536  *
537  * Returns:
538  * 0 on success, negative error code otherwise.
539  */
540 static int
541 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
542 {
543         struct drm_i915_private *dev_priv = vma->vm->i915;
544         unsigned int cache_level;
545         u64 start, end;
546         int ret;
547
548         GEM_BUG_ON(i915_vma_is_closed(vma));
549         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
550         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
551
552         size = max(size, vma->size);
553         alignment = max(alignment, vma->display_alignment);
554         if (flags & PIN_MAPPABLE) {
555                 size = max_t(typeof(size), size, vma->fence_size);
556                 alignment = max_t(typeof(alignment),
557                                   alignment, vma->fence_alignment);
558         }
559
560         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
561         GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
562         GEM_BUG_ON(!is_power_of_2(alignment));
563
564         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
565         GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
566
567         end = vma->vm->total;
568         if (flags & PIN_MAPPABLE)
569                 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
570         if (flags & PIN_ZONE_4G)
571                 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
572         GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
573
574         /* If binding the object/GGTT view requires more space than the entire
575          * aperture has, reject it early before evicting everything in a vain
576          * attempt to find space.
577          */
578         if (size > end) {
579                 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
580                           size, flags & PIN_MAPPABLE ? "mappable" : "total",
581                           end);
582                 return -ENOSPC;
583         }
584
585         if (vma->obj) {
586                 ret = i915_gem_object_pin_pages(vma->obj);
587                 if (ret)
588                         return ret;
589
590                 cache_level = vma->obj->cache_level;
591         } else {
592                 cache_level = 0;
593         }
594
595         GEM_BUG_ON(vma->pages);
596
597         ret = vma->ops->set_pages(vma);
598         if (ret)
599                 goto err_unpin;
600
601         if (flags & PIN_OFFSET_FIXED) {
602                 u64 offset = flags & PIN_OFFSET_MASK;
603                 if (!IS_ALIGNED(offset, alignment) ||
604                     range_overflows(offset, size, end)) {
605                         ret = -EINVAL;
606                         goto err_clear;
607                 }
608
609                 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
610                                            size, offset, cache_level,
611                                            flags);
612                 if (ret)
613                         goto err_clear;
614         } else {
615                 /*
616                  * We only support huge gtt pages through the 48b PPGTT,
617                  * however we also don't want to force any alignment for
618                  * objects which need to be tightly packed into the low 32bits.
619                  *
620                  * Note that we assume that the GGTT is limited to 4GiB for the
621                  * foreseeable future. See also i915_ggtt_offset().
622                  */
623                 if (upper_32_bits(end - 1) &&
624                     vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
625                         /*
626                          * We can't mix 64K and 4K PTEs in the same page-table
627                          * (2M block), and so to avoid the ugliness and
628                          * complexity of coloring we opt for just aligning 64K
629                          * objects to 2M.
630                          */
631                         u64 page_alignment =
632                                 rounddown_pow_of_two(vma->page_sizes.sg |
633                                                      I915_GTT_PAGE_SIZE_2M);
634
635                         /*
636                          * Check we don't expand for the limited Global GTT
637                          * (mappable aperture is even more precious!). This
638                          * also checks that we exclude the aliasing-ppgtt.
639                          */
640                         GEM_BUG_ON(i915_vma_is_ggtt(vma));
641
642                         alignment = max(alignment, page_alignment);
643
644                         if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
645                                 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
646                 }
647
648                 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
649                                           size, alignment, cache_level,
650                                           start, end, flags);
651                 if (ret)
652                         goto err_clear;
653
654                 GEM_BUG_ON(vma->node.start < start);
655                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
656         }
657         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
658         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
659
660         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
661
662         if (vma->obj) {
663                 struct drm_i915_gem_object *obj = vma->obj;
664
665                 spin_lock(&dev_priv->mm.obj_lock);
666                 list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
667                 obj->bind_count++;
668                 spin_unlock(&dev_priv->mm.obj_lock);
669
670                 assert_bind_count(obj);
671         }
672
673         return 0;
674
675 err_clear:
676         vma->ops->clear_pages(vma);
677 err_unpin:
678         if (vma->obj)
679                 i915_gem_object_unpin_pages(vma->obj);
680         return ret;
681 }
682
683 static void
684 i915_vma_remove(struct i915_vma *vma)
685 {
686         struct drm_i915_private *i915 = vma->vm->i915;
687
688         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
689         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
690
691         vma->ops->clear_pages(vma);
692
693         drm_mm_remove_node(&vma->node);
694         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
695
696         /*
697          * Since the unbound list is global, only move to that list if
698          * no more VMAs exist.
699          */
700         if (vma->obj) {
701                 struct drm_i915_gem_object *obj = vma->obj;
702
703                 spin_lock(&i915->mm.obj_lock);
704                 if (--obj->bind_count == 0)
705                         list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
706                 spin_unlock(&i915->mm.obj_lock);
707
708                 /*
709                  * And finally now the object is completely decoupled from this
710                  * vma, we can drop its hold on the backing storage and allow
711                  * it to be reaped by the shrinker.
712                  */
713                 i915_gem_object_unpin_pages(obj);
714                 assert_bind_count(obj);
715         }
716 }
717
718 int __i915_vma_do_pin(struct i915_vma *vma,
719                       u64 size, u64 alignment, u64 flags)
720 {
721         const unsigned int bound = vma->flags;
722         int ret;
723
724         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
725         GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
726         GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
727
728         if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
729                 ret = -EBUSY;
730                 goto err_unpin;
731         }
732
733         if ((bound & I915_VMA_BIND_MASK) == 0) {
734                 ret = i915_vma_insert(vma, size, alignment, flags);
735                 if (ret)
736                         goto err_unpin;
737         }
738         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
739
740         ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
741         if (ret)
742                 goto err_remove;
743
744         GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
745
746         if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
747                 __i915_vma_set_map_and_fenceable(vma);
748
749         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
750         return 0;
751
752 err_remove:
753         if ((bound & I915_VMA_BIND_MASK) == 0) {
754                 i915_vma_remove(vma);
755                 GEM_BUG_ON(vma->pages);
756                 GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
757         }
758 err_unpin:
759         __i915_vma_unpin(vma);
760         return ret;
761 }
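/*
 * Editorial note: the inline i915_vma_pin() wrapper (see i915_vma.h) raises
 * the pin count in vma->flags before calling __i915_vma_do_pin(), which is
 * why the overflow check above already sees the incremented count and why
 * both error paths drop it again via __i915_vma_unpin(). A rough sketch of
 * that wrapper, from memory of the header rather than quoted verbatim:
 *
 *	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
 *		return 0; (already bound and pinned as requested)
 *	return __i915_vma_do_pin(vma, size, alignment, flags);
 */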
762
763 void i915_vma_close(struct i915_vma *vma)
764 {
765         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
766
767         GEM_BUG_ON(i915_vma_is_closed(vma));
768         vma->flags |= I915_VMA_CLOSED;
769
770         /*
771          * We defer actually closing, unbinding and destroying the VMA until
772          * the next idle point, or if the object is freed in the meantime. By
773          * postponing the unbind, we allow for it to be resurrected by the
774          * client, avoiding the work required to rebind the VMA. This is
775          * advantageous for DRI, where the client/server pass objects
776          * between themselves, temporarily opening a local VMA to the
777          * object, and then closing it again. The same object is then reused
778          * on the next frame (or two, depending on the depth of the swap queue)
779          * causing us to rebind the VMA once more. This ends up being a lot
780          * of wasted work for the steady state.
781          */
782         list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
783 }
784
785 void i915_vma_reopen(struct i915_vma *vma)
786 {
787         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
788
789         if (vma->flags & I915_VMA_CLOSED) {
790                 vma->flags &= ~I915_VMA_CLOSED;
791                 list_del(&vma->closed_link);
792         }
793 }
794
795 static void __i915_vma_destroy(struct i915_vma *vma)
796 {
797         struct drm_i915_private *i915 = vma->vm->i915;
798         struct i915_vma_active *iter, *n;
799
800         GEM_BUG_ON(vma->node.allocated);
801         GEM_BUG_ON(vma->fence);
802
803         GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
804
805         list_del(&vma->obj_link);
806         list_del(&vma->vm_link);
807         if (vma->obj)
808                 rb_erase(&vma->obj_node, &vma->obj->vma_tree);
809
810         if (!i915_vma_is_ggtt(vma))
811                 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
812
813         rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
814                 GEM_BUG_ON(i915_gem_active_isset(&iter->base));
815                 kfree(iter);
816         }
817
818         kmem_cache_free(i915->vmas, vma);
819 }
820
821 void i915_vma_destroy(struct i915_vma *vma)
822 {
823         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
824
825         GEM_BUG_ON(i915_vma_is_active(vma));
826         GEM_BUG_ON(i915_vma_is_pinned(vma));
827
828         if (i915_vma_is_closed(vma))
829                 list_del(&vma->closed_link);
830
831         WARN_ON(i915_vma_unbind(vma));
832         __i915_vma_destroy(vma);
833 }
834
835 void i915_vma_parked(struct drm_i915_private *i915)
836 {
837         struct i915_vma *vma, *next;
838
839         list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
840                 GEM_BUG_ON(!i915_vma_is_closed(vma));
841                 i915_vma_destroy(vma);
842         }
843
844         GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
845 }
846
847 static void __i915_vma_iounmap(struct i915_vma *vma)
848 {
849         GEM_BUG_ON(i915_vma_is_pinned(vma));
850
851         if (vma->iomap == NULL)
852                 return;
853
854         io_mapping_unmap(vma->iomap);
855         vma->iomap = NULL;
856 }
857
858 void i915_vma_revoke_mmap(struct i915_vma *vma)
859 {
860         struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
861         u64 vma_offset;
862
863         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
864
865         if (!i915_vma_has_userfault(vma))
866                 return;
867
868         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
869         GEM_BUG_ON(!vma->obj->userfault_count);
870
871         vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
872         unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
873                             drm_vma_node_offset_addr(node) + vma_offset,
874                             vma->size,
875                             1);
876
877         i915_vma_unset_userfault(vma);
878         if (!--vma->obj->userfault_count)
879                 list_del(&vma->obj->userfault_link);
880 }
881
882 static void export_fence(struct i915_vma *vma,
883                          struct i915_request *rq,
884                          unsigned int flags)
885 {
886         struct reservation_object *resv = vma->resv;
887
888         /*
889          * Ignore errors from failing to allocate the new fence, we can't
890          * handle an error right now. Worst case should be missed
891          * synchronisation leading to rendering corruption.
892          */
893         reservation_object_lock(resv, NULL);
894         if (flags & EXEC_OBJECT_WRITE)
895                 reservation_object_add_excl_fence(resv, &rq->fence);
896         else if (reservation_object_reserve_shared(resv) == 0)
897                 reservation_object_add_shared_fence(resv, &rq->fence);
898         reservation_object_unlock(resv);
899 }
900
901 static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
902 {
903         struct i915_vma_active *active;
904         struct rb_node **p, *parent;
905         struct i915_request *old;
906
907         /*
908          * We track the most recently used timeline to skip a rbtree search
909          * for the common case, under typical loads we never need the rbtree
910          * at all. We can reuse the last_active slot if it is empty, that is
911          * after the previous activity has been retired, or if the active
912          * matches the current timeline.
913          *
914          * Note that we allow the timeline to be active simultaneously in
915          * the rbtree and the last_active cache. We do this to avoid having
916          * to search and replace the rbtree element for a new timeline, with
917          * the cost being that we must be aware that the vma may be retired
918          * twice for the same timeline (as the older rbtree element will be
919          * retired before the new request is added to last_active).
920          */
921         old = i915_gem_active_raw(&vma->last_active,
922                                   &vma->vm->i915->drm.struct_mutex);
923         if (!old || old->fence.context == idx)
924                 goto out;
925
926         /* Move the currently active fence into the rbtree */
927         idx = old->fence.context;
928
929         parent = NULL;
930         p = &vma->active.rb_node;
931         while (*p) {
932                 parent = *p;
933
934                 active = rb_entry(parent, struct i915_vma_active, node);
935                 if (active->timeline == idx)
936                         goto replace;
937
938                 if (active->timeline < idx)
939                         p = &parent->rb_right;
940                 else
941                         p = &parent->rb_left;
942         }
943
944         active = kmalloc(sizeof(*active), GFP_KERNEL);
945
946         /* kmalloc may retire the vma->last_active request (thanks shrinker)! */
947         if (unlikely(!i915_gem_active_raw(&vma->last_active,
948                                           &vma->vm->i915->drm.struct_mutex))) {
949                 kfree(active);
950                 goto out;
951         }
952
953         if (unlikely(!active))
954                 return ERR_PTR(-ENOMEM);
955
956         init_request_active(&active->base, i915_vma_retire);
957         active->vma = vma;
958         active->timeline = idx;
959
960         rb_link_node(&active->node, parent, p);
961         rb_insert_color(&active->node, &vma->active);
962
963 replace:
964         /*
965          * Overwrite the previous active slot in the rbtree with last_active,
966          * leaving last_active zeroed. If the previous slot is still active,
967          * we must be careful as we now only expect to receive one retire
968          * callback not two, and so must undo the active counting for the
969          * overwritten slot.
970          */
971         if (i915_gem_active_isset(&active->base)) {
972                 /* Retire ourselves from the old rq->active_list */
973                 __list_del_entry(&active->base.link);
974                 vma->active_count--;
975                 GEM_BUG_ON(!vma->active_count);
976         }
977         GEM_BUG_ON(list_empty(&vma->last_active.link));
978         list_replace_init(&vma->last_active.link, &active->base.link);
979         active->base.request = fetch_and_zero(&vma->last_active.request);
980
981 out:
982         return &vma->last_active;
983 }
984
985 int i915_vma_move_to_active(struct i915_vma *vma,
986                             struct i915_request *rq,
987                             unsigned int flags)
988 {
989         struct drm_i915_gem_object *obj = vma->obj;
990         struct i915_gem_active *active;
991
992         lockdep_assert_held(&rq->i915->drm.struct_mutex);
993         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
994
995         active = active_instance(vma, rq->fence.context);
996         if (IS_ERR(active))
997                 return PTR_ERR(active);
998
999         /*
1000          * Add a reference if we're newly entering the active list.
1001          * The order in which we add operations to the retirement queue is
1002          * vital here: mark_active adds to the start of the callback list,
1003          * such that subsequent callbacks are called first. Therefore we
1004          * add the active reference first and queue for it to be dropped
1005          * *last*.
1006          */
1007         if (!i915_gem_active_isset(active) && !vma->active_count++) {
1008                 list_move_tail(&vma->vm_link, &vma->vm->active_list);
1009                 obj->active_count++;
1010         }
1011         i915_gem_active_set(active, rq);
1012         GEM_BUG_ON(!i915_vma_is_active(vma));
1013         GEM_BUG_ON(!obj->active_count);
1014
1015         obj->write_domain = 0;
1016         if (flags & EXEC_OBJECT_WRITE) {
1017                 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1018
1019                 if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
1020                         i915_gem_active_set(&obj->frontbuffer_write, rq);
1021
1022                 obj->read_domains = 0;
1023         }
1024         obj->read_domains |= I915_GEM_GPU_DOMAINS;
1025
1026         if (flags & EXEC_OBJECT_NEEDS_FENCE)
1027                 i915_gem_active_set(&vma->last_fence, rq);
1028
1029         export_fence(vma, rq, flags);
1030         return 0;
1031 }
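/*
 * Editorial sketch: during request construction (execbuf and friends) each
 * vma used by the batch is typically marked active against the request with
 * a call like the one below; the flags mirror the EXEC_OBJECT_* bits handled
 * above.
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 */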
1032
1033 int i915_vma_unbind(struct i915_vma *vma)
1034 {
1035         int ret;
1036
1037         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
1038
1039         /*
1040          * First wait upon any activity as retiring the request may
1041          * have side-effects such as unpinning or even unbinding this vma.
1042          */
1043         might_sleep();
1044         if (i915_vma_is_active(vma)) {
1045                 struct i915_vma_active *active, *n;
1046
1047                 /*
1048                  * When a closed VMA is retired, it is unbound - eek.
1049                  * In order to prevent it from being recursively closed,
1050                  * take a pin on the vma so that the second unbind is
1051                  * aborted.
1052                  *
1053                  * Even more scary is that the retire callback may free
1054                  * the object (last active vma). To prevent the explosion
1055                  * we defer the actual object free to a worker that can
1056                  * only proceed once it acquires the struct_mutex (which
1057                  * we currently hold, therefore it cannot free this object
1058                  * before we are finished).
1059                  */
1060                 __i915_vma_pin(vma);
1061
1062                 ret = i915_gem_active_retire(&vma->last_active,
1063                                              &vma->vm->i915->drm.struct_mutex);
1064                 if (ret)
1065                         goto unpin;
1066
1067                 rbtree_postorder_for_each_entry_safe(active, n,
1068                                                      &vma->active, node) {
1069                         ret = i915_gem_active_retire(&active->base,
1070                                                      &vma->vm->i915->drm.struct_mutex);
1071                         if (ret)
1072                                 goto unpin;
1073                 }
1074
1075                 ret = i915_gem_active_retire(&vma->last_fence,
1076                                              &vma->vm->i915->drm.struct_mutex);
1077 unpin:
1078                 __i915_vma_unpin(vma);
1079                 if (ret)
1080                         return ret;
1081         }
1082         GEM_BUG_ON(i915_vma_is_active(vma));
1083
1084         if (i915_vma_is_pinned(vma)) {
1085                 vma_print_allocator(vma, "is pinned");
1086                 return -EBUSY;
1087         }
1088
1089         if (!drm_mm_node_allocated(&vma->node))
1090                 return 0;
1091
1092         if (i915_vma_is_map_and_fenceable(vma)) {
1093                 /*
1094                  * Check that we have flushed all writes through the GGTT
1095                  * before the unbind; otherwise, due to the non-strict nature of
1096                  * those indirect writes, they may end up referencing the GGTT PTE
1097                  * after the unbind.
1098                  */
1099                 i915_vma_flush_writes(vma);
1100                 GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
1101
1102                 /* release the fence reg _after_ flushing */
1103                 ret = i915_vma_put_fence(vma);
1104                 if (ret)
1105                         return ret;
1106
1107                 /* Force a pagefault for domain tracking on next user access */
1108                 i915_vma_revoke_mmap(vma);
1109
1110                 __i915_vma_iounmap(vma);
1111                 vma->flags &= ~I915_VMA_CAN_FENCE;
1112         }
1113         GEM_BUG_ON(vma->fence);
1114         GEM_BUG_ON(i915_vma_has_userfault(vma));
1115
1116         if (likely(!vma->vm->closed)) {
1117                 trace_i915_vma_unbind(vma);
1118                 vma->ops->unbind_vma(vma);
1119         }
1120         vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
1121
1122         i915_vma_remove(vma);
1123
1124         return 0;
1125 }
1126
1127 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1128 #include "selftests/i915_vma.c"
1129 #endif