/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>

#include "display/intel_frontbuffer.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct i915_global_object {
	struct i915_global base;
	struct kmem_cache *slab_objects;
} global;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

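/**
 * i915_gem_object_alloc - allocate a zeroed GEM object from the slab cache
 *
 * Returns the new object, with its GEM vfuncs already hooked up, or NULL
 * if the allocation fails.
 */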
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

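/**
 * i915_gem_object_free - return an object to the slab cache
 * @obj: object previously allocated with i915_gem_object_alloc()
 */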
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	kmem_cache_free(global.slab_objects, obj);
}

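/**
 * i915_gem_object_init - initialise the common state of a freshly
 * allocated object
 * @obj: object to initialise
 * @ops: backend operations for this object type
 * @key: lockdep class key for this object class
 * @flags: I915_BO_ALLOC_* flags
 */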
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
	/*
	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
	 * in a drm_i915_gem_object. Make sure they are aliased.
	 */
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency
 * levels for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(to_i915(obj->base.dev)))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

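/*
 * Called via drm_gem_object_funcs.close when userspace closes a handle:
 * drop the LUT entries and mmap-offset grants the closing file held on
 * this object, putting one object reference per LUT entry removed.
 */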
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

		/* Break long locks, and carefully continue on from this spot */
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		/*
		 * We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */

		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

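/*
 * Final step of the deferred free, called from an RCU callback once all
 * read-side critical sections have completed: release the reservation
 * object and return the memory to the slab cache.
 */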
static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	dma_resv_fini(&obj->base._resv);
	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

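/*
 * Tear down the userspace mmap state of the object: revoke any live GTT
 * faults and release every mmap offset registered with the vma offset
 * manager.
 */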
static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
	/* Skip serialisation and waking the device if known not to be in use. */

	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

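/*
 * Reap a batch of objects from the deferred free list: unbind any
 * remaining vma, tear down mmaps, release the backing pages and backend
 * state, then leave the final kfree to RCU so that concurrent lookups of
 * the object pointer remain safe.
 */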
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		trace_i915_gem_object_destroy(obj);

		if (!list_empty(&obj->vma.list)) {
			struct i915_vma *vma;

			/*
			 * Note that the vma keeps an object reference while
			 * it is active, so it *should* not sleep while we
			 * destroy it. Our debug code insists it *might*.
			 * For the moment, play along.
			 */
			spin_lock(&obj->vma.lock);
			while ((vma = list_first_entry_or_null(&obj->vma.list,
							       struct i915_vma,
							       obj_link))) {
				GEM_BUG_ON(vma->obj != obj);
				spin_unlock(&obj->vma.lock);

				__i915_vma_put(vma);

				spin_lock(&obj->vma.lock);
			}
			spin_unlock(&obj->vma.lock);
		}

		__i915_gem_object_free_mmaps(obj);

		GEM_BUG_ON(!list_empty(&obj->lut_list));

		atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj);
		GEM_BUG_ON(i915_gem_object_has_pages(obj));
		bitmap_free(obj->bit_17);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		drm_gem_free_mmap_offset(&obj->base);

		if (obj->ops->release)
			obj->ops->release(obj);

		if (obj->mm.n_placements > 1)
			kfree(obj->mm.placements);

		if (obj->shares_resv_from)
			i915_vm_resv_put(obj->shares_resv_from);

		/* But keep the pointer alive for RCU-protected lookups */
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

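/**
 * i915_gem_flush_free_objects - drain the deferred free list
 * @i915: i915 device instance
 *
 * Processes any pending frees immediately, in the caller's context.
 */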
void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

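/* Worker for draining the deferred free list from process context. */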
static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

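/*
 * drm_gem_object_funcs.free callback: the last reference is gone, but the
 * actual release is deferred to the free worker and then to RCU, see the
 * comments below.
 */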
static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&i915->mm.free_count);

	/*
	 * This serializes freeing with the shrinker. Since the free
	 * is delayed, first by RCU then by the workqueue, we want the
	 * shrinker to be able to free pages of unreferenced objects,
	 * or else we may oom whilst there are plenty of deferred
	 * freed objects.
	 */
	i915_gem_object_make_unshrinkable(obj);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we cannot do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

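/* Flush frontbuffer rendering, if this object has active frontbuffer tracking. */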
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

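/* Invalidate frontbuffer caching, if this object has active frontbuffer tracking. */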
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

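/*
 * Read through a transient kernel mapping of the backing struct page,
 * flushing the CPU cache first if the object is not coherent for reads.
 */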
static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

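/*
 * Read iomem-backed objects through a write-combined iomapping of the
 * backing region, using the accelerated WC read when available and
 * falling back to memcpy_fromio() otherwise.
 */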
static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void __iomem *src_map;
	void __iomem *src_ptr;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads data from @obj at the specified offset. The requested region to read
 * from can't cross a page boundary. The caller must ensure that @obj pages
 * are pinned and that @obj is synced wrt. any related writes.
 *
 * Returns 0 on success or -ENODEV if the type of @obj's backing store is
 * unsupported.
 */
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}

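/* Per-device setup: hook up the deferred free worker. */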
void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

static void i915_global_objects_shrink(void)
{
	kmem_cache_shrink(global.slab_objects);
}

static void i915_global_objects_exit(void)
{
	kmem_cache_destroy(global.slab_objects);
}

static struct i915_global_object global = { {
	.shrink = i915_global_objects_shrink,
	.exit = i915_global_objects_exit,
} };

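/* Create the object slab cache and register the shrink/exit hooks above. */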
int __init i915_global_objects_init(void)
{
	global.slab_objects =
		KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!global.slab_objects)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif