/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_clock_gating.h"
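
/*
 * Reserve a temporary node in the mappable aperture so that pages can be
 * mapped into the GGTT one at a time for the indirect pread/pwrite paths
 * below. The caller clears the PTEs and releases the node again via
 * remove_mappable_node().
 */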
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
        int err;

        err = mutex_lock_interruptible(&ggtt->vm.mutex);
        if (err)
                return err;

        memset(node, 0, sizeof(*node));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                          size, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);

        mutex_unlock(&ggtt->vm.mutex);

        return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(node);
        mutex_unlock(&ggtt->vm.mutex);
}
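
/*
 * Report the total size of the GGTT and how much of it is still available
 * to userspace: the total minus the space consumed by pinned vma and
 * kernel reservations.
 */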
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;

        if (mutex_lock_interruptible(&ggtt->vm.mutex))
                return -EINTR;

        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        mutex_unlock(&ggtt->vm.mutex);

        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}
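
/*
 * Unbind all vma attached to an object. UNBIND_TEST merely probes for bound
 * vma and reports -EBUSY, UNBIND_ACTIVE also evicts vma still in use by the
 * GPU, UNBIND_ASYNC queues the unbind rather than blocking on it, and
 * UNBIND_BARRIER retries once after flushing any pending i915_vm_release().
 */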
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags)
{
        struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
        bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
        LIST_HEAD(still_in_list);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        int ret;

        assert_object_held(obj);

        if (list_empty(&obj->vma.list))
                return 0;

        /*
         * As some machines use ACPI to handle runtime-resume callbacks, and
         * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
         * as it is required by the shrinker. Ergo, we wake the device up
         * first just in case.
         */
        wakeref = intel_runtime_pm_get(rpm);

try_again:
        ret = 0;
        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                list_move_tail(&vma->obj_link, &still_in_list);
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;

                if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
                        ret = -EBUSY;
                        continue;
                }

                /*
                 * Requiring the vm destructor to take the object lock
                 * before destroying a vma would help us eliminate the
                 * i915_vm_tryget() here, AND thus also the barrier stuff
                 * at the end. That's an easy fix, but sleeping locks in
                 * a kthread should generally be avoided.
                 */
                ret = -EAGAIN;
                if (!i915_vm_tryget(vma->vm))
                        break;

                spin_unlock(&obj->vma.lock);

                /*
                 * Since i915_vma_parked() takes the object lock
                 * before vma destruction, it won't race us here
                 * and destroy the vma from under us.
                 */

                ret = -EBUSY;
                if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
                        assert_object_held(vma->obj);
                        ret = i915_vma_unbind_async(vma, vm_trylock);
                }

                if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
                                      !i915_vma_is_active(vma))) {
                        if (vm_trylock) {
                                if (mutex_trylock(&vma->vm->mutex)) {
                                        ret = __i915_vma_unbind(vma);
                                        mutex_unlock(&vma->vm->mutex);
                                }
                        } else {
                                ret = i915_vma_unbind(vma);
                        }
                }

                i915_vm_put(vma->vm);
                spin_lock(&obj->vma.lock);
        }
        list_splice_init(&still_in_list, &obj->vma.list);
        spin_unlock(&obj->vma.lock);

        if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
                rcu_barrier(); /* flush the i915_vm_release() */
                goto try_again;
        }

        intel_runtime_pm_put(rpm, wakeref);

        return ret;
}
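
/*
 * Per-page copy function for the shmem pread fastpath: kmap the page,
 * clflush it first if the object is not coherent for CPU reads, then
 * copy straight out to userspace.
 */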
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
            bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_to_user(user_data, vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        unsigned int needs_clflush;
        char __user *user_data;
        unsigned long offset;
        pgoff_t idx;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_read(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pread(page, offset, length, user_data,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}
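
/*
 * Copy from the GGTT aperture to userspace. Try a non-faulting copy under
 * an atomic mapping first, and only fall back to a regular mapping, where
 * the copy may fault, if bytes were left unwritten.
 */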
static int
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}
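
/*
 * Set up GGTT access to the object for the indirect pread/pwrite paths:
 * move it to the GTT domain and try to pin it into the mappable aperture.
 * If the object cannot be pinned there (or is tiled), fall back to a
 * single-page node through which each page is mapped in turn.
 */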
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
                                             struct drm_mm_node *node,
                                             bool write)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        struct i915_gem_ww_ctx ww;
        int ret;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        vma = ERR_PTR(-ENODEV);
        ret = i915_gem_object_lock(obj, &ww);
        if (ret)
                goto err_ww;

        ret = i915_gem_object_set_to_gtt_domain(obj, write);
        if (ret)
                goto err_ww;

        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                                  PIN_MAPPABLE |
                                                  PIN_NONBLOCK /* NOWARN */ |
                                                  PIN_NOEVICT);
        if (vma == ERR_PTR(-EDEADLK)) {
                ret = -EDEADLK;
                goto err_ww;
        } else if (!IS_ERR(vma)) {
                node->start = i915_ggtt_offset(vma);
                node->size = 0;
        } else {
                ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
                if (ret)
                        goto err_ww;
                GEM_BUG_ON(!drm_mm_node_allocated(node));
                vma = NULL;
        }

        ret = i915_gem_object_pin_pages(obj);
        if (ret) {
                if (drm_mm_node_allocated(node)) {
                        ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                        remove_mappable_node(ggtt, node);
                } else {
                        i915_vma_unpin(vma);
                }
        }

err_ww:
        if (ret == -EDEADLK) {
                ret = i915_gem_ww_ctx_backoff(&ww);
                if (!ret)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);

        return ret ? ERR_PTR(ret) : vma;
}
static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
                                 struct drm_mm_node *node,
                                 struct i915_vma *vma)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        i915_gem_object_unpin_pages(obj);
        if (drm_mm_node_allocated(node)) {
                ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
                remove_mappable_node(ggtt, node);
        } else {
                i915_vma_unpin(vma);
        }
}
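
/*
 * Fallback read path through the GGTT aperture, used when the shmem path
 * fails or the object has no struct pages: map each page into the aperture
 * in turn and copy it out through the uncached iomap.
 */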
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        unsigned long remain, offset;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        void __user *user_data;
        struct i915_vma *vma;
        int ret = 0;

        if (overflows_type(args->size, remain) ||
            overflows_type(args->offset, offset))
                return -EINVAL;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        vma = i915_gem_gtt_prepare(obj, &node, false);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                } else {
                        page_base += offset & PAGE_MASK;
                }

                if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return ret;
}
/**
 * i915_gem_pread_ioctl - Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PREAD is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);
        ret = -ENODEV;
        if (obj->ops->pread)
                ret = obj->ops->pread(obj, args);
        if (ret != -ENODEV)
                goto out;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;

        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

out:
        i915_gem_object_put(obj);
        return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_from_user((void __force *)vaddr + offset,
                                           user_data, length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}
/**
 * i915_gem_gtt_pwrite_fast - This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        unsigned long remain, offset;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct i915_vma *vma;
        void __user *user_data;
        int ret = 0;

        if (overflows_type(args->size, remain) ||
            overflows_type(args->offset, offset))
                return -EINVAL;

        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fall back, as
                 * waking/resuming is very slow (worst-case 10-100 ms
                 * depending on PCI sleeps and our own resume time).
                 * This easily dwarfs any performance advantage from
                 * using the cache bypass of indirect GGTT access.
                 */
                wakeref = intel_runtime_pm_get_if_in_use(rpm);
                if (!wakeref)
                        return -EFAULT;
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
                wakeref = intel_runtime_pm_get(rpm);
        }

        vma = i915_gem_gtt_prepare(obj, &node, true);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out_rpm;
        }

        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
                }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 * If the object is non-shmem backed, we retry with the
                 * path that handles page faults.
                 */
                if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
        intel_runtime_pm_put(rpm, wakeref);
        return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
             bool needs_clflush_before,
             bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_from_user(vaddr + offset, user_data, len);
        if (!ret && needs_clflush_after)
                drm_clflush_virt_range(vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *args)
{
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        void __user *user_data;
        unsigned long offset;
        pgoff_t idx;
        u64 remain;
        int ret;

        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err_unlock;

        ret = i915_gem_object_prepare_write(obj, &needs_clflush);
        if (ret)
                goto err_unpin;

        i915_gem_object_finish_access(obj);
        i915_gem_object_unlock(obj);

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire span being written.
         */
        partial_cacheline_write = 0;
        if (needs_clflush & CLFLUSH_BEFORE)
                partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pwrite(page, offset, length, user_data,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_object_unpin_pages(obj);
        return ret;

err_unpin:
        i915_gem_object_unpin_pages(obj);
err_unlock:
        i915_gem_object_unlock(obj);
        return ret;
}
/**
 * i915_gem_pwrite_ioctl - Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        /* PWRITE is disallowed for all platforms after TGL-LP. This also
         * covers all platforms with local memory.
         */
        if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
                return -EOPNOTSUPP;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check destination. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto err;
        }

        /* Writes not allowed into this read-only object */
        if (i915_gem_object_is_readonly(obj)) {
                ret = -EINVAL;
                goto err;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -ENODEV;
        if (obj->ops->pwrite)
                ret = obj->ops->pwrite(obj, args);
        if (ret != -ENODEV)
                goto err;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto err;

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            i915_gem_cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fall back to the shmem path in that case.
                 */
                ret = i915_gem_gtt_pwrite_fast(obj, args);

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(obj, args);
        }

err:
        i915_gem_object_put(obj);
        return ret;
}
/**
 * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /*
         * Proxy objects are barred from CPU access, so there is no
         * need to ban sw_finish as it is a nop.
         */

        /* Pinned buffers may be scanout, so flush the cache */
        i915_gem_object_flush_if_display(obj);
        i915_gem_object_put(obj);

        return 0;
}
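
/*
 * Revoke all userspace CPU mmaps ahead of runtime suspend and mark every
 * fence register dirty, so that fences are restored before their next use
 * once the device powers back up.
 */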
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj, *on;
        int i;

        /*
         * Only called during RPM suspend. All users of the userfault_list
         * must be holding an RPM wakeref to ensure that this cannot run
         * concurrently with themselves (and use the struct_mutex for
         * protection between themselves).
         */

        list_for_each_entry_safe(obj, on,
                                 &to_gt(i915)->ggtt->userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);

        list_for_each_entry_safe(obj, on,
                                 &i915->runtime_pm.lmem_userfault_list, userfault_link)
                i915_gem_object_runtime_pm_release_mmap_offset(obj);

        /*
         * The fences will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
         * down! All other fences will be reacquired by the user upon waking.
         */
        for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
                struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

                /*
                 * Ideally we want to assert that the fence register is not
                 * live at this point (i.e. that no piece of code will be
                 * trying to write through fence + GTT, as that both violates
                 * our tracking of activity and associated locking/barriers,
                 * but also is illegal given that the hw is powered down).
                 *
                 * Previously we used reg->pin_count as a "liveness" indicator.
                 * That is not sufficient, and we need a more fine-grained
                 * tool if we want to have a sanity check here.
                 */

                if (!reg->vma)
                        continue;

                GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
}
static void discard_ggtt_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        spin_lock(&obj->vma.lock);
        if (!RB_EMPTY_NODE(&vma->obj_node)) {
                rb_erase(&vma->obj_node, &obj->vma.tree);
                RB_CLEAR_NODE(&vma->obj_node);
        }
        spin_unlock(&obj->vma.lock);
}
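
/*
 * Pin an object into the GGTT under the caller's ww context. For
 * PIN_MAPPABLE | PIN_NONBLOCK requests, the half-the-aperture heuristics
 * below decide when to report -ENOSPC rather than evict other objects.
 */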
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
                            struct i915_gem_ww_ctx *ww,
                            const struct i915_gtt_view *view,
                            u64 size, u64 alignment, u64 flags)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        struct i915_vma *vma;
        int ret;

        GEM_WARN_ON(!ww);

        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GTT_VIEW_NORMAL)) {
                /*
                 * If the required space is larger than the available
                 * aperture, we will not be able to find a slot for the
                 * object and unbinding the object now will be in
                 * vain. Worse, doing so may cause us to ping-pong
                 * the object in and out of the Global GTT and
                 * waste a lot of cycles under the mutex.
                 */
                if (obj->base.size > ggtt->mappable_end)
                        return ERR_PTR(-E2BIG);

                /*
                 * If NONBLOCK is set the caller is optimistically
                 * trying to cache the full object within the mappable
                 * aperture, and *must* have a fallback in place for
                 * situations where we cannot bind the object. We
                 * can be a little more lax here and use the fallback
                 * more often to avoid costly migrations of ourselves
                 * and other objects within the aperture.
                 *
                 * Half-the-aperture is used as a simple heuristic.
                 * More interesting would be to search for a free
                 * block prior to making the commitment to unbind.
                 * That caters for the self-harm case, and with a
                 * little more heuristics (e.g. NOFAULT, NOEVICT)
                 * we could try to minimise harm to others.
                 */
                if (flags & PIN_NONBLOCK &&
                    obj->base.size > ggtt->mappable_end / 2)
                        return ERR_PTR(-ENOSPC);
        }

new_vma:
        vma = i915_vma_instance(obj, &ggtt->vm, view);
        if (IS_ERR(vma))
                return ERR_CAST(vma);

        if (i915_vma_misplaced(vma, size, alignment, flags)) {
                if (flags & PIN_NONBLOCK) {
                        if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
                                return ERR_PTR(-ENOSPC);

                        /*
                         * If this misplaced vma is too big (i.e. at least
                         * half the size of the aperture) or hasn't been pinned
                         * mappable before, we ignore the misplacement when
                         * PIN_NONBLOCK is set in order to avoid the ping-pong
                         * issue described above. In other words, we try to
                         * avoid the costly operation of unbinding this vma
                         * from the GGTT and rebinding it back because there
                         * may not be enough space for this vma in the aperture.
                         */
                        if (flags & PIN_MAPPABLE &&
                            (vma->fence_size > ggtt->mappable_end / 2 ||
                             !i915_vma_is_map_and_fenceable(vma)))
                                return ERR_PTR(-ENOSPC);
                }

                if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
                        discard_ggtt_vma(vma);
                        goto new_vma;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ERR_PTR(ret);
        }

        ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
        if (ret)
                return ERR_PTR(ret);

        if (vma->fence && !i915_gem_object_is_tiled(obj)) {
                mutex_lock(&ggtt->vm.mutex);
                i915_vma_revoke_fence(vma);
                mutex_unlock(&ggtt->vm.mutex);
        }

        ret = i915_vma_wait_for_bind(vma);
        if (ret) {
                i915_vma_unpin(vma);
                return ERR_PTR(ret);
        }

        return vma;
}
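
/*
 * Convenience wrapper around i915_gem_object_ggtt_pin_ww() that supplies
 * its own ww context, retrying the pin on -EDEADLK via for_i915_gem_ww().
 */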
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_gtt_view *view,
                         u64 size, u64 alignment, u64 flags)
{
        struct i915_gem_ww_ctx ww;
        struct i915_vma *ret;
        int err;

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
                                                  alignment, flags);
                if (IS_ERR(ret))
                        err = PTR_ERR(ret);
        }

        return err ? ERR_PTR(err) : ret;
}
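
/*
 * Apply userspace's I915_MADV_{WILLNEED,DONTNEED} hint: record it, shuffle
 * the object between the shrink and purge lists, and immediately truncate
 * the backing storage of DONTNEED objects that have no pages left.
 */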
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int err;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        obj = i915_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_clear_tiling_quirk(obj);
                        i915_gem_object_make_shrinkable(obj);
                }
                if (args->madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_make_unshrinkable(obj);
                        i915_gem_object_set_tiling_quirk(obj);
                }
        }

        if (obj->mm.madv != __I915_MADV_PURGED) {
                obj->mm.madv = args->madv;
                if (obj->ops->adjust_lru)
                        obj->ops->adjust_lru(obj);
        }

        if (i915_gem_object_has_pages(obj) ||
            i915_gem_object_has_self_managed_shrink_list(obj)) {
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                if (!list_empty(&obj->mm.link)) {
                        struct list_head *list;

                        if (obj->mm.madv != I915_MADV_WILLNEED)
                                list = &i915->mm.purge_list;
                        else
                                list = &i915->mm.shrink_list;
                        list_move_tail(&obj->mm.link, list);
                }
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }

        /* if the object is no longer attached, discard its backing storage */
        if (obj->mm.madv == I915_MADV_DONTNEED &&
            !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);

        args->retained = obj->mm.madv != __I915_MADV_PURGED;

        i915_gem_object_unlock(obj);
out:
        i915_gem_object_put(obj);
        return err;
}
/*
 * A single pass should suffice to release all the freed objects (along
 * most call paths), but be a little more paranoid: freeing the objects
 * does take a small amount of time, during which the rcu callbacks could
 * have added new objects to the freed list and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
        while (atomic_read(&i915->mm.free_count)) {
                flush_work(&i915->mm.free_work);
                drain_workqueue(i915->bdev.wq);
                rcu_barrier();
        }
}
/*
 * Similar to objects above (see i915_gem_drain_freed_objects()), in general
 * we have workers that are armed by RCU and then rearm themselves in their
 * callbacks. To be paranoid, we need to drain the workqueue a second time
 * after waiting for the RCU grace period so that we catch work queued via
 * RCU from the first pass. As neither drain_workqueue() nor flush_workqueue()
 * report a result, we assume that no more than 3 passes are required to
 * catch all _recursive_ RCU delayed work.
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
        int i;

        for (i = 0; i < 3; i++) {
                flush_workqueue(i915->wq);
                rcu_barrier();
                i915_gem_drain_freed_objects(i915);
        }

        drain_workqueue(i915->wq);
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt;
        unsigned int i;
        int ret;

        /* We need to fall back to 4K pages if host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;

        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
                return ret;

        for_each_gt(gt, dev_priv, i) {
                intel_uc_fetch_firmwares(&gt->uc);
                intel_wopcm_init(&gt->wopcm);
                if (GRAPHICS_VER(dev_priv) >= 8)
                        setup_private_pat(gt);
        }

        ret = i915_init_ggtt(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_unlock;
        }

        /*
         * Despite its name intel_clock_gating_init applies both display
         * clock gating workarounds; GT mmio workarounds and the occasional
         * GT power context workaround. Worse, sometimes it includes a context
         * register workaround which we need to apply before we record the
         * default HW state for all contexts.
         *
         * FIXME: break up the workarounds and apply them at the right time!
         */
        intel_clock_gating_init(dev_priv);

        for_each_gt(gt, dev_priv, i) {
                ret = intel_gt_init(gt);
                if (ret)
                        goto err_unlock;
        }

        return 0;

        /*
         * Unwinding is complicated by the fact that we want to handle -EIO
         * to mean disable GPU submission but keep KMS alive. We want to mark
         * the HW as irreversibly wedged, but keep enough state around that
         * the driver doesn't explode during runtime.
         */
err_unlock:
        i915_gem_drain_workqueue(dev_priv);

        if (ret != -EIO) {
                for_each_gt(gt, dev_priv, i) {
                        intel_gt_driver_remove(gt);
                        intel_gt_driver_release(gt);
                        intel_uc_cleanup_firmwares(&gt->uc);
                }
        }

        if (ret == -EIO) {
                /*
                 * Allow engines or uC initialisation to fail by marking the
                 * GPU as wedged. But we only want to do this when the GPU is
                 * angry; for all other failures, such as an allocation
                 * failure, bail.
                 */
                for_each_gt(gt, dev_priv, i) {
                        if (!intel_gt_is_wedged(gt)) {
                                i915_probe_error(dev_priv,
                                                 "Failed to initialize GPU, declaring it wedged!\n");
                                intel_gt_set_wedged(gt);
                        }
                }

                /* Minimal basic recovery for KMS */
                ret = i915_ggtt_enable_hw(dev_priv);
                i915_ggtt_resume(to_gt(dev_priv)->ggtt);
                intel_clock_gating_init(dev_priv);
        }

        i915_gem_drain_freed_objects(dev_priv);

        return ret;
}
void i915_gem_driver_register(struct drm_i915_private *i915)
{
        i915_gem_driver_register__shrinker(i915);

        intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
        i915_gem_driver_unregister__shrinker(i915);
}
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt;
        unsigned int i;

        i915_gem_suspend_late(dev_priv);
        for_each_gt(gt, dev_priv, i)
                intel_gt_driver_remove(gt);
        dev_priv->uabi_engines = RB_ROOT;

        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
        struct intel_gt *gt;
        unsigned int i;

        for_each_gt(gt, dev_priv, i) {
                intel_gt_driver_release(gt);
                intel_uc_cleanup_firmwares(&gt->uc);
        }

        /* Flush any outstanding work, including i915_gem_context.release_work. */
        i915_gem_drain_workqueue(dev_priv);

        drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
        spin_lock_init(&i915->mm.obj_lock);

        init_llist_head(&i915->mm.free_list);

        INIT_LIST_HEAD(&i915->mm.purge_list);
        INIT_LIST_HEAD(&i915->mm.shrink_list);

        i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
        i915_gem_init__mm(dev_priv);
        i915_gem_init__contexts(dev_priv);

        spin_lock_init(&dev_priv->display.fb_tracking.lock);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
        i915_gem_drain_workqueue(dev_priv);
        GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
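
/*
 * First open of the device by a client: allocate the per-file state,
 * register the client, and open its default GEM context.
 */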
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        struct i915_drm_client *client;
        int ret = -ENOMEM;

        drm_dbg(&i915->drm, "\n");

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                goto err_alloc;

        client = i915_drm_client_add(&i915->clients);
        if (IS_ERR(client)) {
                ret = PTR_ERR(client);
                goto err_client;
        }

        file->driver_priv = file_priv;
        file_priv->i915 = i915;
        file_priv->file = file;
        file_priv->client = client;

        file_priv->bsd_engine = -1;
        file_priv->hang_timestamp = jiffies;

        ret = i915_gem_context_open(i915, file);
        if (ret)
                goto err_context;

        return 0;

err_context:
        i915_drm_client_put(client);
err_client:
        kfree(file_priv);
err_alloc:
        return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif