2 * Copyright © 2008-2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_gem_dmabuf.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_mocs.h"
38 #include <linux/reservation.h>
39 #include <linux/shmem_fs.h>
40 #include <linux/slab.h>
41 #include <linux/swap.h>
42 #include <linux/pci.h>
43 #include <linux/dma-buf.h>
45 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
46 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
48 static bool cpu_cache_is_coherent(struct drm_device *dev,
49 enum i915_cache_level level)
51 return HAS_LLC(dev) || level != I915_CACHE_NONE;
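/* Decide whether CPU writes to this object must be clflushed before the GPU
 * (or the display engine, for pinned scanout buffers) can observe them.
 */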
54 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
56 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
59 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
62 return obj->pin_display;
66 insert_mappable_node(struct drm_i915_private *i915,
67 struct drm_mm_node *node, u32 size)
69 memset(node, 0, sizeof(*node));
70 return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
72 i915->ggtt.mappable_end,
73 DRM_MM_SEARCH_DEFAULT,
74 DRM_MM_CREATE_DEFAULT);
78 remove_mappable_node(struct drm_mm_node *node)
80 drm_mm_remove_node(node);
83 /* some bookkeeping */
84 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
87 spin_lock(&dev_priv->mm.object_stat_lock);
88 dev_priv->mm.object_count++;
89 dev_priv->mm.object_memory += size;
90 spin_unlock(&dev_priv->mm.object_stat_lock);
93 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
96 spin_lock(&dev_priv->mm.object_stat_lock);
97 dev_priv->mm.object_count--;
98 dev_priv->mm.object_memory -= size;
99 spin_unlock(&dev_priv->mm.object_stat_lock);
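/* Block (interruptibly, with a timeout) until any GPU reset currently in
 * progress has completed, so that we do not touch GEM state mid-reset.
 */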
103 i915_gem_wait_for_error(struct i915_gpu_error *error)
107 if (!i915_reset_in_progress(error))
111 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
112 * userspace. If it takes that long something really bad is going on and
113 * we should simply try to bail out and fail as gracefully as possible.
115 ret = wait_event_interruptible_timeout(error->reset_queue,
116 !i915_reset_in_progress(error),
119 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
121 } else if (ret < 0) {
128 int i915_mutex_lock_interruptible(struct drm_device *dev)
130 struct drm_i915_private *dev_priv = to_i915(dev);
133 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
137 ret = mutex_lock_interruptible(&dev->struct_mutex);
145 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
146 struct drm_file *file)
148 struct drm_i915_private *dev_priv = to_i915(dev);
149 struct i915_ggtt *ggtt = &dev_priv->ggtt;
150 struct drm_i915_gem_get_aperture *args = data;
151 struct i915_vma *vma;
155 mutex_lock(&dev->struct_mutex);
156 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
157 if (i915_vma_is_pinned(vma))
158 pinned += vma->node.size;
159 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
160 if (i915_vma_is_pinned(vma))
161 pinned += vma->node.size;
162 mutex_unlock(&dev->struct_mutex);
164 args->aper_size = ggtt->base.total;
165 args->aper_available_size = args->aper_size - pinned;
171 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
173 struct address_space *mapping = obj->base.filp->f_mapping;
174 char *vaddr = obj->phys_handle->vaddr;
176 struct scatterlist *sg;
179 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
182 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
186 page = shmem_read_mapping_page(mapping, i);
188 return PTR_ERR(page);
190 src = kmap_atomic(page);
191 memcpy(vaddr, src, PAGE_SIZE);
192 drm_clflush_virt_range(vaddr, PAGE_SIZE);
199 i915_gem_chipset_flush(to_i915(obj->base.dev));
201 st = kmalloc(sizeof(*st), GFP_KERNEL);
205 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
212 sg->length = obj->base.size;
214 sg_dma_address(sg) = obj->phys_handle->busaddr;
215 sg_dma_len(sg) = obj->base.size;
222 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
226 BUG_ON(obj->madv == __I915_MADV_PURGED);
228 ret = i915_gem_object_set_to_cpu_domain(obj, true);
230 /* In the event of a disaster, abandon all caches and
233 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
236 if (obj->madv == I915_MADV_DONTNEED)
240 struct address_space *mapping = obj->base.filp->f_mapping;
241 char *vaddr = obj->phys_handle->vaddr;
244 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
248 page = shmem_read_mapping_page(mapping, i);
252 dst = kmap_atomic(page);
253 drm_clflush_virt_range(vaddr, PAGE_SIZE);
254 memcpy(dst, vaddr, PAGE_SIZE);
257 set_page_dirty(page);
258 if (obj->madv == I915_MADV_WILLNEED)
259 mark_page_accessed(page);
266 sg_free_table(obj->pages);
271 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
273 drm_pci_free(obj->base.dev, obj->phys_handle);
276 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
277 .get_pages = i915_gem_object_get_pages_phys,
278 .put_pages = i915_gem_object_put_pages_phys,
279 .release = i915_gem_object_release_phys,
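/* Unbind the object from every VMA it is currently bound in, waiting for any
 * outstanding rendering to complete first.
 */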
282 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
284 struct i915_vma *vma;
285 LIST_HEAD(still_in_list);
288 lockdep_assert_held(&obj->base.dev->struct_mutex);
290 /* Closed vma are removed from the obj->vma_list - but they may
291 * still have an active binding on the object. To remove those we
292 * must wait for all rendering to complete to the object (as unbinding
293 * must anyway), and retire the requests.
295 ret = i915_gem_object_wait_rendering(obj, false);
299 i915_gem_retire_requests(to_i915(obj->base.dev));
301 while ((vma = list_first_entry_or_null(&obj->vma_list,
304 list_move_tail(&vma->obj_link, &still_in_list);
305 ret = i915_vma_unbind(vma);
309 list_splice(&still_in_list, &obj->vma_list);
315 * Ensures that all rendering to the object has completed and the object is
316 * safe to unbind from the GTT or access from the CPU.
317 * @obj: i915 gem object
318 * @readonly: waiting for just read access or read-write access
321 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
324 struct reservation_object *resv;
325 struct i915_gem_active *active;
326 unsigned long active_mask;
329 lockdep_assert_held(&obj->base.dev->struct_mutex);
332 active = obj->last_read;
333 active_mask = i915_gem_object_get_active(obj);
336 active = &obj->last_write;
339 for_each_active(active_mask, idx) {
342 ret = i915_gem_active_wait(&active[idx],
343 &obj->base.dev->struct_mutex);
348 resv = i915_gem_object_get_dmabuf_resv(obj);
352 err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
353 MAX_SCHEDULE_TIMEOUT);
361 /* A nonblocking variant of the above wait. Must be called prior to
362 * acquiring the mutex for the object, as the object state may change
363 * during this call. A reference must be held by the caller for the object.
365 static __must_check int
366 __unsafe_wait_rendering(struct drm_i915_gem_object *obj,
367 struct intel_rps_client *rps,
370 struct i915_gem_active *active;
371 unsigned long active_mask;
374 active_mask = __I915_BO_ACTIVE(obj);
379 active = obj->last_read;
382 active = &obj->last_write;
385 for_each_active(active_mask, idx) {
388 ret = i915_gem_active_wait_unlocked(&active[idx],
389 I915_WAIT_INTERRUPTIBLE,
398 static struct intel_rps_client *to_rps_client(struct drm_file *file)
400 struct drm_i915_file_private *fpriv = file->driver_priv;
406 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
409 drm_dma_handle_t *phys;
412 if (obj->phys_handle) {
413 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
419 if (obj->madv != I915_MADV_WILLNEED)
422 if (obj->base.filp == NULL)
425 ret = i915_gem_object_unbind(obj);
429 ret = i915_gem_object_put_pages(obj);
433 /* create a new object */
434 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
438 obj->phys_handle = phys;
439 obj->ops = &i915_gem_phys_ops;
441 return i915_gem_object_get_pages(obj);
445 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
446 struct drm_i915_gem_pwrite *args,
447 struct drm_file *file_priv)
449 struct drm_device *dev = obj->base.dev;
450 void *vaddr = obj->phys_handle->vaddr + args->offset;
451 char __user *user_data = u64_to_user_ptr(args->data_ptr);
454 /* We manually control the domain here and pretend that it
455 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
457 ret = i915_gem_object_wait_rendering(obj, false);
461 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
462 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
463 unsigned long unwritten;
465 /* The physical object once assigned is fixed for the lifetime
466 * of the obj, so we can safely drop the lock and continue
469 mutex_unlock(&dev->struct_mutex);
470 unwritten = copy_from_user(vaddr, user_data, args->size);
471 mutex_lock(&dev->struct_mutex);
478 drm_clflush_virt_range(vaddr, args->size);
479 i915_gem_chipset_flush(to_i915(dev));
482 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
486 void *i915_gem_object_alloc(struct drm_device *dev)
488 struct drm_i915_private *dev_priv = to_i915(dev);
489 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
492 void i915_gem_object_free(struct drm_i915_gem_object *obj)
494 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
495 kmem_cache_free(dev_priv->objects, obj);
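/* Common backend for the create and dumb-create ioctls: allocate a
 * shmem-backed object of the (page-aligned) size and return a new handle.
 */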
499 i915_gem_create(struct drm_file *file,
500 struct drm_device *dev,
504 struct drm_i915_gem_object *obj;
508 size = roundup(size, PAGE_SIZE);
512 /* Allocate the new object */
513 obj = i915_gem_object_create(dev, size);
517 ret = drm_gem_handle_create(file, &obj->base, &handle);
518 /* drop reference from allocate - handle holds it now */
519 i915_gem_object_put_unlocked(obj);
528 i915_gem_dumb_create(struct drm_file *file,
529 struct drm_device *dev,
530 struct drm_mode_create_dumb *args)
532 /* have to work out size/pitch and return them */
533 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
534 args->size = args->pitch * args->height;
535 return i915_gem_create(file, dev,
536 args->size, &args->handle);
540 * Creates a new mm object and returns a handle to it.
541 * @dev: drm device pointer
542 * @data: ioctl data blob
543 * @file: drm file pointer
546 i915_gem_create_ioctl(struct drm_device *dev, void *data,
547 struct drm_file *file)
549 struct drm_i915_gem_create *args = data;
551 return i915_gem_create(file, dev,
552 args->size, &args->handle);
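/* Copy helpers for pages that need bit-17 swizzle compensation: data is
 * copied in 64-byte (cacheline) chunks with bit 6 of the GPU-side offset
 * flipped, mirroring what the hardware does for pages whose physical
 * address has bit 17 set.
 */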
556 __copy_to_user_swizzled(char __user *cpu_vaddr,
557 const char *gpu_vaddr, int gpu_offset,
560 int ret, cpu_offset = 0;
563 int cacheline_end = ALIGN(gpu_offset + 1, 64);
564 int this_length = min(cacheline_end - gpu_offset, length);
565 int swizzled_gpu_offset = gpu_offset ^ 64;
567 ret = __copy_to_user(cpu_vaddr + cpu_offset,
568 gpu_vaddr + swizzled_gpu_offset,
573 cpu_offset += this_length;
574 gpu_offset += this_length;
575 length -= this_length;
582 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
583 const char __user *cpu_vaddr,
586 int ret, cpu_offset = 0;
589 int cacheline_end = ALIGN(gpu_offset + 1, 64);
590 int this_length = min(cacheline_end - gpu_offset, length);
591 int swizzled_gpu_offset = gpu_offset ^ 64;
593 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
594 cpu_vaddr + cpu_offset,
599 cpu_offset += this_length;
600 gpu_offset += this_length;
601 length -= this_length;
608 * Pins the specified object's pages and synchronizes the object with
609 * GPU accesses. Sets needs_clflush to non-zero if the caller should
610 * flush the object from the CPU cache.
612 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
613 unsigned int *needs_clflush)
619 if (!i915_gem_object_has_struct_page(obj))
622 ret = i915_gem_object_wait_rendering(obj, true);
626 ret = i915_gem_object_get_pages(obj);
630 i915_gem_object_pin_pages(obj);
632 i915_gem_object_flush_gtt_write_domain(obj);
634 /* If we're not in the cpu read domain, set ourselves into the gtt
635 * read domain and manually flush cachelines (if required). This
636 * optimizes for the case when the gpu will dirty the data
637 * anyway again before the next pread happens.
639 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
640 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
643 if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
644 ret = i915_gem_object_set_to_cpu_domain(obj, false);
651 /* return with the pages pinned */
655 i915_gem_object_unpin_pages(obj);
659 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
660 unsigned int *needs_clflush)
665 if (!i915_gem_object_has_struct_page(obj))
668 ret = i915_gem_object_wait_rendering(obj, false);
672 ret = i915_gem_object_get_pages(obj);
676 i915_gem_object_pin_pages(obj);
678 i915_gem_object_flush_gtt_write_domain(obj);
680 /* If we're not in the cpu write domain, set ourselves into the
681 * gtt write domain and manually flush cachelines (as required).
682 * This optimizes for the case when the gpu will use the data
683 * right away and we therefore have to clflush anyway.
685 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
686 *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
688 /* Same trick applies to invalidate partially written cachelines read
691 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
692 *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
695 if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
696 ret = i915_gem_object_set_to_cpu_domain(obj, true);
703 if ((*needs_clflush & CLFLUSH_AFTER) == 0)
704 obj->cache_dirty = true;
706 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
708 /* return with the pages pinned */
712 i915_gem_object_unpin_pages(obj);
716 /* Per-page copy function for the shmem pread fastpath.
717 * Flushes invalid cachelines before reading the target if
718 * needs_clflush is set. */
720 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
721 char __user *user_data,
722 bool page_do_bit17_swizzling, bool needs_clflush)
727 if (unlikely(page_do_bit17_swizzling))
730 vaddr = kmap_atomic(page);
732 drm_clflush_virt_range(vaddr + shmem_page_offset,
734 ret = __copy_to_user_inatomic(user_data,
735 vaddr + shmem_page_offset,
737 kunmap_atomic(vaddr);
739 return ret ? -EFAULT : 0;
743 shmem_clflush_swizzled_range(char *addr, unsigned long length,
746 if (unlikely(swizzled)) {
747 unsigned long start = (unsigned long) addr;
748 unsigned long end = (unsigned long) addr + length;
750 /* For swizzling simply ensure that we always flush both
751 * channels. Lame, but simple and it works. Swizzled
752 * pwrite/pread is far from a hotpath - current userspace
753 * doesn't use it at all. */
754 start = round_down(start, 128);
755 end = round_up(end, 128);
757 drm_clflush_virt_range((void *)start, end - start);
759 drm_clflush_virt_range(addr, length);
764 /* Only difference to the fast-path function is that this can handle bit17
765 * and uses non-atomic copy and kmap functions. */
767 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
768 char __user *user_data,
769 bool page_do_bit17_swizzling, bool needs_clflush)
776 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
778 page_do_bit17_swizzling);
780 if (page_do_bit17_swizzling)
781 ret = __copy_to_user_swizzled(user_data,
782 vaddr, shmem_page_offset,
785 ret = __copy_to_user(user_data,
786 vaddr + shmem_page_offset,
790 return ret ? -EFAULT : 0;
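/* Copy a single aperture page to or from userspace through a WC mapping.
 * Unlike the atomic fast paths this copy may fault and sleep, so callers
 * drop struct_mutex around it.
 */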
793 static inline unsigned long
794 slow_user_access(struct io_mapping *mapping,
795 uint64_t page_base, int page_offset,
796 char __user *user_data,
797 unsigned long length, bool pwrite)
799 void __iomem *ioaddr;
803 ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
804 /* We can use the cpu mem copy function because this is X86. */
805 vaddr = (void __force *)ioaddr + page_offset;
807 unwritten = __copy_from_user(vaddr, user_data, length);
809 unwritten = __copy_to_user(user_data, vaddr, length);
811 io_mapping_unmap(ioaddr);
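/* Fallback pread path that reads through the GTT: pin the object into the
 * mappable aperture (or copy page by page via a temporary GGTT PTE if that
 * fails) and copy to userspace with struct_mutex dropped around the access.
 */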
816 i915_gem_gtt_pread(struct drm_device *dev,
817 struct drm_i915_gem_object *obj, uint64_t size,
818 uint64_t data_offset, uint64_t data_ptr)
820 struct drm_i915_private *dev_priv = to_i915(dev);
821 struct i915_ggtt *ggtt = &dev_priv->ggtt;
822 struct i915_vma *vma;
823 struct drm_mm_node node;
824 char __user *user_data;
829 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
831 node.start = i915_ggtt_offset(vma);
832 node.allocated = false;
833 ret = i915_vma_put_fence(vma);
840 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
844 ret = i915_gem_object_get_pages(obj);
846 remove_mappable_node(&node);
850 i915_gem_object_pin_pages(obj);
853 ret = i915_gem_object_set_to_gtt_domain(obj, false);
857 user_data = u64_to_user_ptr(data_ptr);
859 offset = data_offset;
861 mutex_unlock(&dev->struct_mutex);
862 if (likely(!i915.prefault_disable)) {
863 ret = fault_in_pages_writeable(user_data, remain);
865 mutex_lock(&dev->struct_mutex);
871 /* Operation in this page
873 * page_base = page offset within aperture
874 * page_offset = offset within page
875 * page_length = bytes to copy for this page
877 u32 page_base = node.start;
878 unsigned page_offset = offset_in_page(offset);
879 unsigned page_length = PAGE_SIZE - page_offset;
880 page_length = remain < page_length ? remain : page_length;
881 if (node.allocated) {
883 ggtt->base.insert_page(&ggtt->base,
884 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
889 page_base += offset & PAGE_MASK;
891 /* This is a slow read/write as it tries to read from
892 * and write to user memory which may result into page
893 * faults, and so we cannot perform this under struct_mutex.
895 if (slow_user_access(&ggtt->mappable, page_base,
896 page_offset, user_data,
897 page_length, false)) {
902 remain -= page_length;
903 user_data += page_length;
904 offset += page_length;
907 mutex_lock(&dev->struct_mutex);
908 if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
909 /* The user has modified the object whilst we tried
910 * reading from it, and we now have no idea what domain
911 * the pages should be in. As we have just been touching
912 * them directly, flush everything back to the GTT
915 ret = i915_gem_object_set_to_gtt_domain(obj, false);
919 if (node.allocated) {
921 ggtt->base.clear_range(&ggtt->base,
922 node.start, node.size,
924 i915_gem_object_unpin_pages(obj);
925 remove_mappable_node(&node);
934 i915_gem_shmem_pread(struct drm_device *dev,
935 struct drm_i915_gem_object *obj,
936 struct drm_i915_gem_pread *args,
937 struct drm_file *file)
939 char __user *user_data;
942 int shmem_page_offset, page_length, ret = 0;
943 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
945 int needs_clflush = 0;
946 struct sg_page_iter sg_iter;
948 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
952 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
953 user_data = u64_to_user_ptr(args->data_ptr);
954 offset = args->offset;
957 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
958 offset >> PAGE_SHIFT) {
959 struct page *page = sg_page_iter_page(&sg_iter);
964 /* Operation in this page
966 * shmem_page_offset = offset within page in shmem file
967 * page_length = bytes to copy for this page
969 shmem_page_offset = offset_in_page(offset);
970 page_length = remain;
971 if ((shmem_page_offset + page_length) > PAGE_SIZE)
972 page_length = PAGE_SIZE - shmem_page_offset;
974 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
975 (page_to_phys(page) & (1 << 17)) != 0;
977 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
978 user_data, page_do_bit17_swizzling,
983 mutex_unlock(&dev->struct_mutex);
985 if (likely(!i915.prefault_disable) && !prefaulted) {
986 ret = fault_in_pages_writeable(user_data, remain);
987 /* Userspace is tricking us, but we've already clobbered
988 * its pages with the prefault and promised to write the
989 * data up to the first fault. Hence ignore any errors
990 * and just continue. */
995 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
996 user_data, page_do_bit17_swizzling,
999 mutex_lock(&dev->struct_mutex);
1005 remain -= page_length;
1006 user_data += page_length;
1007 offset += page_length;
1011 i915_gem_obj_finish_shmem_access(obj);
1017 * Reads data from the object referenced by handle.
1018 * @dev: drm device pointer
1019 * @data: ioctl data blob
1020 * @file: drm file pointer
1022 * On error, the contents of *data are undefined.
1025 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1026 struct drm_file *file)
1028 struct drm_i915_gem_pread *args = data;
1029 struct drm_i915_gem_object *obj;
1032 if (args->size == 0)
1035 if (!access_ok(VERIFY_WRITE,
1036 u64_to_user_ptr(args->data_ptr),
1040 obj = i915_gem_object_lookup(file, args->handle);
1044 /* Bounds check source. */
1045 if (args->offset > obj->base.size ||
1046 args->size > obj->base.size - args->offset) {
1051 trace_i915_gem_object_pread(obj, args->offset, args->size);
1053 ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
1057 ret = i915_mutex_lock_interruptible(dev);
1061 ret = i915_gem_shmem_pread(dev, obj, args, file);
1063 /* pread for non-shmem backed objects */
1064 if (ret == -EFAULT || ret == -ENODEV) {
1065 intel_runtime_pm_get(to_i915(dev));
1066 ret = i915_gem_gtt_pread(dev, obj, args->size,
1067 args->offset, args->data_ptr);
1068 intel_runtime_pm_put(to_i915(dev));
1071 i915_gem_object_put(obj);
1072 mutex_unlock(&dev->struct_mutex);
1077 i915_gem_object_put_unlocked(obj);
1081 /* This is the fast write path which cannot handle
1082 * page faults in the source data
1086 fast_user_write(struct io_mapping *mapping,
1087 loff_t page_base, int page_offset,
1088 char __user *user_data,
1091 void __iomem *vaddr_atomic;
1093 unsigned long unwritten;
1095 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
1096 /* We can use the cpu mem copy function because this is X86. */
1097 vaddr = (void __force *)vaddr_atomic + page_offset;
1098 unwritten = __copy_from_user_inatomic_nocache(vaddr,
1100 io_mapping_unmap_atomic(vaddr_atomic);
1105 * This is the fast pwrite path, where we copy the data directly from the
1106 * user into the GTT, uncached.
1107 * @i915: i915 device private data
1108 * @obj: i915 gem object
1109 * @args: pwrite arguments structure
1110 * @file: drm file pointer
1113 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
1114 struct drm_i915_gem_object *obj,
1115 struct drm_i915_gem_pwrite *args,
1116 struct drm_file *file)
1118 struct i915_ggtt *ggtt = &i915->ggtt;
1119 struct drm_device *dev = obj->base.dev;
1120 struct i915_vma *vma;
1121 struct drm_mm_node node;
1122 uint64_t remain, offset;
1123 char __user *user_data;
1125 bool hit_slow_path = false;
1127 if (i915_gem_object_is_tiled(obj))
1130 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1131 PIN_MAPPABLE | PIN_NONBLOCK);
1133 node.start = i915_ggtt_offset(vma);
1134 node.allocated = false;
1135 ret = i915_vma_put_fence(vma);
1137 i915_vma_unpin(vma);
1142 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
1146 ret = i915_gem_object_get_pages(obj);
1148 remove_mappable_node(&node);
1152 i915_gem_object_pin_pages(obj);
1155 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1159 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1162 user_data = u64_to_user_ptr(args->data_ptr);
1163 offset = args->offset;
1164 remain = args->size;
1166 /* Operation in this page
1168 * page_base = page offset within aperture
1169 * page_offset = offset within page
1170 * page_length = bytes to copy for this page
1172 u32 page_base = node.start;
1173 unsigned page_offset = offset_in_page(offset);
1174 unsigned page_length = PAGE_SIZE - page_offset;
1175 page_length = remain < page_length ? remain : page_length;
1176 if (node.allocated) {
1177 wmb(); /* flush the write before we modify the GGTT */
1178 ggtt->base.insert_page(&ggtt->base,
1179 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1180 node.start, I915_CACHE_NONE, 0);
1181 wmb(); /* flush modifications to the GGTT (insert_page) */
1183 page_base += offset & PAGE_MASK;
1185 /* If we get a fault while copying data, then (presumably) our
1186 * source page isn't available. Return the error and we'll
1187 * retry in the slow path.
1188 * If the object is non-shmem backed, we retry again with the
1189 * path that handles page faults.
1191 if (fast_user_write(&ggtt->mappable, page_base,
1192 page_offset, user_data, page_length)) {
1193 hit_slow_path = true;
1194 mutex_unlock(&dev->struct_mutex);
1195 if (slow_user_access(&ggtt->mappable,
1197 page_offset, user_data,
1198 page_length, true)) {
1200 mutex_lock(&dev->struct_mutex);
1204 mutex_lock(&dev->struct_mutex);
1207 remain -= page_length;
1208 user_data += page_length;
1209 offset += page_length;
1213 if (hit_slow_path) {
1215 (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1216 /* The user has modified the object whilst we tried
1217 * reading from it, and we now have no idea what domain
1218 * the pages should be in. As we have just been touching
1219 * them directly, flush everything back to the GTT
1222 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1226 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1228 if (node.allocated) {
1230 ggtt->base.clear_range(&ggtt->base,
1231 node.start, node.size,
1233 i915_gem_object_unpin_pages(obj);
1234 remove_mappable_node(&node);
1236 i915_vma_unpin(vma);
1242 /* Per-page copy function for the shmem pwrite fastpath.
1243 * Flushes invalid cachelines before writing to the target if
1244 * needs_clflush_before is set and flushes out any written cachelines after
1245 * writing if needs_clflush is set. */
1247 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1248 char __user *user_data,
1249 bool page_do_bit17_swizzling,
1250 bool needs_clflush_before,
1251 bool needs_clflush_after)
1256 if (unlikely(page_do_bit17_swizzling))
1259 vaddr = kmap_atomic(page);
1260 if (needs_clflush_before)
1261 drm_clflush_virt_range(vaddr + shmem_page_offset,
1263 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1264 user_data, page_length);
1265 if (needs_clflush_after)
1266 drm_clflush_virt_range(vaddr + shmem_page_offset,
1268 kunmap_atomic(vaddr);
1270 return ret ? -EFAULT : 0;
1273 /* Only difference to the fast-path function is that this can handle bit17
1274 * and uses non-atomic copy and kmap functions. */
1276 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1277 char __user *user_data,
1278 bool page_do_bit17_swizzling,
1279 bool needs_clflush_before,
1280 bool needs_clflush_after)
1286 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1287 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1289 page_do_bit17_swizzling);
1290 if (page_do_bit17_swizzling)
1291 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1295 ret = __copy_from_user(vaddr + shmem_page_offset,
1298 if (needs_clflush_after)
1299 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1301 page_do_bit17_swizzling);
1304 return ret ? -EFAULT : 0;
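/* Write to a shmem-backed object page by page via kmap, flushing cachelines
 * as needed: try the atomic fast path under struct_mutex first and fall back
 * to the sleeping slow path with the lock dropped when that faults.
 */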
1308 i915_gem_shmem_pwrite(struct drm_device *dev,
1309 struct drm_i915_gem_object *obj,
1310 struct drm_i915_gem_pwrite *args,
1311 struct drm_file *file)
1315 char __user *user_data;
1316 int shmem_page_offset, page_length, ret = 0;
1317 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1318 int hit_slowpath = 0;
1319 unsigned int needs_clflush;
1320 struct sg_page_iter sg_iter;
1322 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1326 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1327 user_data = u64_to_user_ptr(args->data_ptr);
1328 offset = args->offset;
1329 remain = args->size;
1331 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1332 offset >> PAGE_SHIFT) {
1333 struct page *page = sg_page_iter_page(&sg_iter);
1334 int partial_cacheline_write;
1339 /* Operation in this page
1341 * shmem_page_offset = offset within page in shmem file
1342 * page_length = bytes to copy for this page
1344 shmem_page_offset = offset_in_page(offset);
1346 page_length = remain;
1347 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1348 page_length = PAGE_SIZE - shmem_page_offset;
1350 /* If we don't overwrite a cacheline completely we need to be
1351 * careful to have up-to-date data by first clflushing. Don't
1352 * overcomplicate things and flush the entire patch. */
1353 partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
1354 ((shmem_page_offset | page_length)
1355 & (boot_cpu_data.x86_clflush_size - 1));
1357 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1358 (page_to_phys(page) & (1 << 17)) != 0;
1360 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1361 user_data, page_do_bit17_swizzling,
1362 partial_cacheline_write,
1363 needs_clflush & CLFLUSH_AFTER);
1368 mutex_unlock(&dev->struct_mutex);
1369 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1370 user_data, page_do_bit17_swizzling,
1371 partial_cacheline_write,
1372 needs_clflush & CLFLUSH_AFTER);
1374 mutex_lock(&dev->struct_mutex);
1380 remain -= page_length;
1381 user_data += page_length;
1382 offset += page_length;
1386 i915_gem_obj_finish_shmem_access(obj);
1390 * Fixup: Flush cpu caches in case we didn't flush the dirty
1391 * cachelines in-line while writing and the object moved
1392 * out of the cpu write domain while we've dropped the lock.
1394 if (!(needs_clflush & CLFLUSH_AFTER) &&
1395 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1396 if (i915_gem_clflush_object(obj, obj->pin_display))
1397 needs_clflush |= CLFLUSH_AFTER;
1401 if (needs_clflush & CLFLUSH_AFTER)
1402 i915_gem_chipset_flush(to_i915(dev));
1404 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1409 * Writes data to the object referenced by handle.
1411 * @data: ioctl data blob
1414 * On error, the contents of the buffer that were to be modified are undefined.
1417 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1418 struct drm_file *file)
1420 struct drm_i915_private *dev_priv = to_i915(dev);
1421 struct drm_i915_gem_pwrite *args = data;
1422 struct drm_i915_gem_object *obj;
1425 if (args->size == 0)
1428 if (!access_ok(VERIFY_READ,
1429 u64_to_user_ptr(args->data_ptr),
1433 if (likely(!i915.prefault_disable)) {
1434 ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
1440 obj = i915_gem_object_lookup(file, args->handle);
1444 /* Bounds check destination. */
1445 if (args->offset > obj->base.size ||
1446 args->size > obj->base.size - args->offset) {
1451 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1453 ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
1457 intel_runtime_pm_get(dev_priv);
1459 ret = i915_mutex_lock_interruptible(dev);
1464 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1465 * it would end up going through the fenced access, and we'll get
1466 * different detiling behavior between reading and writing.
1467 * pread/pwrite currently are reading and writing from the CPU
1468 * perspective, requiring manual detiling by the client.
1470 if (!i915_gem_object_has_struct_page(obj) ||
1471 cpu_write_needs_clflush(obj)) {
1472 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1473 /* Note that the gtt paths might fail with non-page-backed user
1474 * pointers (e.g. gtt mappings when moving data between
1475 * textures). Fall back to the shmem path in that case. */
1478 if (ret == -EFAULT || ret == -ENOSPC) {
1479 if (obj->phys_handle)
1480 ret = i915_gem_phys_pwrite(obj, args, file);
1482 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1485 i915_gem_object_put(obj);
1486 mutex_unlock(&dev->struct_mutex);
1487 intel_runtime_pm_put(dev_priv);
1492 intel_runtime_pm_put(dev_priv);
1494 i915_gem_object_put_unlocked(obj);
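/* Report whether a write reached the object through a GTT mapping or the
 * CPU, so that frontbuffer tracking uses the right origin.
 */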
1498 static inline enum fb_op_origin
1499 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1501 return (domain == I915_GEM_DOMAIN_GTT ?
1502 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1506 * Called when user space prepares to use an object with the CPU, either
1507 * through the mmap ioctl's mapping or a GTT mapping.
1509 * @data: ioctl data blob
1513 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1514 struct drm_file *file)
1516 struct drm_i915_gem_set_domain *args = data;
1517 struct drm_i915_gem_object *obj;
1518 uint32_t read_domains = args->read_domains;
1519 uint32_t write_domain = args->write_domain;
1522 /* Only handle setting domains to types used by the CPU. */
1523 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1526 /* Having something in the write domain implies it's in the read
1527 * domain, and only that read domain. Enforce that in the request.
1529 if (write_domain != 0 && read_domains != write_domain)
1532 obj = i915_gem_object_lookup(file, args->handle);
1536 /* Try to flush the object off the GPU without holding the lock.
1537 * We will repeat the flush holding the lock in the normal manner
1538 * to catch cases where we are gazumped.
1540 ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
1544 ret = i915_mutex_lock_interruptible(dev);
1548 if (read_domains & I915_GEM_DOMAIN_GTT)
1549 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1551 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1553 if (write_domain != 0)
1554 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1556 i915_gem_object_put(obj);
1557 mutex_unlock(&dev->struct_mutex);
1561 i915_gem_object_put_unlocked(obj);
1566 * Called when user space has done writes to this buffer
1568 * @data: ioctl data blob
1572 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1573 struct drm_file *file)
1575 struct drm_i915_gem_sw_finish *args = data;
1576 struct drm_i915_gem_object *obj;
1579 obj = i915_gem_object_lookup(file, args->handle);
1583 /* Pinned buffers may be scanout, so flush the cache */
1584 if (READ_ONCE(obj->pin_display)) {
1585 err = i915_mutex_lock_interruptible(dev);
1587 i915_gem_object_flush_cpu_write_domain(obj);
1588 mutex_unlock(&dev->struct_mutex);
1592 i915_gem_object_put_unlocked(obj);
1597 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1600 * @data: ioctl data blob
1603 * While the mapping holds a reference on the contents of the object, it doesn't
1604 * imply a ref on the object itself.
1608 * DRM driver writers who look at this function as an example for how to do GEM
1609 * mmap support, please don't implement mmap support like here. The modern way
1610 * to implement DRM mmap support is with an mmap offset ioctl (like
1611 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1612 * That way debug tooling like valgrind will understand what's going on, hiding
1613 * the mmap call in a driver private ioctl will break that. The i915 driver only
1614 * does cpu mmaps this way because we didn't know better.
1617 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1618 struct drm_file *file)
1620 struct drm_i915_gem_mmap *args = data;
1621 struct drm_i915_gem_object *obj;
1624 if (args->flags & ~(I915_MMAP_WC))
1627 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1630 obj = i915_gem_object_lookup(file, args->handle);
1634 /* prime objects have no backing filp to GEM mmap
1637 if (!obj->base.filp) {
1638 i915_gem_object_put_unlocked(obj);
1642 addr = vm_mmap(obj->base.filp, 0, args->size,
1643 PROT_READ | PROT_WRITE, MAP_SHARED,
1645 if (args->flags & I915_MMAP_WC) {
1646 struct mm_struct *mm = current->mm;
1647 struct vm_area_struct *vma;
1649 if (down_write_killable(&mm->mmap_sem)) {
1650 i915_gem_object_put_unlocked(obj);
1653 vma = find_vma(mm, addr);
1656 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1659 up_write(&mm->mmap_sem);
1661 /* This may race, but that's ok, it only gets set */
1662 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1664 i915_gem_object_put_unlocked(obj);
1665 if (IS_ERR((void *)addr))
1668 args->addr_ptr = (uint64_t) addr;
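/* Number of pages covered by a single tile row of the object (stride times
 * the tile height for its tiling mode).
 */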
1673 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1677 size = i915_gem_object_get_stride(obj);
1678 size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1680 return size >> PAGE_SHIFT;
1684 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1686 * A history of the GTT mmap interface:
1688 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1689 * be aligned and suitable for fencing, and still fit into the available
1690 * mappable space left by the pinned display objects. A classic problem
1691 * we called the page-fault-of-doom where we would ping-pong between
1692 * two objects that could not fit inside the GTT and so the memcpy
1693 * would page one object in at the expense of the other between every
1696 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1697 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1698 * object is too large for the available space (or simply too large
1699 * for the mappable aperture!), a view is created instead and faulted
1700 * into userspace. (This view is aligned and sized appropriately for
1705 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1706 * hangs on some architectures, corruption on others. An attempt to service
1707 * a GTT page fault from a snoopable object will generate a SIGBUS.
1709 * * the object must be able to fit into RAM (physical memory, though not
1710 * limited to the mappable aperture).
1715 * * a new GTT page fault will synchronize rendering from the GPU and flush
1716 * all data to system memory. Subsequent access will not be synchronized.
1718 * * all mappings are revoked on runtime device suspend.
1720 * * there are only 8, 16 or 32 fence registers to share between all users
1721 * (older machines require a fence register for display and blitter access
1722 * as well). Contention of the fence registers will cause the previous users
1723 * to be unmapped and any new access will generate new page faults.
1725 * * running out of memory while servicing a fault may generate a SIGBUS,
1726 * rather than the expected SIGSEGV.
1728 int i915_gem_mmap_gtt_version(void)
1734 * i915_gem_fault - fault a page into the GTT
1735 * @area: CPU VMA in question
1738 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1739 * from userspace. The fault handler takes care of binding the object to
1740 * the GTT (if needed), allocating and programming a fence register (again,
1741 * only if needed based on whether the old reg is still valid or the object
1742 * is tiled) and inserting a new PTE into the faulting process.
1744 * Note that the faulting process may involve evicting existing objects
1745 * from the GTT and/or fence registers to make room. So performance may
1746 * suffer if the GTT working set is large or there are few fence registers
1749 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1750 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1752 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1754 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1755 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1756 struct drm_device *dev = obj->base.dev;
1757 struct drm_i915_private *dev_priv = to_i915(dev);
1758 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1759 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1760 struct i915_vma *vma;
1761 pgoff_t page_offset;
1765 /* We don't use vmf->pgoff since that has the fake offset */
1766 page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
1769 trace_i915_gem_object_fault(obj, page_offset, true, write);
1771 /* Try to flush the object off the GPU first without holding the lock.
1772 * Upon acquiring the lock, we will perform our sanity checks and then
1773 * repeat the flush holding the lock in the normal manner to catch cases
1774 * where we are gazumped.
1776 ret = __unsafe_wait_rendering(obj, NULL, !write);
1780 intel_runtime_pm_get(dev_priv);
1782 ret = i915_mutex_lock_interruptible(dev);
1786 /* Access to snoopable pages through the GTT is incoherent. */
1787 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1792 /* If the object is smaller than a couple of partial vma, it is
1793 * not worth only creating a single partial vma - we may as well
1794 * clear enough space for the full object.
1796 flags = PIN_MAPPABLE;
1797 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1798 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1800 /* Now pin it into the GTT as needed */
1801 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1803 struct i915_ggtt_view view;
1804 unsigned int chunk_size;
1806 /* Use a partial view if it is bigger than available space */
1807 chunk_size = MIN_CHUNK_PAGES;
1808 if (i915_gem_object_is_tiled(obj))
1809 chunk_size = max(chunk_size, tile_row_pages(obj));
1811 memset(&view, 0, sizeof(view));
1812 view.type = I915_GGTT_VIEW_PARTIAL;
1813 view.params.partial.offset = rounddown(page_offset, chunk_size);
1814 view.params.partial.size =
1815 min_t(unsigned int, chunk_size,
1816 (area->vm_end - area->vm_start) / PAGE_SIZE -
1817 view.params.partial.offset);
1819 /* If the partial covers the entire object, just create a
1822 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1823 view.type = I915_GGTT_VIEW_NORMAL;
1825 /* Userspace is now writing through an untracked VMA, abandon
1826 * all hope that the hardware is able to track future writes.
1828 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1830 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1837 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1841 ret = i915_vma_get_fence(vma);
1845 /* Finally, remap it using the new GTT offset */
1846 ret = remap_io_mapping(area,
1847 area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1848 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1849 min_t(u64, vma->size, area->vm_end - area->vm_start),
1854 obj->fault_mappable = true;
1856 __i915_vma_unpin(vma);
1858 mutex_unlock(&dev->struct_mutex);
1860 intel_runtime_pm_put(dev_priv);
1865 * We eat errors when the gpu is terminally wedged to avoid
1866 * userspace unduly crashing (gl has no provisions for mmaps to
1867 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1868 * and so needs to be reported.
1870 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1871 ret = VM_FAULT_SIGBUS;
1876 * EAGAIN means the gpu is hung and we'll wait for the error
1877 * handler to reset everything when re-faulting in
1878 * i915_mutex_lock_interruptible.
1885 * EBUSY is ok: this just means that another thread
1886 * already did the job.
1888 ret = VM_FAULT_NOPAGE;
1895 ret = VM_FAULT_SIGBUS;
1898 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1899 ret = VM_FAULT_SIGBUS;
1906 * i915_gem_release_mmap - remove physical page mappings
1907 * @obj: obj in question
1909 * Preserve the reservation of the mmapping with the DRM core code, but
1910 * relinquish ownership of the pages back to the system.
1912 * It is vital that we remove the page mapping if we have mapped a tiled
1913 * object through the GTT and then lose the fence register due to
1914 * resource pressure. Similarly if the object has been moved out of the
1915 * aperture, then pages mapped into userspace must be revoked. Removing the
1916 * mapping will then trigger a page fault on the next user access, allowing
1917 * fixup by i915_gem_fault().
1920 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1922 /* Serialisation between user GTT access and our code depends upon
1923 * revoking the CPU's PTE whilst the mutex is held. The next user
1924 * pagefault then has to wait until we release the mutex.
1926 lockdep_assert_held(&obj->base.dev->struct_mutex);
1928 if (!obj->fault_mappable)
1931 drm_vma_node_unmap(&obj->base.vma_node,
1932 obj->base.dev->anon_inode->i_mapping);
1934 /* Ensure that the CPU's PTE are revoked and there are not outstanding
1935 * memory transactions from userspace before we return. The TLB
1936 * flushing implied above by changing the PTE above *should* be
1937 * sufficient, an extra barrier here just provides us with a bit
1938 * of paranoid documentation about our requirement to serialise
1939 * memory writes before touching registers / GSM.
1943 obj->fault_mappable = false;
1947 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1949 struct drm_i915_gem_object *obj;
1951 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1952 i915_gem_release_mmap(obj);
1956 * i915_gem_get_ggtt_size - return required global GTT size for an object
1957 * @dev_priv: i915 device
1958 * @size: object size
1959 * @tiling_mode: tiling mode
1961 * Return the required global GTT size for an object, taking into account
1962 * potential fence register mapping.
1964 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
1965 u64 size, int tiling_mode)
1969 GEM_BUG_ON(size == 0);
1971 if (INTEL_GEN(dev_priv) >= 4 ||
1972 tiling_mode == I915_TILING_NONE)
1975 /* Previous chips need a power-of-two fence region when tiling */
1976 if (IS_GEN3(dev_priv))
1977 ggtt_size = 1024*1024;
1979 ggtt_size = 512*1024;
1981 while (ggtt_size < size)
1988 * i915_gem_get_ggtt_alignment - return required global GTT alignment
1989 * @dev_priv: i915 device
1990 * @size: object size
1991 * @tiling_mode: tiling mode
1992 * @fenced: is fenced alignment required or not
1994 * Return the required global GTT alignment for an object, taking into account
1995 * potential fence register mapping.
1997 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
1998 int tiling_mode, bool fenced)
2000 GEM_BUG_ON(size == 0);
2003 * Minimum alignment is 4k (GTT page size), but might be greater
2004 * if a fence register is needed for the object.
2006 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
2007 tiling_mode == I915_TILING_NONE)
2011 * Previous chips need to be aligned to the size of the smallest
2012 * fence register that can contain the object.
2014 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
2017 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2019 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2022 err = drm_gem_create_mmap_offset(&obj->base);
2026 /* We can idle the GPU locklessly to flush stale objects, but in order
2027 * to claim that space for ourselves, we need to take the big
2028 * struct_mutex to free the requests+objects and allocate our slot.
2030 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2034 err = i915_mutex_lock_interruptible(&dev_priv->drm);
2036 i915_gem_retire_requests(dev_priv);
2037 err = drm_gem_create_mmap_offset(&obj->base);
2038 mutex_unlock(&dev_priv->drm.struct_mutex);
2044 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2046 drm_gem_free_mmap_offset(&obj->base);
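/* Look up the object, ensure it has a fake mmap offset allocated and return
 * that offset for userspace to pass to mmap().
 */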
2050 i915_gem_mmap_gtt(struct drm_file *file,
2051 struct drm_device *dev,
2055 struct drm_i915_gem_object *obj;
2058 obj = i915_gem_object_lookup(file, handle);
2062 ret = i915_gem_object_create_mmap_offset(obj);
2064 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2066 i915_gem_object_put_unlocked(obj);
2071 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2073 * @data: GTT mapping ioctl data
2074 * @file: GEM object info
2076 * Simply returns the fake offset to userspace so it can mmap it.
2077 * The mmap call will end up in drm_gem_mmap(), which will set things
2078 * up so we can get faults in the handler above.
2080 * The fault handler will take care of binding the object into the GTT
2081 * (since it may have been evicted to make room for something), allocating
2082 * a fence register, and mapping the appropriate aperture address into
2086 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2087 struct drm_file *file)
2089 struct drm_i915_gem_mmap_gtt *args = data;
2091 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2094 /* Immediately discard the backing storage */
2096 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2098 i915_gem_object_free_mmap_offset(obj);
2100 if (obj->base.filp == NULL)
2103 /* Our goal here is to return as much of the memory as
2104 * is possible back to the system as we are called from OOM.
2105 * To do this we must instruct the shmfs to drop all of its
2106 * backing pages, *now*.
2108 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2109 obj->madv = __I915_MADV_PURGED;
2112 /* Try to discard unwanted pages */
2114 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2116 struct address_space *mapping;
2118 switch (obj->madv) {
2119 case I915_MADV_DONTNEED:
2120 i915_gem_object_truncate(obj);
2121 case __I915_MADV_PURGED:
2125 if (obj->base.filp == NULL)
2128 mapping = obj->base.filp->f_mapping;
2129 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
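/* Release the shmemfs pages backing an object: move it to the CPU domain,
 * save bit-17 swizzle state if required, mark the pages dirty/accessed as
 * appropriate and drop our references.
 */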
2133 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2135 struct sgt_iter sgt_iter;
2139 BUG_ON(obj->madv == __I915_MADV_PURGED);
2141 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2143 /* In the event of a disaster, abandon all caches and
2144 * hope for the best.
2146 i915_gem_clflush_object(obj, true);
2147 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2150 i915_gem_gtt_finish_object(obj);
2152 if (i915_gem_object_needs_bit17_swizzle(obj))
2153 i915_gem_object_save_bit_17_swizzle(obj);
2155 if (obj->madv == I915_MADV_DONTNEED)
2158 for_each_sgt_page(page, sgt_iter, obj->pages) {
2160 set_page_dirty(page);
2162 if (obj->madv == I915_MADV_WILLNEED)
2163 mark_page_accessed(page);
2169 sg_free_table(obj->pages);
2174 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2176 const struct drm_i915_gem_object_ops *ops = obj->ops;
2178 if (obj->pages == NULL)
2181 if (obj->pages_pin_count)
2184 GEM_BUG_ON(obj->bind_count);
2186 /* ->put_pages might need to allocate memory for the bit17 swizzle
2187 * array, hence protect them from being reaped by removing them from gtt
2189 list_del(&obj->global_list);
2194 ptr = ptr_mask_bits(obj->mapping);
2195 if (is_vmalloc_addr(ptr))
2198 kunmap(kmap_to_page(ptr));
2200 obj->mapping = NULL;
2203 ops->put_pages(obj);
2206 i915_gem_object_invalidate(obj);
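/* Allocate the backing store from shmemfs, shrinking our own buffers on
 * memory pressure before retrying, and build a scatterlist that coalesces
 * physically contiguous pages (unless swiotlb is active).
 */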
2212 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2214 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2216 struct address_space *mapping;
2217 struct sg_table *st;
2218 struct scatterlist *sg;
2219 struct sgt_iter sgt_iter;
2221 unsigned long last_pfn = 0; /* suppress gcc warning */
2225 /* Assert that the object is not currently in any GPU domain. As it
2226 * wasn't in the GTT, there shouldn't be any way it could have been in
2229 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2230 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2232 st = kmalloc(sizeof(*st), GFP_KERNEL);
2236 page_count = obj->base.size / PAGE_SIZE;
2237 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2242 /* Get the list of pages out of our struct file. They'll be pinned
2243 * at this point until we release them.
2245 * Fail silently without starting the shrinker
2247 mapping = obj->base.filp->f_mapping;
2248 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2249 gfp |= __GFP_NORETRY | __GFP_NOWARN;
2252 for (i = 0; i < page_count; i++) {
2253 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2255 i915_gem_shrink(dev_priv,
2258 I915_SHRINK_UNBOUND |
2259 I915_SHRINK_PURGEABLE);
2260 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2263 /* We've tried hard to allocate the memory by reaping
2264 * our own buffer, now let the real VM do its job and
2265 * go down in flames if truly OOM.
2267 i915_gem_shrink_all(dev_priv);
2268 page = shmem_read_mapping_page(mapping, i);
2270 ret = PTR_ERR(page);
2274 #ifdef CONFIG_SWIOTLB
2275 if (swiotlb_nr_tbl()) {
2277 sg_set_page(sg, page, PAGE_SIZE, 0);
2282 if (!i || page_to_pfn(page) != last_pfn + 1) {
2286 sg_set_page(sg, page, PAGE_SIZE, 0);
2288 sg->length += PAGE_SIZE;
2290 last_pfn = page_to_pfn(page);
2292 /* Check that the i965g/gm workaround works. */
2293 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2295 #ifdef CONFIG_SWIOTLB
2296 if (!swiotlb_nr_tbl())
2301 ret = i915_gem_gtt_prepare_object(obj);
2305 if (i915_gem_object_needs_bit17_swizzle(obj))
2306 i915_gem_object_do_bit_17_swizzle(obj);
2308 if (i915_gem_object_is_tiled(obj) &&
2309 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2310 i915_gem_object_pin_pages(obj);
2316 for_each_sgt_page(page, sgt_iter, st)
2321 /* shmemfs first checks if there is enough memory to allocate the page
2322 * and reports ENOSPC should there be insufficient memory, along with the usual
2323 * ENOMEM for a genuine allocation failure.
2325 * We use ENOSPC in our driver to mean that we have run out of aperture
2326 * space and so want to translate the error from shmemfs back to our
2327 * usual understanding of ENOMEM.
2335 /* Ensure that the associated pages are gathered from the backing storage
2336 * and pinned into our object. i915_gem_object_get_pages() may be called
2337 * multiple times before they are released by a single call to
2338 * i915_gem_object_put_pages() - once the pages are no longer referenced
2339 * either as a result of memory pressure (reaping pages under the shrinker)
2340 * or as the object is itself released.
2343 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2345 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2346 const struct drm_i915_gem_object_ops *ops = obj->ops;
2352 if (obj->madv != I915_MADV_WILLNEED) {
2353 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2357 BUG_ON(obj->pages_pin_count);
2359 ret = ops->get_pages(obj);
2363 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2365 obj->get_page.sg = obj->pages->sgl;
2366 obj->get_page.last = 0;
2371 /* The 'mapping' part of i915_gem_object_pin_map() below */
2372 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2373 enum i915_map_type type)
2375 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2376 struct sg_table *sgt = obj->pages;
2377 struct sgt_iter sgt_iter;
2379 struct page *stack_pages[32];
2380 struct page **pages = stack_pages;
2381 unsigned long i = 0;
2385 /* A single page can always be kmapped */
2386 if (n_pages == 1 && type == I915_MAP_WB)
2387 return kmap(sg_page(sgt->sgl));
2389 if (n_pages > ARRAY_SIZE(stack_pages)) {
2390 /* Too big for stack -- allocate temporary array instead */
2391 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2396 for_each_sgt_page(page, sgt_iter, sgt)
2399 /* Check that we have the expected number of pages */
2400 GEM_BUG_ON(i != n_pages);
2404 pgprot = PAGE_KERNEL;
2407 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2410 addr = vmap(pages, n_pages, 0, pgprot);
2412 if (pages != stack_pages)
2413 drm_free_large(pages);
2418 /* get, pin, and map the pages of the object into kernel space */
2419 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2420 enum i915_map_type type)
2422 enum i915_map_type has_type;
2427 lockdep_assert_held(&obj->base.dev->struct_mutex);
2428 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2430 ret = i915_gem_object_get_pages(obj);
2432 return ERR_PTR(ret);
2434 i915_gem_object_pin_pages(obj);
2435 pinned = obj->pages_pin_count > 1;
2437 ptr = ptr_unpack_bits(obj->mapping, has_type);
2438 if (ptr && has_type != type) {
2444 if (is_vmalloc_addr(ptr))
2447 kunmap(kmap_to_page(ptr));
2449 ptr = obj->mapping = NULL;
2453 ptr = i915_gem_object_map(obj, type);
2459 obj->mapping = ptr_pack_bits(ptr, type);
2465 i915_gem_object_unpin_pages(obj);
2466 return ERR_PTR(ret);
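/*
 * Illustrative usage of the mapping API (a sketch; "data" and "len" are
 * hypothetical, and struct_mutex is assumed to be held): pin and map the
 * object write-back cached, fill it from the CPU, then drop the mapping
 * reference.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */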
2470 i915_gem_object_retire__write(struct i915_gem_active *active,
2471 struct drm_i915_gem_request *request)
2473 struct drm_i915_gem_object *obj =
2474 container_of(active, struct drm_i915_gem_object, last_write);
2476 intel_fb_obj_flush(obj, true, ORIGIN_CS);
2480 i915_gem_object_retire__read(struct i915_gem_active *active,
2481 struct drm_i915_gem_request *request)
2483 int idx = request->engine->id;
2484 struct drm_i915_gem_object *obj =
2485 container_of(active, struct drm_i915_gem_object, last_read[idx]);
2487 GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
2489 i915_gem_object_clear_active(obj, idx);
2490 if (i915_gem_object_is_active(obj))
2493 /* Bump our place on the bound list to keep it roughly in LRU order
2494 * so that we don't steal from recently used but inactive objects
2495 * (unless we are forced to, of course!)
2497 if (obj->bind_count)
2498 list_move_tail(&obj->global_list,
2499 &request->i915->mm.bound_list);
2501 i915_gem_object_put(obj);
2504 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2506 unsigned long elapsed;
2508 if (ctx->hang_stats.banned)
2511 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2512 if (ctx->hang_stats.ban_period_seconds &&
2513 elapsed <= ctx->hang_stats.ban_period_seconds) {
2514 DRM_DEBUG("context hanging too fast, banning!\n");
2521 static void i915_set_reset_status(struct i915_gem_context *ctx,
2524 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2527 hs->banned = i915_context_is_banned(ctx);
2529 hs->guilty_ts = get_seconds();
2531 hs->batch_pending++;
2535 struct drm_i915_gem_request *
2536 i915_gem_find_active_request(struct intel_engine_cs *engine)
2538 struct drm_i915_gem_request *request;
2540 /* We are called by the error capture and reset at a random
2541 * point in time. In particular, note that neither is crucially
2542 * ordered with an interrupt. After a hang, the GPU is dead and we
2543 * assume that no more writes can happen (we waited long enough for
2544 * all writes that were in transaction to be flushed) - adding an
2545 * extra delay for a recent interrupt is pointless. Hence, we do
2546 * not need an engine->irq_seqno_barrier() before the seqno reads.
2548 list_for_each_entry(request, &engine->request_list, link) {
2549 if (i915_gem_request_completed(request))
2552 if (!i915_sw_fence_done(&request->submit))
2561 static void reset_request(struct drm_i915_gem_request *request)
2563 void *vaddr = request->ring->vaddr;
2566 /* As this request likely depends on state from the lost
2567 * context, clear out all the user operations leaving the
2568 * breadcrumb at the end (so we get the fence notifications).
2570 head = request->head;
2571 if (request->postfix < head) {
2572 memset(vaddr + head, 0, request->ring->size - head);
2575 memset(vaddr + head, 0, request->postfix - head);
2578 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2580 struct drm_i915_gem_request *request;
2581 struct i915_gem_context *incomplete_ctx;
2584 /* Ensure irq handler finishes, and not run again. */
2585 tasklet_kill(&engine->irq_tasklet);
2586 if (engine->irq_seqno_barrier)
2587 engine->irq_seqno_barrier(engine);
2589 request = i915_gem_find_active_request(engine);
2593 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2594 i915_set_reset_status(request->ctx, ring_hung);
2598 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2599 engine->name, request->fence.seqno);
2601 /* Setup the CS to resume from the breadcrumb of the hung request */
2602 engine->reset_hw(engine, request);
2604 /* Users of the default context do not rely on logical state
2605 * preserved between batches. They have to emit full state on
2606 * every batch and so it is safe to execute queued requests following
2609 * Other contexts preserve state, now corrupt. We want to skip all
2610 * queued requests that reference the corrupt context.
2612 incomplete_ctx = request->ctx;
2613 if (i915_gem_context_is_default(incomplete_ctx))
2616 list_for_each_entry_continue(request, &engine->request_list, link)
2617 if (request->ctx == incomplete_ctx)
2618 reset_request(request);
2621 void i915_gem_reset(struct drm_i915_private *dev_priv)
2623 struct intel_engine_cs *engine;
2625 i915_gem_retire_requests(dev_priv);
2627 for_each_engine(engine, dev_priv)
2628 i915_gem_reset_engine(engine);
2630 i915_gem_restore_fences(&dev_priv->drm);
2632 if (dev_priv->gt.awake) {
2633 intel_sanitize_gt_powersave(dev_priv);
2634 intel_enable_gt_powersave(dev_priv);
2635 if (INTEL_GEN(dev_priv) >= 6)
2636 gen6_rps_busy(dev_priv);
2640 static void nop_submit_request(struct drm_i915_gem_request *request)
2644 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2646 engine->submit_request = nop_submit_request;
2648 /* Mark all pending requests as complete so that any concurrent
2649 * (lockless) lookup doesn't try and wait upon the request as we
2652 intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2655 * Clear the execlists queue up before freeing the requests, as those
2656 * are the ones that keep the context and ringbuffer backing objects
2660 if (i915.enable_execlists) {
2661 spin_lock(&engine->execlist_lock);
2662 INIT_LIST_HEAD(&engine->execlist_queue);
2663 i915_gem_request_put(engine->execlist_port[0].request);
2664 i915_gem_request_put(engine->execlist_port[1].request);
2665 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2666 spin_unlock(&engine->execlist_lock);
2669 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2672 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2674 struct intel_engine_cs *engine;
2676 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2677 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2679 i915_gem_context_lost(dev_priv);
2680 for_each_engine(engine, dev_priv)
2681 i915_gem_cleanup_engine(engine);
2682 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2684 i915_gem_retire_requests(dev_priv);
2688 i915_gem_retire_work_handler(struct work_struct *work)
2690 struct drm_i915_private *dev_priv =
2691 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2692 struct drm_device *dev = &dev_priv->drm;
2694 /* Come back later if the device is busy... */
2695 if (mutex_trylock(&dev->struct_mutex)) {
2696 i915_gem_retire_requests(dev_priv);
2697 mutex_unlock(&dev->struct_mutex);
2700 /* Keep the retire handler running until we are finally idle.
2701 * We do not need to do this test under locking as in the worst-case
2702 * we queue the retire worker once too often.
2704 if (READ_ONCE(dev_priv->gt.awake)) {
2705 i915_queue_hangcheck(dev_priv);
2706 queue_delayed_work(dev_priv->wq,
2707 &dev_priv->gt.retire_work,
2708 round_jiffies_up_relative(HZ));
2713 i915_gem_idle_work_handler(struct work_struct *work)
2715 struct drm_i915_private *dev_priv =
2716 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2717 struct drm_device *dev = &dev_priv->drm;
2718 struct intel_engine_cs *engine;
2719 bool rearm_hangcheck;
2721 if (!READ_ONCE(dev_priv->gt.awake))
2724 if (READ_ONCE(dev_priv->gt.active_engines))
2728 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2730 if (!mutex_trylock(&dev->struct_mutex)) {
2731 /* Currently busy, come back later */
2732 mod_delayed_work(dev_priv->wq,
2733 &dev_priv->gt.idle_work,
2734 msecs_to_jiffies(50));
2738 if (dev_priv->gt.active_engines)
2741 for_each_engine(engine, dev_priv)
2742 i915_gem_batch_pool_fini(&engine->batch_pool);
2744 GEM_BUG_ON(!dev_priv->gt.awake);
2745 dev_priv->gt.awake = false;
2746 rearm_hangcheck = false;
2748 if (INTEL_GEN(dev_priv) >= 6)
2749 gen6_rps_idle(dev_priv);
2750 intel_runtime_pm_put(dev_priv);
2752 mutex_unlock(&dev->struct_mutex);
2755 if (rearm_hangcheck) {
2756 GEM_BUG_ON(!dev_priv->gt.awake);
2757 i915_queue_hangcheck(dev_priv);
2761 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2763 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2764 struct drm_i915_file_private *fpriv = file->driver_priv;
2765 struct i915_vma *vma, *vn;
2767 mutex_lock(&obj->base.dev->struct_mutex);
2768 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2769 if (vma->vm->file == fpriv)
2770 i915_vma_close(vma);
2771 mutex_unlock(&obj->base.dev->struct_mutex);
2775 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2776 * @dev: drm device pointer
2777 * @data: ioctl data blob
2778 * @file: drm file pointer
2780 * Returns 0 if successful, else an error is returned with the remaining time in
2781 * the timeout parameter.
2782 * -ETIME: object is still busy after timeout
2783 * -ERESTARTSYS: signal interrupted the wait
2784 * -ENOENT: object doesn't exist
2785 * Also possible, but rare:
2786 * -EAGAIN: GPU wedged
2788 * -ENODEV: Internal IRQ fail
2789 * -E?: The add request failed
2791 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2792 * non-zero timeout parameter the wait ioctl will wait for the given number of
2793 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2794 * without holding struct_mutex the object may become re-busied before this
2795 * function completes. A similar but shorter race condition exists in the busy
2799 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2801 struct drm_i915_gem_wait *args = data;
2802 struct intel_rps_client *rps = to_rps_client(file);
2803 struct drm_i915_gem_object *obj;
2804 unsigned long active;
2807 if (args->flags != 0)
2810 obj = i915_gem_object_lookup(file, args->bo_handle);
2814 active = __I915_BO_ACTIVE(obj);
2815 for_each_active(active, idx) {
2816 s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
2817 ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
2818 I915_WAIT_INTERRUPTIBLE,
2824 i915_gem_object_put_unlocked(obj);
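/*
 * Userspace usage sketch (illustrative only; assumes an open i915 fd, a
 * GEM handle and a libdrm-style drmIoctl() wrapper). A zero timeout turns
 * the call into a non-blocking busy query, as described above.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		the object is idle
 *	else if (errno == ETIME)
 *		the object is still busy; with a non-zero timeout,
 *		wait.timeout_ns is updated with the remaining time
 */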
2828 static void __i915_vma_iounmap(struct i915_vma *vma)
2830 GEM_BUG_ON(i915_vma_is_pinned(vma));
2832 if (vma->iomap == NULL)
2835 io_mapping_unmap(vma->iomap);
2839 int i915_vma_unbind(struct i915_vma *vma)
2841 struct drm_i915_gem_object *obj = vma->obj;
2842 unsigned long active;
2845 /* First wait upon any activity as retiring the request may
2846 * have side-effects such as unpinning or even unbinding this vma.
2848 active = i915_vma_get_active(vma);
2852 /* When a closed VMA is retired, it is unbound - eek.
2853 * In order to prevent it from being recursively closed,
2854 * take a pin on the vma so that the second unbind is
2857 __i915_vma_pin(vma);
2859 for_each_active(active, idx) {
2860 ret = i915_gem_active_retire(&vma->last_read[idx],
2861 &vma->vm->dev->struct_mutex);
2866 __i915_vma_unpin(vma);
2870 GEM_BUG_ON(i915_vma_is_active(vma));
2873 if (i915_vma_is_pinned(vma))
2876 if (!drm_mm_node_allocated(&vma->node))
2879 GEM_BUG_ON(obj->bind_count == 0);
2880 GEM_BUG_ON(!obj->pages);
2882 if (i915_vma_is_map_and_fenceable(vma)) {
2883 /* release the fence reg _after_ flushing */
2884 ret = i915_vma_put_fence(vma);
2888 /* Force a pagefault for domain tracking on next user access */
2889 i915_gem_release_mmap(obj);
2891 __i915_vma_iounmap(vma);
2892 vma->flags &= ~I915_VMA_CAN_FENCE;
2895 if (likely(!vma->vm->closed)) {
2896 trace_i915_vma_unbind(vma);
2897 vma->vm->unbind_vma(vma);
2899 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
2901 drm_mm_remove_node(&vma->node);
2902 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2904 if (vma->pages != obj->pages) {
2905 GEM_BUG_ON(!vma->pages);
2906 sg_free_table(vma->pages);
2911 /* Since the unbound list is global, only move to that list if
2912 * no more VMAs exist. */
2913 if (--obj->bind_count == 0)
2914 list_move_tail(&obj->global_list,
2915 &to_i915(obj->base.dev)->mm.unbound_list);
2917 /* And finally now the object is completely decoupled from this vma,
2918 * we can drop its hold on the backing storage and allow it to be
2919 * reaped by the shrinker.
2921 i915_gem_object_unpin_pages(obj);
2924 if (unlikely(i915_vma_is_closed(vma)))
2925 i915_vma_destroy(vma);
2930 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
2933 struct intel_engine_cs *engine;
2936 for_each_engine(engine, dev_priv) {
2937 if (engine->last_context == NULL)
2940 ret = intel_engine_idle(engine, flags);
2948 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2949 unsigned long cache_level)
2951 struct drm_mm_node *gtt_space = &vma->node;
2952 struct drm_mm_node *other;
2955 * On some machines we have to be careful when putting differing types
2956 * of snoopable memory together to avoid the prefetcher crossing memory
2957 * domains and dying. During vm initialisation, we decide whether or not
2958 * these constraints apply and set the drm_mm.color_adjust
2961 if (vma->vm->mm.color_adjust == NULL)
2964 if (!drm_mm_node_allocated(gtt_space))
2967 if (list_empty(&gtt_space->node_list))
2970 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2971 if (other->allocated && !other->hole_follows && other->color != cache_level)
2974 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2975 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2982 * i915_vma_insert - finds a slot for the vma in its address space
2984 * @size: requested size in bytes (can be larger than the VMA)
2985 * @alignment: required alignment
2986 * @flags: mask of PIN_* flags to use
2988 * First we try to allocate some free space that meets the requirements for
2989 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
2990 * preferably the oldest idle entry to make room for the new VMA.
2993 * 0 on success, negative error code otherwise.
2996 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
2998 struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
2999 struct drm_i915_gem_object *obj = vma->obj;
3003 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
3004 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
3006 size = max(size, vma->size);
3007 if (flags & PIN_MAPPABLE)
3008 size = i915_gem_get_ggtt_size(dev_priv, size,
3009 i915_gem_object_get_tiling(obj));
3011 alignment = max(max(alignment, vma->display_alignment),
3012 i915_gem_get_ggtt_alignment(dev_priv, size,
3013 i915_gem_object_get_tiling(obj),
3014 flags & PIN_MAPPABLE));
3016 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3018 end = vma->vm->total;
3019 if (flags & PIN_MAPPABLE)
3020 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
3021 if (flags & PIN_ZONE_4G)
3022 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3024 /* If binding the object/GGTT view requires more space than the entire
3025 * aperture has, reject it early before evicting everything in a vain
3026 * attempt to find space.
3029 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3030 size, obj->base.size,
3031 flags & PIN_MAPPABLE ? "mappable" : "total",
3036 ret = i915_gem_object_get_pages(obj);
3040 i915_gem_object_pin_pages(obj);
3042 if (flags & PIN_OFFSET_FIXED) {
3043 u64 offset = flags & PIN_OFFSET_MASK;
3044 if (offset & (alignment - 1) || offset > end - size) {
3049 vma->node.start = offset;
3050 vma->node.size = size;
3051 vma->node.color = obj->cache_level;
3052 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3054 ret = i915_gem_evict_for_vma(vma);
3056 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3061 u32 search_flag, alloc_flag;
3063 if (flags & PIN_HIGH) {
3064 search_flag = DRM_MM_SEARCH_BELOW;
3065 alloc_flag = DRM_MM_CREATE_TOP;
3067 search_flag = DRM_MM_SEARCH_DEFAULT;
3068 alloc_flag = DRM_MM_CREATE_DEFAULT;
3071 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3072 * so we know that we always have a minimum alignment of 4096.
3073 * The drm_mm range manager is optimised to return results
3074 * with zero alignment, so where possible use the optimal
3077 if (alignment <= 4096)
3081 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3089 ret = i915_gem_evict_something(vma->vm, size, alignment,
3099 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3101 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3102 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3108 i915_gem_object_unpin_pages(obj);
3113 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3116 /* If we don't have a page list set up, then we're not pinned
3117 * to GPU, and we can ignore the cache flush because it'll happen
3118 * again at bind time.
3120 if (obj->pages == NULL)
3124 * Stolen memory is always coherent with the GPU as it is explicitly
3125 * marked as wc by the system, or the system is cache-coherent.
3127 if (obj->stolen || obj->phys_handle)
3130 /* If the GPU is snooping the contents of the CPU cache,
3131 * we do not need to manually clear the CPU cache lines. However,
3132 * the caches are only snooped when the render cache is
3133 * flushed/invalidated. As we always have to emit invalidations
3134 * and flushes when moving into and out of the RENDER domain, correct
3135 * snooping behaviour occurs naturally as the result of our domain
3138 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3139 obj->cache_dirty = true;
3143 trace_i915_gem_object_clflush(obj);
3144 drm_clflush_sg(obj->pages);
3145 obj->cache_dirty = false;
3150 /** Flushes the GTT write domain for the object if it's dirty. */
3152 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3154 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3156 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3159 /* No actual flushing is required for the GTT write domain. Writes
3160 * to it "immediately" go to main memory as far as we know, so there's
3161 * no chipset flush. It also doesn't land in render cache.
3163 * However, we do have to enforce the order so that all writes through
3164 * the GTT land before any writes to the device, such as updates to
3167 * We also have to wait a bit for the writes to land from the GTT.
3168 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3169 * timing. This issue has only been observed when switching quickly
3170 * between GTT writes and CPU reads from inside the kernel on recent hw,
3171 * and it appears to only affect discrete GTT blocks (i.e. on LLC
3172 * system agents we cannot reproduce this behaviour).
3175 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3176 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS].mmio_base));
3178 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3180 obj->base.write_domain = 0;
3181 trace_i915_gem_object_change_domain(obj,
3182 obj->base.read_domains,
3183 I915_GEM_DOMAIN_GTT);
3186 /** Flushes the CPU write domain for the object if it's dirty. */
3188 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3190 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3193 if (i915_gem_clflush_object(obj, obj->pin_display))
3194 i915_gem_chipset_flush(to_i915(obj->base.dev));
3196 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3198 obj->base.write_domain = 0;
3199 trace_i915_gem_object_change_domain(obj,
3200 obj->base.read_domains,
3201 I915_GEM_DOMAIN_CPU);
3204 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
3206 struct i915_vma *vma;
3208 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3209 if (!i915_vma_is_ggtt(vma))
3212 if (i915_vma_is_active(vma))
3215 if (!drm_mm_node_allocated(&vma->node))
3218 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3223 * Moves a single object to the GTT read, and possibly write domain.
3224 * @obj: object to act on
3225 * @write: ask for write access or read only
3227 * This function returns when the move is complete, including waiting on
3231 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3233 uint32_t old_write_domain, old_read_domains;
3236 ret = i915_gem_object_wait_rendering(obj, !write);
3240 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3243 /* Flush and acquire obj->pages so that we are coherent through
3244 * direct access in memory with previous cached writes through
3245 * shmemfs and that our cache domain tracking remains valid.
3246 * For example, if the obj->filp was moved to swap without us
3247 * being notified and releasing the pages, we would mistakenly
3248 * continue to assume that the obj remained out of the CPU cached
3251 ret = i915_gem_object_get_pages(obj);
3255 i915_gem_object_flush_cpu_write_domain(obj);
3257 /* Serialise direct access to this object with the barriers for
3258 * coherent writes from the GPU, by effectively invalidating the
3259 * GTT domain upon first access.
3261 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3264 old_write_domain = obj->base.write_domain;
3265 old_read_domains = obj->base.read_domains;
3267 /* It should now be out of any other write domains, and we can update
3268 * the domain values for our changes.
3270 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3271 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3273 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3274 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3278 trace_i915_gem_object_change_domain(obj,
3282 /* And bump the LRU for this access */
3283 i915_gem_object_bump_inactive_ggtt(obj);
3289 * Changes the cache-level of an object across all VMA.
3290 * @obj: object to act on
3291 * @cache_level: new cache level to set for the object
3293 * After this function returns, the object will be in the new cache-level
3294 * across all GTT and the contents of the backing storage will be coherent,
3295 * with respect to the new cache-level. In order to keep the backing storage
3296 * coherent for all users, we only allow a single cache level to be set
3297 * globally on the object and prevent it from being changed whilst the
3298 * hardware is reading from the object. That is if the object is currently
3299 * on the scanout it will be set to uncached (or equivalent display
3300 * cache coherency) and all non-MOCS GPU access will also be uncached so
3301 * that all direct access to the scanout remains coherent.
3303 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3304 enum i915_cache_level cache_level)
3306 struct i915_vma *vma;
3309 if (obj->cache_level == cache_level)
3312 /* Inspect the list of currently bound VMA and unbind any that would
3313 * be invalid given the new cache-level. This is principally to
3314 * catch the issue of the CS prefetch crossing page boundaries and
3315 * reading an invalid PTE on older architectures.
3318 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3319 if (!drm_mm_node_allocated(&vma->node))
3322 if (i915_vma_is_pinned(vma)) {
3323 DRM_DEBUG("can not change the cache level of pinned objects\n");
3327 if (i915_gem_valid_gtt_space(vma, cache_level))
3330 ret = i915_vma_unbind(vma);
3334 /* As unbinding may affect other elements in the
3335 * obj->vma_list (due to side-effects from retiring
3336 * an active vma), play safe and restart the iterator.
3341 /* We can reuse the existing drm_mm nodes but need to change the
3342 * cache-level on the PTE. We could simply unbind them all and
3343 * rebind with the correct cache-level on next use. However since
3344 * we already have a valid slot, dma mapping, pages etc, we may as well
3345 * rewrite the PTE in the belief that doing so tramples upon less
3346 * state and so involves less work.
3348 if (obj->bind_count) {
3349 /* Before we change the PTE, the GPU must not be accessing it.
3350 * If we wait upon the object, we know that all the bound
3351 * VMA are no longer active.
3353 ret = i915_gem_object_wait_rendering(obj, false);
3357 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3358 /* Access to snoopable pages through the GTT is
3359 * incoherent and on some machines causes a hard
3360 * lockup. Relinquish the CPU mmapping to force
3361 * userspace to refault in the pages and we can
3362 * then double check if the GTT mapping is still
3363 * valid for that pointer access.
3365 i915_gem_release_mmap(obj);
3367 /* As we no longer need a fence for GTT access,
3368 * we can relinquish it now (and so prevent having
3369 * to steal a fence from someone else on the next
3370 * fence request). Note GPU activity would have
3371 * dropped the fence as all snoopable access is
3372 * supposed to be linear.
3374 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3375 ret = i915_vma_put_fence(vma);
3380 /* We either have incoherent backing store and
3381 * so no GTT access or the architecture is fully
3382 * coherent. In such cases, existing GTT mmaps
3383 * ignore the cache bit in the PTE and we can
3384 * rewrite it without confusing the GPU or having
3385 * to force userspace to fault back in its mmaps.
3389 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3390 if (!drm_mm_node_allocated(&vma->node))
3393 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3399 list_for_each_entry(vma, &obj->vma_list, obj_link)
3400 vma->node.color = cache_level;
3401 obj->cache_level = cache_level;
3404 /* Flush the dirty CPU caches to the backing storage so that the
3405 * object is now coherent at its new cache level (with respect
3406 * to the access domain).
3408 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3409 if (i915_gem_clflush_object(obj, true))
3410 i915_gem_chipset_flush(to_i915(obj->base.dev));
3416 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3417 struct drm_file *file)
3419 struct drm_i915_gem_caching *args = data;
3420 struct drm_i915_gem_object *obj;
3422 obj = i915_gem_object_lookup(file, args->handle);
3426 switch (obj->cache_level) {
3427 case I915_CACHE_LLC:
3428 case I915_CACHE_L3_LLC:
3429 args->caching = I915_CACHING_CACHED;
3433 args->caching = I915_CACHING_DISPLAY;
3437 args->caching = I915_CACHING_NONE;
3441 i915_gem_object_put_unlocked(obj);
3445 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3446 struct drm_file *file)
3448 struct drm_i915_private *dev_priv = to_i915(dev);
3449 struct drm_i915_gem_caching *args = data;
3450 struct drm_i915_gem_object *obj;
3451 enum i915_cache_level level;
3454 switch (args->caching) {
3455 case I915_CACHING_NONE:
3456 level = I915_CACHE_NONE;
3458 case I915_CACHING_CACHED:
3460 * Due to a HW issue on BXT A stepping, GPU stores via a
3461 * snooped mapping may leave stale data in a corresponding CPU
3462 * cacheline, whereas normally such cachelines would get
3465 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3468 level = I915_CACHE_LLC;
3470 case I915_CACHING_DISPLAY:
3471 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3477 intel_runtime_pm_get(dev_priv);
3479 ret = i915_mutex_lock_interruptible(dev);
3483 obj = i915_gem_object_lookup(file, args->handle);
3489 ret = i915_gem_object_set_cache_level(obj, level);
3491 i915_gem_object_put(obj);
3493 mutex_unlock(&dev->struct_mutex);
3495 intel_runtime_pm_put(dev_priv);
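/*
 * Userspace usage sketch (illustrative only; "fd" and "handle" are assumed,
 * as is a libdrm-style drmIoctl() wrapper): request snooped (LLC) caching
 * for an object. On hardware with neither LLC nor snooping the request is
 * rejected, per the check above.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		fall back to uncached access
 */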
3501 * Prepare buffer for display plane (scanout, cursors, etc).
3502 * Can be called from an uninterruptible phase (modesetting) and allows
3503 * any flushes to be pipelined (for pageflips).
3506 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3508 const struct i915_ggtt_view *view)
3510 struct i915_vma *vma;
3511 u32 old_read_domains, old_write_domain;
3514 /* Mark the pin_display early so that we account for the
3515 * display coherency whilst setting up the cache domains.
3519 /* The display engine is not coherent with the LLC cache on gen6. As
3520 * a result, we make sure that the pinning that is about to occur is
3521 * done with uncached PTEs. This is lowest common denominator for all
3524 * However for gen6+, we could do better by using the GFDT bit instead
3525 * of uncaching, which would allow us to flush all the LLC-cached data
3526 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3528 ret = i915_gem_object_set_cache_level(obj,
3529 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3532 goto err_unpin_display;
3535 /* As the user may map the buffer once pinned in the display plane
3536 * (e.g. libkms for the bootup splash), we have to ensure that we
3537 * always use map_and_fenceable for all scanout buffers. However,
3538 * it may simply be too big to fit into mappable, in which case
3539 * put it anyway and hope that userspace can cope (but always first
3540 * try to preserve the existing ABI).
3542 vma = ERR_PTR(-ENOSPC);
3543 if (view->type == I915_GGTT_VIEW_NORMAL)
3544 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3545 PIN_MAPPABLE | PIN_NONBLOCK);
3547 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
3549 goto err_unpin_display;
3551 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3553 WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
3555 i915_gem_object_flush_cpu_write_domain(obj);
3557 old_write_domain = obj->base.write_domain;
3558 old_read_domains = obj->base.read_domains;
3560 /* It should now be out of any other write domains, and we can update
3561 * the domain values for our changes.
3563 obj->base.write_domain = 0;
3564 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3566 trace_i915_gem_object_change_domain(obj,
3578 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3580 if (WARN_ON(vma->obj->pin_display == 0))
3583 if (--vma->obj->pin_display == 0)
3584 vma->display_alignment = 0;
3586 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3587 if (!i915_vma_is_active(vma))
3588 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3590 i915_vma_unpin(vma);
3591 WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
3595 * Moves a single object to the CPU read, and possibly write domain.
3596 * @obj: object to act on
3597 * @write: requesting write or read-only access
3599 * This function returns when the move is complete, including waiting on
3603 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3605 uint32_t old_write_domain, old_read_domains;
3608 ret = i915_gem_object_wait_rendering(obj, !write);
3612 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3615 i915_gem_object_flush_gtt_write_domain(obj);
3617 old_write_domain = obj->base.write_domain;
3618 old_read_domains = obj->base.read_domains;
3620 /* Flush the CPU cache if it's still invalid. */
3621 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3622 i915_gem_clflush_object(obj, false);
3624 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3627 /* It should now be out of any other write domains, and we can update
3628 * the domain values for our changes.
3630 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3632 /* If we're writing through the CPU, then the GPU read domains will
3633 * need to be invalidated at next use.
3636 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3637 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3640 trace_i915_gem_object_change_domain(obj,
3647 /* Throttle our rendering by waiting until the ring has completed our requests
3648 * emitted over 20 msec ago.
3650 * Note that if we were to use the current jiffies each time around the loop,
3651 * we wouldn't escape the function with any frames outstanding if the time to
3652 * render a frame was over 20ms.
3654 * This should get us reasonable parallelism between CPU and GPU but also
3655 * relatively low latency when blocking on a particular request to finish.
3658 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3660 struct drm_i915_private *dev_priv = to_i915(dev);
3661 struct drm_i915_file_private *file_priv = file->driver_priv;
3662 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3663 struct drm_i915_gem_request *request, *target = NULL;
3666 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3670 /* ABI: return -EIO if already wedged */
3671 if (i915_terminally_wedged(&dev_priv->gpu_error))
3674 spin_lock(&file_priv->mm.lock);
3675 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3676 if (time_after_eq(request->emitted_jiffies, recent_enough))
3680 * Note that the request might not have been submitted yet,
3681 * in which case emitted_jiffies will be zero.
3683 if (!request->emitted_jiffies)
3689 i915_gem_request_get(target);
3690 spin_unlock(&file_priv->mm.lock);
3695 ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
3696 i915_gem_request_put(target);
3702 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3704 if (!drm_mm_node_allocated(&vma->node))
3707 if (vma->node.size < size)
3710 if (alignment && vma->node.start & (alignment - 1))
3713 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
3716 if (flags & PIN_OFFSET_BIAS &&
3717 vma->node.start < (flags & PIN_OFFSET_MASK))
3720 if (flags & PIN_OFFSET_FIXED &&
3721 vma->node.start != (flags & PIN_OFFSET_MASK))
3727 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3729 struct drm_i915_gem_object *obj = vma->obj;
3730 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3731 bool mappable, fenceable;
3732 u32 fence_size, fence_alignment;
3734 fence_size = i915_gem_get_ggtt_size(dev_priv,
3736 i915_gem_object_get_tiling(obj));
3737 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3739 i915_gem_object_get_tiling(obj),
3742 fenceable = (vma->node.size == fence_size &&
3743 (vma->node.start & (fence_alignment - 1)) == 0);
3745 mappable = (vma->node.start + fence_size <=
3746 dev_priv->ggtt.mappable_end);
3748 if (mappable && fenceable)
3749 vma->flags |= I915_VMA_CAN_FENCE;
3751 vma->flags &= ~I915_VMA_CAN_FENCE;
3754 int __i915_vma_do_pin(struct i915_vma *vma,
3755 u64 size, u64 alignment, u64 flags)
3757 unsigned int bound = vma->flags;
3760 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
3761 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
3763 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
3768 if ((bound & I915_VMA_BIND_MASK) == 0) {
3769 ret = i915_vma_insert(vma, size, alignment, flags);
3774 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
3778 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
3779 __i915_vma_set_map_and_fenceable(vma);
3781 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3785 __i915_vma_unpin(vma);
3790 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3791 const struct i915_ggtt_view *view,
3796 struct i915_address_space *vm = &to_i915(obj->base.dev)->ggtt.base;
3797 struct i915_vma *vma;
3800 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
3804 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3805 if (flags & PIN_NONBLOCK &&
3806 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3807 return ERR_PTR(-ENOSPC);
3809 WARN(i915_vma_is_pinned(vma),
3810 "bo is already pinned in ggtt with incorrect alignment:"
3811 " offset=%08x, req.alignment=%llx,"
3812 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3813 i915_ggtt_offset(vma), alignment,
3814 !!(flags & PIN_MAPPABLE),
3815 i915_vma_is_map_and_fenceable(vma));
3816 ret = i915_vma_unbind(vma);
3818 return ERR_PTR(ret);
3821 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3823 return ERR_PTR(ret);
3828 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3830 /* Note that we could alias engines in the execbuf API, but
3831 * that would be very unwise as it prevents userspace from
3832 * fine control over engine selection. Ahem.
3834 * This should be something like EXEC_MAX_ENGINE instead of
3837 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3838 return 0x10000 << id;
3841 static __always_inline unsigned int __busy_write_id(unsigned int id)
3843 /* The uABI guarantees an active writer is also amongst the read
3844 * engines. This would be true if we accessed the activity tracking
3845 * under the lock, but as we perform the lookup of the object and
3846 * its activity locklessly we can not guarantee that the last_write
3847 * being active implies that we have set the same engine flag from
3848 * last_read - hence we always set both read and write busy for
3851 return id | __busy_read_flag(id);
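/*
 * Worked example of the encoding above: an object whose last write (and a
 * still outstanding read) is on the engine with exec_id 1 reports
 *
 *	__busy_write_id(1) = 1 | (0x10000 << 1) = 0x00020001
 *
 * i.e. the writer's id in the low 16 bits and that engine's read flag in
 * the high 16 bits.
 */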
3854 static __always_inline unsigned int
3855 __busy_set_if_active(const struct i915_gem_active *active,
3856 unsigned int (*flag)(unsigned int id))
3858 struct drm_i915_gem_request *request;
3860 request = rcu_dereference(active->request);
3861 if (!request || i915_gem_request_completed(request))
3864 /* This is racy. See __i915_gem_active_get_rcu() for a detailed
3865 * discussion of how to handle the race correctly, but for reporting
3866 * the busy state we err on the side of potentially reporting the
3867 * wrong engine as being busy (but we guarantee that the result
3868 * is at least self-consistent).
3870 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
3871 * whilst we are inspecting it, even under the RCU read lock as we are.
3872 * This means that there is a small window for the engine and/or the
3873 * seqno to have been overwritten. The seqno will always be in the
3874 * future compared to the intended, and so we know that if that
3875 * seqno is idle (on whatever engine) our request is idle and the
3876 * return 0 above is correct.
3878 * The issue is that if the engine is switched, it is just as likely
3879 * to report that it is busy (but since the switch happened, we know
3880 * the request should be idle). So there is a small chance that a busy
3881 * result is actually the wrong engine.
3883 * So why don't we care?
3885 * For starters, the busy ioctl is a heuristic that is by definition
3886 * racy. Even with perfect serialisation in the driver, the hardware
3887 * state is constantly advancing - the state we report to the user
3890 * The critical information for the busy-ioctl is whether the object
3891 * is idle as userspace relies on that to detect whether its next
3892 * access will stall, or if it has missed submitting commands to
3893 * the hardware allowing the GPU to stall. We never generate a
3894 * false-positive for idleness, thus busy-ioctl is reliable at the
3895 * most fundamental level, and we maintain the guarantee that a
3896 * busy object left to itself will eventually become idle (and stay
3899 * We allow ourselves the leeway of potentially misreporting the busy
3900 * state because that is an optimisation heuristic that is constantly
3901 * in flux. Being quickly able to detect the busy/idle state is much
3902 * more important than accurate logging of exactly which engines were
3905 * For accuracy in reporting the engine, we could use
3908 * request = __i915_gem_active_get_rcu(active);
3910 * if (!i915_gem_request_completed(request))
3911 * result = flag(request->engine->exec_id);
3912 * i915_gem_request_put(request);
3915 * but that still remains susceptible to both hardware and userspace
3916 * races. So we accept making the result of that race slightly worse,
3917 * given the rarity of the race and its low impact on the result.
3919 return flag(READ_ONCE(request->engine->exec_id));
3922 static __always_inline unsigned int
3923 busy_check_reader(const struct i915_gem_active *active)
3925 return __busy_set_if_active(active, __busy_read_flag);
3928 static __always_inline unsigned int
3929 busy_check_writer(const struct i915_gem_active *active)
3931 return __busy_set_if_active(active, __busy_write_id);
3935 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3936 struct drm_file *file)
3938 struct drm_i915_gem_busy *args = data;
3939 struct drm_i915_gem_object *obj;
3940 unsigned long active;
3942 obj = i915_gem_object_lookup(file, args->handle);
3947 active = __I915_BO_ACTIVE(obj);
3951 /* Yes, the lookups are intentionally racy.
3953 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
3954 * to regard the value as stale and as our ABI guarantees
3955 * forward progress, we confirm the status of each active
3956 * request with the hardware.
3958 * Even though we guard the pointer lookup by RCU, that only
3959 * guarantees that the pointer and its contents remain
3960 * dereferencable and does *not* mean that the request we
3961 * have is the same as the one being tracked by the object.
3963 * Consider that we lookup the request just as it is being
3964 * retired and freed. We take a local copy of the pointer,
3965 * but before we add its engine into the busy set, the other
3966 * thread reallocates it and assigns it to a task on another
3967 * engine with a fresh and incomplete seqno. Guarding against
3968 * that requires careful serialisation and reference counting,
3969 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
3970 * instead we expect that if the result is busy, which engines
3971 * are busy is not completely reliable - we only guarantee
3972 * that the object was busy.
3976 for_each_active(active, idx)
3977 args->busy |= busy_check_reader(&obj->last_read[idx]);
3979 /* For ABI sanity, we only care that the write engine is in
3980 * the set of read engines. This should be ensured by the
3981 * ordering of setting last_read/last_write in
3982 * i915_vma_move_to_active(), and then in reverse in retire.
3983 * However, for good measure, we always report the last_write
3984 * request as a busy read as well as being a busy write.
3986 * We don't care that the set of active read/write engines
3987 * may change during construction of the result, as it is
3988 * equally liable to change before userspace can inspect
3991 args->busy |= busy_check_writer(&obj->last_write);
3996 i915_gem_object_put_unlocked(obj);
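/*
 * Userspace decode sketch (illustrative only; assumes a libdrm-style
 * drmIoctl() wrapper): a zero result means idle, otherwise the low word
 * carries the last writer's exec_id and the high word a bitmask of
 * engines still reading.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		bool idle = busy.busy == 0;
 *		unsigned int write_id = busy.busy & 0xffff;
 *		unsigned int read_mask = busy.busy >> 16;
 *		...
 *	}
 */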
4001 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4002 struct drm_file *file_priv)
4004 return i915_gem_ring_throttle(dev, file_priv);
4008 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4009 struct drm_file *file_priv)
4011 struct drm_i915_private *dev_priv = to_i915(dev);
4012 struct drm_i915_gem_madvise *args = data;
4013 struct drm_i915_gem_object *obj;
4016 switch (args->madv) {
4017 case I915_MADV_DONTNEED:
4018 case I915_MADV_WILLNEED:
4024 ret = i915_mutex_lock_interruptible(dev);
4028 obj = i915_gem_object_lookup(file_priv, args->handle);
4035 i915_gem_object_is_tiled(obj) &&
4036 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4037 if (obj->madv == I915_MADV_WILLNEED)
4038 i915_gem_object_unpin_pages(obj);
4039 if (args->madv == I915_MADV_WILLNEED)
4040 i915_gem_object_pin_pages(obj);
4043 if (obj->madv != __I915_MADV_PURGED)
4044 obj->madv = args->madv;
4046 /* if the object is no longer attached, discard its backing storage */
4047 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4048 i915_gem_object_truncate(obj);
4050 args->retained = obj->madv != __I915_MADV_PURGED;
4052 i915_gem_object_put(obj);
4054 mutex_unlock(&dev->struct_mutex);
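/*
 * Userspace usage sketch (illustrative only; "fd" and "handle" assumed, as
 * is a libdrm-style drmIoctl() wrapper): mark a cached object as
 * disposable, and later check whether its backing storage survived.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		the pages were purged, recreate the contents
 */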
4058 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4059 const struct drm_i915_gem_object_ops *ops)
4063 INIT_LIST_HEAD(&obj->global_list);
4064 for (i = 0; i < I915_NUM_ENGINES; i++)
4065 init_request_active(&obj->last_read[i],
4066 i915_gem_object_retire__read);
4067 init_request_active(&obj->last_write,
4068 i915_gem_object_retire__write);
4069 INIT_LIST_HEAD(&obj->obj_exec_link);
4070 INIT_LIST_HEAD(&obj->vma_list);
4071 INIT_LIST_HEAD(&obj->batch_pool_link);
4075 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4076 obj->madv = I915_MADV_WILLNEED;
4078 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4081 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4082 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4083 .get_pages = i915_gem_object_get_pages_gtt,
4084 .put_pages = i915_gem_object_put_pages_gtt,
4087 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
4090 struct drm_i915_gem_object *obj;
4091 struct address_space *mapping;
4095 obj = i915_gem_object_alloc(dev);
4097 return ERR_PTR(-ENOMEM);
4099 ret = drm_gem_object_init(dev, &obj->base, size);
4103 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4104 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4105 /* 965gm cannot relocate objects above 4GiB. */
4106 mask &= ~__GFP_HIGHMEM;
4107 mask |= __GFP_DMA32;
4110 mapping = obj->base.filp->f_mapping;
4111 mapping_set_gfp_mask(mapping, mask);
4113 i915_gem_object_init(obj, &i915_gem_object_ops);
4115 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4116 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4119 /* On some devices, we can have the GPU use the LLC (the CPU
4120 * cache) for about a 10% performance improvement
4121 * compared to uncached. Graphics requests other than
4122 * display scanout are coherent with the CPU in
4123 * accessing this cache. This means in this mode we
4124 * don't need to clflush on the CPU side, and on the
4125 * GPU side we only need to flush internal caches to
4126 * get data visible to the CPU.
4128 * However, we maintain the display planes as UC, and so
4129 * need to rebind when first used as such.
4131 obj->cache_level = I915_CACHE_LLC;
4133 obj->cache_level = I915_CACHE_NONE;
4135 trace_i915_gem_object_create(obj);
4140 i915_gem_object_free(obj);
4142 return ERR_PTR(ret);
4145 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4147 /* If we are the last user of the backing storage (be it shmemfs
4148 * pages or stolen etc), we know that the pages are going to be
4149 * immediately released. In this case, we can then skip copying
4150 * back the contents from the GPU.
4153 if (obj->madv != I915_MADV_WILLNEED)
4156 if (obj->base.filp == NULL)
4159 /* At first glance, this looks racy, but then again so would be
4160 * userspace racing mmap against close. However, the first external
4161 * reference to the filp can only be obtained through the
4162 * i915_gem_mmap_ioctl() which safeguards us against the user
4163 * acquiring such a reference whilst we are in the middle of
4164 * freeing the object.
4166 return atomic_long_read(&obj->base.filp->f_count) == 1;
4169 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4171 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4172 struct drm_device *dev = obj->base.dev;
4173 struct drm_i915_private *dev_priv = to_i915(dev);
4174 struct i915_vma *vma, *next;
4176 intel_runtime_pm_get(dev_priv);
4178 trace_i915_gem_object_destroy(obj);
4180 /* All file-owned VMA should have been released by this point through
4181 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4182 * However, the object may also be bound into the global GTT (e.g.
4183 * older GPUs without per-process support, or for direct access through
4184 * the GTT either for the user or for scanout). Those VMA still need to
4187 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4188 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4189 GEM_BUG_ON(i915_vma_is_active(vma));
4190 vma->flags &= ~I915_VMA_PIN_MASK;
4191 i915_vma_close(vma);
4193 GEM_BUG_ON(obj->bind_count);
4195 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4196 * before progressing. */
4198 i915_gem_object_unpin_pages(obj);
4200 WARN_ON(atomic_read(&obj->frontbuffer_bits));
4202 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4203 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4204 i915_gem_object_is_tiled(obj))
4205 i915_gem_object_unpin_pages(obj);
4207 if (WARN_ON(obj->pages_pin_count))
4208 obj->pages_pin_count = 0;
4209 if (discard_backing_storage(obj))
4210 obj->madv = I915_MADV_DONTNEED;
4211 i915_gem_object_put_pages(obj);
4215 if (obj->base.import_attach)
4216 drm_prime_gem_destroy(&obj->base, NULL);
4218 if (obj->ops->release)
4219 obj->ops->release(obj);
4221 drm_gem_object_release(&obj->base);
4222 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4225 i915_gem_object_free(obj);
4227 intel_runtime_pm_put(dev_priv);
4230 int i915_gem_suspend(struct drm_device *dev)
4232 struct drm_i915_private *dev_priv = to_i915(dev);
4235 intel_suspend_gt_powersave(dev_priv);
4237 mutex_lock(&dev->struct_mutex);
4239 /* We have to flush all the executing contexts to main memory so
4240 * that they can be saved in the hibernation image. To ensure the last
4241 * context image is coherent, we have to switch away from it. That
4242 * leaves the dev_priv->kernel_context still active when
4243 * we actually suspend, and its image in memory may not match the GPU
4244 * state. Fortunately, the kernel_context is disposable and we do
4245 * not rely on its state.
4247 ret = i915_gem_switch_to_kernel_context(dev_priv);
4251 ret = i915_gem_wait_for_idle(dev_priv,
4252 I915_WAIT_INTERRUPTIBLE |
4257 i915_gem_retire_requests(dev_priv);
4259 i915_gem_context_lost(dev_priv);
4260 mutex_unlock(&dev->struct_mutex);
4262 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4263 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4264 flush_delayed_work(&dev_priv->gt.idle_work);
4266 /* Assert that we successfully flushed all the work and
4267 * reset the GPU back to its idle, low power state.
4269 WARN_ON(dev_priv->gt.awake);
4274 mutex_unlock(&dev->struct_mutex);
4278 void i915_gem_resume(struct drm_device *dev)
4280 struct drm_i915_private *dev_priv = to_i915(dev);
4282 mutex_lock(&dev->struct_mutex);
4283 i915_gem_restore_gtt_mappings(dev);
4285 /* As we didn't flush the kernel context before suspend, we cannot
4286 * guarantee that the context image is complete. So let's just reset
4287 * it and start again.
4289 dev_priv->gt.resume(dev_priv);
4291 mutex_unlock(&dev->struct_mutex);
4294 void i915_gem_init_swizzling(struct drm_device *dev)
4296 struct drm_i915_private *dev_priv = to_i915(dev);
4298 if (INTEL_INFO(dev)->gen < 5 ||
4299 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4302 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4303 DISP_TILE_SURFACE_SWIZZLING);
4308 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4310 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4311 else if (IS_GEN7(dev))
4312 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4313 else if (IS_GEN8(dev))
4314 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4319 static void init_unused_ring(struct drm_device *dev, u32 base)
4321 struct drm_i915_private *dev_priv = to_i915(dev);
4323 I915_WRITE(RING_CTL(base), 0);
4324 I915_WRITE(RING_HEAD(base), 0);
4325 I915_WRITE(RING_TAIL(base), 0);
4326 I915_WRITE(RING_START(base), 0);
4329 static void init_unused_rings(struct drm_device *dev)
4332 init_unused_ring(dev, PRB1_BASE);
4333 init_unused_ring(dev, SRB0_BASE);
4334 init_unused_ring(dev, SRB1_BASE);
4335 init_unused_ring(dev, SRB2_BASE);
4336 init_unused_ring(dev, SRB3_BASE);
4337 } else if (IS_GEN2(dev)) {
4338 init_unused_ring(dev, SRB0_BASE);
4339 init_unused_ring(dev, SRB1_BASE);
4340 } else if (IS_GEN3(dev)) {
4341 init_unused_ring(dev, PRB1_BASE);
4342 init_unused_ring(dev, PRB2_BASE);
4347 i915_gem_init_hw(struct drm_device *dev)
4349 struct drm_i915_private *dev_priv = to_i915(dev);
4350 struct intel_engine_cs *engine;
4353 /* Double layer security blanket, see i915_gem_init() */
4354 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4356 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4357 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4359 if (IS_HASWELL(dev))
4360 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4361 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4363 if (HAS_PCH_NOP(dev)) {
4364 if (IS_IVYBRIDGE(dev)) {
4365 u32 temp = I915_READ(GEN7_MSG_CTL);
4366 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4367 I915_WRITE(GEN7_MSG_CTL, temp);
4368 } else if (INTEL_INFO(dev)->gen >= 7) {
4369 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4370 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4371 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4375 i915_gem_init_swizzling(dev);
4378 * At least 830 can leave some of the unused rings
4379 * "active" (ie. head != tail) after resume which
4380 * will prevent c3 entry. Make sure all unused rings
4383 init_unused_rings(dev);
4385 BUG_ON(!dev_priv->kernel_context);
4387 ret = i915_ppgtt_init_hw(dev);
4389 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4393 /* Need to do basic initialisation of all rings first: */
4394 for_each_engine(engine, dev_priv) {
4395 ret = engine->init_hw(engine);
4400 intel_mocs_init_l3cc_table(dev);
4402 /* We can't enable contexts until all firmware is loaded */
4403 ret = intel_guc_setup(dev);
4408 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4412 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4414 if (INTEL_INFO(dev_priv)->gen < 6)
4417 /* TODO: make semaphores and Execlists play nicely together */
4418 if (i915.enable_execlists)
4424 #ifdef CONFIG_INTEL_IOMMU
4425 /* Enable semaphores on SNB when IO remapping is off */
4426 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4433 int i915_gem_init(struct drm_device *dev)
4435 struct drm_i915_private *dev_priv = to_i915(dev);
4438 mutex_lock(&dev->struct_mutex);
4440 if (!i915.enable_execlists) {
4441 dev_priv->gt.resume = intel_legacy_submission_resume;
4442 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4444 dev_priv->gt.resume = intel_lr_context_resume;
4445 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4448 /* This is just a security blanket to placate dragons.
4449 * On some systems, we very sporadically observe that the first TLBs
4450 * used by the CS may be stale, despite us poking the TLB reset. If
4451 * we hold the forcewake during initialisation these problems
4452 * just magically go away.
4454 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4456 i915_gem_init_userptr(dev_priv);
4458 ret = i915_gem_init_ggtt(dev_priv);
4462 ret = i915_gem_context_init(dev);
4466 ret = intel_engines_init(dev);
4470 ret = i915_gem_init_hw(dev);
4472 /* Allow engine initialisation to fail by marking the GPU as
4473 * wedged. But we only want to do this where the GPU is angry,
4474 * for all other failures, such as an allocation failure, bail.
4476 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4477 i915_gem_set_wedged(dev_priv);
4482 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4483 mutex_unlock(&dev->struct_mutex);
4489 i915_gem_cleanup_engines(struct drm_device *dev)
4491 struct drm_i915_private *dev_priv = to_i915(dev);
4492 struct intel_engine_cs *engine;
4494 for_each_engine(engine, dev_priv)
4495 dev_priv->gt.cleanup_engine(engine);
4499 init_engine_lists(struct intel_engine_cs *engine)
4501 INIT_LIST_HEAD(&engine->request_list);
4505 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4507 struct drm_device *dev = &dev_priv->drm;
4510 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4511 !IS_CHERRYVIEW(dev_priv))
4512 dev_priv->num_fence_regs = 32;
4513 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4514 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4515 dev_priv->num_fence_regs = 16;
4517 dev_priv->num_fence_regs = 8;
4519 if (intel_vgpu_active(dev_priv))
4520 dev_priv->num_fence_regs =
4521 I915_READ(vgtif_reg(avail_rs.fence_num));
4523 /* Initialize fence registers to zero */
4524 for (i = 0; i < dev_priv->num_fence_regs; i++) {
4525 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
4527 fence->i915 = dev_priv;
4529 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
4531 i915_gem_restore_fences(dev);
4533 i915_gem_detect_bit_6_swizzle(dev);
4537 i915_gem_load_init(struct drm_device *dev)
4539 struct drm_i915_private *dev_priv = to_i915(dev);
4543 kmem_cache_create("i915_gem_object",
4544 sizeof(struct drm_i915_gem_object), 0,
4548 kmem_cache_create("i915_gem_vma",
4549 sizeof(struct i915_vma), 0,
4552 dev_priv->requests =
4553 kmem_cache_create("i915_gem_request",
4554 sizeof(struct drm_i915_gem_request), 0,
4555 SLAB_HWCACHE_ALIGN |
4556 SLAB_RECLAIM_ACCOUNT |
4557 SLAB_DESTROY_BY_RCU,
4560 INIT_LIST_HEAD(&dev_priv->context_list);
4561 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4562 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4563 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4564 for (i = 0; i < I915_NUM_ENGINES; i++)
4565 init_engine_lists(&dev_priv->engine[i]);
4566 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4567 i915_gem_retire_work_handler);
4568 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4569 i915_gem_idle_work_handler);
4570 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4571 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4573 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4575 init_waitqueue_head(&dev_priv->pending_flip_queue);
4577 dev_priv->mm.interruptible = true;
4579 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
4581 spin_lock_init(&dev_priv->fb_tracking.lock);
4584 void i915_gem_load_cleanup(struct drm_device *dev)
4586 struct drm_i915_private *dev_priv = to_i915(dev);
4588 kmem_cache_destroy(dev_priv->requests);
4589 kmem_cache_destroy(dev_priv->vmas);
4590 kmem_cache_destroy(dev_priv->objects);
4592 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink_all(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well.
	 */

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);

	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, global_list) {
			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
		}
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;
}
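
/*
 * The freeze/freeze_late pair mirrors the kernel's hibernation callbacks:
 * i915_gem_freeze() runs when devices are first frozen, and
 * i915_gem_freeze_late() runs just before the image is written out. An
 * illustrative (not verbatim) hook-up from the driver's dev_pm_ops might
 * look like the following, with the i915_pm_*() wrappers standing in for
 * whatever the driver actually uses:
 *
 *	static const struct dev_pm_ops i915_pm_ops = {
 *		.freeze		= i915_pm_freeze,	(calls i915_gem_freeze())
 *		.freeze_late	= i915_pm_freeze_late,	(calls i915_gem_freeze_late())
 *		...
 *	};
 */
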
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}
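
/*
 * Per-client GEM setup, invoked when a process opens the DRM device node:
 * allocate the file private, initialise its request list and RPS link, and
 * open a fresh hardware context for the client.
 */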
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
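
/*
 * Example (illustrative only): a plane update handing the frontbuffer slot
 * bits of a pipe's primary plane over from the old framebuffer's backing
 * object to the new one. The object names are placeholders for whatever the
 * display code actually passes in:
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 */
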
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return NULL;

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}
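
/*
 * Callers that write to an object's backing pages through the CPU (for
 * example, when patching relocations) should use the dirty variant so that
 * the modified page is written back if it is later swapped out.
 */
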
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto fail;

	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}
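
/*
 * Example (illustrative only): uploading a firmware-style blob into a GEM
 * object. The names below are placeholders; real callers supply their own
 * data pointer and size:
 *
 *	const struct firmware *fw = ...;
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */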