[linux.git] drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <[email protected]>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/oom.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/slab.h>
37 #include <linux/swap.h>
38 #include <linux/pci.h>
39 #include <linux/dma-buf.h>
40
41 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
42 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
43                                                    bool force);
44 static __must_check int
45 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
46                                bool readonly);
47 static void
48 i915_gem_object_retire(struct drm_i915_gem_object *obj);
49
50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
51                                  struct drm_i915_gem_object *obj);
52 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53                                          struct drm_i915_fence_reg *fence,
54                                          bool enable);
55
56 static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
57                                              struct shrink_control *sc);
58 static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
59                                             struct shrink_control *sc);
60 static int i915_gem_shrinker_oom(struct notifier_block *nb,
61                                  unsigned long event,
62                                  void *ptr);
63 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
64 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
65
66 static bool cpu_cache_is_coherent(struct drm_device *dev,
67                                   enum i915_cache_level level)
68 {
69         return HAS_LLC(dev) || level != I915_CACHE_NONE;
70 }
71
72 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
73 {
74         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
75                 return true;
76
77         return obj->pin_display;
78 }
79
80 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
81 {
82         if (obj->tiling_mode)
83                 i915_gem_release_mmap(obj);
84
85         /* As we do not have an associated fence register, we will force
86          * a tiling change if we ever need to acquire one.
87          */
88         obj->fence_dirty = false;
89         obj->fence_reg = I915_FENCE_REG_NONE;
90 }
91
92 /* some bookkeeping */
93 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
94                                   size_t size)
95 {
96         spin_lock(&dev_priv->mm.object_stat_lock);
97         dev_priv->mm.object_count++;
98         dev_priv->mm.object_memory += size;
99         spin_unlock(&dev_priv->mm.object_stat_lock);
100 }
101
102 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
103                                      size_t size)
104 {
105         spin_lock(&dev_priv->mm.object_stat_lock);
106         dev_priv->mm.object_count--;
107         dev_priv->mm.object_memory -= size;
108         spin_unlock(&dev_priv->mm.object_stat_lock);
109 }
110
111 static int
112 i915_gem_wait_for_error(struct i915_gpu_error *error)
113 {
114         int ret;
115
116 #define EXIT_COND (!i915_reset_in_progress(error) || \
117                    i915_terminally_wedged(error))
118         if (EXIT_COND)
119                 return 0;
120
121         /*
122          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
123          * userspace. If it takes that long something really bad is going on and
124          * we should simply try to bail out and fail as gracefully as possible.
125          */
126         ret = wait_event_interruptible_timeout(error->reset_queue,
127                                                EXIT_COND,
128                                                10*HZ);
129         if (ret == 0) {
130                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
131                 return -EIO;
132         } else if (ret < 0) {
133                 return ret;
134         }
135 #undef EXIT_COND
136
137         return 0;
138 }
139
140 int i915_mutex_lock_interruptible(struct drm_device *dev)
141 {
142         struct drm_i915_private *dev_priv = dev->dev_private;
143         int ret;
144
145         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
146         if (ret)
147                 return ret;
148
149         ret = mutex_lock_interruptible(&dev->struct_mutex);
150         if (ret)
151                 return ret;
152
153         WARN_ON(i915_verify_lists(dev));
154         return 0;
155 }
156
157 static inline bool
158 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
159 {
160         return i915_gem_obj_bound_any(obj) && !obj->active;
161 }
162
163 int
164 i915_gem_init_ioctl(struct drm_device *dev, void *data,
165                     struct drm_file *file)
166 {
167         struct drm_i915_private *dev_priv = dev->dev_private;
168         struct drm_i915_gem_init *args = data;
169
170         if (drm_core_check_feature(dev, DRIVER_MODESET))
171                 return -ENODEV;
172
173         if (args->gtt_start >= args->gtt_end ||
174             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
175                 return -EINVAL;
176
177         /* GEM with user mode setting was never supported on ilk and later. */
178         if (INTEL_INFO(dev)->gen >= 5)
179                 return -ENODEV;
180
181         mutex_lock(&dev->struct_mutex);
182         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
183                                   args->gtt_end);
184         dev_priv->gtt.mappable_end = args->gtt_end;
185         mutex_unlock(&dev->struct_mutex);
186
187         return 0;
188 }
189
190 int
191 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
192                             struct drm_file *file)
193 {
194         struct drm_i915_private *dev_priv = dev->dev_private;
195         struct drm_i915_gem_get_aperture *args = data;
196         struct drm_i915_gem_object *obj;
197         size_t pinned;
198
199         pinned = 0;
200         mutex_lock(&dev->struct_mutex);
201         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
202                 if (i915_gem_obj_is_pinned(obj))
203                         pinned += i915_gem_obj_ggtt_size(obj);
204         mutex_unlock(&dev->struct_mutex);
205
206         args->aper_size = dev_priv->gtt.base.total;
207         args->aper_available_size = args->aper_size - pinned;
208
209         return 0;
210 }
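
/*
 * Illustrative userspace sketch (not part of this file) of querying the
 * aperture through the ioctl above, assuming the uapi definitions from
 * include/uapi/drm/i915_drm.h and libdrm's drmIoctl():
 *
 *	struct drm_i915_gem_get_aperture aperture = { 0 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *		printf("GTT total %llu, available %llu\n",
 *		       (unsigned long long)aperture.aper_size,
 *		       (unsigned long long)aperture.aper_available_size);
 *
 * aper_available_size is aper_size minus the space taken by the pinned
 * objects counted above.
 */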
211
212 static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
213 {
214         drm_dma_handle_t *phys = obj->phys_handle;
215
216         if (!phys)
217                 return;
218
219         if (obj->madv == I915_MADV_WILLNEED) {
220                 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
221                 char *vaddr = phys->vaddr;
222                 int i;
223
224                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
225                         struct page *page = shmem_read_mapping_page(mapping, i);
226                         if (!IS_ERR(page)) {
227                                 char *dst = kmap_atomic(page);
228                                 memcpy(dst, vaddr, PAGE_SIZE);
229                                 drm_clflush_virt_range(dst, PAGE_SIZE);
230                                 kunmap_atomic(dst);
231
232                                 set_page_dirty(page);
233                                 mark_page_accessed(page);
234                                 page_cache_release(page);
235                         }
236                         vaddr += PAGE_SIZE;
237                 }
238                 i915_gem_chipset_flush(obj->base.dev);
239         }
240
241 #ifdef CONFIG_X86
242         set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
243 #endif
244         drm_pci_free(obj->base.dev, phys);
245         obj->phys_handle = NULL;
246 }
247
248 int
249 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
250                             int align)
251 {
252         drm_dma_handle_t *phys;
253         struct address_space *mapping;
254         char *vaddr;
255         int i;
256
257         if (obj->phys_handle) {
258                 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
259                         return -EBUSY;
260
261                 return 0;
262         }
263
264         if (obj->madv != I915_MADV_WILLNEED)
265                 return -EFAULT;
266
267         if (obj->base.filp == NULL)
268                 return -EINVAL;
269
270         /* create a new object */
271         phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
272         if (!phys)
273                 return -ENOMEM;
274
275         vaddr = phys->vaddr;
276 #ifdef CONFIG_X86
277         set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
278 #endif
279         mapping = file_inode(obj->base.filp)->i_mapping;
280         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
281                 struct page *page;
282                 char *src;
283
284                 page = shmem_read_mapping_page(mapping, i);
285                 if (IS_ERR(page)) {
286 #ifdef CONFIG_X86
287                         set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
288 #endif
289                         drm_pci_free(obj->base.dev, phys);
290                         return PTR_ERR(page);
291                 }
292
293                 src = kmap_atomic(page);
294                 memcpy(vaddr, src, PAGE_SIZE);
295                 kunmap_atomic(src);
296
297                 mark_page_accessed(page);
298                 page_cache_release(page);
299
300                 vaddr += PAGE_SIZE;
301         }
302
303         obj->phys_handle = phys;
304         return 0;
305 }
306
307 static int
308 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
309                      struct drm_i915_gem_pwrite *args,
310                      struct drm_file *file_priv)
311 {
312         struct drm_device *dev = obj->base.dev;
313         void *vaddr = obj->phys_handle->vaddr + args->offset;
314         char __user *user_data = to_user_ptr(args->data_ptr);
315
316         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
317                 unsigned long unwritten;
318
319                 /* The physical object once assigned is fixed for the lifetime
320                  * of the obj, so we can safely drop the lock and continue
321                  * to access vaddr.
322                  */
323                 mutex_unlock(&dev->struct_mutex);
324                 unwritten = copy_from_user(vaddr, user_data, args->size);
325                 mutex_lock(&dev->struct_mutex);
326                 if (unwritten)
327                         return -EFAULT;
328         }
329
330         i915_gem_chipset_flush(dev);
331         return 0;
332 }
333
334 void *i915_gem_object_alloc(struct drm_device *dev)
335 {
336         struct drm_i915_private *dev_priv = dev->dev_private;
337         return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
338 }
339
340 void i915_gem_object_free(struct drm_i915_gem_object *obj)
341 {
342         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
343         kmem_cache_free(dev_priv->slab, obj);
344 }
345
346 static int
347 i915_gem_create(struct drm_file *file,
348                 struct drm_device *dev,
349                 uint64_t size,
350                 uint32_t *handle_p)
351 {
352         struct drm_i915_gem_object *obj;
353         int ret;
354         u32 handle;
355
356         size = roundup(size, PAGE_SIZE);
357         if (size == 0)
358                 return -EINVAL;
359
360         /* Allocate the new object */
361         obj = i915_gem_alloc_object(dev, size);
362         if (obj == NULL)
363                 return -ENOMEM;
364
365         ret = drm_gem_handle_create(file, &obj->base, &handle);
366         /* drop reference from allocate - handle holds it now */
367         drm_gem_object_unreference_unlocked(&obj->base);
368         if (ret)
369                 return ret;
370
371         *handle_p = handle;
372         return 0;
373 }
374
375 int
376 i915_gem_dumb_create(struct drm_file *file,
377                      struct drm_device *dev,
378                      struct drm_mode_create_dumb *args)
379 {
380         /* have to work out size/pitch and return them */
381         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
382         args->size = args->pitch * args->height;
383         return i915_gem_create(file, dev,
384                                args->size, &args->handle);
385 }
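
/*
 * A worked example of the pitch/size computation above (not from the
 * original source): for a 1920x1080 dumb buffer at 32 bpp,
 *
 *	pitch = ALIGN(1920 * DIV_ROUND_UP(32, 8), 64) = ALIGN(7680, 64) = 7680
 *	size  = pitch * height = 7680 * 1080 = 8294400 bytes
 *
 * Userspace normally reaches this path through DRM_IOCTL_MODE_CREATE_DUMB
 * with struct drm_mode_create_dumb { .width = 1920, .height = 1080,
 * .bpp = 32 } and reads back .pitch, .size and .handle.
 */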
386
387 /**
388  * Creates a new mm object and returns a handle to it.
389  */
390 int
391 i915_gem_create_ioctl(struct drm_device *dev, void *data,
392                       struct drm_file *file)
393 {
394         struct drm_i915_gem_create *args = data;
395
396         return i915_gem_create(file, dev,
397                                args->size, &args->handle);
398 }
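
/*
 * Illustrative userspace sketch (not part of this file) of creating a GEM
 * object through the ioctl above, assuming the uapi structures in
 * include/uapi/drm/i915_drm.h and libdrm's drmIoctl():
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
 *		return -errno;
 *
 * The kernel rounds .size up to PAGE_SIZE; on success create.handle names
 * the new object and is what the pread/pwrite/mmap ioctls below operate on.
 */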
399
400 static inline int
401 __copy_to_user_swizzled(char __user *cpu_vaddr,
402                         const char *gpu_vaddr, int gpu_offset,
403                         int length)
404 {
405         int ret, cpu_offset = 0;
406
407         while (length > 0) {
408                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
409                 int this_length = min(cacheline_end - gpu_offset, length);
410                 int swizzled_gpu_offset = gpu_offset ^ 64;
411
412                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
413                                      gpu_vaddr + swizzled_gpu_offset,
414                                      this_length);
415                 if (ret)
416                         return ret + length;
417
418                 cpu_offset += this_length;
419                 gpu_offset += this_length;
420                 length -= this_length;
421         }
422
423         return 0;
424 }
425
426 static inline int
427 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
428                           const char __user *cpu_vaddr,
429                           int length)
430 {
431         int ret, cpu_offset = 0;
432
433         while (length > 0) {
434                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
435                 int this_length = min(cacheline_end - gpu_offset, length);
436                 int swizzled_gpu_offset = gpu_offset ^ 64;
437
438                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
439                                        cpu_vaddr + cpu_offset,
440                                        this_length);
441                 if (ret)
442                         return ret + length;
443
444                 cpu_offset += this_length;
445                 gpu_offset += this_length;
446                 length -= this_length;
447         }
448
449         return 0;
450 }
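
/*
 * A brief worked example of the swizzled copy above (not from the original
 * source). On bit-17-swizzling platforms the GPU swaps the two 64-byte
 * halves of each 128-byte span for pages whose physical address has bit 17
 * set, so the CPU side must address gpu_offset ^ 64 to touch the byte the
 * GPU actually sees. For gpu_offset = 0x90 (first 64-byte half of the
 * 128-byte block at 0x80):
 *
 *	cacheline_end       = ALIGN(0x90 + 1, 64)      = 0xc0
 *	this_length         = min(0xc0 - 0x90, length) = min(0x30, length)
 *	swizzled_gpu_offset = 0x90 ^ 64                = 0xd0
 *
 * so up to 0x30 bytes are copied at vaddr + 0xd0 instead of vaddr + 0x90,
 * and the loop then continues at the next 64-byte boundary.
 */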
451
452 /*
453  * Pins the specified object's pages and synchronizes the object with
454  * GPU accesses. Sets needs_clflush to non-zero if the caller should
455  * flush the object from the CPU cache.
456  */
457 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
458                                     int *needs_clflush)
459 {
460         int ret;
461
462         *needs_clflush = 0;
463
464         if (!obj->base.filp)
465                 return -EINVAL;
466
467         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
468                 /* If we're not in the cpu read domain, set ourself into the gtt
469                  * read domain and manually flush cachelines (if required). This
470                  * optimizes for the case when the gpu will dirty the data
471                  * anyway again before the next pread happens. */
472                 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
473                                                         obj->cache_level);
474                 ret = i915_gem_object_wait_rendering(obj, true);
475                 if (ret)
476                         return ret;
477
478                 i915_gem_object_retire(obj);
479         }
480
481         ret = i915_gem_object_get_pages(obj);
482         if (ret)
483                 return ret;
484
485         i915_gem_object_pin_pages(obj);
486
487         return ret;
488 }
489
490 /* Per-page copy function for the shmem pread fastpath.
491  * Flushes invalid cachelines before reading the target if
492  * needs_clflush is set. */
493 static int
494 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
495                  char __user *user_data,
496                  bool page_do_bit17_swizzling, bool needs_clflush)
497 {
498         char *vaddr;
499         int ret;
500
501         if (unlikely(page_do_bit17_swizzling))
502                 return -EINVAL;
503
504         vaddr = kmap_atomic(page);
505         if (needs_clflush)
506                 drm_clflush_virt_range(vaddr + shmem_page_offset,
507                                        page_length);
508         ret = __copy_to_user_inatomic(user_data,
509                                       vaddr + shmem_page_offset,
510                                       page_length);
511         kunmap_atomic(vaddr);
512
513         return ret ? -EFAULT : 0;
514 }
515
516 static void
517 shmem_clflush_swizzled_range(char *addr, unsigned long length,
518                              bool swizzled)
519 {
520         if (unlikely(swizzled)) {
521                 unsigned long start = (unsigned long) addr;
522                 unsigned long end = (unsigned long) addr + length;
523
524                 /* For swizzling simply ensure that we always flush both
525                  * channels. Lame, but simple and it works. Swizzled
526                  * pwrite/pread is far from a hotpath - current userspace
527                  * doesn't use it at all. */
528                 start = round_down(start, 128);
529                 end = round_up(end, 128);
530
531                 drm_clflush_virt_range((void *)start, end - start);
532         } else {
533                 drm_clflush_virt_range(addr, length);
534         }
535
536 }
537
538 /* Only difference to the fast-path function is that this can handle bit17
539  * and uses non-atomic copy and kmap functions. */
540 static int
541 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
542                  char __user *user_data,
543                  bool page_do_bit17_swizzling, bool needs_clflush)
544 {
545         char *vaddr;
546         int ret;
547
548         vaddr = kmap(page);
549         if (needs_clflush)
550                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
551                                              page_length,
552                                              page_do_bit17_swizzling);
553
554         if (page_do_bit17_swizzling)
555                 ret = __copy_to_user_swizzled(user_data,
556                                               vaddr, shmem_page_offset,
557                                               page_length);
558         else
559                 ret = __copy_to_user(user_data,
560                                      vaddr + shmem_page_offset,
561                                      page_length);
562         kunmap(page);
563
564         return ret ? -EFAULT : 0;
565 }
566
567 static int
568 i915_gem_shmem_pread(struct drm_device *dev,
569                      struct drm_i915_gem_object *obj,
570                      struct drm_i915_gem_pread *args,
571                      struct drm_file *file)
572 {
573         char __user *user_data;
574         ssize_t remain;
575         loff_t offset;
576         int shmem_page_offset, page_length, ret = 0;
577         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
578         int prefaulted = 0;
579         int needs_clflush = 0;
580         struct sg_page_iter sg_iter;
581
582         user_data = to_user_ptr(args->data_ptr);
583         remain = args->size;
584
585         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
586
587         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
588         if (ret)
589                 return ret;
590
591         offset = args->offset;
592
593         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
594                          offset >> PAGE_SHIFT) {
595                 struct page *page = sg_page_iter_page(&sg_iter);
596
597                 if (remain <= 0)
598                         break;
599
600                 /* Operation in this page
601                  *
602                  * shmem_page_offset = offset within page in shmem file
603                  * page_length = bytes to copy for this page
604                  */
605                 shmem_page_offset = offset_in_page(offset);
606                 page_length = remain;
607                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
608                         page_length = PAGE_SIZE - shmem_page_offset;
609
610                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
611                         (page_to_phys(page) & (1 << 17)) != 0;
612
613                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
614                                        user_data, page_do_bit17_swizzling,
615                                        needs_clflush);
616                 if (ret == 0)
617                         goto next_page;
618
619                 mutex_unlock(&dev->struct_mutex);
620
621                 if (likely(!i915.prefault_disable) && !prefaulted) {
622                         ret = fault_in_multipages_writeable(user_data, remain);
623                         /* Userspace is tricking us, but we've already clobbered
624                          * its pages with the prefault and promised to write the
625                          * data up to the first fault. Hence ignore any errors
626                          * and just continue. */
627                         (void)ret;
628                         prefaulted = 1;
629                 }
630
631                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
632                                        user_data, page_do_bit17_swizzling,
633                                        needs_clflush);
634
635                 mutex_lock(&dev->struct_mutex);
636
637                 if (ret)
638                         goto out;
639
640 next_page:
641                 remain -= page_length;
642                 user_data += page_length;
643                 offset += page_length;
644         }
645
646 out:
647         i915_gem_object_unpin_pages(obj);
648
649         return ret;
650 }
651
652 /**
653  * Reads data from the object referenced by handle.
654  *
655  * On error, the contents of *data are undefined.
656  */
657 int
658 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
659                      struct drm_file *file)
660 {
661         struct drm_i915_gem_pread *args = data;
662         struct drm_i915_gem_object *obj;
663         int ret = 0;
664
665         if (args->size == 0)
666                 return 0;
667
668         if (!access_ok(VERIFY_WRITE,
669                        to_user_ptr(args->data_ptr),
670                        args->size))
671                 return -EFAULT;
672
673         ret = i915_mutex_lock_interruptible(dev);
674         if (ret)
675                 return ret;
676
677         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
678         if (&obj->base == NULL) {
679                 ret = -ENOENT;
680                 goto unlock;
681         }
682
683         /* Bounds check source.  */
684         if (args->offset > obj->base.size ||
685             args->size > obj->base.size - args->offset) {
686                 ret = -EINVAL;
687                 goto out;
688         }
689
690         /* prime objects have no backing filp to GEM pread/pwrite
691          * pages from.
692          */
693         if (!obj->base.filp) {
694                 ret = -EINVAL;
695                 goto out;
696         }
697
698         trace_i915_gem_object_pread(obj, args->offset, args->size);
699
700         ret = i915_gem_shmem_pread(dev, obj, args, file);
701
702 out:
703         drm_gem_object_unreference(&obj->base);
704 unlock:
705         mutex_unlock(&dev->struct_mutex);
706         return ret;
707 }
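
/*
 * Illustrative userspace sketch (not part of this file) of a pread call,
 * assuming the uapi definitions in include/uapi/drm/i915_drm.h:
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		return -errno;
 *
 * offset/size must stay within the object, and prime-imported objects
 * (no backing filp) are rejected with -EINVAL, as checked above.
 */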
708
709 /* This is the fast write path which cannot handle
710  * page faults in the source data
711  */
712
713 static inline int
714 fast_user_write(struct io_mapping *mapping,
715                 loff_t page_base, int page_offset,
716                 char __user *user_data,
717                 int length)
718 {
719         void __iomem *vaddr_atomic;
720         void *vaddr;
721         unsigned long unwritten;
722
723         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
724         /* We can use the cpu mem copy function because this is X86. */
725         vaddr = (void __force*)vaddr_atomic + page_offset;
726         unwritten = __copy_from_user_inatomic_nocache(vaddr,
727                                                       user_data, length);
728         io_mapping_unmap_atomic(vaddr_atomic);
729         return unwritten;
730 }
731
732 /**
733  * This is the fast pwrite path, where we copy the data directly from the
734  * user into the GTT, uncached.
735  */
736 static int
737 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
738                          struct drm_i915_gem_object *obj,
739                          struct drm_i915_gem_pwrite *args,
740                          struct drm_file *file)
741 {
742         struct drm_i915_private *dev_priv = dev->dev_private;
743         ssize_t remain;
744         loff_t offset, page_base;
745         char __user *user_data;
746         int page_offset, page_length, ret;
747
748         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
749         if (ret)
750                 goto out;
751
752         ret = i915_gem_object_set_to_gtt_domain(obj, true);
753         if (ret)
754                 goto out_unpin;
755
756         ret = i915_gem_object_put_fence(obj);
757         if (ret)
758                 goto out_unpin;
759
760         user_data = to_user_ptr(args->data_ptr);
761         remain = args->size;
762
763         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
764
765         while (remain > 0) {
766                 /* Operation in this page
767                  *
768                  * page_base = page offset within aperture
769                  * page_offset = offset within page
770                  * page_length = bytes to copy for this page
771                  */
772                 page_base = offset & PAGE_MASK;
773                 page_offset = offset_in_page(offset);
774                 page_length = remain;
775                 if ((page_offset + remain) > PAGE_SIZE)
776                         page_length = PAGE_SIZE - page_offset;
777
778                 /* If we get a fault while copying data, then (presumably) our
779                  * source page isn't available.  Return the error and we'll
780                  * retry in the slow path.
781                  */
782                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
783                                     page_offset, user_data, page_length)) {
784                         ret = -EFAULT;
785                         goto out_unpin;
786                 }
787
788                 remain -= page_length;
789                 user_data += page_length;
790                 offset += page_length;
791         }
792
793 out_unpin:
794         i915_gem_object_ggtt_unpin(obj);
795 out:
796         return ret;
797 }
798
799 /* Per-page copy function for the shmem pwrite fastpath.
800  * Flushes invalid cachelines before writing to the target if
801  * needs_clflush_before is set and flushes out any written cachelines after
802  * writing if needs_clflush is set. */
803 static int
804 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
805                   char __user *user_data,
806                   bool page_do_bit17_swizzling,
807                   bool needs_clflush_before,
808                   bool needs_clflush_after)
809 {
810         char *vaddr;
811         int ret;
812
813         if (unlikely(page_do_bit17_swizzling))
814                 return -EINVAL;
815
816         vaddr = kmap_atomic(page);
817         if (needs_clflush_before)
818                 drm_clflush_virt_range(vaddr + shmem_page_offset,
819                                        page_length);
820         ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
821                                         user_data, page_length);
822         if (needs_clflush_after)
823                 drm_clflush_virt_range(vaddr + shmem_page_offset,
824                                        page_length);
825         kunmap_atomic(vaddr);
826
827         return ret ? -EFAULT : 0;
828 }
829
830 /* Only difference to the fast-path function is that this can handle bit17
831  * and uses non-atomic copy and kmap functions. */
832 static int
833 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
834                   char __user *user_data,
835                   bool page_do_bit17_swizzling,
836                   bool needs_clflush_before,
837                   bool needs_clflush_after)
838 {
839         char *vaddr;
840         int ret;
841
842         vaddr = kmap(page);
843         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
844                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
845                                              page_length,
846                                              page_do_bit17_swizzling);
847         if (page_do_bit17_swizzling)
848                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
849                                                 user_data,
850                                                 page_length);
851         else
852                 ret = __copy_from_user(vaddr + shmem_page_offset,
853                                        user_data,
854                                        page_length);
855         if (needs_clflush_after)
856                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
857                                              page_length,
858                                              page_do_bit17_swizzling);
859         kunmap(page);
860
861         return ret ? -EFAULT : 0;
862 }
863
864 static int
865 i915_gem_shmem_pwrite(struct drm_device *dev,
866                       struct drm_i915_gem_object *obj,
867                       struct drm_i915_gem_pwrite *args,
868                       struct drm_file *file)
869 {
870         ssize_t remain;
871         loff_t offset;
872         char __user *user_data;
873         int shmem_page_offset, page_length, ret = 0;
874         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
875         int hit_slowpath = 0;
876         int needs_clflush_after = 0;
877         int needs_clflush_before = 0;
878         struct sg_page_iter sg_iter;
879
880         user_data = to_user_ptr(args->data_ptr);
881         remain = args->size;
882
883         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
884
885         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
886                 /* If we're not in the cpu write domain, set ourself into the gtt
887                  * write domain and manually flush cachelines (if required). This
888                  * optimizes for the case when the gpu will use the data
889                  * right away and we therefore have to clflush anyway. */
890                 needs_clflush_after = cpu_write_needs_clflush(obj);
891                 ret = i915_gem_object_wait_rendering(obj, false);
892                 if (ret)
893                         return ret;
894
895                 i915_gem_object_retire(obj);
896         }
897         /* Same trick applies to invalidate partially written cachelines read
898          * before writing. */
899         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
900                 needs_clflush_before =
901                         !cpu_cache_is_coherent(dev, obj->cache_level);
902
903         ret = i915_gem_object_get_pages(obj);
904         if (ret)
905                 return ret;
906
907         i915_gem_object_pin_pages(obj);
908
909         offset = args->offset;
910         obj->dirty = 1;
911
912         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
913                          offset >> PAGE_SHIFT) {
914                 struct page *page = sg_page_iter_page(&sg_iter);
915                 int partial_cacheline_write;
916
917                 if (remain <= 0)
918                         break;
919
920                 /* Operation in this page
921                  *
922                  * shmem_page_offset = offset within page in shmem file
923                  * page_length = bytes to copy for this page
924                  */
925                 shmem_page_offset = offset_in_page(offset);
926
927                 page_length = remain;
928                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
929                         page_length = PAGE_SIZE - shmem_page_offset;
930
931                 /* If we don't overwrite a cacheline completely we need to be
932                  * careful to have up-to-date data by first clflushing. Don't
933                  * overcomplicate things and flush the entire patch. */
934                 partial_cacheline_write = needs_clflush_before &&
935                         ((shmem_page_offset | page_length)
936                                 & (boot_cpu_data.x86_clflush_size - 1));
937
938                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
939                         (page_to_phys(page) & (1 << 17)) != 0;
940
941                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
942                                         user_data, page_do_bit17_swizzling,
943                                         partial_cacheline_write,
944                                         needs_clflush_after);
945                 if (ret == 0)
946                         goto next_page;
947
948                 hit_slowpath = 1;
949                 mutex_unlock(&dev->struct_mutex);
950                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
951                                         user_data, page_do_bit17_swizzling,
952                                         partial_cacheline_write,
953                                         needs_clflush_after);
954
955                 mutex_lock(&dev->struct_mutex);
956
957                 if (ret)
958                         goto out;
959
960 next_page:
961                 remain -= page_length;
962                 user_data += page_length;
963                 offset += page_length;
964         }
965
966 out:
967         i915_gem_object_unpin_pages(obj);
968
969         if (hit_slowpath) {
970                 /*
971                  * Fixup: Flush cpu caches in case we didn't flush the dirty
972                  * cachelines in-line while writing and the object moved
973                  * out of the cpu write domain while we've dropped the lock.
974                  */
975                 if (!needs_clflush_after &&
976                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
977                         if (i915_gem_clflush_object(obj, obj->pin_display))
978                                 i915_gem_chipset_flush(dev);
979                 }
980         }
981
982         if (needs_clflush_after)
983                 i915_gem_chipset_flush(dev);
984
985         return ret;
986 }
987
988 /**
989  * Writes data to the object referenced by handle.
990  *
991  * On error, the contents of the buffer that were to be modified are undefined.
992  */
993 int
994 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
995                       struct drm_file *file)
996 {
997         struct drm_i915_gem_pwrite *args = data;
998         struct drm_i915_gem_object *obj;
999         int ret;
1000
1001         if (args->size == 0)
1002                 return 0;
1003
1004         if (!access_ok(VERIFY_READ,
1005                        to_user_ptr(args->data_ptr),
1006                        args->size))
1007                 return -EFAULT;
1008
1009         if (likely(!i915.prefault_disable)) {
1010                 ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
1011                                                    args->size);
1012                 if (ret)
1013                         return -EFAULT;
1014         }
1015
1016         ret = i915_mutex_lock_interruptible(dev);
1017         if (ret)
1018                 return ret;
1019
1020         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1021         if (&obj->base == NULL) {
1022                 ret = -ENOENT;
1023                 goto unlock;
1024         }
1025
1026         /* Bounds check destination. */
1027         if (args->offset > obj->base.size ||
1028             args->size > obj->base.size - args->offset) {
1029                 ret = -EINVAL;
1030                 goto out;
1031         }
1032
1033         /* prime objects have no backing filp to GEM pread/pwrite
1034          * pages from.
1035          */
1036         if (!obj->base.filp) {
1037                 ret = -EINVAL;
1038                 goto out;
1039         }
1040
1041         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1042
1043         ret = -EFAULT;
1044         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1045          * it would end up going through the fenced access, and we'll get
1046          * different detiling behavior between reading and writing.
1047          * pread/pwrite currently are reading and writing from the CPU
1048          * perspective, requiring manual detiling by the client.
1049          */
1050         if (obj->phys_handle) {
1051                 ret = i915_gem_phys_pwrite(obj, args, file);
1052                 goto out;
1053         }
1054
1055         if (obj->tiling_mode == I915_TILING_NONE &&
1056             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1057             cpu_write_needs_clflush(obj)) {
1058                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
1059                 /* Note that the gtt paths might fail with non-page-backed user
1060                  * pointers (e.g. gtt mappings when moving data between
1061                  * textures). Fallback to the shmem path in that case. */
1062         }
1063
1064         if (ret == -EFAULT || ret == -ENOSPC)
1065                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1066
1067 out:
1068         drm_gem_object_unreference(&obj->base);
1069 unlock:
1070         mutex_unlock(&dev->struct_mutex);
1071         return ret;
1072 }
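
/*
 * Illustrative userspace sketch (not part of this file) of a pwrite call,
 * mirroring the pread example earlier in this file:
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = len,
 *		.data_ptr = (uintptr_t)data,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *		return -errno;
 *
 * Depending on the object (phys-backed, untiled and clflush-needing, or
 * plain shmem) the kernel services this through one of the paths selected
 * above; userspace sees the same copy semantics either way.
 */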
1073
1074 int
1075 i915_gem_check_wedge(struct i915_gpu_error *error,
1076                      bool interruptible)
1077 {
1078         if (i915_reset_in_progress(error)) {
1079                 /* Non-interruptible callers can't handle -EAGAIN, hence return
1080                  * -EIO unconditionally for these. */
1081                 if (!interruptible)
1082                         return -EIO;
1083
1084                 /* Recovery complete, but the reset failed ... */
1085                 if (i915_terminally_wedged(error))
1086                         return -EIO;
1087
1088                 /*
1089                  * Check if GPU Reset is in progress - we need intel_ring_begin
1090                  * to work properly to reinit the hw state while the gpu is
1091                  * still marked as reset-in-progress. Handle this with a flag.
1092                  */
1093                 if (!error->reload_in_reset)
1094                         return -EAGAIN;
1095         }
1096
1097         return 0;
1098 }
1099
1100 /*
1101  * Compare seqno against outstanding lazy request. Emit a request if they are
1102  * equal.
1103  */
1104 int
1105 i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
1106 {
1107         int ret;
1108
1109         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
1110
1111         ret = 0;
1112         if (seqno == ring->outstanding_lazy_seqno)
1113                 ret = i915_add_request(ring, NULL);
1114
1115         return ret;
1116 }
1117
1118 static void fake_irq(unsigned long data)
1119 {
1120         wake_up_process((struct task_struct *)data);
1121 }
1122
1123 static bool missed_irq(struct drm_i915_private *dev_priv,
1124                        struct intel_engine_cs *ring)
1125 {
1126         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
1127 }
1128
1129 static bool can_wait_boost(struct drm_i915_file_private *file_priv)
1130 {
1131         if (file_priv == NULL)
1132                 return true;
1133
1134         return !atomic_xchg(&file_priv->rps_wait_boost, true);
1135 }
1136
1137 /**
1138  * __wait_seqno - wait until execution of seqno has finished
1139  * @ring: the ring expected to report seqno
1140  * @seqno: duh!
1141  * @reset_counter: reset sequence associated with the given seqno
1142  * @interruptible: do an interruptible wait (normally yes)
1143  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1144  *
1145  * Note: It is of utmost importance that the passed in seqno and reset_counter
1146  * values have been read by the caller in an smp safe manner. Where read-side
1147  * locks are involved, it is sufficient to read the reset_counter before
1148  * unlocking the lock that protects the seqno. For lockless tricks, the
1149  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1150  * inserted.
1151  *
1152  * Returns 0 if the seqno was found within the allotted time. Else returns the
1153  * errno with remaining time filled in timeout argument.
1154  */
1155 static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
1156                         unsigned reset_counter,
1157                         bool interruptible,
1158                         s64 *timeout,
1159                         struct drm_i915_file_private *file_priv)
1160 {
1161         struct drm_device *dev = ring->dev;
1162         struct drm_i915_private *dev_priv = dev->dev_private;
1163         const bool irq_test_in_progress =
1164                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1165         DEFINE_WAIT(wait);
1166         unsigned long timeout_expire;
1167         s64 before, now;
1168         int ret;
1169
1170         WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1171
1172         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1173                 return 0;
1174
1175         timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
1176
1177         if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
1178                 gen6_rps_boost(dev_priv);
1179                 if (file_priv)
1180                         mod_delayed_work(dev_priv->wq,
1181                                          &file_priv->mm.idle_work,
1182                                          msecs_to_jiffies(100));
1183         }
1184
1185         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
1186                 return -ENODEV;
1187
1188         /* Record current time in case interrupted by signal, or wedged */
1189         trace_i915_gem_request_wait_begin(ring, seqno);
1190         before = ktime_get_raw_ns();
1191         for (;;) {
1192                 struct timer_list timer;
1193
1194                 prepare_to_wait(&ring->irq_queue, &wait,
1195                                 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1196
1197                 /* We need to check whether any gpu reset happened in between
1198                  * the caller grabbing the seqno and now ... */
1199                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1200                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1201                          * is truly gone. */
1202                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1203                         if (ret == 0)
1204                                 ret = -EAGAIN;
1205                         break;
1206                 }
1207
1208                 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1209                         ret = 0;
1210                         break;
1211                 }
1212
1213                 if (interruptible && signal_pending(current)) {
1214                         ret = -ERESTARTSYS;
1215                         break;
1216                 }
1217
1218                 if (timeout && time_after_eq(jiffies, timeout_expire)) {
1219                         ret = -ETIME;
1220                         break;
1221                 }
1222
1223                 timer.function = NULL;
1224                 if (timeout || missed_irq(dev_priv, ring)) {
1225                         unsigned long expire;
1226
1227                         setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1228                         expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
1229                         mod_timer(&timer, expire);
1230                 }
1231
1232                 io_schedule();
1233
1234                 if (timer.function) {
1235                         del_singleshot_timer_sync(&timer);
1236                         destroy_timer_on_stack(&timer);
1237                 }
1238         }
1239         now = ktime_get_raw_ns();
1240         trace_i915_gem_request_wait_end(ring, seqno);
1241
1242         if (!irq_test_in_progress)
1243                 ring->irq_put(ring);
1244
1245         finish_wait(&ring->irq_queue, &wait);
1246
1247         if (timeout) {
1248                 s64 tres = *timeout - (now - before);
1249
1250                 *timeout = tres < 0 ? 0 : tres;
1251         }
1252
1253         return ret;
1254 }
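
/*
 * A minimal sketch (not from the original source) of the read ordering the
 * kerneldoc above demands from lockless callers of __wait_seqno(): the
 * reset counter must be sampled before the seqno, with a read barrier in
 * between, so a GPU reset cannot slip in unnoticed between the two reads.
 *
 *	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	smp_rmb();
 *	seqno = obj->last_read_seqno;
 *	...
 *	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
 *
 * Callers holding struct_mutex (e.g. the nonblocking wait further below)
 * get the same guarantee by reading reset_counter before dropping the lock.
 */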
1255
1256 /**
1257  * Waits for a sequence number to be signaled, and cleans up the
1258  * request and object lists appropriately for that event.
1259  */
1260 int
1261 i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
1262 {
1263         struct drm_device *dev = ring->dev;
1264         struct drm_i915_private *dev_priv = dev->dev_private;
1265         bool interruptible = dev_priv->mm.interruptible;
1266         int ret;
1267
1268         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1269         BUG_ON(seqno == 0);
1270
1271         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1272         if (ret)
1273                 return ret;
1274
1275         ret = i915_gem_check_olr(ring, seqno);
1276         if (ret)
1277                 return ret;
1278
1279         return __wait_seqno(ring, seqno,
1280                             atomic_read(&dev_priv->gpu_error.reset_counter),
1281                             interruptible, NULL, NULL);
1282 }
1283
1284 static int
1285 i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1286                                      struct intel_engine_cs *ring)
1287 {
1288         if (!obj->active)
1289                 return 0;
1290
1291         /* Manually manage the write flush as we may have not yet
1292          * retired the buffer.
1293          *
1294          * Note that the last_write_seqno is always the earlier of
1295          * the two (read/write) seqnos, so if we have successfully waited,
1296          * we know we have passed the last write.
1297          */
1298         obj->last_write_seqno = 0;
1299
1300         return 0;
1301 }
1302
1303 /**
1304  * Ensures that all rendering to the object has completed and the object is
1305  * safe to unbind from the GTT or access from the CPU.
1306  */
1307 static __must_check int
1308 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1309                                bool readonly)
1310 {
1311         struct intel_engine_cs *ring = obj->ring;
1312         u32 seqno;
1313         int ret;
1314
1315         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1316         if (seqno == 0)
1317                 return 0;
1318
1319         ret = i915_wait_seqno(ring, seqno);
1320         if (ret)
1321                 return ret;
1322
1323         return i915_gem_object_wait_rendering__tail(obj, ring);
1324 }
1325
1326 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1327  * as the object state may change during this call.
1328  */
1329 static __must_check int
1330 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1331                                             struct drm_i915_file_private *file_priv,
1332                                             bool readonly)
1333 {
1334         struct drm_device *dev = obj->base.dev;
1335         struct drm_i915_private *dev_priv = dev->dev_private;
1336         struct intel_engine_cs *ring = obj->ring;
1337         unsigned reset_counter;
1338         u32 seqno;
1339         int ret;
1340
1341         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1342         BUG_ON(!dev_priv->mm.interruptible);
1343
1344         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1345         if (seqno == 0)
1346                 return 0;
1347
1348         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1349         if (ret)
1350                 return ret;
1351
1352         ret = i915_gem_check_olr(ring, seqno);
1353         if (ret)
1354                 return ret;
1355
1356         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1357         mutex_unlock(&dev->struct_mutex);
1358         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
1359         mutex_lock(&dev->struct_mutex);
1360         if (ret)
1361                 return ret;
1362
1363         return i915_gem_object_wait_rendering__tail(obj, ring);
1364 }
1365
1366 /**
1367  * Called when user space prepares to use an object with the CPU, either
1368  * through the mmap ioctl's mapping or a GTT mapping.
1369  */
1370 int
1371 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1372                           struct drm_file *file)
1373 {
1374         struct drm_i915_gem_set_domain *args = data;
1375         struct drm_i915_gem_object *obj;
1376         uint32_t read_domains = args->read_domains;
1377         uint32_t write_domain = args->write_domain;
1378         int ret;
1379
1380         /* Only handle setting domains to types used by the CPU. */
1381         if (write_domain & I915_GEM_GPU_DOMAINS)
1382                 return -EINVAL;
1383
1384         if (read_domains & I915_GEM_GPU_DOMAINS)
1385                 return -EINVAL;
1386
1387         /* Having something in the write domain implies it's in the read
1388          * domain, and only that read domain.  Enforce that in the request.
1389          */
1390         if (write_domain != 0 && read_domains != write_domain)
1391                 return -EINVAL;
1392
1393         ret = i915_mutex_lock_interruptible(dev);
1394         if (ret)
1395                 return ret;
1396
1397         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1398         if (&obj->base == NULL) {
1399                 ret = -ENOENT;
1400                 goto unlock;
1401         }
1402
1403         /* Try to flush the object off the GPU without holding the lock.
1404          * We will repeat the flush holding the lock in the normal manner
1405          * to catch cases where we are gazumped.
1406          */
1407         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1408                                                           file->driver_priv,
1409                                                           !write_domain);
1410         if (ret)
1411                 goto unref;
1412
1413         if (read_domains & I915_GEM_DOMAIN_GTT) {
1414                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1415
1416                 /* Silently promote "you're not bound, there was nothing to do"
1417                  * to success, since the client was just asking us to
1418                  * make sure everything was done.
1419                  */
1420                 if (ret == -EINVAL)
1421                         ret = 0;
1422         } else {
1423                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1424         }
1425
1426 unref:
1427         drm_gem_object_unreference(&obj->base);
1428 unlock:
1429         mutex_unlock(&dev->struct_mutex);
1430         return ret;
1431 }
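
/*
 * Illustrative userspace sketch (not part of this file): before writing
 * through a GTT mmap, userspace typically moves the object to the GTT
 * domain with this ioctl, using the uapi names from i915_drm.h:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle       = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
 *		return -errno;
 *
 * A non-zero write_domain must equal read_domains, and GPU domains are
 * rejected, exactly as checked at the top of the ioctl above.
 */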
1432
1433 /**
1434  * Called when user space has done writes to this buffer
1435  */
1436 int
1437 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1438                          struct drm_file *file)
1439 {
1440         struct drm_i915_gem_sw_finish *args = data;
1441         struct drm_i915_gem_object *obj;
1442         int ret = 0;
1443
1444         ret = i915_mutex_lock_interruptible(dev);
1445         if (ret)
1446                 return ret;
1447
1448         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1449         if (&obj->base == NULL) {
1450                 ret = -ENOENT;
1451                 goto unlock;
1452         }
1453
1454         /* Pinned buffers may be scanout, so flush the cache */
1455         if (obj->pin_display)
1456                 i915_gem_object_flush_cpu_write_domain(obj, true);
1457
1458         drm_gem_object_unreference(&obj->base);
1459 unlock:
1460         mutex_unlock(&dev->struct_mutex);
1461         return ret;
1462 }
1463
1464 /**
1465  * Maps the contents of an object, returning the address it is mapped
1466  * into.
1467  *
1468  * While the mapping holds a reference on the contents of the object, it doesn't
1469  * imply a ref on the object itself.
1470  */
1471 int
1472 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1473                     struct drm_file *file)
1474 {
1475         struct drm_i915_gem_mmap *args = data;
1476         struct drm_gem_object *obj;
1477         unsigned long addr;
1478
1479         obj = drm_gem_object_lookup(dev, file, args->handle);
1480         if (obj == NULL)
1481                 return -ENOENT;
1482
1483         /* prime objects have no backing filp to GEM mmap
1484          * pages from.
1485          */
1486         if (!obj->filp) {
1487                 drm_gem_object_unreference_unlocked(obj);
1488                 return -EINVAL;
1489         }
1490
1491         addr = vm_mmap(obj->filp, 0, args->size,
1492                        PROT_READ | PROT_WRITE, MAP_SHARED,
1493                        args->offset);
1494         drm_gem_object_unreference_unlocked(obj);
1495         if (IS_ERR((void *)addr))
1496                 return addr;
1497
1498         args->addr_ptr = (uint64_t) addr;
1499
1500         return 0;
1501 }
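
/*
 * Rough userspace usage sketch for the CPU mmap path above (assumed fd,
 * handle and obj_size names; error handling omitted):
 *
 *      struct drm_i915_gem_mmap arg = {
 *              .handle = handle,
 *              .offset = 0,
 *              .size   = obj_size,
 *      };
 *      if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0) {
 *              void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 *              ... ptr is a shmem-backed CPU mapping; coherency with the
 *                  GPU still has to be managed via set_domain/sw_finish ...
 *      }
 */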
1502
1503 /**
1504  * i915_gem_fault - fault a page into the GTT
1505  * @vma: VMA in question
1506  * @vmf: fault info
1507  *
1508  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1509  * from userspace.  The fault handler takes care of binding the object to
1510  * the GTT (if needed), allocating and programming a fence register (again,
1511  * only if needed based on whether the old reg is still valid or the object
1512  * is tiled) and inserting a new PTE into the faulting process.
1513  *
1514  * Note that the faulting process may involve evicting existing objects
1515  * from the GTT and/or fence registers to make room.  So performance may
1516  * suffer if the GTT working set is large or there are few fence registers
1517  * left.
1518  */
1519 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1520 {
1521         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1522         struct drm_device *dev = obj->base.dev;
1523         struct drm_i915_private *dev_priv = dev->dev_private;
1524         pgoff_t page_offset;
1525         unsigned long pfn;
1526         int ret = 0;
1527         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1528
1529         intel_runtime_pm_get(dev_priv);
1530
1531         /* We don't use vmf->pgoff since that has the fake offset */
1532         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1533                 PAGE_SHIFT;
1534
1535         ret = i915_mutex_lock_interruptible(dev);
1536         if (ret)
1537                 goto out;
1538
1539         trace_i915_gem_object_fault(obj, page_offset, true, write);
1540
1541         /* Try to flush the object off the GPU first without holding the lock.
1542          * Upon reacquiring the lock, we will perform our sanity checks and then
1543          * repeat the flush holding the lock in the normal manner to catch cases
1544          * where we are gazumped.
1545          */
1546         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1547         if (ret)
1548                 goto unlock;
1549
1550         /* Access to snoopable pages through the GTT is incoherent. */
1551         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1552                 ret = -EFAULT;
1553                 goto unlock;
1554         }
1555
1556         /* Now bind it into the GTT if needed */
1557         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1558         if (ret)
1559                 goto unlock;
1560
1561         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1562         if (ret)
1563                 goto unpin;
1564
1565         ret = i915_gem_object_get_fence(obj);
1566         if (ret)
1567                 goto unpin;
1568
1569         /* Finally, remap it using the new GTT offset */
1570         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1571         pfn >>= PAGE_SHIFT;
1572
1573         if (!obj->fault_mappable) {
1574                 unsigned long size = min_t(unsigned long,
1575                                            vma->vm_end - vma->vm_start,
1576                                            obj->base.size);
1577                 int i;
1578
1579                 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1580                         ret = vm_insert_pfn(vma,
1581                                             (unsigned long)vma->vm_start + i * PAGE_SIZE,
1582                                             pfn + i);
1583                         if (ret)
1584                                 break;
1585                 }
1586
1587                 obj->fault_mappable = true;
1588         } else
1589                 ret = vm_insert_pfn(vma,
1590                                     (unsigned long)vmf->virtual_address,
1591                                     pfn + page_offset);
1592 unpin:
1593         i915_gem_object_ggtt_unpin(obj);
1594 unlock:
1595         mutex_unlock(&dev->struct_mutex);
1596 out:
1597         switch (ret) {
1598         case -EIO:
1599                 /*
1600                  * We eat errors when the gpu is terminally wedged to avoid
1601                  * userspace unduly crashing (gl has no provisions for mmaps to
1602                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1603                  * and so needs to be reported.
1604                  */
1605                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1606                         ret = VM_FAULT_SIGBUS;
1607                         break;
1608                 }
1609         case -EAGAIN:
1610                 /*
1611                  * EAGAIN means the gpu is hung and we'll wait for the error
1612                  * handler to reset everything when re-faulting in
1613                  * i915_mutex_lock_interruptible.
1614                  */
1615         case 0:
1616         case -ERESTARTSYS:
1617         case -EINTR:
1618         case -EBUSY:
1619                 /*
1620                  * EBUSY is ok: this just means that another thread
1621                  * already did the job.
1622                  */
1623                 ret = VM_FAULT_NOPAGE;
1624                 break;
1625         case -ENOMEM:
1626                 ret = VM_FAULT_OOM;
1627                 break;
1628         case -ENOSPC:
1629         case -EFAULT:
1630                 ret = VM_FAULT_SIGBUS;
1631                 break;
1632         default:
1633                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1634                 ret = VM_FAULT_SIGBUS;
1635                 break;
1636         }
1637
1638         intel_runtime_pm_put(dev_priv);
1639         return ret;
1640 }
1641
1642 /**
1643  * i915_gem_release_mmap - remove physical page mappings
1644  * @obj: obj in question
1645  *
1646  * Preserve the reservation of the mmapping with the DRM core code, but
1647  * relinquish ownership of the pages back to the system.
1648  *
1649  * It is vital that we remove the page mapping if we have mapped a tiled
1650  * object through the GTT and then lose the fence register due to
1651  * resource pressure. Similarly if the object has been moved out of the
1652  * aperture, then pages mapped into userspace must be revoked. Removing the
1653  * mapping will then trigger a page fault on the next user access, allowing
1654  * fixup by i915_gem_fault().
1655  */
1656 void
1657 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1658 {
1659         if (!obj->fault_mappable)
1660                 return;
1661
1662         drm_vma_node_unmap(&obj->base.vma_node,
1663                            obj->base.dev->anon_inode->i_mapping);
1664         obj->fault_mappable = false;
1665 }
1666
1667 void
1668 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1669 {
1670         struct drm_i915_gem_object *obj;
1671
1672         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1673                 i915_gem_release_mmap(obj);
1674 }
1675
1676 uint32_t
1677 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1678 {
1679         uint32_t gtt_size;
1680
1681         if (INTEL_INFO(dev)->gen >= 4 ||
1682             tiling_mode == I915_TILING_NONE)
1683                 return size;
1684
1685         /* Previous chips need a power-of-two fence region when tiling */
1686         if (INTEL_INFO(dev)->gen == 3)
1687                 gtt_size = 1024*1024;
1688         else
1689                 gtt_size = 512*1024;
1690
1691         while (gtt_size < size)
1692                 gtt_size <<= 1;
1693
1694         return gtt_size;
1695 }
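
/*
 * Worked example for i915_gem_get_gtt_size() (illustrative numbers only):
 *
 *      gen3, tiled, size = 1.5MiB: start at 1MiB, double once -> 2MiB
 *      gen2, tiled, size = 700KiB: start at 512KiB, double once -> 1MiB
 *      gen4+ or I915_TILING_NONE:  returned unchanged (no power-of-two
 *                                  fence region required)
 */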
1696
1697 /**
1698  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1699  * @obj: object to check
1700  *
1701  * Return the required GTT alignment for an object, taking into account
1702  * potential fence register mapping.
1703  */
1704 uint32_t
1705 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1706                            int tiling_mode, bool fenced)
1707 {
1708         /*
1709          * Minimum alignment is 4k (GTT page size), but might be greater
1710          * if a fence register is needed for the object.
1711          */
1712         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1713             tiling_mode == I915_TILING_NONE)
1714                 return 4096;
1715
1716         /*
1717          * Previous chips need to be aligned to the size of the smallest
1718          * fence register that can contain the object.
1719          */
1720         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1721 }
1722
1723 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1724 {
1725         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1726         int ret;
1727
1728         if (drm_vma_node_has_offset(&obj->base.vma_node))
1729                 return 0;
1730
1731         dev_priv->mm.shrinker_no_lock_stealing = true;
1732
1733         ret = drm_gem_create_mmap_offset(&obj->base);
1734         if (ret != -ENOSPC)
1735                 goto out;
1736
1737         /* Badly fragmented mmap space? The only way we can recover
1738          * space is by destroying unwanted objects. We can't randomly release
1739          * mmap_offsets as userspace expects them to be persistent for the
1740          * lifetime of the objects. The closest we can do is to release the
1741          * offsets on purgeable objects by truncating them and marking them purged,
1742          * which prevents userspace from ever using that object again.
1743          */
1744         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1745         ret = drm_gem_create_mmap_offset(&obj->base);
1746         if (ret != -ENOSPC)
1747                 goto out;
1748
1749         i915_gem_shrink_all(dev_priv);
1750         ret = drm_gem_create_mmap_offset(&obj->base);
1751 out:
1752         dev_priv->mm.shrinker_no_lock_stealing = false;
1753
1754         return ret;
1755 }
1756
1757 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1758 {
1759         drm_gem_free_mmap_offset(&obj->base);
1760 }
1761
1762 int
1763 i915_gem_mmap_gtt(struct drm_file *file,
1764                   struct drm_device *dev,
1765                   uint32_t handle,
1766                   uint64_t *offset)
1767 {
1768         struct drm_i915_private *dev_priv = dev->dev_private;
1769         struct drm_i915_gem_object *obj;
1770         int ret;
1771
1772         ret = i915_mutex_lock_interruptible(dev);
1773         if (ret)
1774                 return ret;
1775
1776         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1777         if (&obj->base == NULL) {
1778                 ret = -ENOENT;
1779                 goto unlock;
1780         }
1781
1782         if (obj->base.size > dev_priv->gtt.mappable_end) {
1783                 ret = -E2BIG;
1784                 goto out;
1785         }
1786
1787         if (obj->madv != I915_MADV_WILLNEED) {
1788                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1789                 ret = -EFAULT;
1790                 goto out;
1791         }
1792
1793         ret = i915_gem_object_create_mmap_offset(obj);
1794         if (ret)
1795                 goto out;
1796
1797         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1798
1799 out:
1800         drm_gem_object_unreference(&obj->base);
1801 unlock:
1802         mutex_unlock(&dev->struct_mutex);
1803         return ret;
1804 }
1805
1806 /**
1807  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1808  * @dev: DRM device
1809  * @data: GTT mapping ioctl data
1810  * @file: GEM object info
1811  *
1812  * Simply returns the fake offset to userspace so it can mmap it.
1813  * The mmap call will end up in drm_gem_mmap(), which will set things
1814  * up so we can get faults in the handler above.
1815  *
1816  * The fault handler will take care of binding the object into the GTT
1817  * (since it may have been evicted to make room for something), allocating
1818  * a fence register, and mapping the appropriate aperture address into
1819  * userspace.
1820  */
1821 int
1822 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1823                         struct drm_file *file)
1824 {
1825         struct drm_i915_gem_mmap_gtt *args = data;
1826
1827         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1828 }
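
/*
 * Hedged userspace sketch of the GTT mmap flow that ends up in
 * i915_gem_fault(); fd/handle/obj_size are assumptions for illustration
 * and error handling is omitted:
 *
 *      struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *      ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *      uint32_t *map = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *                           MAP_SHARED, fd, arg.offset);
 *      map[0] = 0;     first touch faults into i915_gem_fault(), which
 *                      binds the object and inserts the GTT PTEs
 */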
1829
1830 static inline int
1831 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1832 {
1833         return obj->madv == I915_MADV_DONTNEED;
1834 }
1835
1836 /* Immediately discard the backing storage */
1837 static void
1838 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1839 {
1840         i915_gem_object_free_mmap_offset(obj);
1841
1842         if (obj->base.filp == NULL)
1843                 return;
1844
1845         /* Our goal here is to return as much of the memory as
1846          * possible back to the system as we are called from OOM.
1847          * To do this we must instruct the shmfs to drop all of its
1848          * backing pages, *now*.
1849          */
1850         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1851         obj->madv = __I915_MADV_PURGED;
1852 }
1853
1854 /* Try to discard unwanted pages */
1855 static void
1856 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1857 {
1858         struct address_space *mapping;
1859
1860         switch (obj->madv) {
1861         case I915_MADV_DONTNEED:
1862                 i915_gem_object_truncate(obj); /* fall through */
1863         case __I915_MADV_PURGED:
1864                 return;
1865         }
1866
1867         if (obj->base.filp == NULL)
1868                 return;
1869
1870         mapping = file_inode(obj->base.filp)->i_mapping;
1871         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1872 }
1873
1874 static void
1875 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1876 {
1877         struct sg_page_iter sg_iter;
1878         int ret;
1879
1880         BUG_ON(obj->madv == __I915_MADV_PURGED);
1881
1882         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1883         if (ret) {
1884                 /* In the event of a disaster, abandon all caches and
1885                  * hope for the best.
1886                  */
1887                 WARN_ON(ret != -EIO);
1888                 i915_gem_clflush_object(obj, true);
1889                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1890         }
1891
1892         if (i915_gem_object_needs_bit17_swizzle(obj))
1893                 i915_gem_object_save_bit_17_swizzle(obj);
1894
1895         if (obj->madv == I915_MADV_DONTNEED)
1896                 obj->dirty = 0;
1897
1898         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1899                 struct page *page = sg_page_iter_page(&sg_iter);
1900
1901                 if (obj->dirty)
1902                         set_page_dirty(page);
1903
1904                 if (obj->madv == I915_MADV_WILLNEED)
1905                         mark_page_accessed(page);
1906
1907                 page_cache_release(page);
1908         }
1909         obj->dirty = 0;
1910
1911         sg_free_table(obj->pages);
1912         kfree(obj->pages);
1913 }
1914
1915 int
1916 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1917 {
1918         const struct drm_i915_gem_object_ops *ops = obj->ops;
1919
1920         if (obj->pages == NULL)
1921                 return 0;
1922
1923         if (obj->pages_pin_count)
1924                 return -EBUSY;
1925
1926         BUG_ON(i915_gem_obj_bound_any(obj));
1927
1928         /* ->put_pages might need to allocate memory for the bit17 swizzle
1929          * array, hence protect them from being reaped by removing them from gtt
1930          * lists early. */
1931         list_del(&obj->global_list);
1932
1933         ops->put_pages(obj);
1934         obj->pages = NULL;
1935
1936         i915_gem_object_invalidate(obj);
1937
1938         return 0;
1939 }
1940
1941 static unsigned long
1942 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1943                   bool purgeable_only)
1944 {
1945         struct list_head still_in_list;
1946         struct drm_i915_gem_object *obj;
1947         unsigned long count = 0;
1948
1949         /*
1950          * As we may completely rewrite the (un)bound list whilst unbinding
1951          * (due to retiring requests) we have to strictly process only
1952          * one element of the list at a time, and recheck the list
1953          * on every iteration.
1954          *
1955          * In particular, we must hold a reference whilst removing the
1956          * object as we may end up waiting for and/or retiring the objects.
1957          * This might release the final reference (held by the active list)
1958          * and result in the object being freed from under us. This is
1959          * similar to the precautions the eviction code must take whilst
1960          * removing objects.
1961          *
1962          * Also note that although these lists do not hold a reference to
1963          * the object we can safely grab one here: The final object
1964          * unreferencing and the bound_list are both protected by the
1965          * dev->struct_mutex and so we won't ever be able to observe an
1966          * object on the bound_list with a reference count of 0.
1967          */
1968         INIT_LIST_HEAD(&still_in_list);
1969         while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
1970                 obj = list_first_entry(&dev_priv->mm.unbound_list,
1971                                        typeof(*obj), global_list);
1972                 list_move_tail(&obj->global_list, &still_in_list);
1973
1974                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1975                         continue;
1976
1977                 drm_gem_object_reference(&obj->base);
1978
1979                 if (i915_gem_object_put_pages(obj) == 0)
1980                         count += obj->base.size >> PAGE_SHIFT;
1981
1982                 drm_gem_object_unreference(&obj->base);
1983         }
1984         list_splice(&still_in_list, &dev_priv->mm.unbound_list);
1985
1986         INIT_LIST_HEAD(&still_in_list);
1987         while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
1988                 struct i915_vma *vma, *v;
1989
1990                 obj = list_first_entry(&dev_priv->mm.bound_list,
1991                                        typeof(*obj), global_list);
1992                 list_move_tail(&obj->global_list, &still_in_list);
1993
1994                 if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
1995                         continue;
1996
1997                 drm_gem_object_reference(&obj->base);
1998
1999                 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
2000                         if (i915_vma_unbind(vma))
2001                                 break;
2002
2003                 if (i915_gem_object_put_pages(obj) == 0)
2004                         count += obj->base.size >> PAGE_SHIFT;
2005
2006                 drm_gem_object_unreference(&obj->base);
2007         }
2008         list_splice(&still_in_list, &dev_priv->mm.bound_list);
2009
2010         return count;
2011 }
2012
2013 static unsigned long
2014 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
2015 {
2016         return __i915_gem_shrink(dev_priv, target, true);
2017 }
2018
2019 static unsigned long
2020 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2021 {
2022         i915_gem_evict_everything(dev_priv->dev);
2023         return __i915_gem_shrink(dev_priv, LONG_MAX, false);
2024 }
2025
2026 static int
2027 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2028 {
2029         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2030         int page_count, i;
2031         struct address_space *mapping;
2032         struct sg_table *st;
2033         struct scatterlist *sg;
2034         struct sg_page_iter sg_iter;
2035         struct page *page;
2036         unsigned long last_pfn = 0;     /* suppress gcc warning */
2037         gfp_t gfp;
2038
2039         /* Assert that the object is not currently in any GPU domain. As it
2040          * wasn't in the GTT, there shouldn't be any way it could have been in
2041          * a GPU cache
2042          */
2043         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2044         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2045
2046         st = kmalloc(sizeof(*st), GFP_KERNEL);
2047         if (st == NULL)
2048                 return -ENOMEM;
2049
2050         page_count = obj->base.size / PAGE_SIZE;
2051         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2052                 kfree(st);
2053                 return -ENOMEM;
2054         }
2055
2056         /* Get the list of pages out of our struct file.  They'll be pinned
2057          * at this point until we release them.
2058          *
2059          * Fail silently without starting the shrinker
2060          */
2061         mapping = file_inode(obj->base.filp)->i_mapping;
2062         gfp = mapping_gfp_mask(mapping);
2063         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2064         gfp &= ~(__GFP_IO | __GFP_WAIT);
2065         sg = st->sgl;
2066         st->nents = 0;
2067         for (i = 0; i < page_count; i++) {
2068                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2069                 if (IS_ERR(page)) {
2070                         i915_gem_purge(dev_priv, page_count);
2071                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2072                 }
2073                 if (IS_ERR(page)) {
2074                         /* We've tried hard to allocate the memory by reaping
2075                          * our own buffer, now let the real VM do its job and
2076                          * go down in flames if truly OOM.
2077                          */
2078                         i915_gem_shrink_all(dev_priv);
2079                         page = shmem_read_mapping_page(mapping, i);
2080                         if (IS_ERR(page))
2081                                 goto err_pages;
2082                 }
2083 #ifdef CONFIG_SWIOTLB
2084                 if (swiotlb_nr_tbl()) {
2085                         st->nents++;
2086                         sg_set_page(sg, page, PAGE_SIZE, 0);
2087                         sg = sg_next(sg);
2088                         continue;
2089                 }
2090 #endif
2091                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2092                         if (i)
2093                                 sg = sg_next(sg);
2094                         st->nents++;
2095                         sg_set_page(sg, page, PAGE_SIZE, 0);
2096                 } else {
2097                         sg->length += PAGE_SIZE;
2098                 }
2099                 last_pfn = page_to_pfn(page);
2100
2101                 /* Check that the i965g/gm workaround works. */
2102                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2103         }
2104 #ifdef CONFIG_SWIOTLB
2105         if (!swiotlb_nr_tbl())
2106 #endif
2107                 sg_mark_end(sg);
2108         obj->pages = st;
2109
2110         if (i915_gem_object_needs_bit17_swizzle(obj))
2111                 i915_gem_object_do_bit_17_swizzle(obj);
2112
2113         return 0;
2114
2115 err_pages:
2116         sg_mark_end(sg);
2117         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2118                 page_cache_release(sg_page_iter_page(&sg_iter));
2119         sg_free_table(st);
2120         kfree(st);
2121
2122         /* shmemfs first checks if there is enough memory to allocate the page
2123          * and reports ENOSPC should there be insufficient, along with the usual
2124          * ENOMEM for a genuine allocation failure.
2125          *
2126          * We use ENOSPC in our driver to mean that we have run out of aperture
2127          * space and so want to translate the error from shmemfs back to our
2128          * usual understanding of ENOMEM.
2129          */
2130         if (PTR_ERR(page) == -ENOSPC)
2131                 return -ENOMEM;
2132         else
2133                 return PTR_ERR(page);
2134 }
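
/*
 * Illustrative note on the scatterlist coalescing above (made-up pfns):
 * shmem pages at pfns 100, 101, 102 and 200 end up as two sg entries,
 * {pfn 100, 3 * PAGE_SIZE} and {pfn 200, PAGE_SIZE}, so st->nents is 2
 * rather than 4. With SWIOTLB active every page keeps its own entry so
 * that bounce buffering stays per-page.
 */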
2135
2136 /* Ensure that the associated pages are gathered from the backing storage
2137  * and pinned into our object. i915_gem_object_get_pages() may be called
2138  * multiple times before they are released by a single call to
2139  * i915_gem_object_put_pages() - once the pages are no longer referenced
2140  * either as a result of memory pressure (reaping pages under the shrinker)
2141  * or as the object is itself released.
2142  */
2143 int
2144 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2145 {
2146         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2147         const struct drm_i915_gem_object_ops *ops = obj->ops;
2148         int ret;
2149
2150         if (obj->pages)
2151                 return 0;
2152
2153         if (obj->madv != I915_MADV_WILLNEED) {
2154                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2155                 return -EFAULT;
2156         }
2157
2158         BUG_ON(obj->pages_pin_count);
2159
2160         ret = ops->get_pages(obj);
2161         if (ret)
2162                 return ret;
2163
2164         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2165         return 0;
2166 }
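
/*
 * Sketch of the expected caller pattern (assuming the pin_pages/unpin_pages
 * helpers from i915_drv.h; simplified, error handling trimmed):
 *
 *      ret = i915_gem_object_get_pages(obj);
 *      if (ret == 0) {
 *              i915_gem_object_pin_pages(obj);
 *              ... use the obj->pages sg_table ...
 *              i915_gem_object_unpin_pages(obj);
 *      }
 *
 * i915_gem_object_put_pages() refuses to drop the backing pages while
 * pages_pin_count is non-zero.
 */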
2167
2168 static void
2169 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2170                                struct intel_engine_cs *ring)
2171 {
2172         u32 seqno = intel_ring_get_seqno(ring);
2173
2174         BUG_ON(ring == NULL);
2175         if (obj->ring != ring && obj->last_write_seqno) {
2176                 /* Keep the seqno relative to the current ring */
2177                 obj->last_write_seqno = seqno;
2178         }
2179         obj->ring = ring;
2180
2181         /* Add a reference if we're newly entering the active list. */
2182         if (!obj->active) {
2183                 drm_gem_object_reference(&obj->base);
2184                 obj->active = 1;
2185         }
2186
2187         list_move_tail(&obj->ring_list, &ring->active_list);
2188
2189         obj->last_read_seqno = seqno;
2190 }
2191
2192 void i915_vma_move_to_active(struct i915_vma *vma,
2193                              struct intel_engine_cs *ring)
2194 {
2195         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2196         return i915_gem_object_move_to_active(vma->obj, ring);
2197 }
2198
2199 static void
2200 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2201 {
2202         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2203         struct i915_address_space *vm;
2204         struct i915_vma *vma;
2205
2206         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2207         BUG_ON(!obj->active);
2208
2209         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2210                 vma = i915_gem_obj_to_vma(obj, vm);
2211                 if (vma && !list_empty(&vma->mm_list))
2212                         list_move_tail(&vma->mm_list, &vm->inactive_list);
2213         }
2214
2215         intel_fb_obj_flush(obj, true);
2216
2217         list_del_init(&obj->ring_list);
2218         obj->ring = NULL;
2219
2220         obj->last_read_seqno = 0;
2221         obj->last_write_seqno = 0;
2222         obj->base.write_domain = 0;
2223
2224         obj->last_fenced_seqno = 0;
2225
2226         obj->active = 0;
2227         drm_gem_object_unreference(&obj->base);
2228
2229         WARN_ON(i915_verify_lists(dev));
2230 }
2231
2232 static void
2233 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2234 {
2235         struct intel_engine_cs *ring = obj->ring;
2236
2237         if (ring == NULL)
2238                 return;
2239
2240         if (i915_seqno_passed(ring->get_seqno(ring, true),
2241                               obj->last_read_seqno))
2242                 i915_gem_object_move_to_inactive(obj);
2243 }
2244
2245 static int
2246 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2247 {
2248         struct drm_i915_private *dev_priv = dev->dev_private;
2249         struct intel_engine_cs *ring;
2250         int ret, i, j;
2251
2252         /* Carefully retire all requests without writing to the rings */
2253         for_each_ring(ring, dev_priv, i) {
2254                 ret = intel_ring_idle(ring);
2255                 if (ret)
2256                         return ret;
2257         }
2258         i915_gem_retire_requests(dev);
2259
2260         /* Finally reset hw state */
2261         for_each_ring(ring, dev_priv, i) {
2262                 intel_ring_init_seqno(ring, seqno);
2263
2264                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2265                         ring->semaphore.sync_seqno[j] = 0;
2266         }
2267
2268         return 0;
2269 }
2270
2271 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2272 {
2273         struct drm_i915_private *dev_priv = dev->dev_private;
2274         int ret;
2275
2276         if (seqno == 0)
2277                 return -EINVAL;
2278
2279         /* The seqno in the HWS page needs to be set to one less than the
2280          * value we will inject into the ring
2281          */
2282         ret = i915_gem_init_seqno(dev, seqno - 1);
2283         if (ret)
2284                 return ret;
2285
2286         /* Carefully set the last_seqno value so that wrap
2287          * detection still works
2288          */
2289         dev_priv->next_seqno = seqno;
2290         dev_priv->last_seqno = seqno - 1;
2291         if (dev_priv->last_seqno == 0)
2292                 dev_priv->last_seqno--;
2293
2294         return 0;
2295 }
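
/*
 * Worked example (illustrative): i915_gem_set_seqno(dev, 1) first idles the
 * rings and programs the hardware seqno to 0 via i915_gem_init_seqno(), then
 * sets next_seqno = 1 and last_seqno = 0; because a last_seqno of 0 would
 * confuse wrap detection, it is decremented to 0xffffffff.
 */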
2296
2297 int
2298 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2299 {
2300         struct drm_i915_private *dev_priv = dev->dev_private;
2301
2302         /* reserve 0 for non-seqno */
2303         if (dev_priv->next_seqno == 0) {
2304                 int ret = i915_gem_init_seqno(dev, 0);
2305                 if (ret)
2306                         return ret;
2307
2308                 dev_priv->next_seqno = 1;
2309         }
2310
2311         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2312         return 0;
2313 }
2314
2315 int __i915_add_request(struct intel_engine_cs *ring,
2316                        struct drm_file *file,
2317                        struct drm_i915_gem_object *obj,
2318                        u32 *out_seqno)
2319 {
2320         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2321         struct drm_i915_gem_request *request;
2322         struct intel_ringbuffer *ringbuf;
2323         u32 request_ring_position, request_start;
2324         int ret;
2325
2326         request = ring->preallocated_lazy_request;
2327         if (WARN_ON(request == NULL))
2328                 return -ENOMEM;
2329
2330         if (i915.enable_execlists) {
2331                 struct intel_context *ctx = request->ctx;
2332                 ringbuf = ctx->engine[ring->id].ringbuf;
2333         } else
2334                 ringbuf = ring->buffer;
2335
2336         request_start = intel_ring_get_tail(ringbuf);
2337         /*
2338          * Emit any outstanding flushes - execbuf can fail to emit the flush
2339          * after having emitted the batchbuffer command. Hence we need to fix
2340          * things up similar to emitting the lazy request. The difference here
2341          * is that the flush _must_ happen before the next request, no matter
2342          * what.
2343          */
2344         if (i915.enable_execlists) {
2345                 ret = logical_ring_flush_all_caches(ringbuf);
2346                 if (ret)
2347                         return ret;
2348         } else {
2349                 ret = intel_ring_flush_all_caches(ring);
2350                 if (ret)
2351                         return ret;
2352         }
2353
2354         /* Record the position of the start of the request so that
2355          * should we detect the updated seqno part-way through the
2356          * GPU processing the request, we never over-estimate the
2357          * position of the head.
2358          */
2359         request_ring_position = intel_ring_get_tail(ringbuf);
2360
2361         if (i915.enable_execlists) {
2362                 ret = ring->emit_request(ringbuf);
2363                 if (ret)
2364                         return ret;
2365         } else {
2366                 ret = ring->add_request(ring);
2367                 if (ret)
2368                         return ret;
2369         }
2370
2371         request->seqno = intel_ring_get_seqno(ring);
2372         request->ring = ring;
2373         request->head = request_start;
2374         request->tail = request_ring_position;
2375
2376         /* Whilst this request exists, batch_obj will be on the
2377          * active_list, and so will hold the active reference. Only when this
2378          * request is retired will the batch_obj be moved onto the
2379          * inactive_list and lose its active reference. Hence we do not need
2380          * to explicitly hold another reference here.
2381          */
2382         request->batch_obj = obj;
2383
2384         if (!i915.enable_execlists) {
2385                 /* Hold a reference to the current context so that we can inspect
2386                  * it later in case a hangcheck error event fires.
2387                  */
2388                 request->ctx = ring->last_context;
2389                 if (request->ctx)
2390                         i915_gem_context_reference(request->ctx);
2391         }
2392
2393         request->emitted_jiffies = jiffies;
2394         list_add_tail(&request->list, &ring->request_list);
2395         request->file_priv = NULL;
2396
2397         if (file) {
2398                 struct drm_i915_file_private *file_priv = file->driver_priv;
2399
2400                 spin_lock(&file_priv->mm.lock);
2401                 request->file_priv = file_priv;
2402                 list_add_tail(&request->client_list,
2403                               &file_priv->mm.request_list);
2404                 spin_unlock(&file_priv->mm.lock);
2405         }
2406
2407         trace_i915_gem_request_add(ring, request->seqno);
2408         ring->outstanding_lazy_seqno = 0;
2409         ring->preallocated_lazy_request = NULL;
2410
2411         if (!dev_priv->ums.mm_suspended) {
2412                 i915_queue_hangcheck(ring->dev);
2413
2414                 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2415                 queue_delayed_work(dev_priv->wq,
2416                                    &dev_priv->mm.retire_work,
2417                                    round_jiffies_up_relative(HZ));
2418                 intel_mark_busy(dev_priv->dev);
2419         }
2420
2421         if (out_seqno)
2422                 *out_seqno = request->seqno;
2423         return 0;
2424 }
2425
2426 static inline void
2427 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2428 {
2429         struct drm_i915_file_private *file_priv = request->file_priv;
2430
2431         if (!file_priv)
2432                 return;
2433
2434         spin_lock(&file_priv->mm.lock);
2435         list_del(&request->client_list);
2436         request->file_priv = NULL;
2437         spin_unlock(&file_priv->mm.lock);
2438 }
2439
2440 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2441                                    const struct intel_context *ctx)
2442 {
2443         unsigned long elapsed;
2444
2445         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2446
2447         if (ctx->hang_stats.banned)
2448                 return true;
2449
2450         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2451                 if (!i915_gem_context_is_default(ctx)) {
2452                         DRM_DEBUG("context hanging too fast, banning!\n");
2453                         return true;
2454                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2455                         if (i915_stop_ring_allow_warn(dev_priv))
2456                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2457                         return true;
2458                 }
2459         }
2460
2461         return false;
2462 }
2463
2464 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2465                                   struct intel_context *ctx,
2466                                   const bool guilty)
2467 {
2468         struct i915_ctx_hang_stats *hs;
2469
2470         if (WARN_ON(!ctx))
2471                 return;
2472
2473         hs = &ctx->hang_stats;
2474
2475         if (guilty) {
2476                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2477                 hs->batch_active++;
2478                 hs->guilty_ts = get_seconds();
2479         } else {
2480                 hs->batch_pending++;
2481         }
2482 }
2483
2484 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2485 {
2486         list_del(&request->list);
2487         i915_gem_request_remove_from_client(request);
2488
2489         if (request->ctx)
2490                 i915_gem_context_unreference(request->ctx);
2491
2492         kfree(request);
2493 }
2494
2495 struct drm_i915_gem_request *
2496 i915_gem_find_active_request(struct intel_engine_cs *ring)
2497 {
2498         struct drm_i915_gem_request *request;
2499         u32 completed_seqno;
2500
2501         completed_seqno = ring->get_seqno(ring, false);
2502
2503         list_for_each_entry(request, &ring->request_list, list) {
2504                 if (i915_seqno_passed(completed_seqno, request->seqno))
2505                         continue;
2506
2507                 return request;
2508         }
2509
2510         return NULL;
2511 }
2512
2513 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2514                                        struct intel_engine_cs *ring)
2515 {
2516         struct drm_i915_gem_request *request;
2517         bool ring_hung;
2518
2519         request = i915_gem_find_active_request(ring);
2520
2521         if (request == NULL)
2522                 return;
2523
2524         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2525
2526         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2527
2528         list_for_each_entry_continue(request, &ring->request_list, list)
2529                 i915_set_reset_status(dev_priv, request->ctx, false);
2530 }
2531
2532 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2533                                         struct intel_engine_cs *ring)
2534 {
2535         while (!list_empty(&ring->active_list)) {
2536                 struct drm_i915_gem_object *obj;
2537
2538                 obj = list_first_entry(&ring->active_list,
2539                                        struct drm_i915_gem_object,
2540                                        ring_list);
2541
2542                 i915_gem_object_move_to_inactive(obj);
2543         }
2544
2545         /*
2546          * We must free the requests after all the corresponding objects have
2547          * been moved off active lists. Which is the same order as the normal
2548          * retire_requests function does. This is important if objects hold
2549          * implicit references on things such as ppgtt address spaces through
2550          * the request.
2551          */
2552         while (!list_empty(&ring->request_list)) {
2553                 struct drm_i915_gem_request *request;
2554
2555                 request = list_first_entry(&ring->request_list,
2556                                            struct drm_i915_gem_request,
2557                                            list);
2558
2559                 i915_gem_free_request(request);
2560         }
2561
2562         while (!list_empty(&ring->execlist_queue)) {
2563                 struct intel_ctx_submit_request *submit_req;
2564
2565                 submit_req = list_first_entry(&ring->execlist_queue,
2566                                 struct intel_ctx_submit_request,
2567                                 execlist_link);
2568                 list_del(&submit_req->execlist_link);
2569                 intel_runtime_pm_put(dev_priv);
2570                 i915_gem_context_unreference(submit_req->ctx);
2571                 kfree(submit_req);
2572         }
2573
2574         /* These may not have been flushed before the reset, so do so now */
2575         kfree(ring->preallocated_lazy_request);
2576         ring->preallocated_lazy_request = NULL;
2577         ring->outstanding_lazy_seqno = 0;
2578 }
2579
2580 void i915_gem_restore_fences(struct drm_device *dev)
2581 {
2582         struct drm_i915_private *dev_priv = dev->dev_private;
2583         int i;
2584
2585         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2586                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2587
2588                 /*
2589                  * Commit delayed tiling changes if we have an object still
2590                  * attached to the fence, otherwise just clear the fence.
2591                  */
2592                 if (reg->obj) {
2593                         i915_gem_object_update_fence(reg->obj, reg,
2594                                                      reg->obj->tiling_mode);
2595                 } else {
2596                         i915_gem_write_fence(dev, i, NULL);
2597                 }
2598         }
2599 }
2600
2601 void i915_gem_reset(struct drm_device *dev)
2602 {
2603         struct drm_i915_private *dev_priv = dev->dev_private;
2604         struct intel_engine_cs *ring;
2605         int i;
2606
2607         /*
2608          * Before we free the objects from the requests, we need to inspect
2609          * them for finding the guilty party. As the requests only borrow
2610          * their reference to the objects, the inspection must be done first.
2611          */
2612         for_each_ring(ring, dev_priv, i)
2613                 i915_gem_reset_ring_status(dev_priv, ring);
2614
2615         for_each_ring(ring, dev_priv, i)
2616                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2617
2618         i915_gem_context_reset(dev);
2619
2620         i915_gem_restore_fences(dev);
2621 }
2622
2623 /**
2624  * This function clears the request list as sequence numbers are passed.
2625  */
2626 void
2627 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2628 {
2629         uint32_t seqno;
2630
2631         if (list_empty(&ring->request_list))
2632                 return;
2633
2634         WARN_ON(i915_verify_lists(ring->dev));
2635
2636         seqno = ring->get_seqno(ring, true);
2637
2638         /* Move any buffers on the active list that are no longer referenced
2639          * by the ringbuffer to the flushing/inactive lists as appropriate,
2640          * before we free the context associated with the requests.
2641          */
2642         while (!list_empty(&ring->active_list)) {
2643                 struct drm_i915_gem_object *obj;
2644
2645                 obj = list_first_entry(&ring->active_list,
2646                                       struct drm_i915_gem_object,
2647                                       ring_list);
2648
2649                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2650                         break;
2651
2652                 i915_gem_object_move_to_inactive(obj);
2653         }
2654
2655
2656         while (!list_empty(&ring->request_list)) {
2657                 struct drm_i915_gem_request *request;
2658                 struct intel_ringbuffer *ringbuf;
2659
2660                 request = list_first_entry(&ring->request_list,
2661                                            struct drm_i915_gem_request,
2662                                            list);
2663
2664                 if (!i915_seqno_passed(seqno, request->seqno))
2665                         break;
2666
2667                 trace_i915_gem_request_retire(ring, request->seqno);
2668
2669                 /* This is one of the few common intersection points
2670                  * between legacy ringbuffer submission and execlists:
2671                  * we need to tell them apart in order to find the correct
2672          * ringbuffer to which the request belongs.
2673                  */
2674                 if (i915.enable_execlists) {
2675                         struct intel_context *ctx = request->ctx;
2676                         ringbuf = ctx->engine[ring->id].ringbuf;
2677                 } else
2678                         ringbuf = ring->buffer;
2679
2680                 /* We know the GPU must have read the request to have
2681                  * sent us the seqno + interrupt, so use the position
2682                  * of the tail of the request to update the last known position
2683                  * of the GPU head.
2684                  */
2685                 ringbuf->last_retired_head = request->tail;
2686
2687                 i915_gem_free_request(request);
2688         }
2689
2690         if (unlikely(ring->trace_irq_seqno &&
2691                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2692                 ring->irq_put(ring);
2693                 ring->trace_irq_seqno = 0;
2694         }
2695
2696         WARN_ON(i915_verify_lists(ring->dev));
2697 }
2698
2699 bool
2700 i915_gem_retire_requests(struct drm_device *dev)
2701 {
2702         struct drm_i915_private *dev_priv = dev->dev_private;
2703         struct intel_engine_cs *ring;
2704         bool idle = true;
2705         int i;
2706
2707         for_each_ring(ring, dev_priv, i) {
2708                 i915_gem_retire_requests_ring(ring);
2709                 idle &= list_empty(&ring->request_list);
2710         }
2711
2712         if (idle)
2713                 mod_delayed_work(dev_priv->wq,
2714                                    &dev_priv->mm.idle_work,
2715                                    msecs_to_jiffies(100));
2716
2717         return idle;
2718 }
2719
2720 static void
2721 i915_gem_retire_work_handler(struct work_struct *work)
2722 {
2723         struct drm_i915_private *dev_priv =
2724                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2725         struct drm_device *dev = dev_priv->dev;
2726         bool idle;
2727
2728         /* Come back later if the device is busy... */
2729         idle = false;
2730         if (mutex_trylock(&dev->struct_mutex)) {
2731                 idle = i915_gem_retire_requests(dev);
2732                 mutex_unlock(&dev->struct_mutex);
2733         }
2734         if (!idle)
2735                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2736                                    round_jiffies_up_relative(HZ));
2737 }
2738
2739 static void
2740 i915_gem_idle_work_handler(struct work_struct *work)
2741 {
2742         struct drm_i915_private *dev_priv =
2743                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2744
2745         intel_mark_idle(dev_priv->dev);
2746 }
2747
2748 /**
2749  * Ensures that an object will eventually get non-busy by flushing any required
2750  * write domains, emitting any outstanding lazy request and retiring any
2751  * completed requests.
2752  */
2753 static int
2754 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2755 {
2756         int ret;
2757
2758         if (obj->active) {
2759                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2760                 if (ret)
2761                         return ret;
2762
2763                 i915_gem_retire_requests_ring(obj->ring);
2764         }
2765
2766         return 0;
2767 }
2768
2769 /**
2770  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2771  * @DRM_IOCTL_ARGS: standard ioctl arguments
2772  *
2773  * Returns 0 if successful, else an error is returned with the remaining time in
2774  * the timeout parameter.
2775  *  -ETIME: object is still busy after timeout
2776  *  -ERESTARTSYS: signal interrupted the wait
2777  *  -ENOENT: object doesn't exist
2778  * Also possible, but rare:
2779  *  -EAGAIN: GPU wedged
2780  *  -ENOMEM: damn
2781  *  -ENODEV: Internal IRQ fail
2782  *  -E?: The add request failed
2783  *
2784  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2785  * non-zero timeout parameter the wait ioctl will wait for the given number of
2786  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2787  * without holding struct_mutex the object may become re-busied before this
2788  * function completes. A similar but shorter race condition exists in the busy
2789  * ioctl.
2790  */
2791 int
2792 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2793 {
2794         struct drm_i915_private *dev_priv = dev->dev_private;
2795         struct drm_i915_gem_wait *args = data;
2796         struct drm_i915_gem_object *obj;
2797         struct intel_engine_cs *ring = NULL;
2798         unsigned reset_counter;
2799         u32 seqno = 0;
2800         int ret = 0;
2801
2802         ret = i915_mutex_lock_interruptible(dev);
2803         if (ret)
2804                 return ret;
2805
2806         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2807         if (&obj->base == NULL) {
2808                 mutex_unlock(&dev->struct_mutex);
2809                 return -ENOENT;
2810         }
2811
2812         /* Need to make sure the object gets inactive eventually. */
2813         ret = i915_gem_object_flush_active(obj);
2814         if (ret)
2815                 goto out;
2816
2817         if (obj->active) {
2818                 seqno = obj->last_read_seqno;
2819                 ring = obj->ring;
2820         }
2821
2822         if (seqno == 0)
2823                  goto out;
2824
2825         /* Do this after OLR check to make sure we make forward progress polling
2826          * on this IOCTL with a timeout <=0 (like busy ioctl)
2827          */
2828         if (args->timeout_ns <= 0) {
2829                 ret = -ETIME;
2830                 goto out;
2831         }
2832
2833         drm_gem_object_unreference(&obj->base);
2834         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2835         mutex_unlock(&dev->struct_mutex);
2836
2837         return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
2838                             file->driver_priv);
2839
2840 out:
2841         drm_gem_object_unreference(&obj->base);
2842         mutex_unlock(&dev->struct_mutex);
2843         return ret;
2844 }
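
/*
 * Rough userspace sketch of the wait ioctl above (assumed fd/handle names,
 * error handling omitted). A timeout of 0 behaves like the busy ioctl; a
 * positive timeout blocks without holding struct_mutex:
 *
 *      struct drm_i915_gem_wait w = {
 *              .bo_handle  = handle,
 *              .timeout_ns = 1000000000,
 *      };
 *      int ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &w);
 *      ... ret == 0: object idle; errno == ETIME: still busy, with the
 *          unused time left in w.timeout_ns ...
 */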
2845
2846 /**
2847  * i915_gem_object_sync - sync an object to a ring.
2848  *
2849  * @obj: object which may be in use on another ring.
2850  * @to: ring we wish to use the object on. May be NULL.
2851  *
2852  * This code is meant to abstract object synchronization with the GPU.
2853  * Calling with NULL implies synchronizing the object with the CPU
2854  * rather than a particular GPU ring.
2855  *
2856  * Returns 0 if successful, else propagates up the lower layer error.
2857  */
2858 int
2859 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2860                      struct intel_engine_cs *to)
2861 {
2862         struct intel_engine_cs *from = obj->ring;
2863         u32 seqno;
2864         int ret, idx;
2865
2866         if (from == NULL || to == from)
2867                 return 0;
2868
2869         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2870                 return i915_gem_object_wait_rendering(obj, false);
2871
2872         idx = intel_ring_sync_index(from, to);
2873
2874         seqno = obj->last_read_seqno;
2875         /* Optimization: Avoid semaphore sync when we are sure we already
2876          * waited for an object with a higher seqno */
2877         if (seqno <= from->semaphore.sync_seqno[idx])
2878                 return 0;
2879
2880         ret = i915_gem_check_olr(obj->ring, seqno);
2881         if (ret)
2882                 return ret;
2883
2884         trace_i915_gem_ring_sync_to(from, to, seqno);
2885         ret = to->semaphore.sync_to(to, from, seqno);
2886         if (!ret)
2887                 /* We use last_read_seqno because sync_to()
2888                  * might have just caused seqno wrap under
2889                  * the radar.
2890                  */
2891                 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
2892
2893         return ret;
2894 }
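
/*
 * Minimal sketch of how a caller (e.g. execbuffer) is expected to use the
 * helper above; obj/ring are placeholders:
 *
 *      ret = i915_gem_object_sync(obj, ring);
 *
 *      obj->ring == NULL or obj->ring == ring: nothing to do
 *      ring == NULL or semaphores disabled:    CPU-side wait for rendering
 *      otherwise:                              emit a semaphore so 'ring'
 *                                              waits on obj->last_read_seqno
 */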
2895
2896 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2897 {
2898         u32 old_write_domain, old_read_domains;
2899
2900         /* Force a pagefault for domain tracking on next user access */
2901         i915_gem_release_mmap(obj);
2902
2903         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2904                 return;
2905
2906         /* Wait for any direct GTT access to complete */
2907         mb();
2908
2909         old_read_domains = obj->base.read_domains;
2910         old_write_domain = obj->base.write_domain;
2911
2912         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2913         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2914
2915         trace_i915_gem_object_change_domain(obj,
2916                                             old_read_domains,
2917                                             old_write_domain);
2918 }
2919
2920 int i915_vma_unbind(struct i915_vma *vma)
2921 {
2922         struct drm_i915_gem_object *obj = vma->obj;
2923         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2924         int ret;
2925
2926         if (list_empty(&vma->vma_link))
2927                 return 0;
2928
2929         if (!drm_mm_node_allocated(&vma->node)) {
2930                 i915_gem_vma_destroy(vma);
2931                 return 0;
2932         }
2933
2934         if (vma->pin_count)
2935                 return -EBUSY;
2936
2937         BUG_ON(obj->pages == NULL);
2938
2939         ret = i915_gem_object_finish_gpu(obj);
2940         if (ret)
2941                 return ret;
2942         /* Continue on if we fail due to EIO, the GPU is hung so we
2943          * should be safe and we need to cleanup or else we might
2944          * cause memory corruption through use-after-free.
2945          */
2946
2947         if (i915_is_ggtt(vma->vm)) {
2948                 i915_gem_object_finish_gtt(obj);
2949
2950                 /* release the fence reg _after_ flushing */
2951                 ret = i915_gem_object_put_fence(obj);
2952                 if (ret)
2953                         return ret;
2954         }
2955
2956         trace_i915_vma_unbind(vma);
2957
2958         vma->unbind_vma(vma);
2959
2960         list_del_init(&vma->mm_list);
2961         if (i915_is_ggtt(vma->vm))
2962                 obj->map_and_fenceable = false;
2963
2964         drm_mm_remove_node(&vma->node);
2965         i915_gem_vma_destroy(vma);
2966
2967         /* Since the unbound list is global, only move to that list if
2968          * no more VMAs exist. */
2969         if (list_empty(&obj->vma_list)) {
2970                 i915_gem_gtt_finish_object(obj);
2971                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2972         }
2973
2974         /* And finally now the object is completely decoupled from this vma,
2975          * we can drop its hold on the backing storage and allow it to be
2976          * reaped by the shrinker.
2977          */
2978         i915_gem_object_unpin_pages(obj);
2979
2980         return 0;
2981 }
2982
2983 int i915_gpu_idle(struct drm_device *dev)
2984 {
2985         struct drm_i915_private *dev_priv = dev->dev_private;
2986         struct intel_engine_cs *ring;
2987         int ret, i;
2988
2989         /* Flush everything onto the inactive list. */
2990         for_each_ring(ring, dev_priv, i) {
2991                 if (!i915.enable_execlists) {
2992                         ret = i915_switch_context(ring, ring->default_context);
2993                         if (ret)
2994                                 return ret;
2995                 }
2996
2997                 ret = intel_ring_idle(ring);
2998                 if (ret)
2999                         return ret;
3000         }
3001
3002         return 0;
3003 }
3004
3005 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3006                                  struct drm_i915_gem_object *obj)
3007 {
3008         struct drm_i915_private *dev_priv = dev->dev_private;
3009         int fence_reg;
3010         int fence_pitch_shift;
3011
3012         if (INTEL_INFO(dev)->gen >= 6) {
3013                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3014                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3015         } else {
3016                 fence_reg = FENCE_REG_965_0;
3017                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3018         }
3019
3020         fence_reg += reg * 8;
3021
3022         /* To work around incoherency with non-atomic 64-bit register
3023          * updates, we split the 64-bit update into two 32-bit writes. In
3024          * order for a partial fence not to be evaluated between the writes,
3025          * we precede the update with a write that turns off the fence
3026          * register, and only enable the fence as the last step.
3027          *
3028          * For extra levels of paranoia, we make sure each step lands
3029          * before applying the next step.
3030          */
3031         I915_WRITE(fence_reg, 0);
3032         POSTING_READ(fence_reg);
3033
3034         if (obj) {
3035                 u32 size = i915_gem_obj_ggtt_size(obj);
3036                 uint64_t val;
3037
3038                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3039                                  0xfffff000) << 32;
3040                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3041                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3042                 if (obj->tiling_mode == I915_TILING_Y)
3043                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3044                 val |= I965_FENCE_REG_VALID;
3045
3046                 I915_WRITE(fence_reg + 4, val >> 32);
3047                 POSTING_READ(fence_reg + 4);
3048
3049                 I915_WRITE(fence_reg + 0, val);
3050                 POSTING_READ(fence_reg);
3051         } else {
3052                 I915_WRITE(fence_reg + 4, 0);
3053                 POSTING_READ(fence_reg + 4);
3054         }
3055 }
3056
3057 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3058                                  struct drm_i915_gem_object *obj)
3059 {
3060         struct drm_i915_private *dev_priv = dev->dev_private;
3061         u32 val;
3062
3063         if (obj) {
3064                 u32 size = i915_gem_obj_ggtt_size(obj);
3065                 int pitch_val;
3066                 int tile_width;
3067
3068                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3069                      (size & -size) != size ||
3070                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3071                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3072                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3073
3074                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3075                         tile_width = 128;
3076                 else
3077                         tile_width = 512;
3078
3079                 /* Note: pitch better be a power of two tile widths */
3080                 pitch_val = obj->stride / tile_width;
3081                 pitch_val = ffs(pitch_val) - 1;
3082
3083                 val = i915_gem_obj_ggtt_offset(obj);
3084                 if (obj->tiling_mode == I915_TILING_Y)
3085                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3086                 val |= I915_FENCE_SIZE_BITS(size);
3087                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3088                 val |= I830_FENCE_REG_VALID;
3089         } else
3090                 val = 0;
3091
3092         if (reg < 8)
3093                 reg = FENCE_REG_830_0 + reg * 4;
3094         else
3095                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3096
3097         I915_WRITE(reg, val);
3098         POSTING_READ(reg);
3099 }
3100
3101 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3102                                 struct drm_i915_gem_object *obj)
3103 {
3104         struct drm_i915_private *dev_priv = dev->dev_private;
3105         uint32_t val;
3106
3107         if (obj) {
3108                 u32 size = i915_gem_obj_ggtt_size(obj);
3109                 uint32_t pitch_val;
3110
3111                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3112                      (size & -size) != size ||
3113                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3114                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3115                      i915_gem_obj_ggtt_offset(obj), size);
3116
3117                 pitch_val = obj->stride / 128;
3118                 pitch_val = ffs(pitch_val) - 1;
3119
3120                 val = i915_gem_obj_ggtt_offset(obj);
3121                 if (obj->tiling_mode == I915_TILING_Y)
3122                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3123                 val |= I830_FENCE_SIZE_BITS(size);
3124                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3125                 val |= I830_FENCE_REG_VALID;
3126         } else
3127                 val = 0;
3128
3129         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3130         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3131 }
3132
3133 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3134 {
3135         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3136 }
3137
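/*
 * Write a fence register, dispatching on GPU generation: gen4+ uses the
 * 64-bit i965 layout, gen3 the i915 layout and gen2 the i830 layout.  Memory
 * barriers before and after the update keep direct GTT access from being
 * reordered across the fence change.
 */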
3138 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3139                                  struct drm_i915_gem_object *obj)
3140 {
3141         struct drm_i915_private *dev_priv = dev->dev_private;
3142
3143         /* Ensure that all CPU reads are completed before installing a fence
3144          * and all writes before removing the fence.
3145          */
3146         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3147                 mb();
3148
3149         WARN(obj && (!obj->stride || !obj->tiling_mode),
3150              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3151              obj->stride, obj->tiling_mode);
3152
3153         switch (INTEL_INFO(dev)->gen) {
3154         case 8:
3155         case 7:
3156         case 6:
3157         case 5:
3158         case 4: i965_write_fence_reg(dev, reg, obj); break;
3159         case 3: i915_write_fence_reg(dev, reg, obj); break;
3160         case 2: i830_write_fence_reg(dev, reg, obj); break;
3161         default: BUG();
3162         }
3163
3164         /* And similarly be paranoid that no direct access to this region
3165          * is reordered to before the fence is installed.
3166          */
3167         if (i915_gem_object_needs_mb(obj))
3168                 mb();
3169 }
3170
3171 static inline int fence_number(struct drm_i915_private *dev_priv,
3172                                struct drm_i915_fence_reg *fence)
3173 {
3174         return fence - dev_priv->fence_regs;
3175 }
3176
3177 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3178                                          struct drm_i915_fence_reg *fence,
3179                                          bool enable)
3180 {
3181         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3182         int reg = fence_number(dev_priv, fence);
3183
3184         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3185
3186         if (enable) {
3187                 obj->fence_reg = reg;
3188                 fence->obj = obj;
3189                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3190         } else {
3191                 obj->fence_reg = I915_FENCE_REG_NONE;
3192                 fence->obj = NULL;
3193                 list_del_init(&fence->lru_list);
3194         }
3195         obj->fence_dirty = false;
3196 }
3197
3198 static int
3199 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3200 {
3201         if (obj->last_fenced_seqno) {
3202                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3203                 if (ret)
3204                         return ret;
3205
3206                 obj->last_fenced_seqno = 0;
3207         }
3208
3209         return 0;
3210 }
3211
3212 int
3213 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3214 {
3215         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3216         struct drm_i915_fence_reg *fence;
3217         int ret;
3218
3219         ret = i915_gem_object_wait_fence(obj);
3220         if (ret)
3221                 return ret;
3222
3223         if (obj->fence_reg == I915_FENCE_REG_NONE)
3224                 return 0;
3225
3226         fence = &dev_priv->fence_regs[obj->fence_reg];
3227
3228         if (WARN_ON(fence->pin_count))
3229                 return -EBUSY;
3230
3231         i915_gem_object_fence_lost(obj);
3232         i915_gem_object_update_fence(obj, fence, false);
3233
3234         return 0;
3235 }
3236
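/*
 * Pick a fence register for reuse: prefer a completely free register,
 * otherwise steal the least-recently-used register that is not pinned.
 * Returns -EAGAIN when pending pageflip unpins may free one up shortly,
 * or -EDEADLK when every register is pinned.
 */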
3237 static struct drm_i915_fence_reg *
3238 i915_find_fence_reg(struct drm_device *dev)
3239 {
3240         struct drm_i915_private *dev_priv = dev->dev_private;
3241         struct drm_i915_fence_reg *reg, *avail;
3242         int i;
3243
3244         /* First try to find a free reg */
3245         avail = NULL;
3246         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3247                 reg = &dev_priv->fence_regs[i];
3248                 if (!reg->obj)
3249                         return reg;
3250
3251                 if (!reg->pin_count)
3252                         avail = reg;
3253         }
3254
3255         if (avail == NULL)
3256                 goto deadlock;
3257
3258         /* None available, try to steal one or wait for a user to finish */
3259         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3260                 if (reg->pin_count)
3261                         continue;
3262
3263                 return reg;
3264         }
3265
3266 deadlock:
3267         /* Wait for completion of pending flips which consume fences */
3268         if (intel_has_pending_fb_unpin(dev))
3269                 return ERR_PTR(-EAGAIN);
3270
3271         return ERR_PTR(-EDEADLK);
3272 }
3273
3274 /**
3275  * i915_gem_object_get_fence - set up fencing for an object
3276  * @obj: object to map through a fence reg
3277  *
3278  * When mapping objects through the GTT, userspace wants to be able to write
3279  * to them without having to worry about swizzling if the object is tiled.
3280  * This function walks the fence regs looking for a free one for @obj,
3281  * stealing one if it can't find any.
3282  *
3283  * It then sets up the reg based on the object's properties: address, pitch
3284  * and tiling format.
3285  *
3286  * For an untiled surface, this removes any existing fence.
3287  */
3288 int
3289 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3290 {
3291         struct drm_device *dev = obj->base.dev;
3292         struct drm_i915_private *dev_priv = dev->dev_private;
3293         bool enable = obj->tiling_mode != I915_TILING_NONE;
3294         struct drm_i915_fence_reg *reg;
3295         int ret;
3296
3297         /* Have we updated the tiling parameters on the object, and so
3298          * will need to serialise the write to the associated fence register?
3299          */
3300         if (obj->fence_dirty) {
3301                 ret = i915_gem_object_wait_fence(obj);
3302                 if (ret)
3303                         return ret;
3304         }
3305
3306         /* Just update our place in the LRU if our fence is getting reused. */
3307         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3308                 reg = &dev_priv->fence_regs[obj->fence_reg];
3309                 if (!obj->fence_dirty) {
3310                         list_move_tail(&reg->lru_list,
3311                                        &dev_priv->mm.fence_list);
3312                         return 0;
3313                 }
3314         } else if (enable) {
3315                 if (WARN_ON(!obj->map_and_fenceable))
3316                         return -EINVAL;
3317
3318                 reg = i915_find_fence_reg(dev);
3319                 if (IS_ERR(reg))
3320                         return PTR_ERR(reg);
3321
3322                 if (reg->obj) {
3323                         struct drm_i915_gem_object *old = reg->obj;
3324
3325                         ret = i915_gem_object_wait_fence(old);
3326                         if (ret)
3327                                 return ret;
3328
3329                         i915_gem_object_fence_lost(old);
3330                 }
3331         } else
3332                 return 0;
3333
3334         i915_gem_object_update_fence(obj, reg, enable);
3335
3336         return 0;
3337 }
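
/*
 * Illustrative sketch (editor's addition, not driver code): a hypothetical
 * caller needing detiled access through the aperture would, with struct_mutex
 * held and the object pinned map_and_fenceable, typically do:
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... access the object through its fenced GTT mapping ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 *
 * i915_gem_object_pin_fence() returns false when no fence register is
 * assigned (untiled objects), in which case there is nothing to unpin.
 */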
3338
3339 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
3340                                      struct drm_mm_node *gtt_space,
3341                                      unsigned long cache_level)
3342 {
3343         struct drm_mm_node *other;
3344
3345         /* On non-LLC machines we have to be careful when putting differing
3346          * types of snoopable memory together to avoid the prefetcher
3347          * crossing memory domains and dying.
3348          */
3349         if (HAS_LLC(dev))
3350                 return true;
3351
3352         if (!drm_mm_node_allocated(gtt_space))
3353                 return true;
3354
3355         if (list_empty(&gtt_space->node_list))
3356                 return true;
3357
3358         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3359         if (other->allocated && !other->hole_follows && other->color != cache_level)
3360                 return false;
3361
3362         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3363         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3364                 return false;
3365
3366         return true;
3367 }
3368
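/*
 * Debug-only consistency check of every bound object's GTT node and cache
 * colouring; compiled out unless WATCH_GTT is set.
 */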
3369 static void i915_gem_verify_gtt(struct drm_device *dev)
3370 {
3371 #if WATCH_GTT
3372         struct drm_i915_private *dev_priv = dev->dev_private;
3373         struct drm_i915_gem_object *obj;
3374         int err = 0;
3375
3376         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3377                 if (obj->gtt_space == NULL) {
3378                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
3379                         err++;
3380                         continue;
3381                 }
3382
3383                 if (obj->cache_level != obj->gtt_space->color) {
3384                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
3385                                i915_gem_obj_ggtt_offset(obj),
3386                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3387                                obj->cache_level,
3388                                obj->gtt_space->color);
3389                         err++;
3390                         continue;
3391                 }
3392
3393                 if (!i915_gem_valid_gtt_space(dev,
3394                                               obj->gtt_space,
3395                                               obj->cache_level)) {
3396                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
3397                                i915_gem_obj_ggtt_offset(obj),
3398                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3399                                obj->cache_level);
3400                         err++;
3401                         continue;
3402                 }
3403         }
3404
3405         WARN_ON(err);
3406 #endif
3407 }
3408
3409 /**
3410  * Finds free space in the GTT aperture and binds the object there.
3411  */
3412 static struct i915_vma *
3413 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3414                            struct i915_address_space *vm,
3415                            unsigned alignment,
3416                            uint64_t flags)
3417 {
3418         struct drm_device *dev = obj->base.dev;
3419         struct drm_i915_private *dev_priv = dev->dev_private;
3420         u32 size, fence_size, fence_alignment, unfenced_alignment;
3421         unsigned long start =
3422                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3423         unsigned long end =
3424                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3425         struct i915_vma *vma;
3426         int ret;
3427
3428         fence_size = i915_gem_get_gtt_size(dev,
3429                                            obj->base.size,
3430                                            obj->tiling_mode);
3431         fence_alignment = i915_gem_get_gtt_alignment(dev,
3432                                                      obj->base.size,
3433                                                      obj->tiling_mode, true);
3434         unfenced_alignment =
3435                 i915_gem_get_gtt_alignment(dev,
3436                                            obj->base.size,
3437                                            obj->tiling_mode, false);
3438
3439         if (alignment == 0)
3440                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3441                                                 unfenced_alignment;
3442         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3443                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3444                 return ERR_PTR(-EINVAL);
3445         }
3446
3447         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3448
3449         /* If the object is bigger than the entire aperture, reject it early
3450          * before evicting everything in a vain attempt to find space.
3451          */
3452         if (obj->base.size > end) {
3453                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zu > %s aperture=%lu\n",
3454                           obj->base.size,
3455                           flags & PIN_MAPPABLE ? "mappable" : "total",
3456                           end);
3457                 return ERR_PTR(-E2BIG);
3458         }
3459
3460         ret = i915_gem_object_get_pages(obj);
3461         if (ret)
3462                 return ERR_PTR(ret);
3463
3464         i915_gem_object_pin_pages(obj);
3465
3466         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3467         if (IS_ERR(vma))
3468                 goto err_unpin;
3469
3470 search_free:
3471         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3472                                                   size, alignment,
3473                                                   obj->cache_level,
3474                                                   start, end,
3475                                                   DRM_MM_SEARCH_DEFAULT,
3476                                                   DRM_MM_CREATE_DEFAULT);
3477         if (ret) {
3478                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3479                                                obj->cache_level,
3480                                                start, end,
3481                                                flags);
3482                 if (ret == 0)
3483                         goto search_free;
3484
3485                 goto err_free_vma;
3486         }
3487         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3488                                               obj->cache_level))) {
3489                 ret = -EINVAL;
3490                 goto err_remove_node;
3491         }
3492
3493         ret = i915_gem_gtt_prepare_object(obj);
3494         if (ret)
3495                 goto err_remove_node;
3496
3497         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3498         list_add_tail(&vma->mm_list, &vm->inactive_list);
3499
3500         if (i915_is_ggtt(vm)) {
3501                 bool mappable, fenceable;
3502
3503                 fenceable = (vma->node.size == fence_size &&
3504                              (vma->node.start & (fence_alignment - 1)) == 0);
3505
3506                 mappable = (vma->node.start + obj->base.size <=
3507                             dev_priv->gtt.mappable_end);
3508
3509                 obj->map_and_fenceable = mappable && fenceable;
3510         }
3511
3512         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
3513
3514         trace_i915_vma_bind(vma, flags);
3515         vma->bind_vma(vma, obj->cache_level,
3516                       flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
3517
3518         i915_gem_verify_gtt(dev);
3519         return vma;
3520
3521 err_remove_node:
3522         drm_mm_remove_node(&vma->node);
3523 err_free_vma:
3524         i915_gem_vma_destroy(vma);
3525         vma = ERR_PTR(ret);
3526 err_unpin:
3527         i915_gem_object_unpin_pages(obj);
3528         return vma;
3529 }
3530
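/*
 * Flush the CPU cache lines backing @obj if that is actually required.
 * Returns true when lines were clflushed, so that callers know to follow up
 * with i915_gem_chipset_flush(); returns false when the object has no page
 * list, lives in stolen memory, or its cache level is already coherent
 * (unless @force).
 */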
3531 bool
3532 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3533                         bool force)
3534 {
3535         /* If we don't have a page list set up, then we're not pinned
3536          * to GPU, and we can ignore the cache flush because it'll happen
3537          * again at bind time.
3538          */
3539         if (obj->pages == NULL)
3540                 return false;
3541
3542         /*
3543          * Stolen memory is always coherent with the GPU as it is explicitly
3544          * marked as wc by the system, or the system is cache-coherent.
3545          */
3546         if (obj->stolen)
3547                 return false;
3548
3549         /* If the GPU is snooping the contents of the CPU cache,
3550          * we do not need to manually clear the CPU cache lines.  However,
3551          * the caches are only snooped when the render cache is
3552          * flushed/invalidated.  As we always have to emit invalidations
3553          * and flushes when moving into and out of the RENDER domain, correct
3554          * snooping behaviour occurs naturally as the result of our domain
3555          * tracking.
3556          */
3557         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3558                 return false;
3559
3560         trace_i915_gem_object_clflush(obj);
3561         drm_clflush_sg(obj->pages);
3562
3563         return true;
3564 }
3565
3566 /** Flushes the GTT write domain for the object if it's dirty. */
3567 static void
3568 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3569 {
3570         uint32_t old_write_domain;
3571
3572         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3573                 return;
3574
3575         /* No actual flushing is required for the GTT write domain.  Writes
3576          * to it immediately go to main memory as far as we know, so there's
3577          * no chipset flush.  It also doesn't land in the render cache.
3578          *
3579          * However, we do have to enforce the order so that all writes through
3580          * the GTT land before any writes to the device, such as updates to
3581          * the GATT itself.
3582          */
3583         wmb();
3584
3585         old_write_domain = obj->base.write_domain;
3586         obj->base.write_domain = 0;
3587
3588         intel_fb_obj_flush(obj, false);
3589
3590         trace_i915_gem_object_change_domain(obj,
3591                                             obj->base.read_domains,
3592                                             old_write_domain);
3593 }
3594
3595 /** Flushes the CPU write domain for the object if it's dirty. */
3596 static void
3597 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3598                                        bool force)
3599 {
3600         uint32_t old_write_domain;
3601
3602         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3603                 return;
3604
3605         if (i915_gem_clflush_object(obj, force))
3606                 i915_gem_chipset_flush(obj->base.dev);
3607
3608         old_write_domain = obj->base.write_domain;
3609         obj->base.write_domain = 0;
3610
3611         intel_fb_obj_flush(obj, false);
3612
3613         trace_i915_gem_object_change_domain(obj,
3614                                             obj->base.read_domains,
3615                                             old_write_domain);
3616 }
3617
3618 /**
3619  * Moves a single object to the GTT read, and possibly write domain.
3620  *
3621  * This function returns when the move is complete, including waiting on
3622  * flushes to occur.
3623  */
3624 int
3625 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3626 {
3627         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3628         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3629         uint32_t old_write_domain, old_read_domains;
3630         int ret;
3631
3632         /* Not valid to be called on unbound objects. */
3633         if (vma == NULL)
3634                 return -EINVAL;
3635
3636         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3637                 return 0;
3638
3639         ret = i915_gem_object_wait_rendering(obj, !write);
3640         if (ret)
3641                 return ret;
3642
3643         i915_gem_object_retire(obj);
3644         i915_gem_object_flush_cpu_write_domain(obj, false);
3645
3646         /* Serialise direct access to this object with the barriers for
3647          * coherent writes from the GPU, by effectively invalidating the
3648          * GTT domain upon first access.
3649          */
3650         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3651                 mb();
3652
3653         old_write_domain = obj->base.write_domain;
3654         old_read_domains = obj->base.read_domains;
3655
3656         /* It should now be out of any other write domains, and we can update
3657          * the domain values for our changes.
3658          */
3659         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3660         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3661         if (write) {
3662                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3663                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3664                 obj->dirty = 1;
3665         }
3666
3667         if (write)
3668                 intel_fb_obj_invalidate(obj, NULL);
3669
3670         trace_i915_gem_object_change_domain(obj,
3671                                             old_read_domains,
3672                                             old_write_domain);
3673
3674         /* And bump the LRU for this access */
3675         if (i915_gem_object_is_inactive(obj))
3676                 list_move_tail(&vma->mm_list,
3677                                &dev_priv->gtt.base.inactive_list);
3678
3679         return 0;
3680 }
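
/*
 * Illustrative sketch (editor's addition, not driver code): a hypothetical
 * path writing through the mappable aperture would, with struct_mutex held,
 * typically pair pinning with the domain change:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 *	if (ret)
 *		return ret;
 *	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *	if (ret == 0) {
 *		... write through the GTT mapping ...
 *	}
 *	i915_gem_object_ggtt_unpin(obj);
 */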
3681
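/*
 * Change the caching mode (PTE bits) of every binding of @obj.  VMAs whose
 * placement would be invalid for the new level are unbound, the remaining
 * bindings are redone with the new level, and on pre-gen6 hardware any fence
 * is dropped first, since fences cannot be combined with snooped memory
 * there.
 */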
3682 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3683                                     enum i915_cache_level cache_level)
3684 {
3685         struct drm_device *dev = obj->base.dev;
3686         struct i915_vma *vma, *next;
3687         int ret;
3688
3689         if (obj->cache_level == cache_level)
3690                 return 0;
3691
3692         if (i915_gem_obj_is_pinned(obj)) {
3693                 DRM_DEBUG("can not change the cache level of pinned objects\n");
3694                 return -EBUSY;
3695         }
3696
3697         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3698                 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3699                         ret = i915_vma_unbind(vma);
3700                         if (ret)
3701                                 return ret;
3702                 }
3703         }
3704
3705         if (i915_gem_obj_bound_any(obj)) {
3706                 ret = i915_gem_object_finish_gpu(obj);
3707                 if (ret)
3708                         return ret;
3709
3710                 i915_gem_object_finish_gtt(obj);
3711
3712                 /* Before SandyBridge, you could not use tiling or fence
3713                  * registers with snooped memory, so relinquish any fences
3714                  * currently pointing to our region in the aperture.
3715                  */
3716                 if (INTEL_INFO(dev)->gen < 6) {
3717                         ret = i915_gem_object_put_fence(obj);
3718                         if (ret)
3719                                 return ret;
3720                 }
3721
3722                 list_for_each_entry(vma, &obj->vma_list, vma_link)
3723                         if (drm_mm_node_allocated(&vma->node))
3724                                 vma->bind_vma(vma, cache_level,
3725                                               obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
3726         }
3727
3728         list_for_each_entry(vma, &obj->vma_list, vma_link)
3729                 vma->node.color = cache_level;
3730         obj->cache_level = cache_level;
3731
3732         if (cpu_write_needs_clflush(obj)) {
3733                 u32 old_read_domains, old_write_domain;
3734
3735                 /* If we're coming from LLC cached, then we haven't
3736                  * actually been tracking whether the data is in the
3737                  * CPU cache or not, since we only allow one bit set
3738                  * in obj->write_domain and have been skipping the clflushes.
3739                  * Just set it to the CPU cache for now.
3740                  */
3741                 i915_gem_object_retire(obj);
3742                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3743
3744                 old_read_domains = obj->base.read_domains;
3745                 old_write_domain = obj->base.write_domain;
3746
3747                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3748                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3749
3750                 trace_i915_gem_object_change_domain(obj,
3751                                                     old_read_domains,
3752                                                     old_write_domain);
3753         }
3754
3755         i915_gem_verify_gtt(dev);
3756         return 0;
3757 }
3758
3759 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3760                                struct drm_file *file)
3761 {
3762         struct drm_i915_gem_caching *args = data;
3763         struct drm_i915_gem_object *obj;
3764         int ret;
3765
3766         ret = i915_mutex_lock_interruptible(dev);
3767         if (ret)
3768                 return ret;
3769
3770         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3771         if (&obj->base == NULL) {
3772                 ret = -ENOENT;
3773                 goto unlock;
3774         }
3775
3776         switch (obj->cache_level) {
3777         case I915_CACHE_LLC:
3778         case I915_CACHE_L3_LLC:
3779                 args->caching = I915_CACHING_CACHED;
3780                 break;
3781
3782         case I915_CACHE_WT:
3783                 args->caching = I915_CACHING_DISPLAY;
3784                 break;
3785
3786         default:
3787                 args->caching = I915_CACHING_NONE;
3788                 break;
3789         }
3790
3791         drm_gem_object_unreference(&obj->base);
3792 unlock:
3793         mutex_unlock(&dev->struct_mutex);
3794         return ret;
3795 }
3796
3797 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3798                                struct drm_file *file)
3799 {
3800         struct drm_i915_gem_caching *args = data;
3801         struct drm_i915_gem_object *obj;
3802         enum i915_cache_level level;
3803         int ret;
3804
3805         switch (args->caching) {
3806         case I915_CACHING_NONE:
3807                 level = I915_CACHE_NONE;
3808                 break;
3809         case I915_CACHING_CACHED:
3810                 level = I915_CACHE_LLC;
3811                 break;
3812         case I915_CACHING_DISPLAY:
3813                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3814                 break;
3815         default:
3816                 return -EINVAL;
3817         }
3818
3819         ret = i915_mutex_lock_interruptible(dev);
3820         if (ret)
3821                 return ret;
3822
3823         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3824         if (&obj->base == NULL) {
3825                 ret = -ENOENT;
3826                 goto unlock;
3827         }
3828
3829         ret = i915_gem_object_set_cache_level(obj, level);
3830
3831         drm_gem_object_unreference(&obj->base);
3832 unlock:
3833         mutex_unlock(&dev->struct_mutex);
3834         return ret;
3835 }
3836
3837 static bool is_pin_display(struct drm_i915_gem_object *obj)
3838 {
3839         struct i915_vma *vma;
3840
3841         vma = i915_gem_obj_to_ggtt(obj);
3842         if (!vma)
3843                 return false;
3844
3845         /* There are 3 sources that pin objects:
3846          *   1. The display engine (scanouts, sprites, cursors);
3847          *   2. Reservations for execbuffer;
3848          *   3. The user.
3849          *
3850          * We can ignore reservations as we hold the struct_mutex and
3851          * are only called outside of the reservation path.  The user
3852          * can only increment pin_count once, and so if after
3853          * subtracting the potential reference by the user, any pin_count
3854          * remains, it must be due to another use by the display engine.
3855          */
3856         return vma->pin_count - !!obj->user_pin_count;
3857 }
3858
3859 /*
3860  * Prepare buffer for display plane (scanout, cursors, etc).
3861  * Can be called from an uninterruptible phase (modesetting) and allows
3862  * any flushes to be pipelined (for pageflips).
3863  */
3864 int
3865 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3866                                      u32 alignment,
3867                                      struct intel_engine_cs *pipelined)
3868 {
3869         u32 old_read_domains, old_write_domain;
3870         bool was_pin_display;
3871         int ret;
3872
3873         if (pipelined != obj->ring) {
3874                 ret = i915_gem_object_sync(obj, pipelined);
3875                 if (ret)
3876                         return ret;
3877         }
3878
3879         /* Mark the pin_display early so that we account for the
3880          * display coherency whilst setting up the cache domains.
3881          */
3882         was_pin_display = obj->pin_display;
3883         obj->pin_display = true;
3884
3885         /* The display engine is not coherent with the LLC cache on gen6.  As
3886          * a result, we make sure that the pinning that is about to occur is
3887          * done with uncached PTEs. This is the lowest common denominator for all
3888          * chipsets.
3889          *
3890          * However for gen6+, we could do better by using the GFDT bit instead
3891          * of uncaching, which would allow us to flush all the LLC-cached data
3892          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3893          */
3894         ret = i915_gem_object_set_cache_level(obj,
3895                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3896         if (ret)
3897                 goto err_unpin_display;
3898
3899         /* As the user may map the buffer once pinned in the display plane
3900          * (e.g. libkms for the bootup splash), we have to ensure that we
3901          * always use map_and_fenceable for all scanout buffers.
3902          */
3903         ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3904         if (ret)
3905                 goto err_unpin_display;
3906
3907         i915_gem_object_flush_cpu_write_domain(obj, true);
3908
3909         old_write_domain = obj->base.write_domain;
3910         old_read_domains = obj->base.read_domains;
3911
3912         /* It should now be out of any other write domains, and we can update
3913          * the domain values for our changes.
3914          */
3915         obj->base.write_domain = 0;
3916         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3917
3918         trace_i915_gem_object_change_domain(obj,
3919                                             old_read_domains,
3920                                             old_write_domain);
3921
3922         return 0;
3923
3924 err_unpin_display:
3925         WARN_ON(was_pin_display != is_pin_display(obj));
3926         obj->pin_display = was_pin_display;
3927         return ret;
3928 }
3929
3930 void
3931 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3932 {
3933         i915_gem_object_ggtt_unpin(obj);
3934         obj->pin_display = is_pin_display(obj);
3935 }
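
/*
 * Illustrative sketch (editor's addition, not driver code): a hypothetical
 * scanout setup would bracket use of the buffer like so, with struct_mutex
 * held:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret)
 *		return ret;
 *	... program the plane with i915_gem_obj_ggtt_offset(obj) ...
 *	i915_gem_object_unpin_from_display_plane(obj);
 *
 * Here "alignment" and "pipelined" stand in for whatever the display code
 * requires; they are placeholders, not values taken from this file.
 */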
3936
3937 int
3938 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3939 {
3940         int ret;
3941
3942         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3943                 return 0;
3944
3945         ret = i915_gem_object_wait_rendering(obj, false);
3946         if (ret)
3947                 return ret;
3948
3949         /* Ensure that we invalidate the GPU's caches and TLBs. */
3950         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3951         return 0;
3952 }
3953
3954 /**
3955  * Moves a single object to the CPU read, and possibly write domain.
3956  *
3957  * This function returns when the move is complete, including waiting on
3958  * flushes to occur.
3959  */
3960 int
3961 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3962 {
3963         uint32_t old_write_domain, old_read_domains;
3964         int ret;
3965
3966         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3967                 return 0;
3968
3969         ret = i915_gem_object_wait_rendering(obj, !write);
3970         if (ret)
3971                 return ret;
3972
3973         i915_gem_object_retire(obj);
3974         i915_gem_object_flush_gtt_write_domain(obj);
3975
3976         old_write_domain = obj->base.write_domain;
3977         old_read_domains = obj->base.read_domains;
3978
3979         /* Flush the CPU cache if it's still invalid. */
3980         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3981                 i915_gem_clflush_object(obj, false);
3982
3983                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3984         }
3985
3986         /* It should now be out of any other write domains, and we can update
3987          * the domain values for our changes.
3988          */
3989         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3990
3991         /* If we're writing through the CPU, then the GPU read domains will
3992          * need to be invalidated at next use.
3993          */
3994         if (write) {
3995                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3996                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3997         }
3998
3999         if (write)
4000                 intel_fb_obj_invalidate(obj, NULL);
4001
4002         trace_i915_gem_object_change_domain(obj,
4003                                             old_read_domains,
4004                                             old_write_domain);
4005
4006         return 0;
4007 }
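
/*
 * Illustrative sketch (editor's addition, not driver code): a hypothetical
 * caller preparing for direct CPU writes to the backing pages would, with
 * struct_mutex held, do:
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret)
 *		return ret;
 *	... write the pages through the CPU, no clflush needed ...
 *
 * Because the write domain is now the CPU, the GPU read domains will be
 * invalidated on next use, as noted in the function above.
 */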
4008
4009 /* Throttle our rendering by waiting until the ring has completed our requests
4010  * emitted over 20 msec ago.
4011  *
4012  * Note that if we were to use the current jiffies each time around the loop,
4013  * we wouldn't escape the function with any frames outstanding if the time to
4014  * render a frame was over 20ms.
4015  *
4016  * This should get us reasonable parallelism between CPU and GPU but also
4017  * relatively low latency when blocking on a particular request to finish.
4018  */
4019 static int
4020 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4021 {
4022         struct drm_i915_private *dev_priv = dev->dev_private;
4023         struct drm_i915_file_private *file_priv = file->driver_priv;
4024         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4025         struct drm_i915_gem_request *request;
4026         struct intel_engine_cs *ring = NULL;
4027         unsigned reset_counter;
4028         u32 seqno = 0;
4029         int ret;
4030
4031         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4032         if (ret)
4033                 return ret;
4034
4035         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4036         if (ret)
4037                 return ret;
4038
4039         spin_lock(&file_priv->mm.lock);
4040         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4041                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4042                         break;
4043
4044                 ring = request->ring;
4045                 seqno = request->seqno;
4046         }
4047         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4048         spin_unlock(&file_priv->mm.lock);
4049
4050         if (seqno == 0)
4051                 return 0;
4052
4053         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
4054         if (ret == 0)
4055                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4056
4057         return ret;
4058 }
4059
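/*
 * Check whether an existing binding violates the constraints of a new pin
 * request (alignment, mappability, minimum offset), in which case the caller
 * must unbind and rebind the VMA.
 */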
4060 static bool
4061 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4062 {
4063         struct drm_i915_gem_object *obj = vma->obj;
4064
4065         if (alignment &&
4066             vma->node.start & (alignment - 1))
4067                 return true;
4068
4069         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4070                 return true;
4071
4072         if (flags & PIN_OFFSET_BIAS &&
4073             vma->node.start < (flags & PIN_OFFSET_MASK))
4074                 return true;
4075
4076         return false;
4077 }
4078
4079 int
4080 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4081                     struct i915_address_space *vm,
4082                     uint32_t alignment,
4083                     uint64_t flags)
4084 {
4085         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4086         struct i915_vma *vma;
4087         int ret;
4088
4089         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4090                 return -ENODEV;
4091
4092         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4093                 return -EINVAL;
4094
4095         vma = i915_gem_obj_to_vma(obj, vm);
4096         if (vma) {
4097                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4098                         return -EBUSY;
4099
4100                 if (i915_vma_misplaced(vma, alignment, flags)) {
4101                         WARN(vma->pin_count,
4102                              "bo is already pinned with incorrect alignment:"
4103                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4104                              " obj->map_and_fenceable=%d\n",
4105                              i915_gem_obj_offset(obj, vm), alignment,
4106                              !!(flags & PIN_MAPPABLE),
4107                              obj->map_and_fenceable);
4108                         ret = i915_vma_unbind(vma);
4109                         if (ret)
4110                                 return ret;
4111
4112                         vma = NULL;
4113                 }
4114         }
4115
4116         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4117                 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
4118                 if (IS_ERR(vma))
4119                         return PTR_ERR(vma);
4120         }
4121
4122         if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
4123                 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
4124
4125         vma->pin_count++;
4126         if (flags & PIN_MAPPABLE)
4127                 obj->pin_mappable = true;
4128
4129         return 0;
4130 }
4131
4132 void
4133 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
4134 {
4135         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
4136
4137         BUG_ON(!vma);
4138         BUG_ON(vma->pin_count == 0);
4139         BUG_ON(!i915_gem_obj_ggtt_bound(obj));
4140
4141         if (--vma->pin_count == 0)
4142                 obj->pin_mappable = false;
4143 }
4144
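/*
 * Pin the fence register backing @obj (if any) so that i915_find_fence_reg()
 * will not steal it while it is in active use; returns true when a fence was
 * pinned and must later be released with i915_gem_object_unpin_fence().
 */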
4145 bool
4146 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4147 {
4148         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4149                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4150                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4151
4152                 WARN_ON(!ggtt_vma ||
4153                         dev_priv->fence_regs[obj->fence_reg].pin_count >
4154                         ggtt_vma->pin_count);
4155                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4156                 return true;
4157         } else
4158                 return false;
4159 }
4160
4161 void
4162 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4163 {
4164         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4165                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4166                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4167                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4168         }
4169 }
4170
4171 int
4172 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4173                    struct drm_file *file)
4174 {
4175         struct drm_i915_gem_pin *args = data;
4176         struct drm_i915_gem_object *obj;
4177         int ret;
4178
4179         if (INTEL_INFO(dev)->gen >= 6)
4180                 return -ENODEV;
4181
4182         ret = i915_mutex_lock_interruptible(dev);
4183         if (ret)
4184                 return ret;
4185
4186         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4187         if (&obj->base == NULL) {
4188                 ret = -ENOENT;
4189                 goto unlock;
4190         }
4191
4192         if (obj->madv != I915_MADV_WILLNEED) {
4193                 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
4194                 ret = -EFAULT;
4195                 goto out;
4196         }
4197
4198         if (obj->pin_filp != NULL && obj->pin_filp != file) {
4199                 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
4200                           args->handle);
4201                 ret = -EINVAL;
4202                 goto out;
4203         }
4204
4205         if (obj->user_pin_count == ULONG_MAX) {
4206                 ret = -EBUSY;
4207                 goto out;
4208         }
4209
4210         if (obj->user_pin_count == 0) {
4211                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
4212                 if (ret)
4213                         goto out;
4214         }
4215
4216         obj->user_pin_count++;
4217         obj->pin_filp = file;
4218
4219         args->offset = i915_gem_obj_ggtt_offset(obj);
4220 out:
4221         drm_gem_object_unreference(&obj->base);
4222 unlock:
4223         mutex_unlock(&dev->struct_mutex);
4224         return ret;
4225 }
4226
4227 int
4228 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4229                      struct drm_file *file)
4230 {
4231         struct drm_i915_gem_pin *args = data;
4232         struct drm_i915_gem_object *obj;
4233         int ret;
4234
4235         ret = i915_mutex_lock_interruptible(dev);
4236         if (ret)
4237                 return ret;
4238
4239         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4240         if (&obj->base == NULL) {
4241                 ret = -ENOENT;
4242                 goto unlock;
4243         }
4244
4245         if (obj->pin_filp != file) {
4246                 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4247                           args->handle);
4248                 ret = -EINVAL;
4249                 goto out;
4250         }
4251         obj->user_pin_count--;
4252         if (obj->user_pin_count == 0) {
4253                 obj->pin_filp = NULL;
4254                 i915_gem_object_ggtt_unpin(obj);
4255         }
4256
4257 out:
4258         drm_gem_object_unreference(&obj->base);
4259 unlock:
4260         mutex_unlock(&dev->struct_mutex);
4261         return ret;
4262 }
4263
4264 int
4265 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4266                     struct drm_file *file)
4267 {
4268         struct drm_i915_gem_busy *args = data;
4269         struct drm_i915_gem_object *obj;
4270         int ret;
4271
4272         ret = i915_mutex_lock_interruptible(dev);
4273         if (ret)
4274                 return ret;
4275
4276         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4277         if (&obj->base == NULL) {
4278                 ret = -ENOENT;
4279                 goto unlock;
4280         }
4281
4282         /* Count all active objects as busy, even if they are currently not used
4283          * by the GPU. Users of this interface expect objects to eventually
4284          * become non-busy without any further action; therefore, emit any
4285          * necessary flushes here.
4286          */
4287         ret = i915_gem_object_flush_active(obj);
4288
4289         args->busy = obj->active;
4290         if (obj->ring) {
4291                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4292                 args->busy |= intel_ring_flag(obj->ring) << 16;
4293         }
4294
4295         drm_gem_object_unreference(&obj->base);
4296 unlock:
4297         mutex_unlock(&dev->struct_mutex);
4298         return ret;
4299 }
4300
4301 int
4302 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4303                         struct drm_file *file_priv)
4304 {
4305         return i915_gem_ring_throttle(dev, file_priv);
4306 }
4307
4308 int
4309 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4310                        struct drm_file *file_priv)
4311 {
4312         struct drm_i915_gem_madvise *args = data;
4313         struct drm_i915_gem_object *obj;
4314         int ret;
4315
4316         switch (args->madv) {
4317         case I915_MADV_DONTNEED:
4318         case I915_MADV_WILLNEED:
4319             break;
4320         default:
4321             return -EINVAL;
4322         }
4323
4324         ret = i915_mutex_lock_interruptible(dev);
4325         if (ret)
4326                 return ret;
4327
4328         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4329         if (&obj->base == NULL) {
4330                 ret = -ENOENT;
4331                 goto unlock;
4332         }
4333
4334         if (i915_gem_obj_is_pinned(obj)) {
4335                 ret = -EINVAL;
4336                 goto out;
4337         }
4338
4339         if (obj->madv != __I915_MADV_PURGED)
4340                 obj->madv = args->madv;
4341
4342         /* if the object is no longer attached, discard its backing storage */
4343         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4344                 i915_gem_object_truncate(obj);
4345
4346         args->retained = obj->madv != __I915_MADV_PURGED;
4347
4348 out:
4349         drm_gem_object_unreference(&obj->base);
4350 unlock:
4351         mutex_unlock(&dev->struct_mutex);
4352         return ret;
4353 }
4354
4355 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4356                           const struct drm_i915_gem_object_ops *ops)
4357 {
4358         INIT_LIST_HEAD(&obj->global_list);
4359         INIT_LIST_HEAD(&obj->ring_list);
4360         INIT_LIST_HEAD(&obj->obj_exec_link);
4361         INIT_LIST_HEAD(&obj->vma_list);
4362
4363         obj->ops = ops;
4364
4365         obj->fence_reg = I915_FENCE_REG_NONE;
4366         obj->madv = I915_MADV_WILLNEED;
4367
4368         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4369 }
4370
4371 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4372         .get_pages = i915_gem_object_get_pages_gtt,
4373         .put_pages = i915_gem_object_put_pages_gtt,
4374 };
4375
4376 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4377                                                   size_t size)
4378 {
4379         struct drm_i915_gem_object *obj;
4380         struct address_space *mapping;
4381         gfp_t mask;
4382
4383         obj = i915_gem_object_alloc(dev);
4384         if (obj == NULL)
4385                 return NULL;
4386
4387         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4388                 i915_gem_object_free(obj);
4389                 return NULL;
4390         }
4391
4392         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4393         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4394                 /* 965gm cannot relocate objects above 4GiB. */
4395                 mask &= ~__GFP_HIGHMEM;
4396                 mask |= __GFP_DMA32;
4397         }
4398
4399         mapping = file_inode(obj->base.filp)->i_mapping;
4400         mapping_set_gfp_mask(mapping, mask);
4401
4402         i915_gem_object_init(obj, &i915_gem_object_ops);
4403
4404         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4405         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4406
4407         if (HAS_LLC(dev)) {
4408                 /* On some devices, we can have the GPU use the LLC (the CPU
4409                  * cache) for about a 10% performance improvement
4410                  * compared to uncached.  Graphics requests other than
4411                  * display scanout are coherent with the CPU in
4412                  * accessing this cache.  This means in this mode we
4413                  * don't need to clflush on the CPU side, and on the
4414                  * GPU side we only need to flush internal caches to
4415                  * get data visible to the CPU.
4416                  *
4417                  * However, we maintain the display planes as UC, and so
4418                  * need to rebind when first used as such.
4419                  */
4420                 obj->cache_level = I915_CACHE_LLC;
4421         } else
4422                 obj->cache_level = I915_CACHE_NONE;
4423
4424         trace_i915_gem_object_create(obj);
4425
4426         return obj;
4427 }
4428
4429 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4430 {
4431         /* If we are the last user of the backing storage (be it shmemfs
4432          * pages or stolen etc), we know that the pages are going to be
4433          * immediately released. In this case, we can then skip copying
4434          * back the contents from the GPU.
4435          */
4436
4437         if (obj->madv != I915_MADV_WILLNEED)
4438                 return false;
4439
4440         if (obj->base.filp == NULL)
4441                 return true;
4442
4443         /* At first glance, this looks racy, but then again so would
4444          * userspace racing mmap against close. However, the first external
4445          * reference to the filp can only be obtained through the
4446          * i915_gem_mmap_ioctl() which safeguards us against the user
4447          * acquiring such a reference whilst we are in the middle of
4448          * freeing the object.
4449          */
4450         return atomic_long_read(&obj->base.filp->f_count) == 1;
4451 }
4452
4453 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4454 {
4455         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4456         struct drm_device *dev = obj->base.dev;
4457         struct drm_i915_private *dev_priv = dev->dev_private;
4458         struct i915_vma *vma, *next;
4459
4460         intel_runtime_pm_get(dev_priv);
4461
4462         trace_i915_gem_object_destroy(obj);
4463
4464         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4465                 int ret;
4466
4467                 vma->pin_count = 0;
4468                 ret = i915_vma_unbind(vma);
4469                 if (WARN_ON(ret == -ERESTARTSYS)) {
4470                         bool was_interruptible;
4471
4472                         was_interruptible = dev_priv->mm.interruptible;
4473                         dev_priv->mm.interruptible = false;
4474
4475                         WARN_ON(i915_vma_unbind(vma));
4476
4477                         dev_priv->mm.interruptible = was_interruptible;
4478                 }
4479         }
4480
4481         i915_gem_object_detach_phys(obj);
4482
4483         /* Stolen objects don't hold a ref, but do hold a pin count. Fix that up
4484          * before progressing. */
4485         if (obj->stolen)
4486                 i915_gem_object_unpin_pages(obj);
4487
4488         WARN_ON(obj->frontbuffer_bits);
4489
4490         if (WARN_ON(obj->pages_pin_count))
4491                 obj->pages_pin_count = 0;
4492         if (discard_backing_storage(obj))
4493                 obj->madv = I915_MADV_DONTNEED;
4494         i915_gem_object_put_pages(obj);
4495         i915_gem_object_free_mmap_offset(obj);
4496
4497         BUG_ON(obj->pages);
4498
4499         if (obj->base.import_attach)
4500                 drm_prime_gem_destroy(&obj->base, NULL);
4501
4502         if (obj->ops->release)
4503                 obj->ops->release(obj);
4504
4505         drm_gem_object_release(&obj->base);
4506         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4507
4508         kfree(obj->bit_17);
4509         i915_gem_object_free(obj);
4510
4511         intel_runtime_pm_put(dev_priv);
4512 }
4513
4514 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4515                                      struct i915_address_space *vm)
4516 {
4517         struct i915_vma *vma;
4518         list_for_each_entry(vma, &obj->vma_list, vma_link)
4519                 if (vma->vm == vm)
4520                         return vma;
4521
4522         return NULL;
4523 }
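
/*
 * Illustrative sketch only (not part of the driver): one way a caller could
 * combine i915_gem_obj_to_vma() with drm_mm_node_allocated() to find where,
 * if anywhere, an object is bound in a given address space. The helper name
 * example_obj_offset_in_vm() is assumed purely for illustration.
 */
static long example_obj_offset_in_vm(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm)
{
        struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);

        if (vma == NULL || !drm_mm_node_allocated(&vma->node))
                return -1;      /* no allocated binding in this VM */

        return vma->node.start;
}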
4524
4525 void i915_gem_vma_destroy(struct i915_vma *vma)
4526 {
4527         struct i915_address_space *vm = NULL;
4528         WARN_ON(vma->node.allocated);
4529
4530         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4531         if (!list_empty(&vma->exec_list))
4532                 return;
4533
4534         vm = vma->vm;
4535
4536         if (!i915_is_ggtt(vm))
4537                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4538
4539         list_del(&vma->vma_link);
4540
4541         kfree(vma);
4542 }
4543
4544 static void
4545 i915_gem_stop_ringbuffers(struct drm_device *dev)
4546 {
4547         struct drm_i915_private *dev_priv = dev->dev_private;
4548         struct intel_engine_cs *ring;
4549         int i;
4550
4551         for_each_ring(ring, dev_priv, i)
4552                 dev_priv->gt.stop_ring(ring);
4553 }
4554
4555 int
4556 i915_gem_suspend(struct drm_device *dev)
4557 {
4558         struct drm_i915_private *dev_priv = dev->dev_private;
4559         int ret = 0;
4560
4561         mutex_lock(&dev->struct_mutex);
4562         if (dev_priv->ums.mm_suspended)
4563                 goto err;
4564
4565         ret = i915_gpu_idle(dev);
4566         if (ret)
4567                 goto err;
4568
4569         i915_gem_retire_requests(dev);
4570
4571         /* Under UMS, be paranoid and evict. */
4572         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4573                 i915_gem_evict_everything(dev);
4574
4575         i915_kernel_lost_context(dev);
4576         i915_gem_stop_ringbuffers(dev);
4577
4578         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4579          * We need to replace this with a semaphore, or something.
4580          * And not confound ums.mm_suspended!
4581          */
4582         dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4583                                                              DRIVER_MODESET);
4584         mutex_unlock(&dev->struct_mutex);
4585
4586         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4587         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4588         flush_delayed_work(&dev_priv->mm.idle_work);
4589
4590         return 0;
4591
4592 err:
4593         mutex_unlock(&dev->struct_mutex);
4594         return ret;
4595 }
4596
4597 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4598 {
4599         struct drm_device *dev = ring->dev;
4600         struct drm_i915_private *dev_priv = dev->dev_private;
4601         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4602         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4603         int i, ret;
4604
4605         if (!HAS_L3_DPF(dev) || !remap_info)
4606                 return 0;
4607
4608         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4609         if (ret)
4610                 return ret;
4611
4612         /*
4613          * Note: We do not worry about the concurrent register cacheline hang
4614          * here because no other code should access these registers other than
4615          * at initialization time.
4616          */
4617         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4618                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4619                 intel_ring_emit(ring, reg_base + i);
4620                 intel_ring_emit(ring, remap_info[i/4]);
4621         }
4622
4623         intel_ring_advance(ring);
4624
4625         return ret;
4626 }
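
/*
 * Dword accounting for the remap above, as a sketch (illustration only): each
 * of the GEN7_L3LOG_SIZE/4 remap values is written with a three-dword
 * MI_LOAD_REGISTER_IMM sequence (command, register offset, value), which is
 * why intel_ring_begin() reserves GEN7_L3LOG_SIZE / 4 * 3 dwords. The helper
 * name example_emit_lri() is assumed purely for illustration; the caller must
 * already have reserved ring space with intel_ring_begin() and must finish
 * with intel_ring_advance().
 */
static void example_emit_lri(struct intel_engine_cs *ring, u32 reg, u32 value)
{
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); /* dword 1: command  */
        intel_ring_emit(ring, reg);                     /* dword 2: register */
        intel_ring_emit(ring, value);                   /* dword 3: value    */
}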
4627
4628 void i915_gem_init_swizzling(struct drm_device *dev)
4629 {
4630         struct drm_i915_private *dev_priv = dev->dev_private;
4631
4632         if (INTEL_INFO(dev)->gen < 5 ||
4633             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4634                 return;
4635
4636         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4637                                  DISP_TILE_SURFACE_SWIZZLING);
4638
4639         if (IS_GEN5(dev))
4640                 return;
4641
4642         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4643         if (IS_GEN6(dev))
4644                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4645         else if (IS_GEN7(dev))
4646                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4647         else if (IS_GEN8(dev))
4648                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4649         else
4650                 BUG();
4651 }
4652
4653 static bool
4654 intel_enable_blt(struct drm_device *dev)
4655 {
4656         if (!HAS_BLT(dev))
4657                 return false;
4658
4659         /* The blitter was dysfunctional on early prototypes */
4660         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4661                 DRM_INFO("BLT not supported on this pre-production hardware;"
4662                          " graphics performance will be degraded.\n");
4663                 return false;
4664         }
4665
4666         return true;
4667 }
4668
4669 static void init_unused_ring(struct drm_device *dev, u32 base)
4670 {
4671         struct drm_i915_private *dev_priv = dev->dev_private;
4672
4673         I915_WRITE(RING_CTL(base), 0);
4674         I915_WRITE(RING_HEAD(base), 0);
4675         I915_WRITE(RING_TAIL(base), 0);
4676         I915_WRITE(RING_START(base), 0);
4677 }
4678
4679 static void init_unused_rings(struct drm_device *dev)
4680 {
4681         if (IS_I830(dev)) {
4682                 init_unused_ring(dev, PRB1_BASE);
4683                 init_unused_ring(dev, SRB0_BASE);
4684                 init_unused_ring(dev, SRB1_BASE);
4685                 init_unused_ring(dev, SRB2_BASE);
4686                 init_unused_ring(dev, SRB3_BASE);
4687         } else if (IS_GEN2(dev)) {
4688                 init_unused_ring(dev, SRB0_BASE);
4689                 init_unused_ring(dev, SRB1_BASE);
4690         } else if (IS_GEN3(dev)) {
4691                 init_unused_ring(dev, PRB1_BASE);
4692                 init_unused_ring(dev, PRB2_BASE);
4693         }
4694 }
4695
4696 int i915_gem_init_rings(struct drm_device *dev)
4697 {
4698         struct drm_i915_private *dev_priv = dev->dev_private;
4699         int ret;
4700
4701         /*
4702          * At least 830 can leave some of the unused rings
4703          * "active" (ie. head != tail) after resume which
4704          * will prevent c3 entry. Makes sure all unused rings
4705          * are totally idle.
4706          */
4707         init_unused_rings(dev);
4708
4709         ret = intel_init_render_ring_buffer(dev);
4710         if (ret)
4711                 return ret;
4712
4713         if (HAS_BSD(dev)) {
4714                 ret = intel_init_bsd_ring_buffer(dev);
4715                 if (ret)
4716                         goto cleanup_render_ring;
4717         }
4718
4719         if (intel_enable_blt(dev)) {
4720                 ret = intel_init_blt_ring_buffer(dev);
4721                 if (ret)
4722                         goto cleanup_bsd_ring;
4723         }
4724
4725         if (HAS_VEBOX(dev)) {
4726                 ret = intel_init_vebox_ring_buffer(dev);
4727                 if (ret)
4728                         goto cleanup_blt_ring;
4729         }
4730
4731         if (HAS_BSD2(dev)) {
4732                 ret = intel_init_bsd2_ring_buffer(dev);
4733                 if (ret)
4734                         goto cleanup_vebox_ring;
4735         }
4736
4737         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4738         if (ret)
4739                 goto cleanup_bsd2_ring;
4740
4741         return 0;
4742
4743 cleanup_bsd2_ring:
4744         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4745 cleanup_vebox_ring:
4746         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4747 cleanup_blt_ring:
4748         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4749 cleanup_bsd_ring:
4750         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4751 cleanup_render_ring:
4752         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4753
4754         return ret;
4755 }
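
/*
 * Worked value for the seqno initialisation above (illustration only):
 *
 *      (u32)~0 - 0x1000 == 0xffffffff - 0x00001000 == 0xfffff000
 *
 * so the rings start only 0x1000 seqnos below the 32-bit wrap point. Starting
 * this close to the wrap, rather than at zero, appears intended to exercise
 * the seqno wrap-around handling soon after load; that reading of the intent
 * is an assumption, not something this file states.
 */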
4756
4757 int
4758 i915_gem_init_hw(struct drm_device *dev)
4759 {
4760         struct drm_i915_private *dev_priv = dev->dev_private;
4761         int ret, i;
4762
4763         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4764                 return -EIO;
4765
4766         if (dev_priv->ellc_size)
4767                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4768
4769         if (IS_HASWELL(dev))
4770                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4771                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4772
4773         if (HAS_PCH_NOP(dev)) {
4774                 if (IS_IVYBRIDGE(dev)) {
4775                         u32 temp = I915_READ(GEN7_MSG_CTL);
4776                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4777                         I915_WRITE(GEN7_MSG_CTL, temp);
4778                 } else if (INTEL_INFO(dev)->gen >= 7) {
4779                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4780                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4781                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4782                 }
4783         }
4784
4785         i915_gem_init_swizzling(dev);
4786
4787         ret = dev_priv->gt.init_rings(dev);
4788         if (ret)
4789                 return ret;
4790
4791         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4792                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4793
4794         /*
4795          * XXX: Contexts should only be initialized once. Switching to the
4796          * default context, however, is something we'd like to do after
4797          * reset or thaw (the latter may not actually be necessary for HW, but
4798          * goes with our code better). Context switching requires rings (for
4799          * the do_switch), but must happen before enabling PPGTT. So don't move this.
4800          */
4801         ret = i915_gem_context_enable(dev_priv);
4802         if (ret && ret != -EIO) {
4803                 DRM_ERROR("Context enable failed %d\n", ret);
4804                 i915_gem_cleanup_ringbuffer(dev);
4805
4806                 return ret;
4807         }
4808
4809         ret = i915_ppgtt_init_hw(dev);
4810         if (ret && ret != -EIO) {
4811                 DRM_ERROR("PPGTT enable failed %d\n", ret);
4812                 i915_gem_cleanup_ringbuffer(dev);
4813         }
4814
4815         return ret;
4816 }
4817
4818 int i915_gem_init(struct drm_device *dev)
4819 {
4820         struct drm_i915_private *dev_priv = dev->dev_private;
4821         int ret;
4822
4823         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4824                         i915.enable_execlists);
4825
4826         mutex_lock(&dev->struct_mutex);
4827
4828         if (IS_VALLEYVIEW(dev)) {
4829                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4830                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4831                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4832                               VLV_GTLC_ALLOWWAKEACK), 10))
4833                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4834         }
4835
4836         if (!i915.enable_execlists) {
4837                 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
4838                 dev_priv->gt.init_rings = i915_gem_init_rings;
4839                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
4840                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
4841         } else {
4842                 dev_priv->gt.do_execbuf = intel_execlists_submission;
4843                 dev_priv->gt.init_rings = intel_logical_rings_init;
4844                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
4845                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4846         }
4847
4848         ret = i915_gem_init_userptr(dev);
4849         if (ret) {
4850                 mutex_unlock(&dev->struct_mutex);
4851                 return ret;
4852         }
4853
4854         i915_gem_init_global_gtt(dev);
4855
4856         ret = i915_gem_context_init(dev);
4857         if (ret) {
4858                 mutex_unlock(&dev->struct_mutex);
4859                 return ret;
4860         }
4861
4862         ret = i915_gem_init_hw(dev);
4863         if (ret == -EIO) {
4864                 /* Allow ring initialisation to fail by marking the GPU as
4865                  * wedged. But we only want to do this where the GPU is angry,
4866                  * for all other failures, such as an allocation failure, bail.
4867                  */
4868                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4869                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4870                 ret = 0;
4871         }
4872         mutex_unlock(&dev->struct_mutex);
4873
4874         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4875         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4876                 dev_priv->dri1.allow_batchbuffer = 1;
4877         return ret;
4878 }
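
/*
 * Illustrative sketch only: after i915_gem_init() has filled in dev_priv->gt,
 * callers dispatch through those function pointers and never need to know
 * whether the legacy ringbuffer or the execlists backend was chosen. The
 * helper name example_stop_all_rings() is assumed for illustration; it simply
 * mirrors i915_gem_stop_ringbuffers() earlier in this file.
 */
static void example_stop_all_rings(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *ring;
        int i;

        for_each_ring(ring, dev_priv, i)
                dev_priv->gt.stop_ring(ring);   /* backend-specific stop */
}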
4879
4880 void
4881 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4882 {
4883         struct drm_i915_private *dev_priv = dev->dev_private;
4884         struct intel_engine_cs *ring;
4885         int i;
4886
4887         for_each_ring(ring, dev_priv, i)
4888                 dev_priv->gt.cleanup_ring(ring);
4889 }
4890
4891 int
4892 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4893                        struct drm_file *file_priv)
4894 {
4895         struct drm_i915_private *dev_priv = dev->dev_private;
4896         int ret;
4897
4898         if (drm_core_check_feature(dev, DRIVER_MODESET))
4899                 return 0;
4900
4901         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4902                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4903                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4904         }
4905
4906         mutex_lock(&dev->struct_mutex);
4907         dev_priv->ums.mm_suspended = 0;
4908
4909         ret = i915_gem_init_hw(dev);
4910         if (ret != 0) {
4911                 mutex_unlock(&dev->struct_mutex);
4912                 return ret;
4913         }
4914
4915         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
4916
4917         ret = drm_irq_install(dev, dev->pdev->irq);
4918         if (ret)
4919                 goto cleanup_ringbuffer;
4920         mutex_unlock(&dev->struct_mutex);
4921
4922         return 0;
4923
4924 cleanup_ringbuffer:
4925         i915_gem_cleanup_ringbuffer(dev);
4926         dev_priv->ums.mm_suspended = 1;
4927         mutex_unlock(&dev->struct_mutex);
4928
4929         return ret;
4930 }
4931
4932 int
4933 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4934                        struct drm_file *file_priv)
4935 {
4936         if (drm_core_check_feature(dev, DRIVER_MODESET))
4937                 return 0;
4938
4939         mutex_lock(&dev->struct_mutex);
4940         drm_irq_uninstall(dev);
4941         mutex_unlock(&dev->struct_mutex);
4942
4943         return i915_gem_suspend(dev);
4944 }
4945
4946 void
4947 i915_gem_lastclose(struct drm_device *dev)
4948 {
4949         int ret;
4950
4951         if (drm_core_check_feature(dev, DRIVER_MODESET))
4952                 return;
4953
4954         ret = i915_gem_suspend(dev);
4955         if (ret)
4956                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4957 }
4958
4959 static void
4960 init_ring_lists(struct intel_engine_cs *ring)
4961 {
4962         INIT_LIST_HEAD(&ring->active_list);
4963         INIT_LIST_HEAD(&ring->request_list);
4964 }
4965
4966 void i915_init_vm(struct drm_i915_private *dev_priv,
4967                   struct i915_address_space *vm)
4968 {
4969         if (!i915_is_ggtt(vm))
4970                 drm_mm_init(&vm->mm, vm->start, vm->total);
4971         vm->dev = dev_priv->dev;
4972         INIT_LIST_HEAD(&vm->active_list);
4973         INIT_LIST_HEAD(&vm->inactive_list);
4974         INIT_LIST_HEAD(&vm->global_link);
4975         list_add_tail(&vm->global_link, &dev_priv->vm_list);
4976 }
4977
4978 void
4979 i915_gem_load(struct drm_device *dev)
4980 {
4981         struct drm_i915_private *dev_priv = dev->dev_private;
4982         int i;
4983
4984         dev_priv->slab =
4985                 kmem_cache_create("i915_gem_object",
4986                                   sizeof(struct drm_i915_gem_object), 0,
4987                                   SLAB_HWCACHE_ALIGN,
4988                                   NULL);
4989
4990         INIT_LIST_HEAD(&dev_priv->vm_list);
4991         i915_init_vm(dev_priv, &dev_priv->gtt.base);
4992
4993         INIT_LIST_HEAD(&dev_priv->context_list);
4994         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4995         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4996         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4997         for (i = 0; i < I915_NUM_RINGS; i++)
4998                 init_ring_lists(&dev_priv->ring[i]);
4999         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5000                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5001         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
5002                           i915_gem_retire_work_handler);
5003         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
5004                           i915_gem_idle_work_handler);
5005         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5006
5007         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5008         if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
5009                 I915_WRITE(MI_ARB_STATE,
5010                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
5011         }
5012
5013         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5014
5015         /* Old X drivers will take 0-2 for front, back, depth buffers */
5016         if (!drm_core_check_feature(dev, DRIVER_MODESET))
5017                 dev_priv->fence_reg_start = 3;
5018
5019         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
5020                 dev_priv->num_fence_regs = 32;
5021         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5022                 dev_priv->num_fence_regs = 16;
5023         else
5024                 dev_priv->num_fence_regs = 8;
5025
5026         /* Initialize fence registers to zero */
5027         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5028         i915_gem_restore_fences(dev);
5029
5030         i915_gem_detect_bit_6_swizzle(dev);
5031         init_waitqueue_head(&dev_priv->pending_flip_queue);
5032
5033         dev_priv->mm.interruptible = true;
5034
5035         dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
5036         dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
5037         dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
5038         register_shrinker(&dev_priv->mm.shrinker);
5039
5040         dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
5041         register_oom_notifier(&dev_priv->mm.oom_notifier);
5042
5043         mutex_init(&dev_priv->fb_tracking.lock);
5044 }
5045
5046 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5047 {
5048         struct drm_i915_file_private *file_priv = file->driver_priv;
5049
5050         cancel_delayed_work_sync(&file_priv->mm.idle_work);
5051
5052         /* Clean up our request list when the client is going away, so that
5053          * later retire_requests won't dereference our soon-to-be-gone
5054          * file_priv.
5055          */
5056         spin_lock(&file_priv->mm.lock);
5057         while (!list_empty(&file_priv->mm.request_list)) {
5058                 struct drm_i915_gem_request *request;
5059
5060                 request = list_first_entry(&file_priv->mm.request_list,
5061                                            struct drm_i915_gem_request,
5062                                            client_list);
5063                 list_del(&request->client_list);
5064                 request->file_priv = NULL;
5065         }
5066         spin_unlock(&file_priv->mm.lock);
5067 }
5068
5069 static void
5070 i915_gem_file_idle_work_handler(struct work_struct *work)
5071 {
5072         struct drm_i915_file_private *file_priv =
5073                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5074
5075         atomic_set(&file_priv->rps_wait_boost, false);
5076 }
5077
5078 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5079 {
5080         struct drm_i915_file_private *file_priv;
5081         int ret;
5082
5083         DRM_DEBUG_DRIVER("\n");
5084
5085         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5086         if (!file_priv)
5087                 return -ENOMEM;
5088
5089         file->driver_priv = file_priv;
5090         file_priv->dev_priv = dev->dev_private;
5091         file_priv->file = file;
5092
5093         spin_lock_init(&file_priv->mm.lock);
5094         INIT_LIST_HEAD(&file_priv->mm.request_list);
5095         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5096                           i915_gem_file_idle_work_handler);
5097
5098         ret = i915_gem_context_open(dev, file);
5099         if (ret)
5100                 kfree(file_priv);
5101
5102         return ret;
5103 }
5104
5105 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5106                        struct drm_i915_gem_object *new,
5107                        unsigned frontbuffer_bits)
5108 {
5109         if (old) {
5110                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5111                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5112                 old->frontbuffer_bits &= ~frontbuffer_bits;
5113         }
5114
5115         if (new) {
5116                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5117                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5118                 new->frontbuffer_bits |= frontbuffer_bits;
5119         }
5120 }
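
/*
 * Illustrative sketch only (not part of the driver): a plane's frontbuffer
 * bit follows the object currently scanned out, with NULL standing in for
 * "no object" when the plane is being enabled or disabled. The helper name
 * example_plane_set_frontbuffer() and its single bit parameter are
 * assumptions for illustration; the caller must hold struct_mutex, as the
 * WARN_ONs above check.
 */
static void example_plane_set_frontbuffer(struct drm_i915_gem_object *old_obj,
                                          struct drm_i915_gem_object *new_obj,
                                          unsigned frontbuffer_bit)
{
        /* enable, flip or disable: either object may be NULL */
        i915_gem_track_fb(old_obj, new_obj, frontbuffer_bit);
}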
5121
5122 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
5123 {
5124         if (!mutex_is_locked(mutex))
5125                 return false;
5126
5127 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5128         return mutex->owner == task;
5129 #else
5130         /* Since UP may be pre-empted, we cannot assume that we own the lock */
5131         return false;
5132 #endif
5133 }
5134
5135 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
5136 {
5137         if (!mutex_trylock(&dev->struct_mutex)) {
5138                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5139                         return false;
5140
5141                 if (to_i915(dev)->mm.shrinker_no_lock_stealing)
5142                         return false;
5143
5144                 *unlock = false;
5145         } else
5146                 *unlock = true;
5147
5148         return true;
5149 }
5150
5151 static int num_vma_bound(struct drm_i915_gem_object *obj)
5152 {
5153         struct i915_vma *vma;
5154         int count = 0;
5155
5156         list_for_each_entry(vma, &obj->vma_list, vma_link)
5157                 if (drm_mm_node_allocated(&vma->node))
5158                         count++;
5159
5160         return count;
5161 }
5162
5163 static unsigned long
5164 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
5165 {
5166         struct drm_i915_private *dev_priv =
5167                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5168         struct drm_device *dev = dev_priv->dev;
5169         struct drm_i915_gem_object *obj;
5170         unsigned long count;
5171         bool unlock;
5172
5173         if (!i915_gem_shrinker_lock(dev, &unlock))
5174                 return 0;
5175
5176         count = 0;
5177         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5178                 if (obj->pages_pin_count == 0)
5179                         count += obj->base.size >> PAGE_SHIFT;
5180
5181         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5182                 if (!i915_gem_obj_is_pinned(obj) &&
5183                     obj->pages_pin_count == num_vma_bound(obj))
5184                         count += obj->base.size >> PAGE_SHIFT;
5185         }
5186
5187         if (unlock)
5188                 mutex_unlock(&dev->struct_mutex);
5189
5190         return count;
5191 }
5192
5193 /* Helpers for querying an object's bindings per address space (VM) */
5194 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5195                                   struct i915_address_space *vm)
5196 {
5197         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5198         struct i915_vma *vma;
5199
5200         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5201
5202         list_for_each_entry(vma, &o->vma_list, vma_link) {
5203                 if (vma->vm == vm)
5204                         return vma->node.start;
5205
5206         }
5207         WARN(1, "%s vma for this object not found.\n",
5208              i915_is_ggtt(vm) ? "global" : "ppgtt");
5209         return -1;
5210 }
5211
5212 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5213                         struct i915_address_space *vm)
5214 {
5215         struct i915_vma *vma;
5216
5217         list_for_each_entry(vma, &o->vma_list, vma_link)
5218                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5219                         return true;
5220
5221         return false;
5222 }
5223
5224 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5225 {
5226         struct i915_vma *vma;
5227
5228         list_for_each_entry(vma, &o->vma_list, vma_link)
5229                 if (drm_mm_node_allocated(&vma->node))
5230                         return true;
5231
5232         return false;
5233 }
5234
5235 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5236                                 struct i915_address_space *vm)
5237 {
5238         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5239         struct i915_vma *vma;
5240
5241         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5242
5243         BUG_ON(list_empty(&o->vma_list));
5244
5245         list_for_each_entry(vma, &o->vma_list, vma_link)
5246                 if (vma->vm == vm)
5247                         return vma->node.size;
5248
5249         return 0;
5250 }
5251
5252 static unsigned long
5253 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
5254 {
5255         struct drm_i915_private *dev_priv =
5256                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5257         struct drm_device *dev = dev_priv->dev;
5258         unsigned long freed;
5259         bool unlock;
5260
5261         if (!i915_gem_shrinker_lock(dev, &unlock))
5262                 return SHRINK_STOP;
5263
5264         freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
5265         if (freed < sc->nr_to_scan)
5266                 freed += __i915_gem_shrink(dev_priv,
5267                                            sc->nr_to_scan - freed,
5268                                            false);
5269         if (unlock)
5270                 mutex_unlock(&dev->struct_mutex);
5271
5272         return freed;
5273 }
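
/*
 * Minimal sketch of the shrinker contract used above, against a made-up
 * example_cache with a simple page counter; everything named example_* is an
 * assumption for illustration, not part of i915. ->count_objects reports how
 * much could be reclaimed, ->scan_objects reclaims up to sc->nr_to_scan and
 * returns what it actually freed (or SHRINK_STOP if it cannot make progress).
 */
static struct {
        unsigned long pages;
} example_cache;

static unsigned long example_shrinker_count(struct shrinker *shrinker,
                                            struct shrink_control *sc)
{
        return example_cache.pages;
}

static unsigned long example_shrinker_scan(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        unsigned long freed = min(example_cache.pages, sc->nr_to_scan);

        example_cache.pages -= freed;
        return freed ? freed : SHRINK_STOP;
}

static struct shrinker example_shrinker = {
        .count_objects = example_shrinker_count,
        .scan_objects = example_shrinker_scan,
        .seeks = DEFAULT_SEEKS,
};
/* registered with register_shrinker(&example_shrinker) at init time */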
5274
5275 static int
5276 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5277 {
5278         struct drm_i915_private *dev_priv =
5279                 container_of(nb, struct drm_i915_private, mm.oom_notifier);
5280         struct drm_device *dev = dev_priv->dev;
5281         struct drm_i915_gem_object *obj;
5282         unsigned long timeout = msecs_to_jiffies(5000) + 1;
5283         unsigned long pinned, bound, unbound, freed;
5284         bool was_interruptible;
5285         bool unlock;
5286
5287         while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
5288                 schedule_timeout_killable(1);
5289                 if (fatal_signal_pending(current))
5290                         return NOTIFY_DONE;
5291         }
5292         if (timeout == 0) {
5293                 pr_err("Unable to purge GPU memory due lock contention.\n");
5294                 return NOTIFY_DONE;
5295         }
5296
5297         was_interruptible = dev_priv->mm.interruptible;
5298         dev_priv->mm.interruptible = false;
5299
5300         freed = i915_gem_shrink_all(dev_priv);
5301
5302         dev_priv->mm.interruptible = was_interruptible;
5303
5304         /* Because we may be allocating inside our own driver, we cannot
5305          * assert that there are no objects with pinned pages that are not
5306          * being pointed to by hardware.
5307          */
5308         unbound = bound = pinned = 0;
5309         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5310                 if (!obj->base.filp) /* not backed by a freeable object */
5311                         continue;
5312
5313                 if (obj->pages_pin_count)
5314                         pinned += obj->base.size;
5315                 else
5316                         unbound += obj->base.size;
5317         }
5318         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5319                 if (!obj->base.filp)
5320                         continue;
5321
5322                 if (obj->pages_pin_count)
5323                         pinned += obj->base.size;
5324                 else
5325                         bound += obj->base.size;
5326         }
5327
5328         if (unlock)
5329                 mutex_unlock(&dev->struct_mutex);
5330
5331         pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
5332                 freed, pinned);
5333         if (unbound || bound)
5334                 pr_err("%lu and %lu bytes still available in the "
5335                        "bound and unbound GPU page lists.\n",
5336                        bound, unbound);
5337
5338         *(unsigned long *)ptr += freed;
5339         return NOTIFY_DONE;
5340 }
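
/*
 * Minimal sketch of the OOM-notifier convention used above: the callback adds
 * the number of pages it managed to free to the counter behind @ptr and
 * returns NOTIFY_DONE. Everything named example_* is an assumption for
 * illustration; example_drop_caches() stands in for driver-specific reclaim.
 */
static unsigned long example_drop_caches(void)
{
        return 0;       /* pretend nothing could be freed */
}

static int example_oom_notify(struct notifier_block *nb,
                              unsigned long event, void *ptr)
{
        *(unsigned long *)ptr += example_drop_caches();
        return NOTIFY_DONE;
}

static struct notifier_block example_oom_nb = {
        .notifier_call = example_oom_notify,
};
/* registered with register_oom_notifier(&example_oom_nb) at init time */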
5341
5342 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5343 {
5344         struct i915_vma *vma;
5345
5346         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5347         if (vma->vm != i915_obj_to_ggtt(obj))
5348                 return NULL;
5349
5350         return vma;
5351 }