linux.git: drivers/gpu/drm/i915/gem/i915_gem_pages.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2014-2016 Intel Corporation
5  */
6
7 #include <drm/drm_cache.h>
8
9 #include "gt/intel_gt.h"
10 #include "gt/intel_gt_pm.h"
11
12 #include "i915_drv.h"
13 #include "i915_gem_object.h"
14 #include "i915_scatterlist.h"
15 #include "i915_gem_lmem.h"
16 #include "i915_gem_mman.h"
17
18 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
19                                  struct sg_table *pages)
20 {
21         struct drm_i915_private *i915 = to_i915(obj->base.dev);
22         unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
23         bool shrinkable;
24         int i;
25
26         assert_object_held_shared(obj);
27
28         if (i915_gem_object_is_volatile(obj))
29                 obj->mm.madv = I915_MADV_DONTNEED;
30
31         /* Make the pages coherent with the GPU (flushing any swapin). */
32         if (obj->cache_dirty) {
33                 WARN_ON_ONCE(IS_DGFX(i915));
34                 obj->write_domain = 0;
35                 if (i915_gem_object_has_struct_page(obj))
36                         drm_clflush_sg(pages);
37                 obj->cache_dirty = false;
38         }
39
40         obj->mm.get_page.sg_pos = pages->sgl;
41         obj->mm.get_page.sg_idx = 0;
42         obj->mm.get_dma_page.sg_pos = pages->sgl;
43         obj->mm.get_dma_page.sg_idx = 0;
44
45         obj->mm.pages = pages;
46
47         obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
48         GEM_BUG_ON(!obj->mm.page_sizes.phys);
49
50         /*
51          * Calculate the supported page-sizes which fit into the given
52          * sg_page_sizes. This will give us the page-sizes which we may be able
53          * to use opportunistically when later inserting into the GTT. For
54          * example if phys=2G, then in theory we should be able to use 1G, 2M,
55          * 64K or 4K pages, although in practice this will depend on a number of
56          * other factors.
57          */
58         obj->mm.page_sizes.sg = 0;
59         for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
60                 if (obj->mm.page_sizes.phys & ~0u << i)
61                         obj->mm.page_sizes.sg |= BIT(i);
62         }
63         GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
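
        /*
         * Editor's note (illustration, not part of the original source):
         * a worked instance of the loop above. Assume the platform supports
         * 4K, 64K and 2M pages (supported = SZ_4K | SZ_64K | SZ_2M) and
         * i915_sg_dma_sizes() reported segments covering up to 2M, so
         * page_sizes.phys has the 2M bit set:
         *
         *   i = 12 (4K):  phys & (~0u << 12) != 0  ->  sg |= SZ_4K
         *   i = 16 (64K): phys & (~0u << 16) != 0  ->  sg |= SZ_64K
         *   i = 21 (2M):  phys & (~0u << 21) != 0  ->  sg |= SZ_2M
         *
         * Had the largest segment only reached 64K, the i = 21 test would
         * fail and sg would end up as SZ_4K | SZ_64K: only page sizes the
         * scatterlist can actually back are advertised for GTT insertion.
         */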
64
65         shrinkable = i915_gem_object_is_shrinkable(obj);
66
67         if (i915_gem_object_is_tiled(obj) &&
68             i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
69                 GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
70                 i915_gem_object_set_tiling_quirk(obj);
71                 GEM_BUG_ON(!list_empty(&obj->mm.link));
72                 atomic_inc(&obj->mm.shrink_pin);
73                 shrinkable = false;
74         }
75
76         if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
77                 struct list_head *list;
78                 unsigned long flags;
79
80                 assert_object_held(obj);
81                 spin_lock_irqsave(&i915->mm.obj_lock, flags);
82
83                 i915->mm.shrink_count++;
84                 i915->mm.shrink_memory += obj->base.size;
85
86                 if (obj->mm.madv != I915_MADV_WILLNEED)
87                         list = &i915->mm.purge_list;
88                 else
89                         list = &i915->mm.shrink_list;
90                 list_add_tail(&obj->mm.link, list);
91
92                 atomic_set(&obj->mm.shrink_pin, 0);
93                 spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
94         }
95 }
96
97 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
98 {
99         struct drm_i915_private *i915 = to_i915(obj->base.dev);
100         int err;
101
102         assert_object_held_shared(obj);
103
104         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
105                 drm_dbg(&i915->drm,
106                         "Attempting to obtain a purgeable object\n");
107                 return -EFAULT;
108         }
109
110         err = obj->ops->get_pages(obj);
111         GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
112
113         return err;
114 }
115
116 /* Ensure that the associated pages are gathered from the backing storage
117  * and pinned into our object. i915_gem_object_pin_pages() may be called
118  * multiple times before they are released by a single call to
119  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
120  * either as a result of memory pressure (reaping pages under the shrinker)
121  * or as the object is itself released.
122  */
123 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
124 {
125         int err;
126
127         assert_object_held(obj);
128
129         assert_object_held_shared(obj);
130
131         if (unlikely(!i915_gem_object_has_pages(obj))) {
132                 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
133
134                 err = ____i915_gem_object_get_pages(obj);
135                 if (err)
136                         return err;
137
138                 smp_mb__before_atomic();
139         }
140         atomic_inc(&obj->mm.pages_pin_count);
141
142         return 0;
143 }
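
/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * pin/access/unpin pattern implied by the comment above, for a caller that
 * already holds the object lock. The helper name is hypothetical; only the
 * i915_gem_object_* calls are real. Every successful pin must be balanced
 * by an unpin before the pages can be reaped by the shrinker or released
 * together with the object.
 */
static int __maybe_unused example_touch_pages(struct drm_i915_gem_object *obj)
{
        int err;

        assert_object_held(obj);

        err = i915_gem_object_pin_pages(obj);   /* takes a pages_pin_count ref */
        if (err)
                return err;

        /* ... access obj->mm.pages or map them while they remain pinned ... */

        i915_gem_object_unpin_pages(obj);       /* drop the reference again */
        return 0;
}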
144
145 int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
146 {
147         struct i915_gem_ww_ctx ww;
148         int err;
149
150         i915_gem_ww_ctx_init(&ww, true);
151 retry:
152         err = i915_gem_object_lock(obj, &ww);
153         if (!err)
154                 err = i915_gem_object_pin_pages(obj);
155
156         if (err == -EDEADLK) {
157                 err = i915_gem_ww_ctx_backoff(&ww);
158                 if (!err)
159                         goto retry;
160         }
161         i915_gem_ww_ctx_fini(&ww);
162         return err;
163 }
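
/*
 * Editor's note: i915_gem_object_pin_pages_unlocked() above demonstrates the
 * ww-mutex dance used throughout i915. Below is a hedged sketch (not part of
 * the original file, helper name hypothetical) of the same pattern for a
 * caller that needs two objects locked and pinned at once; on success both
 * objects are left pinned for the caller to unpin later.
 */
static int __maybe_unused example_pin_two(struct drm_i915_gem_object *a,
                                          struct drm_i915_gem_object *b)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);        /* true: interruptible waits */
retry:
        err = i915_gem_object_lock(a, &ww);
        if (!err)
                err = i915_gem_object_lock(b, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(a);
        if (!err) {
                err = i915_gem_object_pin_pages(b);
                if (err)
                        i915_gem_object_unpin_pages(a);
        }
        if (err == -EDEADLK) {
                /* Lost the lock ordering: drop all ww locks and try again. */
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}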
164
165 /* Immediately discard the backing storage */
166 int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
167 {
168         if (obj->ops->truncate)
169                 return obj->ops->truncate(obj);
170
171         return 0;
172 }
173
174 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
175 {
176         struct radix_tree_iter iter;
177         void __rcu **slot;
178
179         rcu_read_lock();
180         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
181                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
182         radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
183                 radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
184         rcu_read_unlock();
185 }
186
187 static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
188 {
189         if (is_vmalloc_addr(ptr))
190                 vunmap(ptr);
191 }
192
193 static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
194 {
195         struct drm_i915_private *i915 = to_i915(obj->base.dev);
196         struct intel_gt *gt = to_gt(i915);
197
198         if (!obj->mm.tlb)
199                 return;
200
201         intel_gt_invalidate_tlb(gt, obj->mm.tlb);
202         obj->mm.tlb = 0;
203 }
204
205 struct sg_table *
206 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
207 {
208         struct sg_table *pages;
209
210         assert_object_held_shared(obj);
211
212         pages = fetch_and_zero(&obj->mm.pages);
213         if (IS_ERR_OR_NULL(pages))
214                 return pages;
215
216         if (i915_gem_object_is_volatile(obj))
217                 obj->mm.madv = I915_MADV_WILLNEED;
218
219         if (!i915_gem_object_has_self_managed_shrink_list(obj))
220                 i915_gem_object_make_unshrinkable(obj);
221
222         if (obj->mm.mapping) {
223                 unmap_object(obj, page_mask_bits(obj->mm.mapping));
224                 obj->mm.mapping = NULL;
225         }
226
227         __i915_gem_object_reset_page_iter(obj);
228         obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
229
230         flush_tlb_invalidate(obj);
231
232         return pages;
233 }
234
235 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
236 {
237         struct sg_table *pages;
238
239         if (i915_gem_object_has_pinned_pages(obj))
240                 return -EBUSY;
241
242         /* May be called by shrinker from within get_pages() (on another bo) */
243         assert_object_held_shared(obj);
244
245         i915_gem_object_release_mmap_offset(obj);
246
247         /*
248          * ->put_pages might need to allocate memory for the bit17 swizzle
249          * array, hence protect them from being reaped by removing them from gtt
250          * lists early.
251          */
252         pages = __i915_gem_object_unset_pages(obj);
253
254         /*
255          * XXX Temporary hijinx to avoid updating all backends to handle
256          * NULL pages. In the future, when we have more asynchronous
257          * get_pages backends we should be better able to handle the
258          * cancellation of the async task in a more uniform manner.
259          */
260         if (!IS_ERR_OR_NULL(pages))
261                 obj->ops->put_pages(obj, pages);
262
263         return 0;
264 }
265
266 /* The 'mapping' part of i915_gem_object_pin_map() below */
267 static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
268                                       enum i915_map_type type)
269 {
270         unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
271         struct page *stack[32], **pages = stack, *page;
272         struct sgt_iter iter;
273         pgprot_t pgprot;
274         void *vaddr;
275
276         switch (type) {
277         default:
278                 MISSING_CASE(type);
279                 fallthrough;    /* to use PAGE_KERNEL anyway */
280         case I915_MAP_WB:
281                 /*
282                  * On 32b, highmem uses a finite set of indirect PTEs (i.e.
283                  * vmap) to provide virtual mappings of the high pages.
284                  * As these are finite, map_new_virtual() must wait for some
285                  * other kmap() to finish when it runs out. If we map a large
286                  * number of objects, there is no method for it to tell us
287                  * to release the mappings, and we deadlock.
288                  *
289                  * However, if we make an explicit vmap of the page, that
290                  * uses a larger vmalloc arena, and also has the ability
291                  * to tell us to release unwanted mappings. Most importantly,
292                  * it will fail and propagate an error instead of waiting
293                  * forever.
294                  *
295                  * So if the page is beyond the 32b boundary, make an explicit
296                  * vmap.
297                  */
298                 if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
299                         return page_address(sg_page(obj->mm.pages->sgl));
300                 pgprot = PAGE_KERNEL;
301                 break;
302         case I915_MAP_WC:
303                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
304                 break;
305         }
306
307         if (n_pages > ARRAY_SIZE(stack)) {
308                 /* Too big for stack -- allocate temporary array instead */
309                 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
310                 if (!pages)
311                         return ERR_PTR(-ENOMEM);
312         }
313
314         i = 0;
315         for_each_sgt_page(page, iter, obj->mm.pages)
316                 pages[i++] = page;
317         vaddr = vmap(pages, n_pages, 0, pgprot);
318         if (pages != stack)
319                 kvfree(pages);
320
321         return vaddr ?: ERR_PTR(-ENOMEM);
322 }
323
324 static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
325                                      enum i915_map_type type)
326 {
327         resource_size_t iomap = obj->mm.region->iomap.base -
328                 obj->mm.region->region.start;
329         unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
330         unsigned long stack[32], *pfns = stack, i;
331         struct sgt_iter iter;
332         dma_addr_t addr;
333         void *vaddr;
334
335         GEM_BUG_ON(type != I915_MAP_WC);
336
337         if (n_pfn > ARRAY_SIZE(stack)) {
338                 /* Too big for stack -- allocate temporary array instead */
339                 pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
340                 if (!pfns)
341                         return ERR_PTR(-ENOMEM);
342         }
343
344         i = 0;
345         for_each_sgt_daddr(addr, iter, obj->mm.pages)
346                 pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
347         vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
348         if (pfns != stack)
349                 kvfree(pfns);
350
351         return vaddr ?: ERR_PTR(-ENOMEM);
352 }
353
354 /* get, pin, and map the pages of the object into kernel space */
355 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
356                               enum i915_map_type type)
357 {
358         enum i915_map_type has_type;
359         bool pinned;
360         void *ptr;
361         int err;
362
363         if (!i915_gem_object_has_struct_page(obj) &&
364             !i915_gem_object_has_iomem(obj))
365                 return ERR_PTR(-ENXIO);
366
367         if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
368                 return ERR_PTR(-EINVAL);
369
370         assert_object_held(obj);
371
372         pinned = !(type & I915_MAP_OVERRIDE);
373         type &= ~I915_MAP_OVERRIDE;
374
375         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
376                 if (unlikely(!i915_gem_object_has_pages(obj))) {
377                         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
378
379                         err = ____i915_gem_object_get_pages(obj);
380                         if (err)
381                                 return ERR_PTR(err);
382
383                         smp_mb__before_atomic();
384                 }
385                 atomic_inc(&obj->mm.pages_pin_count);
386                 pinned = false;
387         }
388         GEM_BUG_ON(!i915_gem_object_has_pages(obj));
389
390         /*
391          * For discrete, our CPU mappings need to be consistent in order to
392          * function correctly on !x86. When mapping things through TTM, we use
393          * the same rules to determine the caching type.
394          *
395          * The caching rules, starting from DG1:
396          *
397          *      - If the object can be placed in device local-memory, then the
398          *        pages should be allocated and mapped as write-combined only.
399          *
400          *      - Everything else is always allocated and mapped as write-back,
401          *        with the guarantee that everything is also coherent with the
402          *        GPU.
403          *
404          * Internal users of lmem are already expected to get this right, so no
405          * fudging needed there.
406          */
407         if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
408                 if (type != I915_MAP_WC && !obj->mm.n_placements) {
409                         ptr = ERR_PTR(-ENODEV);
410                         goto err_unpin;
411                 }
412
413                 type = I915_MAP_WC;
414         } else if (IS_DGFX(to_i915(obj->base.dev))) {
415                 type = I915_MAP_WB;
416         }
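
        /*
         * Editor's note (illustration only, not part of the original
         * source): how the rules above play out. A userspace object that
         * can be placed in local memory (it has a placement list) gets any
         * non-WC request silently upgraded to I915_MAP_WC; a kernel-internal
         * lmem-capable object with no placement list is expected to ask for
         * WC itself, and anything else fails with -ENODEV; a smem-only
         * object on a discrete card is always mapped write-back, matching
         * the TTM caching rules quoted above.
         */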
417
418         ptr = page_unpack_bits(obj->mm.mapping, &has_type);
419         if (ptr && has_type != type) {
420                 if (pinned) {
421                         ptr = ERR_PTR(-EBUSY);
422                         goto err_unpin;
423                 }
424
425                 unmap_object(obj, ptr);
426
427                 ptr = obj->mm.mapping = NULL;
428         }
429
430         if (!ptr) {
431                 err = i915_gem_object_wait_moving_fence(obj, true);
432                 if (err) {
433                         ptr = ERR_PTR(err);
434                         goto err_unpin;
435                 }
436
437                 if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
438                         ptr = ERR_PTR(-ENODEV);
439                 else if (i915_gem_object_has_struct_page(obj))
440                         ptr = i915_gem_object_map_page(obj, type);
441                 else
442                         ptr = i915_gem_object_map_pfn(obj, type);
443                 if (IS_ERR(ptr))
444                         goto err_unpin;
445
446                 obj->mm.mapping = page_pack_bits(ptr, type);
447         }
448
449         return ptr;
450
451 err_unpin:
452         atomic_dec(&obj->mm.pages_pin_count);
453         return ptr;
454 }
455
456 void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
457                                        enum i915_map_type type)
458 {
459         void *ret;
460
461         i915_gem_object_lock(obj, NULL);
462         ret = i915_gem_object_pin_map(obj, type);
463         i915_gem_object_unlock(obj);
464
465         return ret;
466 }
467
468 enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
469                                           struct drm_i915_gem_object *obj,
470                                           bool always_coherent)
471 {
472         /*
473          * Wa_22016122933: always return I915_MAP_WC for MTL
474          */
475         if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
476                 return I915_MAP_WC;
477         if (HAS_LLC(i915) || always_coherent)
478                 return I915_MAP_WB;
479         else
480                 return I915_MAP_WC;
481 }
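
/*
 * Editor's note: a minimal usage sketch (not part of the original file)
 * combining the helper above with the mapping API earlier in this file.
 * The wrapper name is hypothetical; the caller must check the returned
 * pointer with IS_ERR() and balance it with i915_gem_object_unpin_map().
 */
static __maybe_unused void *example_map_coherent(struct drm_i915_private *i915,
                                                 struct drm_i915_gem_object *obj)
{
        enum i915_map_type type;

        /* WB where CPU and GPU are coherent; lmem (and MTL) force WC. */
        type = i915_coherent_map_type(i915, obj, false);

        /* Takes the object lock, pins the pages and returns a kernel vaddr. */
        return i915_gem_object_pin_map_unlocked(obj, type);
}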
482
483 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
484                                  unsigned long offset,
485                                  unsigned long size)
486 {
487         enum i915_map_type has_type;
488         void *ptr;
489
490         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
491         GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
492                                      offset, size, obj->base.size));
493
494         wmb(); /* let all previous writes be visible to coherent partners */
495         obj->mm.dirty = true;
496
497         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
498                 return;
499
500         ptr = page_unpack_bits(obj->mm.mapping, &has_type);
501         if (has_type == I915_MAP_WC)
502                 return;
503
504         drm_clflush_virt_range(ptr + offset, size);
505         if (size == obj->base.size) {
506                 obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
507                 obj->cache_dirty = false;
508         }
509 }
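
/*
 * Editor's note: hedged sketch (not part of the original file, helper name
 * hypothetical) of the intended use of the flush helper above - write
 * through a cached kernel mapping, then make the write visible to
 * non-coherent readers before the GPU samples the buffer.
 */
static __maybe_unused int example_write_and_flush(struct drm_i915_gem_object *obj)
{
        u32 *map;

        map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(map))
                return PTR_ERR(map);

        map[0] = 0xdeadbeef;    /* CPU write through the kernel mapping */
        __i915_gem_object_flush_map(obj, 0, sizeof(*map)); /* clflush if needed */

        i915_gem_object_unpin_map(obj);
        return 0;
}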
510
511 void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
512 {
513         GEM_BUG_ON(!obj->mm.mapping);
514
515         /*
516          * We allow removing the mapping from underneath pinned pages!
517          *
518          * Furthermore, since this is an unsafe operation reserved only
519          * for construction time manipulation, we ignore locking prudence.
520          */
521         unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
522
523         i915_gem_object_unpin_map(obj);
524 }
525
526 struct scatterlist *
527 __i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
528                                    struct i915_gem_object_page_iter *iter,
529                                    pgoff_t n,
530                                    unsigned int *offset)
531
532 {
533         const bool dma = iter == &obj->mm.get_dma_page ||
534                          iter == &obj->ttm.get_io_page;
535         unsigned int idx, count;
536         struct scatterlist *sg;
537
538         might_sleep();
539         GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
540         if (!i915_gem_object_has_pinned_pages(obj))
541                 assert_object_held(obj);
542
543         /* As we iterate forward through the sg, we record each entry in a
544          * radixtree for quick repeated (backwards) lookups. If we have seen
545          * this index previously, we will have an entry for it.
546          *
547          * Initial lookup is O(N), but this is amortized to O(1) for
548          * sequential page access (where each new request is consecutive
549          * to the previous one). Repeated lookups are O(lg(obj->base.size)),
550          * i.e. O(1) with a large constant!
551          * i.e. O(1) with a large constant! */
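
        /*
         * Editor's note (illustration, not part of the original source):
         * for a scatterlist with three entries of 4, 2 and 1 pages, the
         * radix tree built below ends up holding:
         *
         *   index 0    -> sg entry 0        (direct pointer)
         *   index 1..3 -> xa_mk_value(0)    (refers back to index 0)
         *   index 4    -> sg entry 1
         *   index 5    -> xa_mk_value(4)
         *   index 6    -> sg entry 2
         *
         * A backwards lookup of page n therefore hits either the sg pointer
         * itself or a value entry naming the first index of that sg, from
         * which the offset within the entry is recovered in the 'lookup'
         * path at the end of this function.
         */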
552         if (n < READ_ONCE(iter->sg_idx))
553                 goto lookup;
554
555         mutex_lock(&iter->lock);
556
557         /* We prefer to reuse the last sg so that repeated lookup of this
558          * (or the subsequent) sg are fast - comparing against the last
559          * sg is faster than going through the radixtree.
560          */
561
562         sg = iter->sg_pos;
563         idx = iter->sg_idx;
564         count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
565
566         while (idx + count <= n) {
567                 void *entry;
568                 unsigned long i;
569                 int ret;
570
571                 /* If we cannot allocate and insert this entry, or the
572                  * individual pages from this range, cancel updating the
573                  * sg_idx so that on this lookup we are forced to linearly
574                  * scan onwards, but on future lookups we will try the
575                  * insertion again (in which case we need to be careful of
576                  * the error return reporting that we have already inserted
577                  * this index).
578                  */
579                 ret = radix_tree_insert(&iter->radix, idx, sg);
580                 if (ret && ret != -EEXIST)
581                         goto scan;
582
583                 entry = xa_mk_value(idx);
584                 for (i = 1; i < count; i++) {
585                         ret = radix_tree_insert(&iter->radix, idx + i, entry);
586                         if (ret && ret != -EEXIST)
587                                 goto scan;
588                 }
589
590                 idx += count;
591                 sg = ____sg_next(sg);
592                 count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
593         }
594
595 scan:
596         iter->sg_pos = sg;
597         iter->sg_idx = idx;
598
599         mutex_unlock(&iter->lock);
600
601         if (unlikely(n < idx)) /* insertion completed by another thread */
602                 goto lookup;
603
604         /* In case we failed to insert the entry into the radixtree, we need
605          * to look beyond the current sg.
606          */
607         while (idx + count <= n) {
608                 idx += count;
609                 sg = ____sg_next(sg);
610                 count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
611         }
612
613         *offset = n - idx;
614         return sg;
615
616 lookup:
617         rcu_read_lock();
618
619         sg = radix_tree_lookup(&iter->radix, n);
620         GEM_BUG_ON(!sg);
621
622         /* If this index is in the middle of a multi-page sg entry,
623          * the radix tree will contain a value entry that points
624          * to the start of that range. We will return the pointer to
625          * the base page and the offset of this page within the
626          * sg entry's range.
627          */
628         *offset = 0;
629         if (unlikely(xa_is_value(sg))) {
630                 unsigned long base = xa_to_value(sg);
631
632                 sg = radix_tree_lookup(&iter->radix, base);
633                 GEM_BUG_ON(!sg);
634
635                 *offset = n - base;
636         }
637
638         rcu_read_unlock();
639
640         return sg;
641 }
642
643 struct page *
644 __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
645 {
646         struct scatterlist *sg;
647         unsigned int offset;
648
649         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
650
651         sg = i915_gem_object_get_sg(obj, n, &offset);
652         return nth_page(sg_page(sg), offset);
653 }
654
655 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
656 struct page *
657 __i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
658 {
659         struct page *page;
660
661         page = i915_gem_object_get_page(obj, n);
662         if (!obj->mm.dirty)
663                 set_page_dirty(page);
664
665         return page;
666 }
667
668 dma_addr_t
669 __i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
670                                       pgoff_t n, unsigned int *len)
671 {
672         struct scatterlist *sg;
673         unsigned int offset;
674
675         sg = i915_gem_object_get_sg_dma(obj, n, &offset);
676
677         if (len)
678                 *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
679
680         return sg_dma_address(sg) + (offset << PAGE_SHIFT);
681 }
682
683 dma_addr_t
684 __i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
685 {
686         return i915_gem_object_get_dma_address_len(obj, n, NULL);
687 }