// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

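/* Unmap and free the scatter/gather table and drop the backing pages. */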
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

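/*
 * Get the backing pages of the object, building and mapping the
 * scatter/gather table on first use.  Must be called with
 * etnaviv_obj->lock held.
 */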
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
                                            etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

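/* Set up a userspace mapping of the object according to its caching flags. */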
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                vma->vm_pgoff = 0;
                vma_set_file(vma, etnaviv_obj->base.filp);

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages;
        unsigned long pfn;
        pgoff_t pgoff;
        int err;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet.  Note that vmf_insert_pfn() is
         * specifically coded to take care of this, so we don't have to.
         */
        err = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (err)
                return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                return vmf_error(err);
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        pfn = page_to_pfn(pages[pgoff]);

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             pfn, pfn << PAGE_SHIFT);

        return vmf_insert_pfn(vma, vmf->address, pfn);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}

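/*
 * Look up a mapping of this object in the given MMU context; a NULL
 * context matches mappings whose context has been reaped.
 */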
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu_context *context)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->context == context)
                        return mapping;
        }

        return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put(&etnaviv_obj->base);
}

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
        u64 va)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us.  If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&mmu_context->lock);
                        if (mapping->context == mmu_context) {
                                if (va && mapping->iova != va) {
                                        etnaviv_iommu_reap_mapping(mapping);
                                        mapping = NULL;
                                } else {
                                        mapping->use += 1;
                                }
                        } else {
                                mapping = NULL;
                        }
                        mutex_unlock(&mmu_context->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
                                    mmu_context->global->memory_base,
                                    mapping, va);
        if (ret < 0)
                kfree(mapping);
        else
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

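/*
 * Map the object's backing pages into a write-combined kernel virtual
 * range.  Called with obj->lock held.
 */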
static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

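/* Translate ETNA_PREP_* flags into the matching DMA transfer direction. */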
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

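/*
 * Prepare the object for CPU access: wait for outstanding GPU work
 * (unless ETNA_PREP_NOSYNC is set) and, for cached objects, sync the
 * pages for the CPU.
 */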
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!dma_resv_test_signaled(obj->resv,
                                            dma_resv_usage_rw(write)))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
                                            true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
                                         etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct dma_resv *robj = obj->resv;
        unsigned long off = drm_vma_node_start(&obj->vma_node);
        int r;

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, etnaviv_obj->vaddr, obj->size);

        r = dma_resv_lock(robj, NULL);
        if (r)
                return;

        dma_resv_describe(robj, m);
        dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu_context *context = mapping->context;

                WARN_ON(mapping->use);

                if (context)
                        etnaviv_iommu_unmap_gem(context, mapping);

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        etnaviv_obj->ops->release(etnaviv_obj);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
        .fault = etnaviv_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
        .free = etnaviv_gem_free_object,
        .pin = etnaviv_gem_prime_pin,
        .unpin = etnaviv_gem_prime_unpin,
        .get_sg_table = etnaviv_gem_prime_get_sg_table,
        .vmap = etnaviv_gem_prime_vmap,
        .mmap = etnaviv_gem_mmap,
        .vm_ops = &vm_ops,
};

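/*
 * Allocate and initialise the parts common to all etnaviv GEM objects,
 * after validating the requested cache mode.
 */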
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;
        (*obj)->funcs = &etnaviv_gem_object_funcs;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        /*
         * Our buffers are kept pinned, so allocating them from the MOVABLE
         * zone is a really bad idea, and conflicts with CMA. See comments
         * above new_inode() why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put(obj);

        return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

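/*
 * Pin the userspace pages backing a userptr object.  Only the mm that
 * created the object may populate it.
 */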
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
        unsigned int gup_flags = FOLL_LONGTERM;

        might_lock_read(&current->mm->mmap_lock);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        if (!userptr->ro)
                gup_flags |= FOLL_WRITE;

        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
                if (ret < 0) {
                        unpin_user_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                unpin_user_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&etnaviv_obj->base);
        return ret;
}