]> Git Repo - linux.git/blob - drivers/gpu/drm/udl/udl_gem.c
Merge tag 'dax-for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
[linux.git] / drivers / gpu / drm / udl / udl_gem.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 Red Hat
4  */
5
6 #include <drm/drmP.h>
7 #include "udl_drv.h"
8 #include <linux/shmem_fs.h>
9 #include <linux/dma-buf.h>
10
11 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
12                                             size_t size)
13 {
14         struct udl_gem_object *obj;
15
16         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
17         if (obj == NULL)
18                 return NULL;
19
20         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
21                 kfree(obj);
22                 return NULL;
23         }
24
25         obj->flags = UDL_BO_CACHEABLE;
26         return obj;
27 }
28
29 static int
30 udl_gem_create(struct drm_file *file,
31                struct drm_device *dev,
32                uint64_t size,
33                uint32_t *handle_p)
34 {
35         struct udl_gem_object *obj;
36         int ret;
37         u32 handle;
38
39         size = roundup(size, PAGE_SIZE);
40
41         obj = udl_gem_alloc_object(dev, size);
42         if (obj == NULL)
43                 return -ENOMEM;
44
45         ret = drm_gem_handle_create(file, &obj->base, &handle);
46         if (ret) {
47                 drm_gem_object_release(&obj->base);
48                 kfree(obj);
49                 return ret;
50         }
51
52         drm_gem_object_put_unlocked(&obj->base);
53         *handle_p = handle;
54         return 0;
55 }
56
57 static void update_vm_cache_attr(struct udl_gem_object *obj,
58                                  struct vm_area_struct *vma)
59 {
60         DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
61
62         /* non-cacheable as default. */
63         if (obj->flags & UDL_BO_CACHEABLE) {
64                 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65         } else if (obj->flags & UDL_BO_WC) {
66                 vma->vm_page_prot =
67                         pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
68         } else {
69                 vma->vm_page_prot =
70                         pgprot_noncached(vm_get_page_prot(vma->vm_flags));
71         }
72 }
73
74 int udl_dumb_create(struct drm_file *file,
75                     struct drm_device *dev,
76                     struct drm_mode_create_dumb *args)
77 {
78         args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
79         args->size = args->pitch * args->height;
80         return udl_gem_create(file, dev,
81                               args->size, &args->handle);
82 }
83
84 int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
85 {
86         int ret;
87
88         ret = drm_gem_mmap(filp, vma);
89         if (ret)
90                 return ret;
91
92         vma->vm_flags &= ~VM_PFNMAP;
93         vma->vm_flags |= VM_MIXEDMAP;
94
95         update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
96
97         return ret;
98 }
99
100 vm_fault_t udl_gem_fault(struct vm_fault *vmf)
101 {
102         struct vm_area_struct *vma = vmf->vma;
103         struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
104         struct page *page;
105         unsigned int page_offset;
106
107         page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
108
109         if (!obj->pages)
110                 return VM_FAULT_SIGBUS;
111
112         page = obj->pages[page_offset];
113         return vmf_insert_page(vma, vmf->address, page);
114 }
115
116 int udl_gem_get_pages(struct udl_gem_object *obj)
117 {
118         struct page **pages;
119
120         if (obj->pages)
121                 return 0;
122
123         pages = drm_gem_get_pages(&obj->base);
124         if (IS_ERR(pages))
125                 return PTR_ERR(pages);
126
127         obj->pages = pages;
128
129         return 0;
130 }
131
132 void udl_gem_put_pages(struct udl_gem_object *obj)
133 {
134         if (obj->base.import_attach) {
135                 kvfree(obj->pages);
136                 obj->pages = NULL;
137                 return;
138         }
139
140         drm_gem_put_pages(&obj->base, obj->pages, false, false);
141         obj->pages = NULL;
142 }
143
144 int udl_gem_vmap(struct udl_gem_object *obj)
145 {
146         int page_count = obj->base.size / PAGE_SIZE;
147         int ret;
148
149         if (obj->base.import_attach) {
150                 obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
151                 if (!obj->vmapping)
152                         return -ENOMEM;
153                 return 0;
154         }
155                 
156         ret = udl_gem_get_pages(obj);
157         if (ret)
158                 return ret;
159
160         obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
161         if (!obj->vmapping)
162                 return -ENOMEM;
163         return 0;
164 }
165
166 void udl_gem_vunmap(struct udl_gem_object *obj)
167 {
168         if (obj->base.import_attach) {
169                 dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
170                 return;
171         }
172
173         vunmap(obj->vmapping);
174
175         udl_gem_put_pages(obj);
176 }
177
/*
 * ->gem_free_object hook: tear down a udl BO when its last reference
 * is dropped.  Order matters: the kernel mapping goes first, then the
 * prime attachment, then any remaining pinned pages.
 */
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	/* Drop any kernel vmap before the backing pages go away.  For
	 * native BOs this also puts the pages (see udl_gem_vunmap()). */
	if (obj->vmapping)
		udl_gem_vunmap(obj);

	/* Imported buffer: detach from the exporter and drop the device
	 * reference (presumably taken at import time — confirm against
	 * the prime import path). */
	if (gem_obj->import_attach) {
		drm_prime_gem_destroy(gem_obj, obj->sg);
		put_device(gem_obj->dev->dev);
	}

	/* Catches BOs whose pages were pinned but never vmapped. */
	if (obj->pages)
		udl_gem_put_pages(obj);

	drm_gem_free_mmap_offset(gem_obj);
}
195
196 /* the dumb interface doesn't work with the GEM straight MMAP
197    interface, it expects to do MMAP on the drm fd, like normal */
198 int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
199                  uint32_t handle, uint64_t *offset)
200 {
201         struct udl_gem_object *gobj;
202         struct drm_gem_object *obj;
203         struct udl_device *udl = to_udl(dev);
204         int ret = 0;
205
206         mutex_lock(&udl->gem_lock);
207         obj = drm_gem_object_lookup(file, handle);
208         if (obj == NULL) {
209                 ret = -ENOENT;
210                 goto unlock;
211         }
212         gobj = to_udl_bo(obj);
213
214         ret = udl_gem_get_pages(gobj);
215         if (ret)
216                 goto out;
217         ret = drm_gem_create_mmap_offset(obj);
218         if (ret)
219                 goto out;
220
221         *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
222
223 out:
224         drm_gem_object_put_unlocked(&gobj->base);
225 unlock:
226         mutex_unlock(&udl->gem_lock);
227         return ret;
228 }
This page took 0.046896 seconds and 4 git commands to generate.