/* drivers/gpu/drm/vkms/vkms_gem.c */
1 // SPDX-License-Identifier: GPL-2.0+
2
3 #include <linux/shmem_fs.h>
4 #include <linux/vmalloc.h>
5
6 #include "vkms_drv.h"
7
8 static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
9                                                  u64 size)
10 {
11         struct vkms_gem_object *obj;
12         int ret;
13
14         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
15         if (!obj)
16                 return ERR_PTR(-ENOMEM);
17
18         size = roundup(size, PAGE_SIZE);
19         ret = drm_gem_object_init(dev, &obj->gem, size);
20         if (ret) {
21                 kfree(obj);
22                 return ERR_PTR(ret);
23         }
24
25         mutex_init(&obj->pages_lock);
26
27         return obj;
28 }
29
30 void vkms_gem_free_object(struct drm_gem_object *obj)
31 {
32         struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
33                                                    gem);
34
35         WARN_ON(gem->pages);
36         WARN_ON(gem->vaddr);
37
38         mutex_destroy(&gem->pages_lock);
39         drm_gem_object_release(obj);
40         kfree(gem);
41 }
42
43 vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
44 {
45         struct vm_area_struct *vma = vmf->vma;
46         struct vkms_gem_object *obj = vma->vm_private_data;
47         unsigned long vaddr = vmf->address;
48         pgoff_t page_offset;
49         loff_t num_pages;
50         vm_fault_t ret = VM_FAULT_SIGBUS;
51
52         page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
53         num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
54
55         if (page_offset > num_pages)
56                 return VM_FAULT_SIGBUS;
57
58         mutex_lock(&obj->pages_lock);
59         if (obj->pages) {
60                 get_page(obj->pages[page_offset]);
61                 vmf->page = obj->pages[page_offset];
62                 ret = 0;
63         }
64         mutex_unlock(&obj->pages_lock);
65         if (ret) {
66                 struct page *page;
67                 struct address_space *mapping;
68
69                 mapping = file_inode(obj->gem.filp)->i_mapping;
70                 page = shmem_read_mapping_page(mapping, page_offset);
71
72                 if (!IS_ERR(page)) {
73                         vmf->page = page;
74                         ret = 0;
75                 } else {
76                         switch (PTR_ERR(page)) {
77                         case -ENOSPC:
78                         case -ENOMEM:
79                                 ret = VM_FAULT_OOM;
80                                 break;
81                         case -EBUSY:
82                                 ret = VM_FAULT_RETRY;
83                                 break;
84                         case -EFAULT:
85                         case -EINVAL:
86                                 ret = VM_FAULT_SIGBUS;
87                                 break;
88                         default:
89                                 WARN_ON(PTR_ERR(page));
90                                 ret = VM_FAULT_SIGBUS;
91                                 break;
92                         }
93                 }
94         }
95         return ret;
96 }
97
98 struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
99                                        struct drm_file *file,
100                                        u32 *handle,
101                                        u64 size)
102 {
103         struct vkms_gem_object *obj;
104         int ret;
105
106         if (!file || !dev || !handle)
107                 return ERR_PTR(-EINVAL);
108
109         obj = __vkms_gem_create(dev, size);
110         if (IS_ERR(obj))
111                 return ERR_CAST(obj);
112
113         ret = drm_gem_handle_create(file, &obj->gem, handle);
114         drm_gem_object_put_unlocked(&obj->gem);
115         if (ret)
116                 return ERR_PTR(ret);
117
118         return &obj->gem;
119 }
120
121 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
122                      struct drm_mode_create_dumb *args)
123 {
124         struct drm_gem_object *gem_obj;
125         u64 pitch, size;
126
127         if (!args || !dev || !file)
128                 return -EINVAL;
129
130         pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
131         size = pitch * args->height;
132
133         if (!size)
134                 return -EINVAL;
135
136         gem_obj = vkms_gem_create(dev, file, &args->handle, size);
137         if (IS_ERR(gem_obj))
138                 return PTR_ERR(gem_obj);
139
140         args->size = gem_obj->size;
141         args->pitch = pitch;
142
143         DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
144
145         return 0;
146 }
147
/*
 * Pin the shmem backing pages for @vkms_obj and cache the page array in
 * vkms_obj->pages.  Tolerates concurrent callers: the cmpxchg() lets
 * exactly one of them publish its array; any loser releases its
 * duplicate via drm_gem_put_pages().
 *
 * Caller must hold vkms_obj->pages_lock (both current callers do —
 * TODO confirm this is the intended locking contract, since the
 * cmpxchg() alone would also make the publish safe).
 *
 * Returns the page array or an ERR_PTR() from drm_gem_get_pages().
 */
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
	struct drm_gem_object *gem_obj = &vkms_obj->gem;

	if (!vkms_obj->pages) {
		struct page **pages = drm_gem_get_pages(gem_obj);

		if (IS_ERR(pages))
			return pages;

		/* Lost the race to publish: keep the winner's array. */
		if (cmpxchg(&vkms_obj->pages, NULL, pages))
			drm_gem_put_pages(gem_obj, pages, false, true);
	}

	return vkms_obj->pages;
}
164
/*
 * Drop one vmap reference taken by vkms_gem_vmap().  When vmap_count
 * reaches zero, tear down the kernel mapping and release the backing
 * pages (dirty=false, accessed=true — matching the flags used on the
 * other put_pages paths in this file).
 *
 * An unbalanced call (vmap_count already zero) is a no-op apart from
 * WARNing if a mapping or page array was somehow left behind.
 */
void vkms_gem_vunmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);

	mutex_lock(&vkms_obj->pages_lock);
	if (vkms_obj->vmap_count < 1) {
		/* Count says unmapped, so nothing should still be cached. */
		WARN_ON(vkms_obj->vaddr);
		WARN_ON(vkms_obj->pages);
		mutex_unlock(&vkms_obj->pages_lock);
		return;
	}

	vkms_obj->vmap_count--;

	/* Last user gone: unmap and unpin. */
	if (vkms_obj->vmap_count == 0) {
		vunmap(vkms_obj->vaddr);
		vkms_obj->vaddr = NULL;
		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
		vkms_obj->pages = NULL;
	}

	mutex_unlock(&vkms_obj->pages_lock);
}
188
/*
 * Map the object's backing pages into kernel virtual address space,
 * refcounted via vmap_count.  The first caller pins the pages
 * (_get_pages()) and creates the vmap; subsequent callers only bump
 * the count.  Pairs with vkms_gem_vunmap().
 *
 * Returns 0 on success, the error from _get_pages(), or -ENOMEM if
 * vmap() itself fails.
 */
int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		/* obj->size is page-aligned (see __vkms_gem_create()). */
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr)
			goto err_vmap;
	}

	vkms_obj->vmap_count++;
	goto out;

err_vmap:
	ret = -ENOMEM;
	/* Undo the pinning done by _get_pages() above. */
	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
	vkms_obj->pages = NULL;
out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}