1 // SPDX-License-Identifier: GPL-2.0+
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include "vkms_drv.h"
8 static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
11 struct vkms_gem_object *obj;
14 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
16 return ERR_PTR(-ENOMEM);
18 size = roundup(size, PAGE_SIZE);
19 ret = drm_gem_object_init(dev, &obj->gem, size);
25 mutex_init(&obj->pages_lock);
30 void vkms_gem_free_object(struct drm_gem_object *obj)
32 struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
38 mutex_destroy(&gem->pages_lock);
39 drm_gem_object_release(obj);
43 vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
45 struct vm_area_struct *vma = vmf->vma;
46 struct vkms_gem_object *obj = vma->vm_private_data;
47 unsigned long vaddr = vmf->address;
50 vm_fault_t ret = VM_FAULT_SIGBUS;
52 page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
53 num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
55 if (page_offset > num_pages)
56 return VM_FAULT_SIGBUS;
58 mutex_lock(&obj->pages_lock);
60 get_page(obj->pages[page_offset]);
61 vmf->page = obj->pages[page_offset];
64 mutex_unlock(&obj->pages_lock);
67 struct address_space *mapping;
69 mapping = file_inode(obj->gem.filp)->i_mapping;
70 page = shmem_read_mapping_page(mapping, page_offset);
76 switch (PTR_ERR(page)) {
86 ret = VM_FAULT_SIGBUS;
89 WARN_ON(PTR_ERR(page));
90 ret = VM_FAULT_SIGBUS;
98 struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
99 struct drm_file *file,
103 struct vkms_gem_object *obj;
106 if (!file || !dev || !handle)
107 return ERR_PTR(-EINVAL);
109 obj = __vkms_gem_create(dev, size);
111 return ERR_CAST(obj);
113 ret = drm_gem_handle_create(file, &obj->gem, handle);
114 drm_gem_object_put_unlocked(&obj->gem);
121 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
122 struct drm_mode_create_dumb *args)
124 struct drm_gem_object *gem_obj;
127 if (!args || !dev || !file)
130 pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
131 size = pitch * args->height;
136 gem_obj = vkms_gem_create(dev, file, &args->handle, size);
138 return PTR_ERR(gem_obj);
140 args->size = gem_obj->size;
143 DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
148 static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
150 struct drm_gem_object *gem_obj = &vkms_obj->gem;
152 if (!vkms_obj->pages) {
153 struct page **pages = drm_gem_get_pages(gem_obj);
158 if (cmpxchg(&vkms_obj->pages, NULL, pages))
159 drm_gem_put_pages(gem_obj, pages, false, true);
162 return vkms_obj->pages;
165 void vkms_gem_vunmap(struct drm_gem_object *obj)
167 struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
169 mutex_lock(&vkms_obj->pages_lock);
170 if (vkms_obj->vmap_count < 1) {
171 WARN_ON(vkms_obj->vaddr);
172 WARN_ON(vkms_obj->pages);
173 mutex_unlock(&vkms_obj->pages_lock);
177 vkms_obj->vmap_count--;
179 if (vkms_obj->vmap_count == 0) {
180 vunmap(vkms_obj->vaddr);
181 vkms_obj->vaddr = NULL;
182 drm_gem_put_pages(obj, vkms_obj->pages, false, true);
183 vkms_obj->pages = NULL;
186 mutex_unlock(&vkms_obj->pages_lock);
189 int vkms_gem_vmap(struct drm_gem_object *obj)
191 struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
194 mutex_lock(&vkms_obj->pages_lock);
196 if (!vkms_obj->vaddr) {
197 unsigned int n_pages = obj->size >> PAGE_SHIFT;
198 struct page **pages = _get_pages(vkms_obj);
201 ret = PTR_ERR(pages);
205 vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
206 if (!vkms_obj->vaddr)
210 vkms_obj->vmap_count++;
215 drm_gem_put_pages(obj, vkms_obj->pages, false, true);
216 vkms_obj->pages = NULL;
218 mutex_unlock(&vkms_obj->pages_lock);