// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <[email protected]> */

#include <linux/mm.h>
#include <linux/iosys-map.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-mapping.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

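/* Grow the backing storage of a heap BO: the heap starts at
 * lima_heap_init_nr_pages pages and doubles on each call, capped at the
 * full BO size, so GPU page faults can be serviced incrementally.
 */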
int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
{
        struct page **pages;
        struct address_space *mapping = bo->base.base.filp->f_mapping;
        struct device *dev = bo->base.base.dev->dev;
        size_t old_size = bo->heap_size;
        size_t new_size = bo->heap_size ? bo->heap_size * 2 :
                (lima_heap_init_nr_pages << PAGE_SHIFT);
        struct sg_table sgt;
        int i, ret;

        if (bo->heap_size >= bo->base.base.size)
                return -ENOSPC;

        new_size = min(new_size, bo->base.base.size);

        dma_resv_lock(bo->base.base.resv, NULL);

        if (bo->base.pages) {
                pages = bo->base.pages;
        } else {
                pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
                                       sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
                if (!pages) {
                        dma_resv_unlock(bo->base.base.resv);
                        return -ENOMEM;
                }

                bo->base.pages = pages;
                bo->base.pages_use_count = 1;

                mapping_set_unevictable(mapping);
        }

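        /* populate only the newly grown range; earlier pages are already in place */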
        for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
                struct page *page = shmem_read_mapping_page(mapping, i);

                if (IS_ERR(page)) {
                        dma_resv_unlock(bo->base.base.resv);
                        return PTR_ERR(page);
                }
                pages[i] = page;
        }

        dma_resv_unlock(bo->base.base.resv);

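        /* build a fresh sg table covering the whole heap, old pages included */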
        ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
                                        new_size, GFP_KERNEL);
        if (ret)
                return ret;

        if (bo->base.sgt) {
                dma_unmap_sgtable(dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
                sg_free_table(bo->base.sgt);
        } else {
                bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
                if (!bo->base.sgt) {
                        ret = -ENOMEM;
                        goto err_out0;
                }
        }

        ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                goto err_out1;

        *bo->base.sgt = sgt;

        if (vm) {
                ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
                if (ret)
                        goto err_out2;
        }

        bo->heap_size = new_size;
        return 0;

err_out2:
        dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
err_out1:
        kfree(bo->base.sgt);
        bo->base.sgt = NULL;
err_out0:
        sg_free_table(&sgt);
        return ret;
}

int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
                           u32 size, u32 flags, u32 *handle)
{
        int err;
        gfp_t mask;
        struct drm_gem_shmem_object *shmem;
        struct drm_gem_object *obj;
        struct lima_bo *bo;
        bool is_heap = flags & LIMA_BO_FLAG_HEAP;

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return PTR_ERR(shmem);

        obj = &shmem->base;

        /* the Mali Utgard GPU only supports a 32-bit address space */
        mask = mapping_gfp_mask(obj->filp->f_mapping);
        mask &= ~__GFP_HIGHMEM;
        mask |= __GFP_DMA32;
        mapping_set_gfp_mask(obj->filp->f_mapping, mask);

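        /* heap BOs start small and grow on demand; regular BOs get full backing now */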
        if (is_heap) {
                bo = to_lima_bo(obj);
                err = lima_heap_alloc(bo, NULL);
                if (err)
                        goto out;
        } else {
                struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(shmem);

                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
                        goto out;
                }
        }

        err = drm_gem_handle_create(file, obj, handle);

out:
        /* drop the reference from allocation - the handle holds its own now */
        drm_gem_object_put(obj);

        return err;
}

static void lima_gem_free_object(struct drm_gem_object *obj)
{
        struct lima_bo *bo = to_lima_bo(obj);

        if (!list_empty(&bo->va))
                dev_err(obj->dev->dev, "lima gem free bo still has va\n");

        drm_gem_shmem_free(&bo->base);
}

static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
        struct lima_bo *bo = to_lima_bo(obj);
        struct lima_drm_priv *priv = to_lima_drm_priv(file);
        struct lima_vm *vm = priv->vm;

        return lima_vm_bo_add(vm, bo, true);
}

static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
        struct lima_bo *bo = to_lima_bo(obj);
        struct lima_drm_priv *priv = to_lima_drm_priv(file);
        struct lima_vm *vm = priv->vm;

        lima_vm_bo_del(vm, bo);
}

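/* Heap BOs have backing storage that grows at runtime, so the heap_size
 * checks below reject pinning, vmapping and mmapping them.
 */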
static int lima_gem_pin(struct drm_gem_object *obj)
{
        struct lima_bo *bo = to_lima_bo(obj);

        if (bo->heap_size)
                return -EINVAL;

        return drm_gem_shmem_pin_locked(&bo->base);
}

static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        struct lima_bo *bo = to_lima_bo(obj);

        if (bo->heap_size)
                return -EINVAL;

        return drm_gem_shmem_vmap(&bo->base, map);
}

static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct lima_bo *bo = to_lima_bo(obj);

        if (bo->heap_size)
                return -EINVAL;

        return drm_gem_shmem_mmap(&bo->base, vma);
}

static const struct drm_gem_object_funcs lima_gem_funcs = {
        .free = lima_gem_free_object,
        .open = lima_gem_object_open,
        .close = lima_gem_object_close,
        .print_info = drm_gem_shmem_object_print_info,
        .pin = lima_gem_pin,
        .unpin = drm_gem_shmem_object_unpin,
        .get_sg_table = drm_gem_shmem_object_get_sg_table,
        .vmap = lima_gem_vmap,
        .vunmap = drm_gem_shmem_object_vunmap,
        .mmap = lima_gem_mmap,
        .vm_ops = &drm_gem_shmem_vm_ops,
};

struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
        struct lima_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        mutex_init(&bo->lock);
        INIT_LIST_HEAD(&bo->va);
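        /* map buffers write-combined on the CPU side */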
        bo->base.map_wc = true;
        bo->base.base.funcs = &lima_gem_funcs;

        return &bo->base.base;
}

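/* Look up a BO and report its GPU virtual address and mmap fake offset. */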
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
        struct drm_gem_object *obj;
        struct lima_bo *bo;
        struct lima_drm_priv *priv = to_lima_drm_priv(file);
        struct lima_vm *vm = priv->vm;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        bo = to_lima_bo(obj);

        *va = lima_vm_get_va(vm, bo);

        *offset = drm_vma_node_offset_addr(&obj->vma_node);

        drm_gem_object_put(obj);
        return 0;
}

static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
                            bool write, bool explicit)
{
        int err;

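        /* reserve a fence slot now; the job's fence is added after queueing */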
        err = dma_resv_reserve_fences(lima_bo_resv(bo), 1);
        if (err)
                return err;

        /* explicit sync relies on the user-passed dependency fences */
        if (explicit)
                return 0;

        return drm_sched_job_add_implicit_dependencies(&task->base,
                                                       &bo->base.base,
                                                       write);
}

static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
                if (!submit->in_sync[i])
                        continue;

                err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file,
                                                           submit->in_sync[i], 0);
                if (err)
                        return err;
        }

        return 0;
}

int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
        int i, err = 0;
        struct ww_acquire_ctx ctx;
        struct lima_drm_priv *priv = to_lima_drm_priv(file);
        struct lima_vm *vm = priv->vm;
        struct drm_syncobj *out_sync = NULL;
        struct dma_fence *fence;
        struct lima_bo **bos = submit->lbos;

        if (submit->out_sync) {
                out_sync = drm_syncobj_find(file, submit->out_sync);
                if (!out_sync)
                        return -ENOENT;
        }

        for (i = 0; i < submit->nr_bos; i++) {
                struct drm_gem_object *obj;
                struct lima_bo *bo;

                obj = drm_gem_object_lookup(file, submit->bos[i].handle);
                if (!obj) {
                        err = -ENOENT;
                        goto err_out0;
                }

                bo = to_lima_bo(obj);

                /* increase the refcount of the GPU VA mapping so it is not
                 * unmapped while the task executes; it is decreased when the
                 * task is done
                 */
                err = lima_vm_bo_add(vm, bo, false);
                if (err) {
                        drm_gem_object_put(obj);
                        goto err_out0;
                }

                bos[i] = bo;
        }

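        /* lock all BO reservations with one ww acquire context to avoid deadlocks */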
        err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
                                        submit->nr_bos, &ctx);
        if (err)
                goto err_out0;

        err = lima_sched_task_init(
                submit->task, submit->ctx->context + submit->pipe,
                bos, submit->nr_bos, vm);
        if (err)
                goto err_out1;

        err = lima_gem_add_deps(file, submit);
        if (err)
                goto err_out2;

        for (i = 0; i < submit->nr_bos; i++) {
                err = lima_gem_sync_bo(
                        submit->task, bos[i],
                        submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
                        submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
                if (err)
                        goto err_out2;
        }

        fence = lima_sched_context_queue_task(submit->task);

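        /* attach the job's fence to each BO so later users synchronize against it */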
        for (i = 0; i < submit->nr_bos; i++) {
                dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
                                   submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
                                   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
        }

        drm_gem_unlock_reservations((struct drm_gem_object **)bos,
                                    submit->nr_bos, &ctx);

        for (i = 0; i < submit->nr_bos; i++)
                drm_gem_object_put(&bos[i]->base.base);

        if (out_sync) {
                drm_syncobj_replace_fence(out_sync, fence);
                drm_syncobj_put(out_sync);
        }

        dma_fence_put(fence);

        return 0;

err_out2:
        lima_sched_task_fini(submit->task);
err_out1:
        drm_gem_unlock_reservations((struct drm_gem_object **)bos,
                                    submit->nr_bos, &ctx);
err_out0:
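        /* bos[] is filled in order, so stop at the first empty slot */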
        for (i = 0; i < submit->nr_bos; i++) {
                if (!bos[i])
                        break;
                lima_vm_bo_del(vm, bos[i]);
                drm_gem_object_put(&bos[i]->base.base);
        }
        if (out_sync)
                drm_syncobj_put(out_sync);
        return err;
}

int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
        bool write = op & LIMA_GEM_WAIT_WRITE;
        long ret, timeout;

        if (!op)
                return 0;

        timeout = drm_timeout_abs_to_jiffies(timeout_ns);

        ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
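        /* -ETIMEDOUT for a timed-out wait, -EBUSY for a zero-timeout poll */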
        if (ret == -ETIME)
                ret = timeout ? -ETIMEDOUT : -EBUSY;

        return ret;
}