// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_device *pfdev = obj->dev->dev_private;

        /*
         * Make sure the BO is no longer inserted in the shrinker list before
         * taking care of the destruction itself. If we don't do that we have a
         * race condition between this function and what's done in
         * panfrost_gem_shrinker_scan().
         */
        mutex_lock(&pfdev->shrinker_lock);
        list_del_init(&bo->base.madv_list);
        mutex_unlock(&pfdev->shrinker_lock);

        /*
         * If we still have mappings attached to the BO, there's a problem in
         * our refcounting.
         */
        WARN_ON_ONCE(!list_empty(&bo->mappings.list));

        if (bo->sgts) {
                int i;
                int n_sgt = bo->base.base.size / SZ_2M;

                for (i = 0; i < n_sgt; i++) {
                        if (bo->sgts[i].sgl) {
                                dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
                                                  DMA_BIDIRECTIONAL, 0);
                                sg_free_table(&bo->sgts[i]);
                        }
                }
                kvfree(bo->sgts);
        }

        drm_gem_shmem_free(&bo->base);
}

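/**
 * panfrost_gem_mapping_get - Look up a BO's mapping in a file's MMU context
 * @bo: BO to look up
 * @priv: file private data whose MMU context is matched against
 *
 * Returns the mapping with an extra reference taken, or NULL if the BO is
 * not mapped in @priv's address space. The caller must release the
 * reference with panfrost_gem_mapping_put().
 */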
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
                         struct panfrost_file_priv *priv)
{
        struct panfrost_gem_mapping *iter, *mapping = NULL;

        mutex_lock(&bo->mappings.lock);
        list_for_each_entry(iter, &bo->mappings.list, node) {
                if (iter->mmu == priv->mmu) {
                        kref_get(&iter->refcount);
                        mapping = iter;
                        break;
                }
        }
        mutex_unlock(&bo->mappings.lock);

        return mapping;
}

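/* Unmap the BO from the GPU and release its VA range in the MMU context. */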
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
        if (mapping->active)
                panfrost_mmu_unmap(mapping);

        spin_lock(&mapping->mmu->mm_lock);
        if (drm_mm_node_allocated(&mapping->mmnode))
                drm_mm_remove_node(&mapping->mmnode);
        spin_unlock(&mapping->mmu->mm_lock);
}

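/* kref release helper, called when the last mapping reference is dropped. */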
static void panfrost_gem_mapping_release(struct kref *kref)
{
        struct panfrost_gem_mapping *mapping;

        mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

        panfrost_gem_teardown_mapping(mapping);
        drm_gem_object_put(&mapping->obj->base.base);
        panfrost_mmu_ctx_put(mapping->mmu);
        kfree(mapping);
}

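/**
 * panfrost_gem_mapping_put - Release a reference on a mapping
 * @mapping: mapping to release, may be NULL
 *
 * Drops one reference; the mapping is torn down and freed when the last
 * reference goes away.
 */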
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
        if (!mapping)
                return;

        kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

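/*
 * Tear down all mappings attached to a BO. The caller must hold
 * bo->mappings.lock, as the list is walked without taking it here.
 */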
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
        struct panfrost_gem_mapping *mapping;

        list_for_each_entry(mapping, &bo->mappings.list, node)
                panfrost_gem_teardown_mapping(mapping);
}

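/*
 * Called by the DRM core when a GEM handle is created for this file.
 * Allocates a per-file mapping, reserves a GPU VA range for the BO in the
 * file's MMU context, and maps it right away unless the BO is a heap
 * whose pages are faulted in on demand.
 */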
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        int ret;
        size_t size = obj->size;
        u64 align;
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct panfrost_gem_mapping *mapping;

        mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
        if (!mapping)
                return -ENOMEM;

        INIT_LIST_HEAD(&mapping->node);
        kref_init(&mapping->refcount);
        drm_gem_object_get(obj);
        mapping->obj = bo;

        /*
         * Executable buffers cannot cross a 16MB boundary as the program
         * counter is 24 bits. We assume executable buffers will be less than
         * 16MB and that aligning executable buffers to their size will avoid
         * crossing a 16MB boundary.
         */
        if (!bo->noexec)
                align = size >> PAGE_SHIFT;
        else
                align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

        mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
        spin_lock(&mapping->mmu->mm_lock);
        ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
                                         size >> PAGE_SHIFT, align, color, 0);
        spin_unlock(&mapping->mmu->mm_lock);
        if (ret)
                goto err;

        if (!bo->is_heap) {
                ret = panfrost_mmu_map(mapping);
                if (ret)
                        goto err;
        }

        mutex_lock(&bo->mappings.lock);
        WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
        list_add_tail(&mapping->node, &bo->mappings.list);
        mutex_unlock(&bo->mappings.lock);

err:
        /* The success path falls through here with ret == 0, so this only
         * drops the mapping reference on failure.
         */
        if (ret)
                panfrost_gem_mapping_put(mapping);
        return ret;
}

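/*
 * Called by the DRM core when a GEM handle on this file is closed. Removes
 * the file's mapping from the BO's list and drops the reference that
 * panfrost_gem_open() created.
 */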
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct panfrost_file_priv *priv = file_priv->driver_priv;
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        struct panfrost_gem_mapping *mapping = NULL, *iter;

        mutex_lock(&bo->mappings.lock);
        list_for_each_entry(iter, &bo->mappings.list, node) {
                if (iter->mmu == priv->mmu) {
                        mapping = iter;
                        list_del(&iter->node);
                        break;
                }
        }
        mutex_unlock(&bo->mappings.lock);

        panfrost_gem_mapping_put(mapping);
}

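/* Refuse pinning for heap BOs: their backing pages are allocated on demand. */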
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);

        if (bo->is_heap)
                return -EINVAL;

        return drm_gem_shmem_pin(&bo->base);
}

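/*
 * Report residency/purgeability for memory accounting (e.g. fdinfo memory
 * stats): a BO is resident once shmem pages are attached, and purgeable
 * when userspace marked it DONTNEED via madvise.
 */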
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
{
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);
        enum drm_gem_object_status res = 0;

        if (bo->base.pages)
                res |= DRM_GEM_OBJECT_RESIDENT;

        if (bo->base.madv == PANFROST_MADV_DONTNEED)
                res |= DRM_GEM_OBJECT_PURGEABLE;

        return res;
}

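/*
 * Resident set size of the BO: heap BOs only count the pages that were
 * actually faulted in, while fully backed BOs count their whole size.
 */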
static size_t panfrost_gem_rss(struct drm_gem_object *obj)
{
        struct panfrost_gem_object *bo = to_panfrost_bo(obj);

        if (bo->is_heap) {
                return bo->heap_rss_size;
        } else if (bo->base.pages) {
                WARN_ON(bo->heap_rss_size);
                return bo->base.base.size;
        }

        return 0;
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
        .free = panfrost_gem_free_object,
        .open = panfrost_gem_open,
        .close = panfrost_gem_close,
        .print_info = drm_gem_shmem_object_print_info,
        .pin = panfrost_gem_pin,
        .unpin = drm_gem_shmem_object_unpin,
        .get_sg_table = drm_gem_shmem_object_get_sg_table,
        .vmap = drm_gem_shmem_object_vmap,
        .vunmap = drm_gem_shmem_object_vunmap,
        .mmap = drm_gem_shmem_object_mmap,
        .status = panfrost_gem_status,
        .rss = panfrost_gem_rss,
        .vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
        struct panfrost_device *pfdev = dev->dev_private;
        struct panfrost_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&obj->mappings.list);
        mutex_init(&obj->mappings.lock);
        obj->base.base.funcs = &panfrost_gem_funcs;
        obj->base.map_wc = !pfdev->coherent;

        return &obj->base.base;
}

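/*
 * Allocate a BO of @size bytes. @flags is a mask of the PANFROST_BO_NOEXEC
 * and PANFROST_BO_HEAP UAPI flags.
 */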
struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
        struct drm_gem_shmem_object *shmem;
        struct panfrost_gem_object *bo;

        /* Round up heap allocations to 2MB to keep fault handling simple */
        if (flags & PANFROST_BO_HEAP)
                size = roundup(size, SZ_2M);

        shmem = drm_gem_shmem_create(dev, size);
        if (IS_ERR(shmem))
                return ERR_CAST(shmem);

        bo = to_panfrost_bo(&shmem->base);
        bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
        bo->is_heap = !!(flags & PANFROST_BO_HEAP);

        return bo;
}

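/*
 * Import a dma-buf as a Panfrost BO. Imported buffers are always treated
 * as non-executable.
 */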
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
                                   struct dma_buf_attachment *attach,
                                   struct sg_table *sgt)
{
        struct drm_gem_object *obj;
        struct panfrost_gem_object *bo;

        obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        bo = to_panfrost_bo(obj);
        bo->noexec = true;

        return obj;
}