// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 *
 * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
 * For GEM callback helpers in struct &drm_gem_object functions, see likewise
 * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
 * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
 */
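
/*
 * A minimal sketch of how a driver might wire up these helpers; the
 * driver name and feature flags are hypothetical, while
 * DRM_GEM_SHMEM_DRIVER_OPS comes from <drm/drm_gem_shmem_helper.h>:
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 * DRM_GEM_SHMEM_DRIVER_OPS sets &drm_driver.dumb_create and
 * &drm_driver.gem_prime_import_sg_table to the helpers in this file.
 */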

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
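
/*
 * A minimal sketch of allocating a buffer with this helper; the
 * surrounding driver code and the size value are hypothetical:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, SZ_4K);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 * The last reference is dropped with drm_gem_object_put(&shmem->base),
 * which frees the object through drm_gem_shmem_object_free() and
 * drm_gem_shmem_free() below.
 */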

/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}

/*
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the
 * use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
				     struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0) {
		iosys_map_set_vaddr(map, shmem->vaddr);
		return 0;
	}

	if (obj->import_attach) {
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			if (WARN_ON(map->is_iomem)) {
				ret = -EIO;
				goto err_put_pages;
			}
			shmem->vaddr = map->vaddr;
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}

/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_vmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
					struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	shmem->vaddr = NULL;
}

/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops
 * to zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem, map);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
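
/*
 * A short sketch of mapping a buffer into the kernel with these helpers;
 * the surrounding driver code, data and len are hypothetical:
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memcpy_to(&map, 0, data, len);
 *	drm_gem_shmem_vunmap(shmem, &map);
 *
 * iosys_map_memcpy_to() comes from <linux/iosys-map.h> and handles both
 * I/O and system-memory mappings, which is why the helpers return a
 * struct iosys_map instead of a bare pointer.
 */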

static struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}

/*
 * Update madvise status, returns true if the object has not been purged,
 * else false.
 */
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
{
	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
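
/*
 * A sketch of how a driver's madvise ioctl might use this helper; the
 * args layout, the FOO_MADV_DONTNEED value and the shrinker_list are
 * hypothetical driver-side constructs:
 *
 *	args->retained = drm_gem_shmem_madvise(shmem, args->madv);
 *	if (args->retained && args->madv == FOO_MADV_DONTNEED)
 *		list_add_tail(&shmem->madv_list, &foo_dev->shrinker_list);
 *
 * Objects placed on such a list can later be reclaimed with
 * drm_gem_shmem_purge() below.
 */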

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/*
	 * Our goal here is to return as much of the memory as possible back
	 * to the system as we are called from OOM. To do this we must
	 * instruct the shmfs to drop all of its backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
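
/*
 * A sketch of a shrinker scan callback built on drm_gem_shmem_purge();
 * the foo_device structure, its list and its lock are hypothetical
 * driver constructs:
 *
 *	static unsigned long
 *	foo_gem_shrinker_scan(struct shrinker *s, struct shrink_control *sc)
 *	{
 *		struct foo_device *foo = container_of(s, struct foo_device, shrinker);
 *		struct drm_gem_shmem_object *shmem, *tmp;
 *		unsigned long freed = 0;
 *
 *		mutex_lock(&foo->shrinker_lock);
 *		list_for_each_entry_safe(shmem, tmp, &foo->shrinker_list, madv_list) {
 *			if (freed >= sc->nr_to_scan)
 *				break;
 *			if (drm_gem_shmem_purge(shmem)) {
 *				freed += shmem->base.size >> PAGE_SHIFT;
 *				list_del_init(&shmem->madv_list);
 *			}
 *		}
 *		mutex_unlock(&foo->shrinker_lock);
 *
 *		return freed ?: SHRINK_STOP;
 *	}
 */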

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
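
/*
 * For example, a 100x100 dumb buffer at 16 bpp gets min_pitch =
 * DIV_ROUND_UP(100 * 16, 8) = 200 bytes; with no pitch or size supplied
 * by userspace this yields args->size = PAGE_ALIGN(200 * 100) = 20480
 * bytes on a system with 4 KiB pages.
 */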

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	mutex_lock(&shmem->pages_lock);

	if (page_offset >= num_pages ||
	    WARN_ON_ONCE(!shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	mutex_unlock(&shmem->pages_lock);

	return ret;
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Drop the reference drm_gem_mmap_obj() acquired. */
		drm_gem_object_put(obj);
		vma->vm_private_data = NULL;

		return dma_buf_mmap(obj->dma_buf, vma, 0);
	}

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
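
/*
 * Drivers usually reach drm_gem_shmem_mmap() through the
 * &drm_gem_object_funcs.mmap callback installed above rather than by
 * calling it directly, with file operations generated by
 * DEFINE_DRM_GEM_FOPS(); the driver names here are hypothetical:
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static const struct drm_driver foo_drm_driver = {
 *		.fops = &foo_fops,
 *		// ...
 *	};
 */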

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @shmem: shmem GEM object
 * @p: DRM printer
 * @indent: Tab indentation level
 */
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent)
{
	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
 * table is created.
 *
 * This is the main function for drivers to get at backing storage, and it
 * hides the differences between dma-buf imported and natively allocated
 * objects. drm_gem_shmem_get_sg_table() should not be directly called by
 * drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
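
/*
 * A short sketch of a driver fetching the dma-mapped scatterlist before
 * programming hardware; foo_mmu_map() and foo_dev are hypothetical:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	foo_mmu_map(foo_dev, sgt);
 *
 * The table stays cached in shmem->sgt and is unmapped and released
 * again in drm_gem_shmem_free().
 */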

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);

MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");