Commit | Line | Data |
---|---|---|
c30707be | 1 | /* |
7e416174 | 2 | * |
c30707be RSZ | 3 | * drivers/staging/android/ion/ion.c |
4 | * | |
5 | * Copyright (C) 2011 Google, Inc. | |
6 | * | |
7 | * This software is licensed under the terms of the GNU General Public | |
8 | * License version 2, as published by the Free Software Foundation, and | |
9 | * may be copied, distributed, and modified under those terms. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | */ | |
17 | ||
18 | #include <linux/device.h> | |
ab0c069a | 19 | #include <linux/err.h> |
c30707be | 20 | #include <linux/file.h> |
fe2faea7 | 21 | #include <linux/freezer.h> |
c30707be RSZ | 22 | #include <linux/fs.h> |
23 | #include <linux/anon_inodes.h> | |
fe2faea7 | 24 | #include <linux/kthread.h> |
c30707be | 25 | #include <linux/list.h> |
2991b7a0 | 26 | #include <linux/memblock.h> |
c30707be RSZ | 27 | #include <linux/miscdevice.h> |
28 | #include <linux/export.h> | |
29 | #include <linux/mm.h> | |
30 | #include <linux/mm_types.h> | |
31 | #include <linux/rbtree.h> | |
c30707be RSZ | 32 | #include <linux/slab.h> |
33 | #include <linux/seq_file.h> | |
34 | #include <linux/uaccess.h> | |
c13bd1c4 | 35 | #include <linux/vmalloc.h> |
c30707be | 36 | #include <linux/debugfs.h> |
b892bf75 | 37 | #include <linux/dma-buf.h> |
47b40458 | 38 | #include <linux/idr.h> |
c30707be RSZ | 39 | |
40 | #include "ion.h" | |
41 | #include "ion_priv.h" | |
827c849e | 42 | #include "compat_ion.h" |
c30707be | 43 | |
13ba7805 RSZ | 44 | bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) |
45 | { | |
e1d855b0 JS | 46 | return (buffer->flags & ION_FLAG_CACHED) && |
47 | !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); | |
13ba7805 RSZ | 48 | } |
49 | ||
45b17a80 RSZ | 50 | bool ion_buffer_cached(struct ion_buffer *buffer) |
51 | { | |
c13bd1c4 RSZ | 52 | return !!(buffer->flags & ION_FLAG_CACHED); |
53 | } | |
54 | ||
55 | static inline struct page *ion_buffer_page(struct page *page) | |
56 | { | |
57 | return (struct page *)((unsigned long)page & ~(1UL)); | |
58 | } | |
59 | ||
60 | static inline bool ion_buffer_page_is_dirty(struct page *page) | |
61 | { | |
62 | return !!((unsigned long)page & 1UL); | |
63 | } | |
64 | ||
65 | static inline void ion_buffer_page_dirty(struct page **page) | |
66 | { | |
67 | *page = (struct page *)((unsigned long)(*page) | 1UL); | |
68 | } | |
69 | ||
70 | static inline void ion_buffer_page_clean(struct page **page) | |
71 | { | |
72 | *page = (struct page *)((unsigned long)(*page) & ~(1UL)); | |
45b17a80 RSZ | 73 | } |
74 | ||
c30707be RSZ | 75 | /* this function should only be called while dev->lock is held */ |
76 | static void ion_buffer_add(struct ion_device *dev, | |
77 | struct ion_buffer *buffer) | |
78 | { | |
79 | struct rb_node **p = &dev->buffers.rb_node; | |
80 | struct rb_node *parent = NULL; | |
81 | struct ion_buffer *entry; | |
82 | ||
83 | while (*p) { | |
84 | parent = *p; | |
85 | entry = rb_entry(parent, struct ion_buffer, node); | |
86 | ||
87 | if (buffer < entry) { | |
88 | p = &(*p)->rb_left; | |
89 | } else if (buffer > entry) { | |
90 | p = &(*p)->rb_right; | |
91 | } else { | |
92 | pr_err("%s: buffer already found.", __func__); | |
93 | BUG(); | |
94 | } | |
95 | } | |
96 | ||
97 | rb_link_node(&buffer->node, parent, p); | |
98 | rb_insert_color(&buffer->node, &dev->buffers); | |
99 | } | |
100 | ||
101 | /* this function should only be called while dev->lock is held */ | |
102 | static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, | |
121ca0c6 JA | 103 | struct ion_device *dev, |
104 | unsigned long len, | |
105 | unsigned long align, | |
106 | unsigned long flags) | |
c30707be RSZ | 107 | { |
108 | struct ion_buffer *buffer; | |
29ae6bc7 | 109 | struct sg_table *table; |
a46b6b2d RSZ | 110 | struct scatterlist *sg; |
111 | int i, ret; | |
c30707be | 112 | |
411059f7 | 113 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); |
c30707be RSZ | 114 | if (!buffer) |
115 | return ERR_PTR(-ENOMEM); | |
116 | ||
117 | buffer->heap = heap; | |
13ba7805 | 118 | buffer->flags = flags; |
c30707be RSZ | 119 | kref_init(&buffer->ref); |
120 | ||
121 | ret = heap->ops->allocate(heap, buffer, len, align, flags); | |
fe2faea7 | 122 | |
c30707be | 123 | if (ret) { |
fe2faea7 RSZ | 124 | if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) |
125 | goto err2; | |
126 | ||
ea313b5f | 127 | ion_heap_freelist_drain(heap, 0); |
fe2faea7 RSZ | 128 | ret = heap->ops->allocate(heap, buffer, len, align, |
129 | flags); | |
130 | if (ret) | |
131 | goto err2; | |
c30707be | 132 | } |
29ae6bc7 | 133 | |
f82ad60e LA | 134 | if (buffer->sg_table == NULL) { |
135 | WARN_ONCE(1, "This heap needs to set the sgtable"); | |
a56d092a R | 136 | ret = -EINVAL; |
137 | goto err1; | |
29ae6bc7 | 138 | } |
a56d092a | 139 | |
f82ad60e LA | 140 | table = buffer->sg_table; |
141 | buffer->dev = dev; | |
142 | buffer->size = len; | |
143 | ||
13ba7805 | 144 | if (ion_buffer_fault_user_mappings(buffer)) { |
c13bd1c4 RSZ | 145 | int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; |
146 | struct scatterlist *sg; | |
147 | int i, j, k = 0; | |
148 | ||
149 | buffer->pages = vmalloc(sizeof(struct page *) * num_pages); | |
150 | if (!buffer->pages) { | |
151 | ret = -ENOMEM; | |
f82ad60e | 152 | goto err1; |
c13bd1c4 RSZ | 153 | } |
154 | ||
155 | for_each_sg(table->sgl, sg, table->nents, i) { | |
156 | struct page *page = sg_page(sg); | |
157 | ||
06e0dcae | 158 | for (j = 0; j < sg->length / PAGE_SIZE; j++) |
c13bd1c4 | 159 | buffer->pages[k++] = page++; |
56a7c185 | 160 | } |
56a7c185 RSZ | 161 | } |
162 | ||
163 | buffer->dev = dev; | |
164 | buffer->size = len; | |
165 | INIT_LIST_HEAD(&buffer->vmas); | |
c30707be | 166 | mutex_init(&buffer->lock); |
7e416174 SR | 167 | /* |
168 | * this will set up dma addresses for the sglist -- it is not | |
169 | * technically correct as per the dma api -- a specific | |
170 | * device isn't really taking ownership here. However, in practice on | |
171 | * our systems the only dma_address space is physical addresses. | |
172 | * Additionally, we can't afford the overhead of invalidating every | |
173 | * allocation via dma_map_sg. The implicit contract here is that | |
174 | * memory coming from the heaps is ready for DMA, i.e. if it has a |
175 | * cached mapping that mapping has been invalidated | |
176 | */ | |
70bc916b | 177 | for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) { |
a46b6b2d | 178 | sg_dma_address(sg) = sg_phys(sg); |
70bc916b LD | 179 | sg_dma_len(sg) = sg->length; |
180 | } | |
8d7ab9a9 | 181 | mutex_lock(&dev->buffer_lock); |
c30707be | 182 | ion_buffer_add(dev, buffer); |
8d7ab9a9 | 183 | mutex_unlock(&dev->buffer_lock); |
c30707be | 184 | return buffer; |
d3c0bced | 185 | |
c13bd1c4 | 186 | err1: |
a56d092a | 187 | heap->ops->free(buffer); |
fe2faea7 | 188 | err2: |
d3c0bced RSZ | 189 | kfree(buffer); |
190 | return ERR_PTR(ret); | |
c30707be RSZ | 191 | } |
192 | ||
ea313b5f | 193 | void ion_buffer_destroy(struct ion_buffer *buffer) |
c30707be | 194 | { |
54ac0784 KC | 195 | if (WARN_ON(buffer->kmap_cnt > 0)) |
196 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); | |
c30707be | 197 | buffer->heap->ops->free(buffer); |
698f140d | 198 | vfree(buffer->pages); |
c30707be RSZ | 199 | kfree(buffer); |
200 | } | |
201 | ||
ea313b5f | 202 | static void _ion_buffer_destroy(struct kref *kref) |
fe2faea7 RSZ | 203 | { |
204 | struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); | |
205 | struct ion_heap *heap = buffer->heap; | |
206 | struct ion_device *dev = buffer->dev; | |
207 | ||
208 | mutex_lock(&dev->buffer_lock); | |
209 | rb_erase(&buffer->node, &dev->buffers); | |
210 | mutex_unlock(&dev->buffer_lock); | |
211 | ||
ea313b5f RSZ | 212 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) |
213 | ion_heap_freelist_add(heap, buffer); | |
214 | else | |
215 | ion_buffer_destroy(buffer); | |
fe2faea7 RSZ | 216 | } |
217 | ||
c30707be RSZ | 218 | static void ion_buffer_get(struct ion_buffer *buffer) |
219 | { | |
220 | kref_get(&buffer->ref); | |
221 | } | |
222 | ||
223 | static int ion_buffer_put(struct ion_buffer *buffer) | |
224 | { | |
ea313b5f | 225 | return kref_put(&buffer->ref, _ion_buffer_destroy); |
c30707be RSZ | 226 | } |
227 | ||
5ad7bc3a RSZ | 228 | static void ion_buffer_add_to_handle(struct ion_buffer *buffer) |
229 | { | |
8d7ab9a9 | 230 | mutex_lock(&buffer->lock); |
5ad7bc3a | 231 | buffer->handle_count++; |
8d7ab9a9 | 232 | mutex_unlock(&buffer->lock); |
5ad7bc3a RSZ | 233 | } |
234 | ||
235 | static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) | |
236 | { | |
237 | /* | |
238 | * when a buffer is removed from a handle, if it is not in | |
239 | * any other handles, copy the taskcomm and the pid of the | |
240 | * process it's being removed from into the buffer. At this | |
241 | * point there will be no way to track what processes this buffer is | |
242 | * being used by, it only exists as a dma_buf file descriptor. | |
243 | * The taskcomm and pid can provide a debug hint as to where this fd | |
244 | * is in the system | |
245 | */ | |
8d7ab9a9 | 246 | mutex_lock(&buffer->lock); |
5ad7bc3a RSZ | 247 | buffer->handle_count--; |
248 | BUG_ON(buffer->handle_count < 0); | |
249 | if (!buffer->handle_count) { | |
250 | struct task_struct *task; | |
251 | ||
252 | task = current->group_leader; | |
253 | get_task_comm(buffer->task_comm, task); | |
254 | buffer->pid = task_pid_nr(task); | |
255 | } | |
8d7ab9a9 | 256 | mutex_unlock(&buffer->lock); |
5ad7bc3a RSZ | 257 | } |
258 | ||
c30707be | 259 | static struct ion_handle *ion_handle_create(struct ion_client *client, |
121ca0c6 | 260 | struct ion_buffer *buffer) |
c30707be RSZ | 261 | { |
262 | struct ion_handle *handle; | |
263 | ||
411059f7 | 264 | handle = kzalloc(sizeof(*handle), GFP_KERNEL); |
c30707be RSZ | 265 | if (!handle) |
266 | return ERR_PTR(-ENOMEM); | |
267 | kref_init(&handle->ref); | |
268 | RB_CLEAR_NODE(&handle->node); | |
269 | handle->client = client; | |
270 | ion_buffer_get(buffer); | |
5ad7bc3a | 271 | ion_buffer_add_to_handle(buffer); |
c30707be RSZ | 272 | handle->buffer = buffer; |
273 | ||
274 | return handle; | |
275 | } | |
276 | ||
b892bf75 RSZ | 277 | static void ion_handle_kmap_put(struct ion_handle *); |
278 | ||
c30707be RSZ | 279 | static void ion_handle_destroy(struct kref *kref) |
280 | { | |
281 | struct ion_handle *handle = container_of(kref, struct ion_handle, ref); | |
b892bf75 RSZ | 282 | struct ion_client *client = handle->client; |
283 | struct ion_buffer *buffer = handle->buffer; | |
284 | ||
b892bf75 | 285 | mutex_lock(&buffer->lock); |
2900cd76 | 286 | while (handle->kmap_cnt) |
b892bf75 RSZ | 287 | ion_handle_kmap_put(handle); |
288 | mutex_unlock(&buffer->lock); | |
289 | ||
47b40458 | 290 | idr_remove(&client->idr, handle->id); |
c30707be | 291 | if (!RB_EMPTY_NODE(&handle->node)) |
b892bf75 | 292 | rb_erase(&handle->node, &client->handles); |
b892bf75 | 293 | |
5ad7bc3a | 294 | ion_buffer_remove_from_handle(buffer); |
b892bf75 | 295 | ion_buffer_put(buffer); |
5ad7bc3a | 296 | |
c30707be RSZ | 297 | kfree(handle); |
298 | } | |
299 | ||
c30707be RSZ | 300 | static void ion_handle_get(struct ion_handle *handle) |
301 | { | |
302 | kref_get(&handle->ref); | |
303 | } | |
304 | ||
b1fa6d8a | 305 | int ion_handle_put_nolock(struct ion_handle *handle) |
9590232b | 306 | { |
45052461 | 307 | return kref_put(&handle->ref, ion_handle_destroy); |
9590232b EL | 308 | } |
309 | ||
b1fa6d8a | 310 | int ion_handle_put(struct ion_handle *handle) |
c30707be | 311 | { |
83271f62 CC | 312 | struct ion_client *client = handle->client; |
313 | int ret; | |
314 | ||
315 | mutex_lock(&client->lock); | |
9590232b | 316 | ret = ion_handle_put_nolock(handle); |
83271f62 CC | 317 | mutex_unlock(&client->lock); |
318 | ||
319 | return ret; | |
c30707be RSZ | 320 | } |
321 | ||
322 | static struct ion_handle *ion_handle_lookup(struct ion_client *client, | |
323 | struct ion_buffer *buffer) | |
324 | { | |
e1cf3682 CC | 325 | struct rb_node *n = client->handles.rb_node; |
326 | ||
327 | while (n) { | |
328 | struct ion_handle *entry = rb_entry(n, struct ion_handle, node); | |
10f62861 | 329 | |
e1cf3682 CC | 330 | if (buffer < entry->buffer) |
331 | n = n->rb_left; | |
332 | else if (buffer > entry->buffer) | |
333 | n = n->rb_right; | |
334 | else | |
335 | return entry; | |
c30707be | 336 | } |
9e907654 | 337 | return ERR_PTR(-EINVAL); |
c30707be RSZ | 338 | } |
339 | ||
b1fa6d8a LA | 340 | struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, |
341 | int id) | |
47b40458 | 342 | { |
83271f62 CC | 343 | struct ion_handle *handle; |
344 | ||
83271f62 CC | 345 | handle = idr_find(&client->idr, id); |
346 | if (handle) | |
347 | ion_handle_get(handle); | |
83271f62 CC | 348 | |
349 | return handle ? handle : ERR_PTR(-EINVAL); | |
47b40458 CC | 350 | } |
351 | ||
b1fa6d8a | 352 | struct ion_handle *ion_handle_get_by_id(struct ion_client *client, |
0045c8dd | 353 | int id) |
9590232b EL | 354 | { |
355 | struct ion_handle *handle; | |
356 | ||
357 | mutex_lock(&client->lock); | |
358 | handle = ion_handle_get_by_id_nolock(client, id); | |
359 | mutex_unlock(&client->lock); | |
360 | ||
361 | return handle; | |
362 | } | |
363 | ||
e1d855b0 JS | 364 | static bool ion_handle_validate(struct ion_client *client, |
365 | struct ion_handle *handle) | |
c30707be | 366 | { |
83271f62 | 367 | WARN_ON(!mutex_is_locked(&client->lock)); |
51108985 | 368 | return idr_find(&client->idr, handle->id) == handle; |
c30707be RSZ | 369 | } |
370 | ||
47b40458 | 371 | static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) |
c30707be | 372 | { |
b26661d1 | 373 | int id; |
c30707be RSZ | 374 | struct rb_node **p = &client->handles.rb_node; |
375 | struct rb_node *parent = NULL; | |
376 | struct ion_handle *entry; | |
377 | ||
b26661d1 CC | 378 | id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); |
379 | if (id < 0) | |
380 | return id; | |
47b40458 | 381 | |
b26661d1 | 382 | handle->id = id; |
47b40458 | 383 | |
c30707be RSZ | 384 | while (*p) { |
385 | parent = *p; | |
386 | entry = rb_entry(parent, struct ion_handle, node); | |
387 | ||
e1cf3682 | 388 | if (handle->buffer < entry->buffer) |
c30707be | 389 | p = &(*p)->rb_left; |
e1cf3682 | 390 | else if (handle->buffer > entry->buffer) |
c30707be RSZ | 391 | p = &(*p)->rb_right; |
392 | else | |
393 | WARN(1, "%s: buffer already found.", __func__); | |
394 | } | |
395 | ||
396 | rb_link_node(&handle->node, parent, p); | |
397 | rb_insert_color(&handle->node, &client->handles); | |
47b40458 CC | 398 | |
399 | return 0; | |
c30707be RSZ | 400 | } |
401 | ||
402 | struct ion_handle *ion_alloc(struct ion_client *client, size_t len, | |
38eeeb51 | 403 | size_t align, unsigned int heap_id_mask, |
56a7c185 | 404 | unsigned int flags) |
c30707be | 405 | { |
c30707be RSZ | 406 | struct ion_handle *handle; |
407 | struct ion_device *dev = client->dev; | |
408 | struct ion_buffer *buffer = NULL; | |
cd69488c | 409 | struct ion_heap *heap; |
47b40458 | 410 | int ret; |
c30707be | 411 | |
e61fc915 | 412 | pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, |
38eeeb51 | 413 | len, align, heap_id_mask, flags); |
c30707be RSZ | 414 | /* |
415 | * traverse the list of heaps available in this system in priority | |
416 | * order. If the heap type is supported by the client, and matches the | |
417 | * request of the caller allocate from it. Repeat until allocate has | |
418 | * succeeded or all heaps have been tried | |
419 | */ | |
54ac0784 KC | 420 | len = PAGE_ALIGN(len); |
421 | ||
a14baf71 CC | 422 | if (!len) |
423 | return ERR_PTR(-EINVAL); | |
424 | ||
8d7ab9a9 | 425 | down_read(&dev->lock); |
cd69488c | 426 | plist_for_each_entry(heap, &dev->heaps, node) { |
38eeeb51 RSZ | 427 | /* if the caller didn't specify this heap id */ |
428 | if (!((1 << heap->id) & heap_id_mask)) | |
c30707be RSZ | 429 | continue; |
430 | buffer = ion_buffer_create(heap, dev, len, align, flags); | |
9e907654 | 431 | if (!IS_ERR(buffer)) |
c30707be RSZ | 432 | break; |
433 | } | |
8d7ab9a9 | 434 | up_read(&dev->lock); |
c30707be | 435 | |
54ac0784 KC | 436 | if (buffer == NULL) |
437 | return ERR_PTR(-ENODEV); | |
438 | ||
439 | if (IS_ERR(buffer)) | |
464a5028 | 440 | return ERR_CAST(buffer); |
c30707be RSZ | 441 | |
442 | handle = ion_handle_create(client, buffer); | |
443 | ||
c30707be RSZ | 444 | /* |
445 | * ion_buffer_create will create a buffer with a ref_cnt of 1, | |
446 | * and ion_handle_create will take a second reference, drop one here | |
447 | */ | |
448 | ion_buffer_put(buffer); | |
449 | ||
47b40458 CC | 450 | if (IS_ERR(handle)) |
451 | return handle; | |
c30707be | 452 | |
47b40458 CC | 453 | mutex_lock(&client->lock); |
454 | ret = ion_handle_add(client, handle); | |
83271f62 | 455 | mutex_unlock(&client->lock); |
47b40458 CC | 456 | if (ret) { |
457 | ion_handle_put(handle); | |
458 | handle = ERR_PTR(ret); | |
459 | } | |
29ae6bc7 | 460 | |
c30707be RSZ | 461 | return handle; |
462 | } | |
ee4c8aa9 | 463 | EXPORT_SYMBOL(ion_alloc); |
c30707be | 464 | |
b1fa6d8a LA | 465 | void ion_free_nolock(struct ion_client *client, |
466 | struct ion_handle *handle) | |
c30707be | 467 | { |
c2bbedf0 | 468 | if (!ion_handle_validate(client, handle)) { |
a9bb075d | 469 | WARN(1, "%s: invalid handle passed to free.\n", __func__); |
c30707be RSZ | 470 | return; |
471 | } | |
9590232b EL | 472 | ion_handle_put_nolock(handle); |
473 | } | |
474 | ||
475 | void ion_free(struct ion_client *client, struct ion_handle *handle) | |
476 | { | |
477 | BUG_ON(client != handle->client); | |
478 | ||
479 | mutex_lock(&client->lock); | |
480 | ion_free_nolock(client, handle); | |
0e9c03a5 | 481 | mutex_unlock(&client->lock); |
c30707be | 482 | } |
ee4c8aa9 | 483 | EXPORT_SYMBOL(ion_free); |
c30707be | 484 | |
0f34faf8 RSZ | 485 | static void *ion_buffer_kmap_get(struct ion_buffer *buffer) |
486 | { | |
487 | void *vaddr; | |
488 | ||
489 | if (buffer->kmap_cnt) { | |
490 | buffer->kmap_cnt++; | |
491 | return buffer->vaddr; | |
492 | } | |
493 | vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); | |
e1d855b0 | 494 | if (WARN_ONCE(vaddr == NULL, |
121ca0c6 | 495 | "heap->ops->map_kernel should return ERR_PTR on error")) |
9e907654 CC | 496 | return ERR_PTR(-EINVAL); |
497 | if (IS_ERR(vaddr)) | |
0f34faf8 RSZ | 498 | return vaddr; |
499 | buffer->vaddr = vaddr; | |
500 | buffer->kmap_cnt++; | |
501 | return vaddr; | |
502 | } | |
503 | ||
b892bf75 | 504 | static void *ion_handle_kmap_get(struct ion_handle *handle) |
c30707be | 505 | { |
b892bf75 | 506 | struct ion_buffer *buffer = handle->buffer; |
c30707be RSZ | 507 | void *vaddr; |
508 | ||
b892bf75 RSZ | 509 | if (handle->kmap_cnt) { |
510 | handle->kmap_cnt++; | |
511 | return buffer->vaddr; | |
c30707be | 512 | } |
0f34faf8 | 513 | vaddr = ion_buffer_kmap_get(buffer); |
9e907654 | 514 | if (IS_ERR(vaddr)) |
b892bf75 | 515 | return vaddr; |
b892bf75 | 516 | handle->kmap_cnt++; |
b892bf75 RSZ | 517 | return vaddr; |
518 | } | |
c30707be | 519 | |
0f34faf8 RSZ | 520 | static void ion_buffer_kmap_put(struct ion_buffer *buffer) |
521 | { | |
522 | buffer->kmap_cnt--; | |
523 | if (!buffer->kmap_cnt) { | |
524 | buffer->heap->ops->unmap_kernel(buffer->heap, buffer); | |
525 | buffer->vaddr = NULL; | |
526 | } | |
527 | } | |
528 | ||
b892bf75 RSZ | 529 | static void ion_handle_kmap_put(struct ion_handle *handle) |
530 | { | |
531 | struct ion_buffer *buffer = handle->buffer; | |
532 | ||
22f6b978 MH | 533 | if (!handle->kmap_cnt) { |
534 | WARN(1, "%s: Double unmap detected! bailing...\n", __func__); | |
535 | return; | |
536 | } | |
b892bf75 RSZ | 537 | handle->kmap_cnt--; |
538 | if (!handle->kmap_cnt) | |
0f34faf8 | 539 | ion_buffer_kmap_put(buffer); |
c30707be RSZ | 540 | } |
541 | ||
b892bf75 | 542 | void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) |
c30707be RSZ | 543 | { |
544 | struct ion_buffer *buffer; | |
b892bf75 | 545 | void *vaddr; |
c30707be RSZ | 546 | |
547 | mutex_lock(&client->lock); | |
548 | if (!ion_handle_validate(client, handle)) { | |
b892bf75 | 549 | pr_err("%s: invalid handle passed to map_kernel.\n", |
c30707be RSZ | 550 | __func__); |
551 | mutex_unlock(&client->lock); | |
552 | return ERR_PTR(-EINVAL); | |
553 | } | |
b892bf75 | 554 | |
c30707be | 555 | buffer = handle->buffer; |
c30707be | 556 | |
b892bf75 | 557 | if (!handle->buffer->heap->ops->map_kernel) { |
c30707be RSZ | 558 | pr_err("%s: map_kernel is not implemented by this heap.\n", |
559 | __func__); | |
c30707be RSZ | 560 | mutex_unlock(&client->lock); |
561 | return ERR_PTR(-ENODEV); | |
562 | } | |
c30707be | 563 | |
c30707be | 564 | mutex_lock(&buffer->lock); |
b892bf75 | 565 | vaddr = ion_handle_kmap_get(handle); |
c30707be RSZ | 566 | mutex_unlock(&buffer->lock); |
567 | mutex_unlock(&client->lock); | |
b892bf75 | 568 | return vaddr; |
c30707be | 569 | } |
ee4c8aa9 | 570 | EXPORT_SYMBOL(ion_map_kernel); |
c30707be | 571 | |
b892bf75 | 572 | void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) |
c30707be RSZ | 573 | { |
574 | struct ion_buffer *buffer; | |
575 | ||
576 | mutex_lock(&client->lock); | |
577 | buffer = handle->buffer; | |
578 | mutex_lock(&buffer->lock); | |
b892bf75 | 579 | ion_handle_kmap_put(handle); |
c30707be RSZ | 580 | mutex_unlock(&buffer->lock); |
581 | mutex_unlock(&client->lock); | |
582 | } | |
ee4c8aa9 | 583 | EXPORT_SYMBOL(ion_unmap_kernel); |
c30707be | 584 | |
948c4db4 NZ | 585 | static struct mutex debugfs_mutex; |
586 | static struct rb_root *ion_root_client; | |
587 | static int is_client_alive(struct ion_client *client) | |
588 | { | |
589 | struct rb_node *node; | |
590 | struct ion_client *tmp; | |
591 | struct ion_device *dev; | |
592 | ||
593 | node = ion_root_client->rb_node; | |
594 | dev = container_of(ion_root_client, struct ion_device, clients); | |
595 | ||
596 | down_read(&dev->lock); | |
597 | while (node) { | |
598 | tmp = rb_entry(node, struct ion_client, node); | |
599 | if (client < tmp) { | |
600 | node = node->rb_left; | |
601 | } else if (client > tmp) { | |
602 | node = node->rb_right; | |
603 | } else { | |
604 | up_read(&dev->lock); | |
605 | return 1; | |
606 | } | |
607 | } | |
608 | ||
609 | up_read(&dev->lock); | |
610 | return 0; | |
611 | } | |
612 | ||
c30707be RSZ | 613 | static int ion_debug_client_show(struct seq_file *s, void *unused) |
614 | { | |
615 | struct ion_client *client = s->private; | |
616 | struct rb_node *n; | |
38eeeb51 | 617 | size_t sizes[ION_NUM_HEAP_IDS] = {0}; |
f63958d8 | 618 | const char *names[ION_NUM_HEAP_IDS] = {NULL}; |
c30707be RSZ | 619 | int i; |
620 | ||
948c4db4 NZ | 621 | mutex_lock(&debugfs_mutex); |
622 | if (!is_client_alive(client)) { | |
623 | seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n", | |
624 | client); | |
625 | mutex_unlock(&debugfs_mutex); | |
626 | return 0; | |
627 | } | |
628 | ||
c30707be RSZ | 629 | mutex_lock(&client->lock); |
630 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | |
631 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | |
632 | node); | |
38eeeb51 | 633 | unsigned int id = handle->buffer->heap->id; |
c30707be | 634 | |
38eeeb51 RSZ | 635 | if (!names[id]) |
636 | names[id] = handle->buffer->heap->name; | |
637 | sizes[id] += handle->buffer->size; | |
c30707be RSZ | 638 | } |
639 | mutex_unlock(&client->lock); | |
948c4db4 | 640 | mutex_unlock(&debugfs_mutex); |
c30707be RSZ | 641 | |
642 | seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); | |
38eeeb51 | 643 | for (i = 0; i < ION_NUM_HEAP_IDS; i++) { |
c30707be RSZ |
644 | if (!names[i]) |
645 | continue; | |
e61fc915 | 646 | seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); |
c30707be RSZ | 647 | } |
648 | return 0; | |
649 | } | |
650 | ||
651 | static int ion_debug_client_open(struct inode *inode, struct file *file) | |
652 | { | |
653 | return single_open(file, ion_debug_client_show, inode->i_private); | |
654 | } | |
655 | ||
656 | static const struct file_operations debug_client_fops = { | |
657 | .open = ion_debug_client_open, | |
658 | .read = seq_read, | |
659 | .llseek = seq_lseek, | |
660 | .release = single_release, | |
661 | }; | |
662 | ||
2803ac7b | 663 | static int ion_get_client_serial(const struct rb_root *root, |
121ca0c6 | 664 | const unsigned char *name) |
2803ac7b MH | 665 | { |
666 | int serial = -1; | |
667 | struct rb_node *node; | |
10f62861 | 668 | |
2803ac7b MH | 669 | for (node = rb_first(root); node; node = rb_next(node)) { |
670 | struct ion_client *client = rb_entry(node, struct ion_client, | |
b2bcdadc | 671 | node); |
10f62861 | 672 | |
2803ac7b MH | 673 | if (strcmp(client->name, name)) |
674 | continue; | |
675 | serial = max(serial, client->display_serial); | |
676 | } | |
677 | return serial + 1; | |
678 | } | |
679 | ||
c30707be | 680 | struct ion_client *ion_client_create(struct ion_device *dev, |
c30707be RSZ | 681 | const char *name) |
682 | { | |
683 | struct ion_client *client; | |
684 | struct task_struct *task; | |
685 | struct rb_node **p; | |
686 | struct rb_node *parent = NULL; | |
687 | struct ion_client *entry; | |
c30707be RSZ | 688 | pid_t pid; |
689 | ||
2803ac7b MH | 690 | if (!name) { |
691 | pr_err("%s: Name cannot be null\n", __func__); | |
692 | return ERR_PTR(-EINVAL); | |
693 | } | |
694 | ||
c30707be RSZ | 695 | get_task_struct(current->group_leader); |
696 | task_lock(current->group_leader); | |
697 | pid = task_pid_nr(current->group_leader); | |
7e416174 SR | 698 | /* |
699 | * don't bother to store task struct for kernel threads, | |
700 | * they can't be killed anyway | |
701 | */ | |
c30707be RSZ | 702 | if (current->group_leader->flags & PF_KTHREAD) { |
703 | put_task_struct(current->group_leader); | |
704 | task = NULL; | |
705 | } else { | |
706 | task = current->group_leader; | |
707 | } | |
708 | task_unlock(current->group_leader); | |
709 | ||
411059f7 | 710 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
ae5cbf4a MH | 711 | if (!client) |
712 | goto err_put_task_struct; | |
c30707be RSZ | 713 | |
714 | client->dev = dev; | |
715 | client->handles = RB_ROOT; | |
47b40458 | 716 | idr_init(&client->idr); |
c30707be | 717 | mutex_init(&client->lock); |
c30707be RSZ | 718 | client->task = task; |
719 | client->pid = pid; | |
ae5cbf4a MH | 720 | client->name = kstrdup(name, GFP_KERNEL); |
721 | if (!client->name) | |
722 | goto err_free_client; | |
c30707be | 723 | |
8d7ab9a9 | 724 | down_write(&dev->lock); |
2803ac7b MH | 725 | client->display_serial = ion_get_client_serial(&dev->clients, name); |
726 | client->display_name = kasprintf( | |
727 | GFP_KERNEL, "%s-%d", name, client->display_serial); | |
728 | if (!client->display_name) { | |
729 | up_write(&dev->lock); | |
730 | goto err_free_client_name; | |
731 | } | |
b892bf75 RSZ | 732 | p = &dev->clients.rb_node; |
733 | while (*p) { | |
734 | parent = *p; | |
735 | entry = rb_entry(parent, struct ion_client, node); | |
736 | ||
737 | if (client < entry) | |
738 | p = &(*p)->rb_left; | |
739 | else if (client > entry) | |
740 | p = &(*p)->rb_right; | |
c30707be | 741 | } |
b892bf75 RSZ | 742 | rb_link_node(&client->node, parent, p); |
743 | rb_insert_color(&client->node, &dev->clients); | |
c30707be | 744 | |
2803ac7b | 745 | client->debug_root = debugfs_create_file(client->display_name, 0664, |
b2bcdadc DS | 746 | dev->clients_debug_root, |
747 | client, &debug_client_fops); | |
b08585fb MH | 748 | if (!client->debug_root) { |
749 | char buf[256], *path; | |
04e14356 | 750 | |
b08585fb MH | 751 | path = dentry_path(dev->clients_debug_root, buf, 256); |
752 | pr_err("Failed to create client debugfs at %s/%s\n", | |
121ca0c6 | 753 | path, client->display_name); |
b08585fb MH | 754 | } |
755 | ||
8d7ab9a9 | 756 | up_write(&dev->lock); |
c30707be RSZ | 757 | |
758 | return client; | |
ae5cbf4a | 759 | |
2803ac7b MH | 760 | err_free_client_name: |
761 | kfree(client->name); | |
ae5cbf4a MH | 762 | err_free_client: |
763 | kfree(client); | |
764 | err_put_task_struct: | |
765 | if (task) | |
766 | put_task_struct(current->group_leader); | |
767 | return ERR_PTR(-ENOMEM); | |
c30707be | 768 | } |
9122fe86 | 769 | EXPORT_SYMBOL(ion_client_create); |
c30707be | 770 | |
b892bf75 | 771 | void ion_client_destroy(struct ion_client *client) |
c30707be | 772 | { |
c30707be RSZ | 773 | struct ion_device *dev = client->dev; |
774 | struct rb_node *n; | |
775 | ||
776 | pr_debug("%s: %d\n", __func__, __LINE__); | |
948c4db4 | 777 | mutex_lock(&debugfs_mutex); |
c30707be RSZ | 778 | while ((n = rb_first(&client->handles))) { |
779 | struct ion_handle *handle = rb_entry(n, struct ion_handle, | |
780 | node); | |
781 | ion_handle_destroy(&handle->ref); | |
782 | } | |
47b40458 | 783 | |
47b40458 CC | 784 | idr_destroy(&client->idr); |
785 | ||
8d7ab9a9 | 786 | down_write(&dev->lock); |
b892bf75 | 787 | if (client->task) |
c30707be | 788 | put_task_struct(client->task); |
b892bf75 | 789 | rb_erase(&client->node, &dev->clients); |
c30707be | 790 | debugfs_remove_recursive(client->debug_root); |
8d7ab9a9 | 791 | up_write(&dev->lock); |
c30707be | 792 | |
2803ac7b | 793 | kfree(client->display_name); |
ae5cbf4a | 794 | kfree(client->name); |
c30707be | 795 | kfree(client); |
948c4db4 | 796 | mutex_unlock(&debugfs_mutex); |
c30707be | 797 | } |
ee4c8aa9 | 798 | EXPORT_SYMBOL(ion_client_destroy); |
c30707be | 799 | |
56a7c185 RSZ |
800 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, |
801 | struct device *dev, | |
802 | enum dma_data_direction direction); | |
803 | ||
29ae6bc7 RSZ |
804 | static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, |
805 | enum dma_data_direction direction) | |
c30707be | 806 | { |
b892bf75 RSZ |
807 | struct dma_buf *dmabuf = attachment->dmabuf; |
808 | struct ion_buffer *buffer = dmabuf->priv; | |
c30707be | 809 | |
0b9ec1cf | 810 | ion_buffer_sync_for_device(buffer, attachment->dev, direction); |
29ae6bc7 RSZ |
811 | return buffer->sg_table; |
812 | } | |
813 | ||
814 | static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, | |
815 | struct sg_table *table, | |
816 | enum dma_data_direction direction) | |
817 | { | |
c30707be RSZ |
818 | } |
819 | ||
e946b209 | 820 | void ion_pages_sync_for_device(struct device *dev, struct page *page, |
121ca0c6 | 821 | size_t size, enum dma_data_direction dir) |
e946b209 CC |
822 | { |
823 | struct scatterlist sg; | |
824 | ||
825 | sg_init_table(&sg, 1); | |
826 | sg_set_page(&sg, page, size, 0); | |
827 | /* | |
828 | * This is not correct - sg_dma_address needs a dma_addr_t that is valid | |
8e4ec4fe | 829 | * for the targeted device, but this works on the currently targeted |
e946b209 CC |
830 | * hardware. |
831 | */ | |
832 | sg_dma_address(&sg) = page_to_phys(page); | |
833 | dma_sync_sg_for_device(dev, &sg, 1, dir); | |
834 | } | |
835 | ||
56a7c185 RSZ |
836 | struct ion_vma_list { |
837 | struct list_head list; | |
838 | struct vm_area_struct *vma; | |
839 | }; | |
840 | ||
841 | static void ion_buffer_sync_for_device(struct ion_buffer *buffer, | |
842 | struct device *dev, | |
843 | enum dma_data_direction dir) | |
844 | { | |
56a7c185 | 845 | struct ion_vma_list *vma_list; |
c13bd1c4 RSZ |
846 | int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; |
847 | int i; | |
56a7c185 RSZ |
848 | |
849 | pr_debug("%s: syncing for device %s\n", __func__, | |
850 | dev ? dev_name(dev) : "null"); | |
0b9ec1cf | 851 | |
13ba7805 | 852 | if (!ion_buffer_fault_user_mappings(buffer)) |
0b9ec1cf RSZ |
853 | return; |
854 | ||
56a7c185 | 855 | mutex_lock(&buffer->lock); |
c13bd1c4 RSZ |
856 | for (i = 0; i < pages; i++) { |
857 | struct page *page = buffer->pages[i]; | |
858 | ||
859 | if (ion_buffer_page_is_dirty(page)) | |
e946b209 | 860 | ion_pages_sync_for_device(dev, ion_buffer_page(page), |
121ca0c6 | 861 | PAGE_SIZE, dir); |
e946b209 | 862 | |
c13bd1c4 | 863 | ion_buffer_page_clean(buffer->pages + i); |
56a7c185 RSZ |
864 | } |
865 | list_for_each_entry(vma_list, &buffer->vmas, list) { | |
866 | struct vm_area_struct *vma = vma_list->vma; | |
867 | ||
868 | zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, | |
869 | NULL); | |
870 | } | |
871 | mutex_unlock(&buffer->lock); | |
872 | } | |
873 | ||
f63958d8 | 874 | static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
56a7c185 RSZ |
875 | { |
876 | struct ion_buffer *buffer = vma->vm_private_data; | |
462be0c6 | 877 | unsigned long pfn; |
c13bd1c4 | 878 | int ret; |
56a7c185 RSZ |
879 | |
880 | mutex_lock(&buffer->lock); | |
c13bd1c4 | 881 | ion_buffer_page_dirty(buffer->pages + vmf->pgoff); |
c13bd1c4 | 882 | BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); |
462be0c6 CC |
883 | |
884 | pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); | |
1a29d85e | 885 | ret = vm_insert_pfn(vma, vmf->address, pfn); |
56a7c185 | 886 | mutex_unlock(&buffer->lock); |
c13bd1c4 RSZ |
887 | if (ret) |
888 | return VM_FAULT_ERROR; | |
889 | ||
56a7c185 RSZ |
890 | return VM_FAULT_NOPAGE; |
891 | } | |
892 | ||
893 | static void ion_vm_open(struct vm_area_struct *vma) | |
894 | { | |
895 | struct ion_buffer *buffer = vma->vm_private_data; | |
896 | struct ion_vma_list *vma_list; | |
897 | ||
411059f7 | 898 | vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL); |
56a7c185 RSZ |
899 | if (!vma_list) |
900 | return; | |
901 | vma_list->vma = vma; | |
902 | mutex_lock(&buffer->lock); | |
903 | list_add(&vma_list->list, &buffer->vmas); | |
904 | mutex_unlock(&buffer->lock); | |
905 | pr_debug("%s: adding %p\n", __func__, vma); | |
906 | } | |
907 | ||
908 | static void ion_vm_close(struct vm_area_struct *vma) | |
909 | { | |
910 | struct ion_buffer *buffer = vma->vm_private_data; | |
911 | struct ion_vma_list *vma_list, *tmp; | |
912 | ||
913 | pr_debug("%s\n", __func__); | |
914 | mutex_lock(&buffer->lock); | |
915 | list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { | |
916 | if (vma_list->vma != vma) | |
917 | continue; | |
918 | list_del(&vma_list->list); | |
919 | kfree(vma_list); | |
920 | pr_debug("%s: deleting %p\n", __func__, vma); | |
921 | break; | |
922 | } | |
923 | mutex_unlock(&buffer->lock); | |
924 | } | |
925 | ||
7cbea8dc | 926 | static const struct vm_operations_struct ion_vma_ops = { |
56a7c185 RSZ |
927 | .open = ion_vm_open, |
928 | .close = ion_vm_close, | |
929 | .fault = ion_vm_fault, | |
930 | }; | |
931 | ||
b892bf75 | 932 | static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) |
c30707be | 933 | { |
b892bf75 | 934 | struct ion_buffer *buffer = dmabuf->priv; |
56a7c185 | 935 | int ret = 0; |
c30707be | 936 | |
b892bf75 | 937 | if (!buffer->heap->ops->map_user) { |
7287bb52 | 938 | pr_err("%s: this heap does not define a method for mapping to userspace\n", |
121ca0c6 | 939 | __func__); |
b892bf75 | 940 | return -EINVAL; |
c30707be RSZ |
941 | } |
942 | ||
13ba7805 | 943 | if (ion_buffer_fault_user_mappings(buffer)) { |
462be0c6 CC |
944 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | |
945 | VM_DONTDUMP; | |
56a7c185 RSZ |
946 | vma->vm_private_data = buffer; |
947 | vma->vm_ops = &ion_vma_ops; | |
948 | ion_vm_open(vma); | |
856661d5 | 949 | return 0; |
56a7c185 | 950 | } |
b892bf75 | 951 | |
856661d5 RSZ |
952 | if (!(buffer->flags & ION_FLAG_CACHED)) |
953 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | |
954 | ||
955 | mutex_lock(&buffer->lock); | |
956 | /* now map it to userspace */ | |
957 | ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); | |
958 | mutex_unlock(&buffer->lock); | |
959 | ||
b892bf75 | 960 | if (ret) |
c30707be RSZ |
961 | pr_err("%s: failure mapping buffer to userspace\n", |
962 | __func__); | |
c30707be | 963 | |
c30707be RSZ |
964 | return ret; |
965 | } | |
966 | ||
b892bf75 RSZ |
967 | static void ion_dma_buf_release(struct dma_buf *dmabuf) |
968 | { | |
969 | struct ion_buffer *buffer = dmabuf->priv; | |
10f62861 | 970 | |
b892bf75 RSZ |
971 | ion_buffer_put(buffer); |
972 | } | |
c30707be | 973 | |
b892bf75 | 974 | static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) |
c30707be | 975 | { |
0f34faf8 | 976 | struct ion_buffer *buffer = dmabuf->priv; |
10f62861 | 977 | |
12edf53d | 978 | return buffer->vaddr + offset * PAGE_SIZE; |
b892bf75 | 979 | } |
c30707be | 980 | |
b892bf75 RSZ |
981 | static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, |
982 | void *ptr) | |
983 | { | |
b892bf75 RSZ |
984 | } |
985 | ||
831e9da7 | 986 | static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, |
0f34faf8 | 987 | enum dma_data_direction direction) |
b892bf75 | 988 | { |
0f34faf8 RSZ |
989 | struct ion_buffer *buffer = dmabuf->priv; |
990 | void *vaddr; | |
991 | ||
992 | if (!buffer->heap->ops->map_kernel) { | |
993 | pr_err("%s: map kernel is not implemented by this heap.\n", | |
994 | __func__); | |
995 | return -ENODEV; | |
996 | } | |
997 | ||
998 | mutex_lock(&buffer->lock); | |
999 | vaddr = ion_buffer_kmap_get(buffer); | |
1000 | mutex_unlock(&buffer->lock); | |
ab0c069a | 1001 | return PTR_ERR_OR_ZERO(vaddr); |
b892bf75 RSZ |
1002 | } |
1003 | ||
18b862dc CW |
1004 | static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, |
1005 | enum dma_data_direction direction) | |
b892bf75 | 1006 | { |
0f34faf8 | 1007 | struct ion_buffer *buffer = dmabuf->priv; |
c30707be | 1008 | |
0f34faf8 RSZ |
1009 | mutex_lock(&buffer->lock); |
1010 | ion_buffer_kmap_put(buffer); | |
1011 | mutex_unlock(&buffer->lock); | |
18b862dc CW |
1012 | |
1013 | return 0; | |
0f34faf8 | 1014 | } |
c30707be | 1015 | |
2328ed66 | 1016 | static const struct dma_buf_ops dma_buf_ops = { |
b892bf75 RSZ |
1017 | .map_dma_buf = ion_map_dma_buf, |
1018 | .unmap_dma_buf = ion_unmap_dma_buf, | |
1019 | .mmap = ion_mmap, | |
1020 | .release = ion_dma_buf_release, | |
0f34faf8 RSZ |
1021 | .begin_cpu_access = ion_dma_buf_begin_cpu_access, |
1022 | .end_cpu_access = ion_dma_buf_end_cpu_access, | |
1023 | .kmap_atomic = ion_dma_buf_kmap, | |
1024 | .kunmap_atomic = ion_dma_buf_kunmap, | |
b892bf75 RSZ |
1025 | .kmap = ion_dma_buf_kmap, |
1026 | .kunmap = ion_dma_buf_kunmap, | |
1027 | }; | |
1028 | ||
22ba4322 | 1029 | struct dma_buf *ion_share_dma_buf(struct ion_client *client, |
121ca0c6 | 1030 | struct ion_handle *handle) |
b892bf75 | 1031 | { |
5605b188 | 1032 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info); |
b892bf75 RSZ |
1033 | struct ion_buffer *buffer; |
1034 | struct dma_buf *dmabuf; | |
1035 | bool valid_handle; | |
d8fbe341 | 1036 | |
b892bf75 RSZ |
1037 | mutex_lock(&client->lock); |
1038 | valid_handle = ion_handle_validate(client, handle); | |
b892bf75 | 1039 | if (!valid_handle) { |
a9bb075d | 1040 | WARN(1, "%s: invalid handle passed to share.\n", __func__); |
83271f62 | 1041 | mutex_unlock(&client->lock); |
22ba4322 | 1042 | return ERR_PTR(-EINVAL); |
b892bf75 | 1043 | } |
b892bf75 RSZ |
1044 | buffer = handle->buffer; |
1045 | ion_buffer_get(buffer); | |
83271f62 CC |
1046 | mutex_unlock(&client->lock); |
1047 | ||
72449cb4 SS |
1048 | exp_info.ops = &dma_buf_ops; |
1049 | exp_info.size = buffer->size; | |
1050 | exp_info.flags = O_RDWR; | |
1051 | exp_info.priv = buffer; | |
1052 | ||
d8fbe341 | 1053 | dmabuf = dma_buf_export(&exp_info); |
b892bf75 RSZ |
1054 | if (IS_ERR(dmabuf)) { |
1055 | ion_buffer_put(buffer); | |
22ba4322 | 1056 | return dmabuf; |
b892bf75 | 1057 | } |
22ba4322 JM |
1058 | |
1059 | return dmabuf; | |
1060 | } | |
1061 | EXPORT_SYMBOL(ion_share_dma_buf); | |
1062 | ||
1063 | int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) | |
1064 | { | |
1065 | struct dma_buf *dmabuf; | |
1066 | int fd; | |
1067 | ||
1068 | dmabuf = ion_share_dma_buf(client, handle); | |
1069 | if (IS_ERR(dmabuf)) | |
1070 | return PTR_ERR(dmabuf); | |
1071 | ||
b892bf75 | 1072 | fd = dma_buf_fd(dmabuf, O_CLOEXEC); |
55808b8d | 1073 | if (fd < 0) |
b892bf75 | 1074 | dma_buf_put(dmabuf); |
55808b8d | 1075 | |
c30707be | 1076 | return fd; |
b892bf75 | 1077 | } |
22ba4322 | 1078 | EXPORT_SYMBOL(ion_share_dma_buf_fd); |
c30707be | 1079 | |
9f90381b R |
1080 | struct ion_handle *ion_import_dma_buf(struct ion_client *client, |
1081 | struct dma_buf *dmabuf) | |
b892bf75 | 1082 | { |
b892bf75 RSZ |
1083 | struct ion_buffer *buffer; |
1084 | struct ion_handle *handle; | |
47b40458 | 1085 | int ret; |
b892bf75 | 1086 | |
b892bf75 RSZ |
1087 | /* if this memory came from ion */ |
1088 | ||
1089 | if (dmabuf->ops != &dma_buf_ops) { | |
1090 | pr_err("%s: can not import dmabuf from another exporter\n", | |
1091 | __func__); | |
b892bf75 RSZ |
1092 | return ERR_PTR(-EINVAL); |
1093 | } | |
1094 | buffer = dmabuf->priv; | |
1095 | ||
1096 | mutex_lock(&client->lock); | |
1097 | /* if a handle exists for this buffer just take a reference to it */ | |
1098 | handle = ion_handle_lookup(client, buffer); | |
9e907654 | 1099 | if (!IS_ERR(handle)) { |
b892bf75 | 1100 | ion_handle_get(handle); |
83271f62 | 1101 | mutex_unlock(&client->lock); |
b892bf75 RSZ |
1102 | goto end; |
1103 | } | |
83271f62 | 1104 | |
b892bf75 | 1105 | handle = ion_handle_create(client, buffer); |
6fa92e2b SL |
1106 | if (IS_ERR(handle)) { |
1107 | mutex_unlock(&client->lock); | |
b892bf75 | 1108 | goto end; |
6fa92e2b | 1109 | } |
83271f62 | 1110 | |
47b40458 | 1111 | ret = ion_handle_add(client, handle); |
83271f62 | 1112 | mutex_unlock(&client->lock); |
47b40458 CC |
1113 | if (ret) { |
1114 | ion_handle_put(handle); | |
1115 | handle = ERR_PTR(ret); | |
1116 | } | |
83271f62 | 1117 | |
b892bf75 | 1118 | end: |
b892bf75 | 1119 | return handle; |
c30707be | 1120 | } |
ee4c8aa9 | 1121 | EXPORT_SYMBOL(ion_import_dma_buf); |
c30707be | 1122 | |
9f90381b R |
1123 | struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd) |
1124 | { | |
1125 | struct dma_buf *dmabuf; | |
1126 | struct ion_handle *handle; | |
1127 | ||
1128 | dmabuf = dma_buf_get(fd); | |
1129 | if (IS_ERR(dmabuf)) | |
1130 | return ERR_CAST(dmabuf); | |
1131 | ||
1132 | handle = ion_import_dma_buf(client, dmabuf); | |
1133 | dma_buf_put(dmabuf); | |
1134 | return handle; | |
1135 | } | |
1136 | EXPORT_SYMBOL(ion_import_dma_buf_fd); | |
1137 | ||
b1fa6d8a | 1138 | int ion_sync_for_device(struct ion_client *client, int fd) |
0b9ec1cf RSZ |
1139 | { |
1140 | struct dma_buf *dmabuf; | |
1141 | struct ion_buffer *buffer; | |
1142 | ||
1143 | dmabuf = dma_buf_get(fd); | |
9e907654 | 1144 | if (IS_ERR(dmabuf)) |
0b9ec1cf RSZ |
1145 | return PTR_ERR(dmabuf); |
1146 | ||
1147 | /* if this memory came from ion */ | |
1148 | if (dmabuf->ops != &dma_buf_ops) { | |
1149 | pr_err("%s: can not sync dmabuf from another exporter\n", | |
1150 | __func__); | |
1151 | dma_buf_put(dmabuf); | |
1152 | return -EINVAL; | |
1153 | } | |
1154 | buffer = dmabuf->priv; | |
856661d5 RSZ |
1155 | |
1156 | dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, | |
1157 | buffer->sg_table->nents, DMA_BIDIRECTIONAL); | |
0b9ec1cf RSZ |
1158 | dma_buf_put(dmabuf); |
1159 | return 0; | |
1160 | } | |
1161 | ||
02b23803 LA |
1162 | int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query) |
1163 | { | |
1164 | struct ion_device *dev = client->dev; | |
1165 | struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps); | |
1166 | int ret = -EINVAL, cnt = 0, max_cnt; | |
1167 | struct ion_heap *heap; | |
1168 | struct ion_heap_data hdata; | |
1169 | ||
1170 | memset(&hdata, 0, sizeof(hdata)); | |
1171 | ||
1172 | down_read(&dev->lock); | |
1173 | if (!buffer) { | |
1174 | query->cnt = dev->heap_cnt; | |
1175 | ret = 0; | |
1176 | goto out; | |
1177 | } | |
1178 | ||
1179 | if (query->cnt <= 0) | |
1180 | goto out; | |
1181 | ||
1182 | max_cnt = query->cnt; | |
1183 | ||
1184 | plist_for_each_entry(heap, &dev->heaps, node) { | |
1185 | strncpy(hdata.name, heap->name, MAX_HEAP_NAME); | |
1186 | hdata.name[sizeof(hdata.name) - 1] = '\0'; | |
1187 | hdata.type = heap->type; | |
1188 | hdata.heap_id = heap->id; | |
1189 | ||
cf55902b DC |
1190 | if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) { |
1191 | ret = -EFAULT; | |
1192 | goto out; | |
1193 | } | |
02b23803 LA |
1194 | |
1195 | cnt++; | |
1196 | if (cnt >= max_cnt) | |
1197 | break; | |
1198 | } | |
1199 | ||
1200 | query->cnt = cnt; | |
1201 | out: | |
1202 | up_read(&dev->lock); | |
1203 | return ret; | |
1204 | } | |
1205 | ||
c30707be RSZ |
1206 | static int ion_release(struct inode *inode, struct file *file) |
1207 | { | |
1208 | struct ion_client *client = file->private_data; | |
1209 | ||
1210 | pr_debug("%s: %d\n", __func__, __LINE__); | |
b892bf75 | 1211 | ion_client_destroy(client); |
c30707be RSZ |
1212 | return 0; |
1213 | } | |
1214 | ||
1215 | static int ion_open(struct inode *inode, struct file *file) | |
1216 | { | |
1217 | struct miscdevice *miscdev = file->private_data; | |
1218 | struct ion_device *dev = container_of(miscdev, struct ion_device, dev); | |
1219 | struct ion_client *client; | |
483ed03f | 1220 | char debug_name[64]; |
c30707be RSZ |
1221 | |
1222 | pr_debug("%s: %d\n", __func__, __LINE__); | |
483ed03f LA |
1223 | snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); |
1224 | client = ion_client_create(dev, debug_name); | |
9e907654 | 1225 | if (IS_ERR(client)) |
c30707be RSZ |
1226 | return PTR_ERR(client); |
1227 | file->private_data = client; | |
1228 | ||
1229 | return 0; | |
1230 | } | |
1231 | ||
1232 | static const struct file_operations ion_fops = { | |
1233 | .owner = THIS_MODULE, | |
1234 | .open = ion_open, | |
1235 | .release = ion_release, | |
1236 | .unlocked_ioctl = ion_ioctl, | |
827c849e | 1237 | .compat_ioctl = compat_ion_ioctl, |
c30707be RSZ |
1238 | }; |
1239 | ||
1240 | static size_t ion_debug_heap_total(struct ion_client *client, | |
2bb9f503 | 1241 | unsigned int id) |
c30707be RSZ |
1242 | { |
1243 | size_t size = 0; | |
1244 | struct rb_node *n; | |
1245 | ||
1246 | mutex_lock(&client->lock); | |
1247 | for (n = rb_first(&client->handles); n; n = rb_next(n)) { | |
1248 | struct ion_handle *handle = rb_entry(n, | |
1249 | struct ion_handle, | |
1250 | node); | |
2bb9f503 | 1251 | if (handle->buffer->heap->id == id) |
c30707be RSZ |
1252 | size += handle->buffer->size; |
1253 | } | |
1254 | mutex_unlock(&client->lock); | |
1255 | return size; | |
1256 | } | |
1257 | ||
1258 | static int ion_debug_heap_show(struct seq_file *s, void *unused) | |
1259 | { | |
1260 | struct ion_heap *heap = s->private; | |
1261 | struct ion_device *dev = heap->dev; | |
1262 | struct rb_node *n; | |
5ad7bc3a RSZ |
1263 | size_t total_size = 0; |
1264 | size_t total_orphaned_size = 0; | |
c30707be | 1265 | |
b5693964 | 1266 | seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); |
164ad86d | 1267 | seq_puts(s, "----------------------------------------------------\n"); |
c30707be | 1268 | |
948c4db4 | 1269 | mutex_lock(&debugfs_mutex); |
b892bf75 | 1270 | for (n = rb_first(&dev->clients); n; n = rb_next(n)) { |
c30707be RSZ |
1271 | struct ion_client *client = rb_entry(n, struct ion_client, |
1272 | node); | |
2bb9f503 | 1273 | size_t size = ion_debug_heap_total(client, heap->id); |
10f62861 | 1274 | |
c30707be RSZ |
1275 | if (!size) |
1276 | continue; | |
b892bf75 RSZ |
1277 | if (client->task) { |
1278 | char task_comm[TASK_COMM_LEN]; | |
1279 | ||
1280 | get_task_comm(task_comm, client->task); | |
b5693964 | 1281 | seq_printf(s, "%16s %16u %16zu\n", task_comm, |
b892bf75 RSZ |
1282 | client->pid, size); |
1283 | } else { | |
b5693964 | 1284 | seq_printf(s, "%16s %16u %16zu\n", client->name, |
b892bf75 RSZ |
1285 | client->pid, size); |
1286 | } | |
c30707be | 1287 | } |
948c4db4 NZ |
1288 | mutex_unlock(&debugfs_mutex); |
1289 | ||
164ad86d IM |
1290 | seq_puts(s, "----------------------------------------------------\n"); |
1291 | seq_puts(s, "orphaned allocations (info is from last known client):\n"); | |
8d7ab9a9 | 1292 | mutex_lock(&dev->buffer_lock); |
5ad7bc3a RSZ |
1293 | for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { |
1294 | struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, | |
1295 | node); | |
2bb9f503 | 1296 | if (buffer->heap->id != heap->id) |
45b17a80 RSZ |
1297 | continue; |
1298 | total_size += buffer->size; | |
5ad7bc3a | 1299 | if (!buffer->handle_count) { |
b5693964 | 1300 | seq_printf(s, "%16s %16u %16zu %d %d\n", |
e61fc915 CC |
1301 | buffer->task_comm, buffer->pid, |
1302 | buffer->size, buffer->kmap_cnt, | |
2c935bc5 | 1303 | kref_read(&buffer->ref)); |
5ad7bc3a RSZ |
1304 | total_orphaned_size += buffer->size; |
1305 | } | |
1306 | } | |
8d7ab9a9 | 1307 | mutex_unlock(&dev->buffer_lock); |
164ad86d | 1308 | seq_puts(s, "----------------------------------------------------\n"); |
b5693964 | 1309 | seq_printf(s, "%16s %16zu\n", "total orphaned", |
5ad7bc3a | 1310 | total_orphaned_size); |
b5693964 | 1311 | seq_printf(s, "%16s %16zu\n", "total ", total_size); |
2540c73a | 1312 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) |
b5693964 | 1313 | seq_printf(s, "%16s %16zu\n", "deferred free", |
121ca0c6 | 1314 | heap->free_list_size); |
164ad86d | 1315 | seq_puts(s, "----------------------------------------------------\n"); |
45b17a80 RSZ |
1316 | |
1317 | if (heap->debug_show) | |
1318 | heap->debug_show(heap, s, unused); | |
5ad7bc3a | 1319 | |
c30707be RSZ |
1320 | return 0; |
1321 | } | |
1322 | ||
1323 | static int ion_debug_heap_open(struct inode *inode, struct file *file) | |
1324 | { | |
1325 | return single_open(file, ion_debug_heap_show, inode->i_private); | |
1326 | } | |
1327 | ||
1328 | static const struct file_operations debug_heap_fops = { | |
1329 | .open = ion_debug_heap_open, | |
1330 | .read = seq_read, | |
1331 | .llseek = seq_lseek, | |
1332 | .release = single_release, | |
1333 | }; | |
1334 | ||
ea313b5f | 1335 | static int debug_shrink_set(void *data, u64 val) |
fe2faea7 | 1336 | { |
e1d855b0 JS |
1337 | struct ion_heap *heap = data; |
1338 | struct shrink_control sc; | |
1339 | int objs; | |
fe2faea7 | 1340 | |
3b0ae7be | 1341 | sc.gfp_mask = GFP_HIGHUSER; |
aeb7fa7b | 1342 | sc.nr_to_scan = val; |
fe2faea7 | 1343 | |
aeb7fa7b GK |
1344 | if (!val) { |
1345 | objs = heap->shrinker.count_objects(&heap->shrinker, &sc); | |
1346 | sc.nr_to_scan = objs; | |
1347 | } | |
fe2faea7 | 1348 | |
aeb7fa7b | 1349 | heap->shrinker.scan_objects(&heap->shrinker, &sc); |
e1d855b0 | 1350 | return 0; |
fe2faea7 RSZ |
1351 | } |
1352 | ||
ea313b5f | 1353 | static int debug_shrink_get(void *data, u64 *val) |
fe2faea7 | 1354 | { |
e1d855b0 JS |
1355 | struct ion_heap *heap = data; |
1356 | struct shrink_control sc; | |
1357 | int objs; | |
fe2faea7 | 1358 | |
3b0ae7be | 1359 | sc.gfp_mask = GFP_HIGHUSER; |
e1d855b0 | 1360 | sc.nr_to_scan = 0; |
fe2faea7 | 1361 | |
aeb7fa7b | 1362 | objs = heap->shrinker.count_objects(&heap->shrinker, &sc); |
e1d855b0 JS |
1363 | *val = objs; |
1364 | return 0; | |
fe2faea7 RSZ |
1365 | } |
1366 | ||
ea313b5f | 1367 | DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, |
e1d855b0 | 1368 | debug_shrink_set, "%llu\n"); |
ea313b5f | 1369 | |
c30707be RSZ |
1370 | void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) |
1371 | { | |
b08585fb MH |
1372 | struct dentry *debug_file; |
1373 | ||
f82ad60e | 1374 | if (!heap->ops->allocate || !heap->ops->free) |
29ae6bc7 RSZ |
1375 | pr_err("%s: can not add heap with invalid ops struct.\n", |
1376 | __func__); | |
1377 | ||
95e53ddd MH |
1378 | spin_lock_init(&heap->free_lock); |
1379 | heap->free_list_size = 0; | |
1380 | ||
ea313b5f RSZ |
1381 | if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) |
1382 | ion_heap_init_deferred_free(heap); | |
fe2faea7 | 1383 | |
b9daf0b6 CC |
1384 | if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) |
1385 | ion_heap_init_shrinker(heap); | |
1386 | ||
c30707be | 1387 | heap->dev = dev; |
8d7ab9a9 | 1388 | down_write(&dev->lock); |
7e416174 SR |
1389 | /* |
1390 | * use negative heap->id to reverse the priority -- when traversing | |
1391 | * the list later attempt higher id numbers first | |
1392 | */ | |
cd69488c RSZ |
1393 | plist_node_init(&heap->node, -heap->id); |
1394 | plist_add(&heap->node, &dev->heaps); | |
b08585fb | 1395 | debug_file = debugfs_create_file(heap->name, 0664, |
121ca0c6 JA |
1396 | dev->heaps_debug_root, heap, |
1397 | &debug_heap_fops); | |
b08585fb MH |
1398 | |
1399 | if (!debug_file) { | |
1400 | char buf[256], *path; | |
10f62861 | 1401 | |
b08585fb MH |
1402 | path = dentry_path(dev->heaps_debug_root, buf, 256); |
1403 | pr_err("Failed to create heap debugfs at %s/%s\n", | |
121ca0c6 | 1404 | path, heap->name); |
b08585fb MH |
1405 | } |
1406 | ||
aeb7fa7b | 1407 | if (heap->shrinker.count_objects && heap->shrinker.scan_objects) { |
ea313b5f RSZ |
1408 | char debug_name[64]; |
1409 | ||
1410 | snprintf(debug_name, 64, "%s_shrink", heap->name); | |
b08585fb MH |
1411 | debug_file = debugfs_create_file( |
1412 | debug_name, 0644, dev->heaps_debug_root, heap, | |
1413 | &debug_shrink_fops); | |
1414 | if (!debug_file) { | |
1415 | char buf[256], *path; | |
10f62861 | 1416 | |
b08585fb MH |
1417 | path = dentry_path(dev->heaps_debug_root, buf, 256); |
1418 | pr_err("Failed to create heap shrinker debugfs at %s/%s\n", | |
121ca0c6 | 1419 | path, debug_name); |
b08585fb | 1420 | } |
ea313b5f | 1421 | } |
aeb7fa7b | 1422 | |
02b23803 | 1423 | dev->heap_cnt++; |
8d7ab9a9 | 1424 | up_write(&dev->lock); |
c30707be | 1425 | } |
8c6c463e | 1426 | EXPORT_SYMBOL(ion_device_add_heap); |
c30707be RSZ |
1427 | |
1428 | struct ion_device *ion_device_create(long (*custom_ioctl) | |
1429 | (struct ion_client *client, | |
1430 | unsigned int cmd, | |
1431 | unsigned long arg)) | |
1432 | { | |
1433 | struct ion_device *idev; | |
1434 | int ret; | |
1435 | ||
411059f7 | 1436 | idev = kzalloc(sizeof(*idev), GFP_KERNEL); |
c30707be RSZ |
1437 | if (!idev) |
1438 | return ERR_PTR(-ENOMEM); | |
1439 | ||
1440 | idev->dev.minor = MISC_DYNAMIC_MINOR; | |
1441 | idev->dev.name = "ion"; | |
1442 | idev->dev.fops = &ion_fops; | |
1443 | idev->dev.parent = NULL; | |
1444 | ret = misc_register(&idev->dev); | |
1445 | if (ret) { | |
1446 | pr_err("ion: failed to register misc device.\n"); | |
283d9304 | 1447 | kfree(idev); |
c30707be RSZ |
1448 | return ERR_PTR(ret); |
1449 | } | |
1450 | ||
1451 | idev->debug_root = debugfs_create_dir("ion", NULL); | |
b08585fb MH |
1452 | if (!idev->debug_root) { |
1453 | pr_err("ion: failed to create debugfs root directory.\n"); | |
1454 | goto debugfs_done; | |
1455 | } | |
1456 | idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); | |
1457 | if (!idev->heaps_debug_root) { | |
1458 | pr_err("ion: failed to create debugfs heaps directory.\n"); | |
1459 | goto debugfs_done; | |
1460 | } | |
1461 | idev->clients_debug_root = debugfs_create_dir("clients", | |
1462 | idev->debug_root); | |
1463 | if (!idev->clients_debug_root) | |
1464 | pr_err("ion: failed to create debugfs clients directory.\n"); | |
1465 | ||
1466 | debugfs_done: | |
c30707be RSZ |
1467 | |
1468 | idev->custom_ioctl = custom_ioctl; | |
1469 | idev->buffers = RB_ROOT; | |
8d7ab9a9 RSZ |
1470 | mutex_init(&idev->buffer_lock); |
1471 | init_rwsem(&idev->lock); | |
cd69488c | 1472 | plist_head_init(&idev->heaps); |
b892bf75 | 1473 | idev->clients = RB_ROOT; |
948c4db4 NZ |
1474 | ion_root_client = &idev->clients; |
1475 | mutex_init(&debugfs_mutex); | |
c30707be RSZ |
1476 | return idev; |
1477 | } | |
8c6c463e | 1478 | EXPORT_SYMBOL(ion_device_create); |
c30707be RSZ |
1479 | |
1480 | void ion_device_destroy(struct ion_device *dev) | |
1481 | { | |
1482 | misc_deregister(&dev->dev); | |
b08585fb | 1483 | debugfs_remove_recursive(dev->debug_root); |
c30707be RSZ |
1484 | /* XXX need to free the heaps and clients ? */ |
1485 | kfree(dev); | |
1486 | } | |
8c6c463e | 1487 | EXPORT_SYMBOL(ion_device_destroy); |
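
Aside: the `ion_buffer_page()`, `ion_buffer_page_is_dirty()`, `ion_buffer_page_dirty()` and `ion_buffer_page_clean()` helpers near the top of the listing (file lines 55-72) store a per-page "dirty" flag in bit 0 of a `struct page` pointer, relying on the pointee being at least 2-byte aligned so that bit is otherwise always zero. Below is a minimal standalone C sketch of the same low-bit tagging idea; it is not ion code, and the `page_like`, `untag`, `is_dirty` and `mark_dirty` names are illustrative only.

```c
/* Illustrative sketch of pointer low-bit tagging (not part of ion.c). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct page_like { int payload; };      /* stand-in for struct page */

/* strip the tag bit before the pointer may be dereferenced */
static inline struct page_like *untag(struct page_like *p)
{
	return (struct page_like *)((uintptr_t)p & ~(uintptr_t)1);
}

/* test the tag bit carried in bit 0 of the pointer value */
static inline int is_dirty(struct page_like *p)
{
	return (int)((uintptr_t)p & 1);
}

/* set the tag bit in place, mirroring ion_buffer_page_dirty() */
static inline void mark_dirty(struct page_like **p)
{
	*p = (struct page_like *)((uintptr_t)*p | 1);
}

int main(void)
{
	static struct page_like page = { .payload = 42 };
	struct page_like *slot = &page;

	mark_dirty(&slot);
	assert(is_dirty(slot));
	printf("payload = %d\n", untag(slot)->payload);
	return 0;
}
```

The same pattern appears in ion_buffer_sync_for_device() and ion_vm_fault(), which tag entries of `buffer->pages` when a fault dirties a page and strip the tag before syncing it for DMA.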