/*
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion.h"
#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		an rb tree of all the heaps in the system
 * @custom_ioctl:	driver specific ioctl handler, may be NULL
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		root debugfs directory for ion
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node, kmap_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED) {
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise "
			       "sg_lists\n", __func__);
			ret = -EINVAL;
			goto err;
		}

		ret = ion_buffer_alloc_dirty(buffer);
		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/* this will set up dma addresses for the sglist -- it is not
	   technically correct as per the dma api -- a specific
	   device isn't really taking ownership here.  However, in practice on
	   our systems the only dma_address space is physical addresses.
	   Additionally, we can't afford the overhead of invalidating every
	   allocation via dma_map_sg.  The implicit contract here is that
	   memory coming from the heaps is ready for dma, i.e. if it has a
	   cached mapping that mapping has been invalidated */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	kfree(buffer);
	return ERR_PTR(ret);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	if (buffer->flags & ION_FLAG_CACHED)
		kfree(buffer->dirty);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}
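
/*
 * Illustrative sketch (not part of the driver): how the buffer refcount is
 * expected to move over a typical lifetime.  ion_buffer_create() leaves the
 * kref at 1, ion_handle_create() takes a second reference through
 * ion_buffer_get(), and ion_share_dma_buf() takes another for the exported
 * dma_buf.  The buffer is only destroyed when the final ion_buffer_put()
 * drops the count to zero and kref_put() invokes ion_buffer_destroy().
 */
#if 0	/* example only, never compiled */
static void ion_buffer_refcount_example(struct ion_buffer *buffer)
{
	ion_buffer_get(buffer);		/* e.g. a new handle references it */
	ion_buffer_get(buffer);		/* e.g. a dma_buf export references it */

	ion_buffer_put(buffer);		/* handle goes away: buffer survives */
	ion_buffer_put(buffer);		/* dma_buf released: buffer survives */
	ion_buffer_put(buffer);		/* last ref: ion_buffer_destroy() runs */
}
#endif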

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->dev->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->dev->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle and it is not in any other
	 * handles, copy the task comm and the pid of the process it's being
	 * removed from into the buffer.  At this point there will be no way
	 * to track what processes this buffer is being used by; it only
	 * exists as a dma_buf file descriptor.  The task comm and pid can
	 * provide a debug hint as to where this fd is in the system.
	 */
	mutex_lock(&buffer->dev->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->dev->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocation has succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
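
/*
 * Illustrative sketch (not part of the driver): allocating from a kernel
 * client.  Note the asymmetry in the loop above: the client's mask is
 * checked against the heap *type* while the caller's heap_mask selects heap
 * *ids*.  MY_HEAP_ID and the client name below are placeholders, not
 * symbols defined by ion.
 */
#if 0	/* example only, never compiled */
static int ion_alloc_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;

	/* accept any heap type for this client */
	client = ion_client_create(idev, -1, "example");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	/* one page, page aligned, from the heap with id MY_HEAP_ID,
	   with a cached mapping requested */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
			   1 << MY_HEAP_ID, ION_FLAG_CACHED);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
#endif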

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
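
/*
 * Illustrative sketch (not part of the driver): temporarily mapping a
 * handle into the kernel.  ion_map_kernel() returns an ERR_PTR on failure,
 * and the mapping is reference counted per handle and per buffer, so every
 * successful map must be paired with an unmap.
 */
#if 0	/* example only, never compiled */
static int ion_kmap_example(struct ion_client *client,
			    struct ion_handle *handle, size_t size)
{
	void *vaddr;

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR_OR_NULL(vaddr))
		return -ENOMEM;

	memset(vaddr, 0, size);		/* CPU access through the kernel mapping */

	ion_unmap_kernel(client, handle);
	return 0;
}
#endif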

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}
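
/*
 * Sizing note for the dirty bitmap above: one bit is kept per sg entry
 * (one entry per page for cached buffers), rounded up to whole longs.
 * For example, a 1 MB cached buffer with 4 KB pages has 256 entries,
 * which needs 256 / BITS_PER_LONG = 4 longs on a 64-bit kernel
 * (8 longs on 32-bit).  The rounding expression above is equivalent to
 * the kernel's BITS_TO_LONGS() helper.
 */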

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!(buffer->flags & ION_FLAG_CACHED))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
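
/*
 * How the pieces above fit together for cached buffers: ion_mmap() below
 * installs ion_vma_ops without prepopulating the mapping, so the first CPU
 * touch of each page goes through ion_vm_fault(), which marks the page
 * dirty and syncs it for the CPU before inserting it.  When a device is
 * about to use the buffer, ion_buffer_sync_for_device() flushes only the
 * pages marked dirty and then zaps every tracked userspace mapping, so the
 * next CPU access faults again and the cycle repeats.  Uncached buffers
 * skip all of this and are simply mapped write-combined.
 */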

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
	} else {
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mutex_lock(&buffer->lock);
		/* now map it to userspace */
		ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
		mutex_unlock(&buffer->lock);
	}

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
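
/*
 * Illustrative sketch (not part of the driver): how another device driver
 * would consume a buffer exported through the ops above, using only the
 * generic dma-buf interfaces.  dma_buf_map_attachment() ends up in
 * ion_map_dma_buf(), which syncs any dirty pages before handing out the
 * buffer's sg_table.  The device pointer is a placeholder.
 */
#if 0	/* example only, never compiled */
static int ion_dma_buf_consumer_example(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return -EINVAL;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return PTR_ERR(sgt);
	}

	/* ... program the device with the addresses in sgt ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
	return 0;
}
#endif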

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
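
/*
 * Illustrative sketch (not part of the driver): passing a buffer between
 * two clients by fd.  ion_share_dma_buf() installs a dma_buf fd in the
 * current process; ion_import_dma_buf() resolves such an fd back to a
 * handle, reusing an existing handle if the client already references the
 * same buffer.
 */
#if 0	/* example only, never compiled */
static int ion_share_import_example(struct ion_client *producer,
				    struct ion_handle *handle,
				    struct ion_client *consumer)
{
	struct ion_handle *imported;
	int fd;

	fd = ion_share_dma_buf(producer, handle);
	if (fd < 0)
		return fd;

	/* typically the fd would be sent to another process here */

	imported = ion_import_dma_buf(consumer, fd);
	if (IS_ERR(imported))
		return PTR_ERR(imported);

	ion_free(consumer, imported);
	return 0;
}
#endif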

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;
	ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
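
/*
 * Illustrative sketch (not part of the driver): the userspace side of the
 * ioctl interface above, assuming the request codes and structures exported
 * by the ion.h uapi header.  A process allocates a buffer, turns the opaque
 * handle into a mmap-able dma_buf fd with ION_IOC_SHARE, and then releases
 * the handle.  The heap mask value is a placeholder.
 */
#if 0	/* example only, never compiled (userspace code) */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int ion_user_example(void)
{
	struct ion_allocation_data alloc_data = {
		.len = 4096,
		.align = 4096,
		.heap_mask = 1 << 0,	/* placeholder heap id */
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data fd_data;
	struct ion_handle_data free_data;
	void *ptr;
	int ion_fd;

	ion_fd = open("/dev/ion", O_RDONLY);
	if (ion_fd < 0)
		return -1;

	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data) < 0)
		goto err;

	fd_data.handle = alloc_data.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &fd_data) < 0)
		goto err;

	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd_data.fd, 0);
	if (ptr != MAP_FAILED) {
		/* ... use ptr ... */
		munmap(ptr, 4096);
	}
	close(fd_data.fd);

	free_data.handle = alloc_data.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
	close(ion_fd);
	return 0;
err:
	close(ion_fd);
	return -1;
}
#endif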

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
	seq_printf(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16.s %16u %16u\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16.s %16u %16u\n", client->name,
				   client->pid, size);
		}
	}
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "orphaned allocations (info is from last known client):"
		   "\n");
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->type == heap->type)
			total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16.s %16u %16u\n", buffer->task_comm,
				   buffer->pid, buffer->size);
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->lock);
	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.s %16u\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16.s %16u\n", "total ", total_size);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
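
/*
 * Illustrative sketch (not part of the driver): how a platform driver is
 * expected to wire ion up at boot.  ion_device_create() registers the
 * /dev/ion misc device, and each heap is then added with
 * ion_device_add_heap().  The heap objects themselves come from the heap
 * implementations; num_heaps and heaps[] below are placeholders standing
 * in for platform data, not ion symbols.
 */
#if 0	/* example only, never compiled */
static struct ion_device *example_idev;

static int example_ion_probe(int num_heaps, struct ion_heap **heaps)
{
	int i;

	example_idev = ion_device_create(NULL);	/* no custom ioctl handler */
	if (IS_ERR_OR_NULL(example_idev))
		return PTR_ERR(example_idev);

	for (i = 0; i < num_heaps; i++)
		ion_device_add_heap(example_idev, heaps[i]);

	return 0;
}
#endif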

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %x@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}
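
/*
 * Illustrative sketch (not part of the driver): reserving carveout memory
 * from a board file before the page allocator takes over.  ion_reserve()
 * only calls memblock_reserve() on heaps with a non-zero size, so it must
 * run at early init.  The platform data below is a placeholder; only the
 * nr/heaps fields and the base/size members used above are assumed.
 */
#if 0	/* example only, never compiled */
static struct ion_platform_heap example_heaps[] = {
	{ .base = 0x80000000, .size = SZ_16M },	/* placeholder carveout */
};

static struct ion_platform_data example_ion_pdata = {
	.nr = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};

static void __init example_board_reserve(void)
{
	ion_reserve(&example_ion_pdata);
}
#endif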