1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45  * struct ion_device - the metadata of the ion device node
46  * @dev:                the actual misc device
47  * @buffers:            an rb tree of all the existing buffers
48  * @buffer_lock:        lock protecting the tree of buffers
49  * @lock:               rwsem protecting the tree of heaps and clients
50  * @heaps:              priority list of all the heaps in the system
51  * @clients:            rb tree of all the clients created against this device
52  */
53 struct ion_device {
54         struct miscdevice dev;
55         struct rb_root buffers;
56         struct mutex buffer_lock;
57         struct rw_semaphore lock;
58         struct plist_head heaps;
59         long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
60                              unsigned long arg);
61         struct rb_root clients;
62         struct dentry *debug_root;
63         struct dentry *heaps_debug_root;
64         struct dentry *clients_debug_root;
65 };
66
67 /**
68  * struct ion_client - a process/hw block local address space
69  * @node:               node in the tree of all clients
70  * @dev:                backpointer to ion device
71  * @handles:            an rb tree of all the handles in this client
72  * @idr:                an idr space for allocating handle ids
73  * @lock:               lock protecting the tree of handles
74  * @name:               used for debugging
75  * @display_name:       used for debugging (unique version of @name)
76  * @display_serial:     used for debugging (to make display_name unique)
77  * @task:               used for debugging
78  *
79  * A client represents a list of buffers this client may access.
80  * The mutex stored here is used to protect both the tree of handles
81  * and the handles themselves, and should be held while modifying either.
82  */
83 struct ion_client {
84         struct rb_node node;
85         struct ion_device *dev;
86         struct rb_root handles;
87         struct idr idr;
88         struct mutex lock;
89         const char *name;
90         char *display_name;
91         int display_serial;
92         struct task_struct *task;
93         pid_t pid;
94         struct dentry *debug_root;
95 };
96
97 /**
98  * ion_handle - a client local reference to a buffer
99  * @ref:                reference count
100  * @client:             back pointer to the client the buffer resides in
101  * @buffer:             pointer to the buffer
102  * @node:               node in the client's handle rbtree
103  * @kmap_cnt:           count of times this client has mapped to kernel
104  * @id:                 client-unique id allocated by client->idr
105  *
106  * Modifications to node and kmap_cnt should be protected by the
107  * lock in the client.  Other fields are never changed after initialization.
108  */
109 struct ion_handle {
110         struct kref ref;
111         struct ion_client *client;
112         struct ion_buffer *buffer;
113         struct rb_node node;
114         unsigned int kmap_cnt;
115         int id;
116 };
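/*
 * Taken together: an ion_client owns ion_handles (tracked in both
 * client->handles and client->idr), and each handle holds one reference
 * on an ion_buffer.  Buffers are refcounted separately and stay in
 * dev->buffers until the last handle and the last dma_buf reference are
 * dropped, at which point _ion_buffer_destroy() is called.
 */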
117
118 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
119 {
120         return (buffer->flags & ION_FLAG_CACHED) &&
121                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
122 }
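/*
 * Only buffers that are CPU-cached but not flagged for explicit sync
 * (ION_FLAG_CACHED_NEEDS_SYNC) are faulted into userspace page by page;
 * the per-page dirty tracking below lets ion_buffer_sync_for_device()
 * flush only the pages that were actually touched before a device uses
 * the buffer.
 */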
123
124 bool ion_buffer_cached(struct ion_buffer *buffer)
125 {
126         return !!(buffer->flags & ION_FLAG_CACHED);
127 }
128
129 static inline struct page *ion_buffer_page(struct page *page)
130 {
131         return (struct page *)((unsigned long)page & ~(1UL));
132 }
133
134 static inline bool ion_buffer_page_is_dirty(struct page *page)
135 {
136         return !!((unsigned long)page & 1UL);
137 }
138
139 static inline void ion_buffer_page_dirty(struct page **page)
140 {
141         *page = (struct page *)((unsigned long)(*page) | 1UL);
142 }
143
144 static inline void ion_buffer_page_clean(struct page **page)
145 {
146         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
147 }
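/*
 * The helpers above keep a dirty flag in bit 0 of each struct page
 * pointer in buffer->pages, which is safe because the pointers are at
 * least word aligned.  A tagged entry round-trips as:
 *
 *	ion_buffer_page_dirty(&buffer->pages[i]);	-- sets bit 0
 *	ion_buffer_page_is_dirty(buffer->pages[i]);	-- tests bit 0
 *	ion_buffer_page(buffer->pages[i]);		-- strips bit 0
 */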
148
149 /* this function should only be called while dev->lock is held */
150 static void ion_buffer_add(struct ion_device *dev,
151                            struct ion_buffer *buffer)
152 {
153         struct rb_node **p = &dev->buffers.rb_node;
154         struct rb_node *parent = NULL;
155         struct ion_buffer *entry;
156
157         while (*p) {
158                 parent = *p;
159                 entry = rb_entry(parent, struct ion_buffer, node);
160
161                 if (buffer < entry) {
162                         p = &(*p)->rb_left;
163                 } else if (buffer > entry) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         pr_err("%s: buffer already found.\n", __func__);
167                         BUG();
168                 }
169         }
170
171         rb_link_node(&buffer->node, parent, p);
172         rb_insert_color(&buffer->node, &dev->buffers);
173 }
174
175 /* this function should only be called while dev->lock is held */
176 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
177                                      struct ion_device *dev,
178                                      unsigned long len,
179                                      unsigned long align,
180                                      unsigned long flags)
181 {
182         struct ion_buffer *buffer;
183         struct sg_table *table;
184         struct scatterlist *sg;
185         int i, ret;
186
187         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
188         if (!buffer)
189                 return ERR_PTR(-ENOMEM);
190
191         buffer->heap = heap;
192         buffer->flags = flags;
193         kref_init(&buffer->ref);
194
195         ret = heap->ops->allocate(heap, buffer, len, align, flags);
196
197         if (ret) {
198                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
199                         goto err2;
200
201                 ion_heap_freelist_drain(heap, 0);
202                 ret = heap->ops->allocate(heap, buffer, len, align,
203                                           flags);
204                 if (ret)
205                         goto err2;
206         }
207
208         buffer->dev = dev;
209         buffer->size = len;
210
211         table = heap->ops->map_dma(heap, buffer);
212         if (WARN_ONCE(table == NULL,
213                         "heap->ops->map_dma should return ERR_PTR on error"))
214                 table = ERR_PTR(-EINVAL);
215         if (IS_ERR(table)) {
216                 heap->ops->free(buffer);
217                 kfree(buffer);
218                 return ERR_CAST(table);
219         }
220         buffer->sg_table = table;
221         if (ion_buffer_fault_user_mappings(buffer)) {
222                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
223                 struct scatterlist *sg;
224                 int i, j, k = 0;
225
226                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
227                 if (!buffer->pages) {
228                         ret = -ENOMEM;
229                         goto err1;
230                 }
231
232                 for_each_sg(table->sgl, sg, table->nents, i) {
233                         struct page *page = sg_page(sg);
234
235                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
236                                 buffer->pages[k++] = page++;
237                 }
238
239                 if (ret)
240                         goto err;
241         }
242
243         buffer->dev = dev;
244         buffer->size = len;
245         INIT_LIST_HEAD(&buffer->vmas);
246         mutex_init(&buffer->lock);
247         /* this will set up dma addresses for the sglist -- it is not
248            technically correct as per the dma api -- a specific
249            device isn't really taking ownership here.  However, in practice on
250            our systems the only dma_address space is physical addresses.
251            Additionally, we can't afford the overhead of invalidating every
252            allocation via dma_map_sg. The implicit contract here is that
253            memory coming from the heaps is ready for dma, i.e. if it has a
254            cached mapping that mapping has been invalidated */
255         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
256                 sg_dma_address(sg) = sg_phys(sg);
257         mutex_lock(&dev->buffer_lock);
258         ion_buffer_add(dev, buffer);
259         mutex_unlock(&dev->buffer_lock);
260         return buffer;
261
262 err:
263         heap->ops->unmap_dma(heap, buffer);
264         heap->ops->free(buffer);
265 err1:
266         if (buffer->pages)
267                 vfree(buffer->pages);
268 err2:
269         kfree(buffer);
270         return ERR_PTR(ret);
271 }
272
273 void ion_buffer_destroy(struct ion_buffer *buffer)
274 {
275         if (WARN_ON(buffer->kmap_cnt > 0))
276                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
277         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
278         buffer->heap->ops->free(buffer);
279         if (buffer->pages)
280                 vfree(buffer->pages);
281         kfree(buffer);
282 }
283
284 static void _ion_buffer_destroy(struct kref *kref)
285 {
286         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
287         struct ion_heap *heap = buffer->heap;
288         struct ion_device *dev = buffer->dev;
289
290         mutex_lock(&dev->buffer_lock);
291         rb_erase(&buffer->node, &dev->buffers);
292         mutex_unlock(&dev->buffer_lock);
293
294         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
295                 ion_heap_freelist_add(heap, buffer);
296         else
297                 ion_buffer_destroy(buffer);
298 }
299
300 static void ion_buffer_get(struct ion_buffer *buffer)
301 {
302         kref_get(&buffer->ref);
303 }
304
305 static int ion_buffer_put(struct ion_buffer *buffer)
306 {
307         return kref_put(&buffer->ref, _ion_buffer_destroy);
308 }
309
310 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
311 {
312         mutex_lock(&buffer->lock);
313         buffer->handle_count++;
314         mutex_unlock(&buffer->lock);
315 }
316
317 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
318 {
319         /*
320          * when a buffer is removed from a handle, if it is not in
321          * any other handles, copy the taskcomm and the pid of the
322          * process it's being removed from into the buffer.  At this
323          * point there will be no way to track what processes this buffer is
324          * being used by, it only exists as a dma_buf file descriptor.
325          * The taskcomm and pid can provide a debug hint as to where this fd
326          * is in the system
327          */
328         mutex_lock(&buffer->lock);
329         buffer->handle_count--;
330         BUG_ON(buffer->handle_count < 0);
331         if (!buffer->handle_count) {
332                 struct task_struct *task;
333
334                 task = current->group_leader;
335                 get_task_comm(buffer->task_comm, task);
336                 buffer->pid = task_pid_nr(task);
337         }
338         mutex_unlock(&buffer->lock);
339 }
340
341 static struct ion_handle *ion_handle_create(struct ion_client *client,
342                                      struct ion_buffer *buffer)
343 {
344         struct ion_handle *handle;
345
346         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
347         if (!handle)
348                 return ERR_PTR(-ENOMEM);
349         kref_init(&handle->ref);
350         RB_CLEAR_NODE(&handle->node);
351         handle->client = client;
352         ion_buffer_get(buffer);
353         ion_buffer_add_to_handle(buffer);
354         handle->buffer = buffer;
355
356         return handle;
357 }
358
359 static void ion_handle_kmap_put(struct ion_handle *);
360
361 static void ion_handle_destroy(struct kref *kref)
362 {
363         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
364         struct ion_client *client = handle->client;
365         struct ion_buffer *buffer = handle->buffer;
366
367         mutex_lock(&buffer->lock);
368         while (handle->kmap_cnt)
369                 ion_handle_kmap_put(handle);
370         mutex_unlock(&buffer->lock);
371
372         idr_remove(&client->idr, handle->id);
373         if (!RB_EMPTY_NODE(&handle->node))
374                 rb_erase(&handle->node, &client->handles);
375
376         ion_buffer_remove_from_handle(buffer);
377         ion_buffer_put(buffer);
378
379         kfree(handle);
380 }
381
382 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
383 {
384         return handle->buffer;
385 }
386
387 static void ion_handle_get(struct ion_handle *handle)
388 {
389         kref_get(&handle->ref);
390 }
391
392 static int ion_handle_put(struct ion_handle *handle)
393 {
394         struct ion_client *client = handle->client;
395         int ret;
396
397         mutex_lock(&client->lock);
398         ret = kref_put(&handle->ref, ion_handle_destroy);
399         mutex_unlock(&client->lock);
400
401         return ret;
402 }
403
404 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
405                                             struct ion_buffer *buffer)
406 {
407         struct rb_node *n = client->handles.rb_node;
408
409         while (n) {
410                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
411
412                 if (buffer < entry->buffer)
413                         n = n->rb_left;
414                 else if (buffer > entry->buffer)
415                         n = n->rb_right;
416                 else
417                         return entry;
418         }
419         return ERR_PTR(-EINVAL);
420 }
421
422 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
423                                                 int id)
424 {
425         struct ion_handle *handle;
426
427         mutex_lock(&client->lock);
428         handle = idr_find(&client->idr, id);
429         if (handle)
430                 ion_handle_get(handle);
431         mutex_unlock(&client->lock);
432
433         return handle ? handle : ERR_PTR(-EINVAL);
434 }
435
436 static bool ion_handle_validate(struct ion_client *client,
437                                 struct ion_handle *handle)
438 {
439         WARN_ON(!mutex_is_locked(&client->lock));
440         return idr_find(&client->idr, handle->id) == handle;
441 }
442
443 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
444 {
445         int id;
446         struct rb_node **p = &client->handles.rb_node;
447         struct rb_node *parent = NULL;
448         struct ion_handle *entry;
449
450         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
451         if (id < 0)
452                 return id;
453
454         handle->id = id;
455
456         while (*p) {
457                 parent = *p;
458                 entry = rb_entry(parent, struct ion_handle, node);
459
460                 if (handle->buffer < entry->buffer)
461                         p = &(*p)->rb_left;
462                 else if (handle->buffer > entry->buffer)
463                         p = &(*p)->rb_right;
464                 else
465                         WARN(1, "%s: buffer already found.\n", __func__);
466         }
467
468         rb_link_node(&handle->node, parent, p);
469         rb_insert_color(&handle->node, &client->handles);
470
471         return 0;
472 }
473
474 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
475                              size_t align, unsigned int heap_id_mask,
476                              unsigned int flags)
477 {
478         struct ion_handle *handle;
479         struct ion_device *dev = client->dev;
480         struct ion_buffer *buffer = NULL;
481         struct ion_heap *heap;
482         int ret;
483
484         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
485                  len, align, heap_id_mask, flags);
486         /*
487          * traverse the list of heaps available in this system in priority
488          * order.  If the heap type is supported by the client and matches the
489          * request of the caller, allocate from it.  Repeat until the allocation
490          * has succeeded or all heaps have been tried.
491          */
492         len = PAGE_ALIGN(len);
493
494         if (!len)
495                 return ERR_PTR(-EINVAL);
496
497         down_read(&dev->lock);
498         plist_for_each_entry(heap, &dev->heaps, node) {
499                 /* if the caller didn't specify this heap id */
500                 if (!((1 << heap->id) & heap_id_mask))
501                         continue;
502                 buffer = ion_buffer_create(heap, dev, len, align, flags);
503                 if (!IS_ERR(buffer))
504                         break;
505         }
506         up_read(&dev->lock);
507
508         if (buffer == NULL)
509                 return ERR_PTR(-ENODEV);
510
511         if (IS_ERR(buffer))
512                 return ERR_CAST(buffer);
513
514         handle = ion_handle_create(client, buffer);
515
516         /*
517          * ion_buffer_create will create a buffer with a ref_cnt of 1,
518          * and ion_handle_create will take a second reference, drop one here
519          */
520         ion_buffer_put(buffer);
521
522         if (IS_ERR(handle))
523                 return handle;
524
525         mutex_lock(&client->lock);
526         ret = ion_handle_add(client, handle);
527         mutex_unlock(&client->lock);
528         if (ret) {
529                 ion_handle_put(handle);
530                 handle = ERR_PTR(ret);
531         }
532
533         return handle;
534 }
535 EXPORT_SYMBOL(ion_alloc);
536
537 void ion_free(struct ion_client *client, struct ion_handle *handle)
538 {
539         bool valid_handle;
540
541         BUG_ON(client != handle->client);
542
543         mutex_lock(&client->lock);
544         valid_handle = ion_handle_validate(client, handle);
545
546         if (!valid_handle) {
547                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
548                 mutex_unlock(&client->lock);
549                 return;
550         }
551         mutex_unlock(&client->lock);
552         ion_handle_put(handle);
553 }
554 EXPORT_SYMBOL(ion_free);
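/*
 * A rough sketch of how a kernel driver would use the client API above
 * (illustrative only: idev is the struct ion_device returned by
 * ion_device_create(), MY_PLATFORM_HEAP_ID is a placeholder for a
 * platform-defined heap id, and error handling is abbreviated):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   1 << MY_PLATFORM_HEAP_ID, ION_FLAG_CACHED);
 *	if (IS_ERR(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	vaddr = ion_map_kernel(client, handle);
 *	... use vaddr ...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */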
555
556 int ion_phys(struct ion_client *client, struct ion_handle *handle,
557              ion_phys_addr_t *addr, size_t *len)
558 {
559         struct ion_buffer *buffer;
560         int ret;
561
562         mutex_lock(&client->lock);
563         if (!ion_handle_validate(client, handle)) {
564                 mutex_unlock(&client->lock);
565                 return -EINVAL;
566         }
567
568         buffer = handle->buffer;
569
570         if (!buffer->heap->ops->phys) {
571                 pr_err("%s: ion_phys is not implemented by this heap.\n",
572                        __func__);
573                 mutex_unlock(&client->lock);
574                 return -ENODEV;
575         }
576         mutex_unlock(&client->lock);
577         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
578         return ret;
579 }
580 EXPORT_SYMBOL(ion_phys);
581
582 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
583 {
584         void *vaddr;
585
586         if (buffer->kmap_cnt) {
587                 buffer->kmap_cnt++;
588                 return buffer->vaddr;
589         }
590         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
591         if (WARN_ONCE(vaddr == NULL,
592                         "heap->ops->map_kernel should return ERR_PTR on error"))
593                 return ERR_PTR(-EINVAL);
594         if (IS_ERR(vaddr))
595                 return vaddr;
596         buffer->vaddr = vaddr;
597         buffer->kmap_cnt++;
598         return vaddr;
599 }
600
601 static void *ion_handle_kmap_get(struct ion_handle *handle)
602 {
603         struct ion_buffer *buffer = handle->buffer;
604         void *vaddr;
605
606         if (handle->kmap_cnt) {
607                 handle->kmap_cnt++;
608                 return buffer->vaddr;
609         }
610         vaddr = ion_buffer_kmap_get(buffer);
611         if (IS_ERR(vaddr))
612                 return vaddr;
613         handle->kmap_cnt++;
614         return vaddr;
615 }
616
617 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
618 {
619         buffer->kmap_cnt--;
620         if (!buffer->kmap_cnt) {
621                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
622                 buffer->vaddr = NULL;
623         }
624 }
625
626 static void ion_handle_kmap_put(struct ion_handle *handle)
627 {
628         struct ion_buffer *buffer = handle->buffer;
629
630         if (!handle->kmap_cnt) {
631                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
632                 return;
633         }
634         handle->kmap_cnt--;
635         if (!handle->kmap_cnt)
636                 ion_buffer_kmap_put(buffer);
637 }
638
639 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
640 {
641         struct ion_buffer *buffer;
642         void *vaddr;
643
644         mutex_lock(&client->lock);
645         if (!ion_handle_validate(client, handle)) {
646                 pr_err("%s: invalid handle passed to map_kernel.\n",
647                        __func__);
648                 mutex_unlock(&client->lock);
649                 return ERR_PTR(-EINVAL);
650         }
651
652         buffer = handle->buffer;
653
654         if (!handle->buffer->heap->ops->map_kernel) {
655                 pr_err("%s: map_kernel is not implemented by this heap.\n",
656                        __func__);
657                 mutex_unlock(&client->lock);
658                 return ERR_PTR(-ENODEV);
659         }
660
661         mutex_lock(&buffer->lock);
662         vaddr = ion_handle_kmap_get(handle);
663         mutex_unlock(&buffer->lock);
664         mutex_unlock(&client->lock);
665         return vaddr;
666 }
667 EXPORT_SYMBOL(ion_map_kernel);
668
669 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
670 {
671         struct ion_buffer *buffer;
672
673         mutex_lock(&client->lock);
674         buffer = handle->buffer;
675         mutex_lock(&buffer->lock);
676         ion_handle_kmap_put(handle);
677         mutex_unlock(&buffer->lock);
678         mutex_unlock(&client->lock);
679 }
680 EXPORT_SYMBOL(ion_unmap_kernel);
681
682 static int ion_debug_client_show(struct seq_file *s, void *unused)
683 {
684         struct ion_client *client = s->private;
685         struct rb_node *n;
686         size_t sizes[ION_NUM_HEAP_IDS] = {0};
687         const char *names[ION_NUM_HEAP_IDS] = {NULL};
688         int i;
689
690         mutex_lock(&client->lock);
691         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
692                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
693                                                      node);
694                 unsigned int id = handle->buffer->heap->id;
695
696                 if (!names[id])
697                         names[id] = handle->buffer->heap->name;
698                 sizes[id] += handle->buffer->size;
699         }
700         mutex_unlock(&client->lock);
701
702         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
703         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
704                 if (!names[i])
705                         continue;
706                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
707         }
708         return 0;
709 }
710
711 static int ion_debug_client_open(struct inode *inode, struct file *file)
712 {
713         return single_open(file, ion_debug_client_show, inode->i_private);
714 }
715
716 static const struct file_operations debug_client_fops = {
717         .open = ion_debug_client_open,
718         .read = seq_read,
719         .llseek = seq_lseek,
720         .release = single_release,
721 };
722
723 static int ion_get_client_serial(const struct rb_root *root,
724                                         const unsigned char *name)
725 {
726         int serial = -1;
727         struct rb_node *node;
728
729         for (node = rb_first(root); node; node = rb_next(node)) {
730                 struct ion_client *client = rb_entry(node, struct ion_client,
731                                                 node);
732
733                 if (strcmp(client->name, name))
734                         continue;
735                 serial = max(serial, client->display_serial);
736         }
737         return serial + 1;
738 }
739
740 struct ion_client *ion_client_create(struct ion_device *dev,
741                                      const char *name)
742 {
743         struct ion_client *client;
744         struct task_struct *task;
745         struct rb_node **p;
746         struct rb_node *parent = NULL;
747         struct ion_client *entry;
748         pid_t pid;
749
750         if (!name) {
751                 pr_err("%s: Name cannot be null\n", __func__);
752                 return ERR_PTR(-EINVAL);
753         }
754
755         get_task_struct(current->group_leader);
756         task_lock(current->group_leader);
757         pid = task_pid_nr(current->group_leader);
758         /* don't bother to store task struct for kernel threads,
759            they can't be killed anyway */
760         if (current->group_leader->flags & PF_KTHREAD) {
761                 put_task_struct(current->group_leader);
762                 task = NULL;
763         } else {
764                 task = current->group_leader;
765         }
766         task_unlock(current->group_leader);
767
768         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
769         if (!client)
770                 goto err_put_task_struct;
771
772         client->dev = dev;
773         client->handles = RB_ROOT;
774         idr_init(&client->idr);
775         mutex_init(&client->lock);
776         client->task = task;
777         client->pid = pid;
778         client->name = kstrdup(name, GFP_KERNEL);
779         if (!client->name)
780                 goto err_free_client;
781
782         down_write(&dev->lock);
783         client->display_serial = ion_get_client_serial(&dev->clients, name);
784         client->display_name = kasprintf(
785                 GFP_KERNEL, "%s-%d", name, client->display_serial);
786         if (!client->display_name) {
787                 up_write(&dev->lock);
788                 goto err_free_client_name;
789         }
790         p = &dev->clients.rb_node;
791         while (*p) {
792                 parent = *p;
793                 entry = rb_entry(parent, struct ion_client, node);
794
795                 if (client < entry)
796                         p = &(*p)->rb_left;
797                 else if (client > entry)
798                         p = &(*p)->rb_right;
799         }
800         rb_link_node(&client->node, parent, p);
801         rb_insert_color(&client->node, &dev->clients);
802
803         client->debug_root = debugfs_create_file(client->display_name, 0664,
804                                                 dev->clients_debug_root,
805                                                 client, &debug_client_fops);
806         if (!client->debug_root) {
807                 char buf[256], *path;
808                 path = dentry_path(dev->clients_debug_root, buf, 256);
809                 pr_err("Failed to create client debugfs at %s/%s\n",
810                         path, client->display_name);
811         }
812
813         up_write(&dev->lock);
814
815         return client;
816
817 err_free_client_name:
818         kfree(client->name);
819 err_free_client:
820         kfree(client);
821 err_put_task_struct:
822         if (task)
823                 put_task_struct(current->group_leader);
824         return ERR_PTR(-ENOMEM);
825 }
826 EXPORT_SYMBOL(ion_client_create);
827
828 void ion_client_destroy(struct ion_client *client)
829 {
830         struct ion_device *dev = client->dev;
831         struct rb_node *n;
832
833         pr_debug("%s: %d\n", __func__, __LINE__);
834         while ((n = rb_first(&client->handles))) {
835                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
836                                                      node);
837                 ion_handle_destroy(&handle->ref);
838         }
839
840         idr_destroy(&client->idr);
841
842         down_write(&dev->lock);
843         if (client->task)
844                 put_task_struct(client->task);
845         rb_erase(&client->node, &dev->clients);
846         debugfs_remove_recursive(client->debug_root);
847         up_write(&dev->lock);
848
849         kfree(client->display_name);
850         kfree(client->name);
851         kfree(client);
852 }
853 EXPORT_SYMBOL(ion_client_destroy);
854
855 struct sg_table *ion_sg_table(struct ion_client *client,
856                               struct ion_handle *handle)
857 {
858         struct ion_buffer *buffer;
859         struct sg_table *table;
860
861         mutex_lock(&client->lock);
862         if (!ion_handle_validate(client, handle)) {
863                 pr_err("%s: invalid handle passed to map_dma.\n",
864                        __func__);
865                 mutex_unlock(&client->lock);
866                 return ERR_PTR(-EINVAL);
867         }
868         buffer = handle->buffer;
869         table = buffer->sg_table;
870         mutex_unlock(&client->lock);
871         return table;
872 }
873 EXPORT_SYMBOL(ion_sg_table);
874
875 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
876                                        struct device *dev,
877                                        enum dma_data_direction direction);
878
879 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
880                                         enum dma_data_direction direction)
881 {
882         struct dma_buf *dmabuf = attachment->dmabuf;
883         struct ion_buffer *buffer = dmabuf->priv;
884
885         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
886         return buffer->sg_table;
887 }
888
889 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
890                               struct sg_table *table,
891                               enum dma_data_direction direction)
892 {
893 }
894
895 void ion_pages_sync_for_device(struct device *dev, struct page *page,
896                 size_t size, enum dma_data_direction dir)
897 {
898         struct scatterlist sg;
899
900         sg_init_table(&sg, 1);
901         sg_set_page(&sg, page, size, 0);
902         /*
903          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
904          * for the targeted device, but this works on the currently targeted
905          * hardware.
906          */
907         sg_dma_address(&sg) = page_to_phys(page);
908         dma_sync_sg_for_device(dev, &sg, 1, dir);
909 }
910
911 struct ion_vma_list {
912         struct list_head list;
913         struct vm_area_struct *vma;
914 };
915
916 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
917                                        struct device *dev,
918                                        enum dma_data_direction dir)
919 {
920         struct ion_vma_list *vma_list;
921         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
922         int i;
923
924         pr_debug("%s: syncing for device %s\n", __func__,
925                  dev ? dev_name(dev) : "null");
926
927         if (!ion_buffer_fault_user_mappings(buffer))
928                 return;
929
930         mutex_lock(&buffer->lock);
931         for (i = 0; i < pages; i++) {
932                 struct page *page = buffer->pages[i];
933
934                 if (ion_buffer_page_is_dirty(page))
935                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
936                                                         PAGE_SIZE, dir);
937
938                 ion_buffer_page_clean(buffer->pages + i);
939         }
940         list_for_each_entry(vma_list, &buffer->vmas, list) {
941                 struct vm_area_struct *vma = vma_list->vma;
942
943                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
944                                NULL);
945         }
946         mutex_unlock(&buffer->lock);
947 }
948
949 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
950 {
951         struct ion_buffer *buffer = vma->vm_private_data;
952         unsigned long pfn;
953         int ret;
954
955         mutex_lock(&buffer->lock);
956         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
957         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
958
959         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
960         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
961         mutex_unlock(&buffer->lock);
962         if (ret)
963                 return VM_FAULT_ERROR;
964
965         return VM_FAULT_NOPAGE;
966 }
967
968 static void ion_vm_open(struct vm_area_struct *vma)
969 {
970         struct ion_buffer *buffer = vma->vm_private_data;
971         struct ion_vma_list *vma_list;
972
973         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
974         if (!vma_list)
975                 return;
976         vma_list->vma = vma;
977         mutex_lock(&buffer->lock);
978         list_add(&vma_list->list, &buffer->vmas);
979         mutex_unlock(&buffer->lock);
980         pr_debug("%s: adding %p\n", __func__, vma);
981 }
982
983 static void ion_vm_close(struct vm_area_struct *vma)
984 {
985         struct ion_buffer *buffer = vma->vm_private_data;
986         struct ion_vma_list *vma_list, *tmp;
987
988         pr_debug("%s\n", __func__);
989         mutex_lock(&buffer->lock);
990         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
991                 if (vma_list->vma != vma)
992                         continue;
993                 list_del(&vma_list->list);
994                 kfree(vma_list);
995                 pr_debug("%s: deleting %p\n", __func__, vma);
996                 break;
997         }
998         mutex_unlock(&buffer->lock);
999 }
1000
1001 static struct vm_operations_struct ion_vma_ops = {
1002         .open = ion_vm_open,
1003         .close = ion_vm_close,
1004         .fault = ion_vm_fault,
1005 };
1006
1007 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1008 {
1009         struct ion_buffer *buffer = dmabuf->priv;
1010         int ret = 0;
1011
1012         if (!buffer->heap->ops->map_user) {
1013                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1014                         __func__);
1015                 return -EINVAL;
1016         }
1017
1018         if (ion_buffer_fault_user_mappings(buffer)) {
1019                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1020                                                         VM_DONTDUMP;
1021                 vma->vm_private_data = buffer;
1022                 vma->vm_ops = &ion_vma_ops;
1023                 ion_vm_open(vma);
1024                 return 0;
1025         }
1026
1027         if (!(buffer->flags & ION_FLAG_CACHED))
1028                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1029
1030         mutex_lock(&buffer->lock);
1031         /* now map it to userspace */
1032         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1033         mutex_unlock(&buffer->lock);
1034
1035         if (ret)
1036                 pr_err("%s: failure mapping buffer to userspace\n",
1037                        __func__);
1038
1039         return ret;
1040 }
1041
1042 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1043 {
1044         struct ion_buffer *buffer = dmabuf->priv;
1045
1046         ion_buffer_put(buffer);
1047 }
1048
1049 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1050 {
1051         struct ion_buffer *buffer = dmabuf->priv;
1052
1053         return buffer->vaddr + offset * PAGE_SIZE;
1054 }
1055
1056 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1057                                void *ptr)
1058 {
1059         return;
1060 }
1061
1062 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1063                                         size_t len,
1064                                         enum dma_data_direction direction)
1065 {
1066         struct ion_buffer *buffer = dmabuf->priv;
1067         void *vaddr;
1068
1069         if (!buffer->heap->ops->map_kernel) {
1070                 pr_err("%s: map kernel is not implemented by this heap.\n",
1071                        __func__);
1072                 return -ENODEV;
1073         }
1074
1075         mutex_lock(&buffer->lock);
1076         vaddr = ion_buffer_kmap_get(buffer);
1077         mutex_unlock(&buffer->lock);
1078         return PTR_ERR_OR_ZERO(vaddr);
1079 }
1080
1081 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1082                                        size_t len,
1083                                        enum dma_data_direction direction)
1084 {
1085         struct ion_buffer *buffer = dmabuf->priv;
1086
1087         mutex_lock(&buffer->lock);
1088         ion_buffer_kmap_put(buffer);
1089         mutex_unlock(&buffer->lock);
1090 }
1091
1092 static struct dma_buf_ops dma_buf_ops = {
1093         .map_dma_buf = ion_map_dma_buf,
1094         .unmap_dma_buf = ion_unmap_dma_buf,
1095         .mmap = ion_mmap,
1096         .release = ion_dma_buf_release,
1097         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1098         .end_cpu_access = ion_dma_buf_end_cpu_access,
1099         .kmap_atomic = ion_dma_buf_kmap,
1100         .kunmap_atomic = ion_dma_buf_kunmap,
1101         .kmap = ion_dma_buf_kmap,
1102         .kunmap = ion_dma_buf_kunmap,
1103 };
1104
1105 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1106                                                 struct ion_handle *handle)
1107 {
1108         struct ion_buffer *buffer;
1109         struct dma_buf *dmabuf;
1110         bool valid_handle;
1111
1112         mutex_lock(&client->lock);
1113         valid_handle = ion_handle_validate(client, handle);
1114         if (!valid_handle) {
1115                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1116                 mutex_unlock(&client->lock);
1117                 return ERR_PTR(-EINVAL);
1118         }
1119         buffer = handle->buffer;
1120         ion_buffer_get(buffer);
1121         mutex_unlock(&client->lock);
1122
1123         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1124         if (IS_ERR(dmabuf)) {
1125                 ion_buffer_put(buffer);
1126                 return dmabuf;
1127         }
1128
1129         return dmabuf;
1130 }
1131 EXPORT_SYMBOL(ion_share_dma_buf);
1132
1133 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1134 {
1135         struct dma_buf *dmabuf;
1136         int fd;
1137
1138         dmabuf = ion_share_dma_buf(client, handle);
1139         if (IS_ERR(dmabuf))
1140                 return PTR_ERR(dmabuf);
1141
1142         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1143         if (fd < 0)
1144                 dma_buf_put(dmabuf);
1145
1146         return fd;
1147 }
1148 EXPORT_SYMBOL(ion_share_dma_buf_fd);
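/*
 * From userspace the same buffer typically travels as a dma_buf fd -- a
 * rough sketch of the ioctl sequence against /dev/ion (the heap mask is a
 * platform-specific placeholder, error handling omitted):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = MY_PLATFORM_HEAP_MASK,
 *		.flags = 0,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);	-- share.fd is a dma_buf fd
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 */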
1149
1150 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1151 {
1152         struct dma_buf *dmabuf;
1153         struct ion_buffer *buffer;
1154         struct ion_handle *handle;
1155         int ret;
1156
1157         dmabuf = dma_buf_get(fd);
1158         if (IS_ERR(dmabuf))
1159                 return ERR_CAST(dmabuf);
1160         /* if this memory came from ion */
1161
1162         if (dmabuf->ops != &dma_buf_ops) {
1163                 pr_err("%s: can not import dmabuf from another exporter\n",
1164                        __func__);
1165                 dma_buf_put(dmabuf);
1166                 return ERR_PTR(-EINVAL);
1167         }
1168         buffer = dmabuf->priv;
1169
1170         mutex_lock(&client->lock);
1171         /* if a handle exists for this buffer just take a reference to it */
1172         handle = ion_handle_lookup(client, buffer);
1173         if (!IS_ERR(handle)) {
1174                 ion_handle_get(handle);
1175                 mutex_unlock(&client->lock);
1176                 goto end;
1177         }
1178         mutex_unlock(&client->lock);
1179
1180         handle = ion_handle_create(client, buffer);
1181         if (IS_ERR(handle))
1182                 goto end;
1183
1184         mutex_lock(&client->lock);
1185         ret = ion_handle_add(client, handle);
1186         mutex_unlock(&client->lock);
1187         if (ret) {
1188                 ion_handle_put(handle);
1189                 handle = ERR_PTR(ret);
1190         }
1191
1192 end:
1193         dma_buf_put(dmabuf);
1194         return handle;
1195 }
1196 EXPORT_SYMBOL(ion_import_dma_buf);
1197
1198 static int ion_sync_for_device(struct ion_client *client, int fd)
1199 {
1200         struct dma_buf *dmabuf;
1201         struct ion_buffer *buffer;
1202
1203         dmabuf = dma_buf_get(fd);
1204         if (IS_ERR(dmabuf))
1205                 return PTR_ERR(dmabuf);
1206
1207         /* if this memory came from ion */
1208         if (dmabuf->ops != &dma_buf_ops) {
1209                 pr_err("%s: can not sync dmabuf from another exporter\n",
1210                        __func__);
1211                 dma_buf_put(dmabuf);
1212                 return -EINVAL;
1213         }
1214         buffer = dmabuf->priv;
1215
1216         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1217                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1218         dma_buf_put(dmabuf);
1219         return 0;
1220 }
1221
1222 /* fix up the cases where the ioctl direction bits are incorrect */
1223 static unsigned int ion_ioctl_dir(unsigned int cmd)
1224 {
1225         switch (cmd) {
1226         case ION_IOC_SYNC:
1227         case ION_IOC_FREE:
1228         case ION_IOC_CUSTOM:
1229                 return _IOC_WRITE;
1230         default:
1231                 return _IOC_DIR(cmd);
1232         }
1233 }
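/*
 * ION_IOC_FREE, ION_IOC_SYNC and ION_IOC_CUSTOM are encoded with a
 * read/write direction in the uapi header even though nothing needs to be
 * copied back to userspace for them; forcing _IOC_WRITE above keeps
 * ion_ioctl() from doing a pointless copy_to_user().  The uapi encoding
 * itself is left untouched to preserve the ABI.
 */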
1234
1235 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1236 {
1237         struct ion_client *client = filp->private_data;
1238         struct ion_device *dev = client->dev;
1239         struct ion_handle *cleanup_handle = NULL;
1240         int ret = 0;
1241         unsigned int dir;
1242
1243         union {
1244                 struct ion_fd_data fd;
1245                 struct ion_allocation_data allocation;
1246                 struct ion_handle_data handle;
1247                 struct ion_custom_data custom;
1248         } data;
1249
1250         dir = ion_ioctl_dir(cmd);
1251
1252         if (_IOC_SIZE(cmd) > sizeof(data))
1253                 return -EINVAL;
1254
1255         if (dir & _IOC_WRITE)
1256                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1257                         return -EFAULT;
1258
1259         switch (cmd) {
1260         case ION_IOC_ALLOC:
1261         {
1262                 struct ion_handle *handle;
1263
1264                 handle = ion_alloc(client, data.allocation.len,
1265                                                 data.allocation.align,
1266                                                 data.allocation.heap_id_mask,
1267                                                 data.allocation.flags);
1268                 if (IS_ERR(handle))
1269                         return PTR_ERR(handle);
1270
1271                 data.allocation.handle = handle->id;
1272
1273                 cleanup_handle = handle;
1274                 break;
1275         }
1276         case ION_IOC_FREE:
1277         {
1278                 struct ion_handle *handle;
1279
1280                 handle = ion_handle_get_by_id(client, data.handle.handle);
1281                 if (IS_ERR(handle))
1282                         return PTR_ERR(handle);
1283                 ion_free(client, handle);
1284                 ion_handle_put(handle);
1285                 break;
1286         }
1287         case ION_IOC_SHARE:
1288         case ION_IOC_MAP:
1289         {
1290                 struct ion_handle *handle;
1291
1292                 handle = ion_handle_get_by_id(client, data.handle.handle);
1293                 if (IS_ERR(handle))
1294                         return PTR_ERR(handle);
1295                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1296                 ion_handle_put(handle);
1297                 if (data.fd.fd < 0)
1298                         ret = data.fd.fd;
1299                 break;
1300         }
1301         case ION_IOC_IMPORT:
1302         {
1303                 struct ion_handle *handle;
1304
1305                 handle = ion_import_dma_buf(client, data.fd.fd);
1306                 if (IS_ERR(handle))
1307                         ret = PTR_ERR(handle);
1308                 else
1309                         data.handle.handle = handle->id;
1310                 break;
1311         }
1312         case ION_IOC_SYNC:
1313         {
1314                 ret = ion_sync_for_device(client, data.fd.fd);
1315                 break;
1316         }
1317         case ION_IOC_CUSTOM:
1318         {
1319                 if (!dev->custom_ioctl)
1320                         return -ENOTTY;
1321                 ret = dev->custom_ioctl(client, data.custom.cmd,
1322                                                 data.custom.arg);
1323                 break;
1324         }
1325         default:
1326                 return -ENOTTY;
1327         }
1328
1329         if (dir & _IOC_READ) {
1330                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1331                         if (cleanup_handle)
1332                                 ion_free(client, cleanup_handle);
1333                         return -EFAULT;
1334                 }
1335         }
1336         return ret;
1337 }
1338
1339 static int ion_release(struct inode *inode, struct file *file)
1340 {
1341         struct ion_client *client = file->private_data;
1342
1343         pr_debug("%s: %d\n", __func__, __LINE__);
1344         ion_client_destroy(client);
1345         return 0;
1346 }
1347
1348 static int ion_open(struct inode *inode, struct file *file)
1349 {
1350         struct miscdevice *miscdev = file->private_data;
1351         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1352         struct ion_client *client;
1353         char debug_name[64];
1354
1355         pr_debug("%s: %d\n", __func__, __LINE__);
1356         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1357         client = ion_client_create(dev, debug_name);
1358         if (IS_ERR(client))
1359                 return PTR_ERR(client);
1360         file->private_data = client;
1361
1362         return 0;
1363 }
1364
1365 static const struct file_operations ion_fops = {
1366         .owner          = THIS_MODULE,
1367         .open           = ion_open,
1368         .release        = ion_release,
1369         .unlocked_ioctl = ion_ioctl,
1370         .compat_ioctl   = compat_ion_ioctl,
1371 };
1372
1373 static size_t ion_debug_heap_total(struct ion_client *client,
1374                                    unsigned int id)
1375 {
1376         size_t size = 0;
1377         struct rb_node *n;
1378
1379         mutex_lock(&client->lock);
1380         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1381                 struct ion_handle *handle = rb_entry(n,
1382                                                      struct ion_handle,
1383                                                      node);
1384                 if (handle->buffer->heap->id == id)
1385                         size += handle->buffer->size;
1386         }
1387         mutex_unlock(&client->lock);
1388         return size;
1389 }
1390
1391 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1392 {
1393         struct ion_heap *heap = s->private;
1394         struct ion_device *dev = heap->dev;
1395         struct rb_node *n;
1396         size_t total_size = 0;
1397         size_t total_orphaned_size = 0;
1398
1399         seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1400         seq_puts(s, "----------------------------------------------------\n");
1401
1402         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1403                 struct ion_client *client = rb_entry(n, struct ion_client,
1404                                                      node);
1405                 size_t size = ion_debug_heap_total(client, heap->id);
1406
1407                 if (!size)
1408                         continue;
1409                 if (client->task) {
1410                         char task_comm[TASK_COMM_LEN];
1411
1412                         get_task_comm(task_comm, client->task);
1413                         seq_printf(s, "%16.s %16u %16zu\n", task_comm,
1414                                    client->pid, size);
1415                 } else {
1416                         seq_printf(s, "%16.s %16u %16zu\n", client->name,
1417                                    client->pid, size);
1418                 }
1419         }
1420         seq_puts(s, "----------------------------------------------------\n");
1421         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1422         mutex_lock(&dev->buffer_lock);
1423         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1424                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1425                                                      node);
1426                 if (buffer->heap->id != heap->id)
1427                         continue;
1428                 total_size += buffer->size;
1429                 if (!buffer->handle_count) {
1430                         seq_printf(s, "%16.s %16u %16zu %d %d\n",
1431                                    buffer->task_comm, buffer->pid,
1432                                    buffer->size, buffer->kmap_cnt,
1433                                    atomic_read(&buffer->ref.refcount));
1434                         total_orphaned_size += buffer->size;
1435                 }
1436         }
1437         mutex_unlock(&dev->buffer_lock);
1438         seq_puts(s, "----------------------------------------------------\n");
1439         seq_printf(s, "%16.s %16zu\n", "total orphaned",
1440                    total_orphaned_size);
1441         seq_printf(s, "%16.s %16zu\n", "total ", total_size);
1442         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1443                 seq_printf(s, "%16.s %16zu\n", "deferred free",
1444                                 heap->free_list_size);
1445         seq_puts(s, "----------------------------------------------------\n");
1446
1447         if (heap->debug_show)
1448                 heap->debug_show(heap, s, unused);
1449
1450         return 0;
1451 }
1452
1453 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1454 {
1455         return single_open(file, ion_debug_heap_show, inode->i_private);
1456 }
1457
1458 static const struct file_operations debug_heap_fops = {
1459         .open = ion_debug_heap_open,
1460         .read = seq_read,
1461         .llseek = seq_lseek,
1462         .release = single_release,
1463 };
1464
1465 #ifdef DEBUG_HEAP_SHRINKER
1466 static int debug_shrink_set(void *data, u64 val)
1467 {
1468         struct ion_heap *heap = data;
1469         struct shrink_control sc;
1470         int objs;
1471
1472         sc.gfp_mask = -1;
1473         sc.nr_to_scan = 0;
1474
1475         if (!val)
1476                 return 0;
1477
1478         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1479         sc.nr_to_scan = objs;
1480
1481         heap->shrinker.shrink(&heap->shrinker, &sc);
1482         return 0;
1483 }
1484
1485 static int debug_shrink_get(void *data, u64 *val)
1486 {
1487         struct ion_heap *heap = data;
1488         struct shrink_control sc;
1489         int objs;
1490
1491         sc.gfp_mask = -1;
1492         sc.nr_to_scan = 0;
1493
1494         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1495         *val = objs;
1496         return 0;
1497 }
1498
1499 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1500                         debug_shrink_set, "%llu\n");
1501 #endif
1502
1503 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1504 {
1505         struct dentry *debug_file;
1506
1507         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1508             !heap->ops->unmap_dma)
1509                 pr_err("%s: can not add heap with invalid ops struct.\n",
1510                        __func__);
1511
1512         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1513                 ion_heap_init_deferred_free(heap);
1514
1515         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1516                 ion_heap_init_shrinker(heap);
1517
1518         heap->dev = dev;
1519         down_write(&dev->lock);
1520         /* use negative heap->id to reverse the priority -- when traversing
1521            the list later, heaps with higher id numbers are attempted first */
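        /* For example, with two heaps of id 2 and id 4, plist priorities are
           -2 and -4 respectively; plists iterate from the lowest priority
           value upwards, so the id-4 heap is tried before the id-2 heap. */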
1522         plist_node_init(&heap->node, -heap->id);
1523         plist_add(&heap->node, &dev->heaps);
1524         debug_file = debugfs_create_file(heap->name, 0664,
1525                                         dev->heaps_debug_root, heap,
1526                                         &debug_heap_fops);
1527
1528         if (!debug_file) {
1529                 char buf[256], *path;
1530
1531                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1532                 pr_err("Failed to create heap debugfs at %s/%s\n",
1533                         path, heap->name);
1534         }
1535
1536 #ifdef DEBUG_HEAP_SHRINKER
1537         if (heap->shrinker.shrink) {
1538                 char debug_name[64];
1539
1540                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1541                 debug_file = debugfs_create_file(
1542                         debug_name, 0644, dev->heaps_debug_root, heap,
1543                         &debug_shrink_fops);
1544                 if (!debug_file) {
1545                         char buf[256], *path;
1546
1547                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1548                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1549                                 path, debug_name);
1550                 }
1551         }
1552 #endif
1553         up_write(&dev->lock);
1554 }
1555
1556 struct ion_device *ion_device_create(long (*custom_ioctl)
1557                                      (struct ion_client *client,
1558                                       unsigned int cmd,
1559                                       unsigned long arg))
1560 {
1561         struct ion_device *idev;
1562         int ret;
1563
1564         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1565         if (!idev)
1566                 return ERR_PTR(-ENOMEM);
1567
1568         idev->dev.minor = MISC_DYNAMIC_MINOR;
1569         idev->dev.name = "ion";
1570         idev->dev.fops = &ion_fops;
1571         idev->dev.parent = NULL;
1572         ret = misc_register(&idev->dev);
1573         if (ret) {
1574                 pr_err("ion: failed to register misc device.\n");
1575                 return ERR_PTR(ret);
1576         }
1577
1578         idev->debug_root = debugfs_create_dir("ion", NULL);
1579         if (!idev->debug_root) {
1580                 pr_err("ion: failed to create debugfs root directory.\n");
1581                 goto debugfs_done;
1582         }
1583         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1584         if (!idev->heaps_debug_root) {
1585                 pr_err("ion: failed to create debugfs heaps directory.\n");
1586                 goto debugfs_done;
1587         }
1588         idev->clients_debug_root = debugfs_create_dir("clients",
1589                                                 idev->debug_root);
1590         if (!idev->clients_debug_root)
1591                 pr_err("ion: failed to create debugfs clients directory.\n");
1592
1593 debugfs_done:
1594
1595         idev->custom_ioctl = custom_ioctl;
1596         idev->buffers = RB_ROOT;
1597         mutex_init(&idev->buffer_lock);
1598         init_rwsem(&idev->lock);
1599         plist_head_init(&idev->heaps);
1600         idev->clients = RB_ROOT;
1601         return idev;
1602 }
1603
1604 void ion_device_destroy(struct ion_device *dev)
1605 {
1606         misc_deregister(&dev->dev);
1607         debugfs_remove_recursive(dev->debug_root);
1608         /* XXX need to free the heaps and clients ? */
1609         kfree(dev);
1610 }
1611
1612 void __init ion_reserve(struct ion_platform_data *data)
1613 {
1614         int i;
1615
1616         for (i = 0; i < data->nr; i++) {
1617                 if (data->heaps[i].size == 0)
1618                         continue;
1619
1620                 if (data->heaps[i].base == 0) {
1621                         phys_addr_t paddr;
1622
1623                         paddr = memblock_alloc_base(data->heaps[i].size,
1624                                                     data->heaps[i].align,
1625                                                     MEMBLOCK_ALLOC_ANYWHERE);
1626                         if (!paddr) {
1627                                 pr_err("%s: error allocating memblock for heap %d\n",
1628                                         __func__, i);
1629                                 continue;
1630                         }
1631                         data->heaps[i].base = paddr;
1632                 } else {
1633                         int ret = memblock_reserve(data->heaps[i].base,
1634                                                data->heaps[i].size);
1635                         if (ret)
1636                                 pr_err("memblock reserve of %zx@%lx failed\n",
1637                                        data->heaps[i].size,
1638                                        data->heaps[i].base);
1639                 }
1640                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1641                         data->heaps[i].name,
1642                         data->heaps[i].base,
1643                         data->heaps[i].size);
1644         }
1645 }
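/*
 * A board file would typically pass ion_reserve() platform data along
 * these lines (illustrative only -- heap type, id, name, sizes and
 * alignment are platform decisions):
 *
 *	static struct ion_platform_heap board_heaps[] = {
 *		{
 *			.type  = ION_HEAP_TYPE_CARVEOUT,
 *			.id    = 1,
 *			.name  = "carveout",
 *			.base  = 0,	-- 0 lets ion_reserve() allocate it
 *			.size  = SZ_16M,
 *			.align = SZ_1M,
 *		},
 *	};
 *	static struct ion_platform_data board_ion_pdata = {
 *		.nr    = ARRAY_SIZE(board_heaps),
 *		.heaps = board_heaps,
 *	};
 *
 *	ion_reserve(&board_ion_pdata);
 */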