]> Git Repo - linux.git/blame - drivers/android/binder.c
binder: move binder_alloc to separate file
[linux.git] / drivers / android / binder.c
CommitLineData
355b0502
GKH
1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
56b468fc
AS
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
355b0502
GKH
20#include <asm/cacheflush.h>
21#include <linux/fdtable.h>
22#include <linux/file.h>
e2610b26 23#include <linux/freezer.h>
355b0502
GKH
24#include <linux/fs.h>
25#include <linux/list.h>
26#include <linux/miscdevice.h>
355b0502
GKH
27#include <linux/module.h>
28#include <linux/mutex.h>
29#include <linux/nsproxy.h>
30#include <linux/poll.h>
16b66554 31#include <linux/debugfs.h>
355b0502 32#include <linux/rbtree.h>
3f07c014 33#include <linux/sched/signal.h>
6e84f315 34#include <linux/sched/mm.h>
5249f488 35#include <linux/seq_file.h>
355b0502 36#include <linux/uaccess.h>
17cf22c3 37#include <linux/pid_namespace.h>
79af7307 38#include <linux/security.h>
355b0502 39
9246a4a9
GKH
40#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
41#define BINDER_IPC_32BIT 1
42#endif
43
44#include <uapi/linux/android/binder.h>
0c972a05 45#include "binder_alloc.h"
975a1ac9 46#include "binder_trace.h"
355b0502 47
/* Single coarse lock serializing essentially all binder driver state */
static DEFINE_MUTEX(binder_main_lock);
/* Protects binder_deferred_list and each proc's deferred_work flags */
static DEFINE_MUTEX(binder_deferred_lock);

/* All binder_device instances created from the "devices" module param */
static HLIST_HEAD(binder_devices);
/* Every live binder_proc (one per process that opened a binder device) */
static HLIST_HEAD(binder_procs);
/* Procs with deferred work pending (flush / release / put-files) */
static HLIST_HEAD(binder_deferred_list);
/* Nodes whose owning proc has exited but that are still referenced */
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
/* Monotonically increasing debug id shared by nodes, refs and transactions */
static int binder_last_id;
59
5249f488
AH
/*
 * BINDER_DEBUG_ENTRY(name) - emit the open handler and file_operations
 * for a read-only debugfs file whose contents come from
 * binder_##name##_show() via the single_open() seq_file helper.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
355b0502
GKH
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

/* binder mmap()s must never be writable from userspace */
#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

/* NOTE(review): buffer-size headroom constant; usage not visible in this chunk */
#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
/* Bit flags selecting which categories of binder_debug() output print */
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
};
/* default: only user errors and failed/dead transactions are reported */
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
/* When set, the debugfs "proc" dumps skip taking binder_main_lock */
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

/* Comma-separated device names to register, e.g. "binder,hwbinder" */
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
355b0502
GKH
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
/*
 * Debug aid: binder_user_error() sets this to 2 when it was non-zero,
 * which is presumably used elsewhere to stall offending threads on
 * binder_user_error_wait (the wait loop is not visible in this chunk).
 */
static int binder_stop_on_user_error;

/*
 * Module-param setter for "stop_on_user_error": store the new value and,
 * if it dropped below 2, release anyone parked on binder_user_error_wait.
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
131
/* Print only when the corresponding BINDER_DEBUG_* bit is enabled */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/*
 * Report a userspace protocol violation; additionally arm the
 * stop_on_user_error debug stall when that facility is enabled.
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
145
/*
 * Downcast helpers: recover the full object from the embedded
 * binder_object_header after binder_validate_object() has checked
 * hdr->type and the object's size.
 */
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
156
/* Object classes whose create/delete counts binder_stats tracks */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT	/* must be last: array dimension below */
};
167
struct binder_stats {
	/* per-code counters, indexed by _IOC_NR() of the BR_*/BC_* value */
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_REPLY_SG) + 1];
	/* live objects of a type = obj_created[t] - obj_deleted[t] */
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

/* Driver-wide totals; binder_proc also embeds a per-proc copy */
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}
186
/*
 * Fixed-size ring buffers recording recent transactions (and recent
 * failed transactions) for the debugfs transaction-log files.
 */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};
struct binder_transaction_log {
	int next;	/* slot the next entry will be written into */
	int full;	/* set once the ring has wrapped at least once */
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

/*
 * Claim the next slot of @log, zero it, and advance the cursor,
 * wrapping to slot 0 (and latching ->full) at the end of the ring.
 * Returns the zeroed entry for the caller to fill in.
 */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	const int nslots = sizeof(log->entry) / sizeof(log->entry[0]);
	struct binder_transaction_log_entry *slot = &log->entry[log->next];

	memset(slot, 0, sizeof(*slot));
	if (++log->next == nslots) {
		log->next = 0;
		log->full = 1;
	}
	return slot;
}
222
/*
 * struct binder_context - state shared by all procs using one binder
 * device: the registered context manager ("servicemanager") node, the
 * uid allowed to register it, and the device name for debug output.
 */
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

/* One registered /dev/<name> misc device plus its private context */
struct binder_device {
	struct hlist_node hlist;	/* entry in binder_devices */
	struct miscdevice miscdev;
	struct binder_context context;
};
234
355b0502
GKH
/*
 * struct binder_work - an item queued on a proc/thread/node todo list;
 * embedded in larger objects and discriminated by ->type.
 */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
246
/*
 * struct binder_node - a local object offered over binder.  Reference
 * counts are split into "internal" (held on behalf of remote refs) and
 * "local" strong/weak counts; the has_*/pending_* bits track what the
 * owning userspace process has been told about them so far.
 */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		/* live: in proc->nodes; proc dead: in binder_dead_nodes */
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;		/* every binder_ref targeting this node */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;		/* userspace address; key in proc->nodes */
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};
270
/*
 * Pending death notification for a ref; ->cookie is the userspace
 * token passed when the notification was requested.
 */
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};
275
/*
 * struct binder_ref - one proc's handle ("desc") to a binder_node,
 * indexed twice per proc so all three lookups below stay cheap.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;			/* the userspace-visible handle */
	int strong;
	int weak;
	struct binder_ref_death *death;	/* non-NULL if death notice requested */
};
292
355b0502
GKH
/* Bits stored in proc->deferred_work for the deferred-work worker */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
298
/*
 * struct binder_proc - driver state for one open of a binder device
 * (i.e. one client process).
 */
struct binder_proc {
	struct hlist_node proc_node;	/* entry in global binder_procs */
	struct rb_root threads;		/* binder_thread tree, keyed by pid */
	struct rb_root nodes;		/* binder_node tree, keyed by ptr */
	struct rb_root refs_by_desc;	/* binder_ref tree, keyed by desc */
	struct rb_root refs_by_node;	/* same refs, keyed by node pointer */
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;	/* NULL once released/put */
	struct hlist_node deferred_work_node;
	int deferred_work;		/* enum binder_deferred_state bits */

	struct list_head todo;		/* work any thread of the proc may run */
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;	/* per-proc transaction buffer allocator */
	struct binder_context *context;	/* the device this proc opened */
};
324
/* Bits for binder_thread->looper describing the thread's loop state */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
333
/*
 * struct binder_thread - per-thread state of a binder_proc member,
 * keyed by pid in proc->threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;			/* BINDER_LOOPER_STATE_* bits */
	/* chain of transactions this thread is currently nested in */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};
348
/*
 * struct binder_transaction - an in-flight call or reply between two
 * procs; from/to pointers link it into the threads' transaction stacks.
 */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;	/* payload, owned by to_proc's alloc */
	unsigned int code;
	unsigned int flags;
	long priority;			/* sender's nice value to apply */
	long saved_priority;		/* target's nice value to restore */
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
/*
 * Allocate a free fd in @proc's file table — like get_unused_fd_flags()
 * but for a task other than current.  Returns the new fd, -ESRCH when
 * the proc's files are already gone, or -EMFILE when the task's sighand
 * cannot be locked to read its RLIMIT_NOFILE.
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	/* sighand lock guards the signal struct holding the rlimits */
	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
388
389/*
390 * copied from fd_install
391 */
392static void task_fd_install(
393 struct binder_proc *proc, unsigned int fd, struct file *file)
394{
f869e8a7
AV
395 if (proc->files)
396 __fd_install(proc->files, fd, file);
355b0502
GKH
397}
398
399/*
400 * copied from sys_close
401 */
402static long task_close_fd(struct binder_proc *proc, unsigned int fd)
403{
355b0502
GKH
404 int retval;
405
483ce1d4 406 if (proc->files == NULL)
355b0502
GKH
407 return -ESRCH;
408
483ce1d4 409 retval = __close_fd(proc->files, fd);
355b0502
GKH
410 /* can't restart close syscall because file table entry was cleared */
411 if (unlikely(retval == -ERESTARTSYS ||
412 retval == -ERESTARTNOINTR ||
413 retval == -ERESTARTNOHAND ||
414 retval == -ERESTART_RESTARTBLOCK))
415 retval = -EINTR;
416
417 return retval;
355b0502
GKH
418}
419
/*
 * Take/release the single global binder mutex; the surrounding trace
 * events record both the attempt and the acquisition so lock
 * contention can be observed per call site via @tag.
 */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
432
355b0502
GKH
/*
 * Set current's nice value (used to propagate transaction priority).
 * If @nice is not permitted, fall back to the best value RLIMIT_NICE
 * allows and log the cap; complain if even that limit is unusable.
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
450
355b0502 451static struct binder_node *binder_get_node(struct binder_proc *proc,
da49889d 452 binder_uintptr_t ptr)
355b0502
GKH
453{
454 struct rb_node *n = proc->nodes.rb_node;
455 struct binder_node *node;
456
457 while (n) {
458 node = rb_entry(n, struct binder_node, rb_node);
459
460 if (ptr < node->ptr)
461 n = n->rb_left;
462 else if (ptr > node->ptr)
463 n = n->rb_right;
464 else
465 return node;
466 }
467 return NULL;
468}
469
/*
 * Create a binder_node for (@ptr, @cookie) and insert it into @proc's
 * node tree.  Returns NULL on allocation failure, and also NULL if a
 * node with the same @ptr already exists (callers look it up first).
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	/* find the insertion point; bail if ptr is already present */
	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}
509
/*
 * Take a reference on @node.  @strong selects strong vs weak;
 * @internal selects the internal (on behalf of a remote ref) vs local
 * count.  When the node needs to notify its owner of a new ref state,
 * its work item is queued on @target_list.  Returns 0 or -EINVAL when
 * a ref is taken in a state that would require a list but none was
 * supplied.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * The context manager node keeps a strong ref
			 * without a target list; anything else must
			 * supply one for the first internal strong ref.
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* requeue so the owner is told about the strong ref */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
545
/*
 * Drop a reference on @node (see binder_inc_node for the meaning of
 * @strong/@internal).  When the last relevant count reaches zero the
 * owner is either asked to drop its userspace ref (node still has
 * has_strong_ref/has_weak_ref) or, once nothing references the node at
 * all, the node is unlinked and freed.  Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* owner still holds userspace refs: queue work to drop them */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* proc already died; node was on dead list */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
588
589
590static struct binder_ref *binder_get_ref(struct binder_proc *proc,
0a3ffab9 591 u32 desc, bool need_strong_ref)
355b0502
GKH
592{
593 struct rb_node *n = proc->refs_by_desc.rb_node;
594 struct binder_ref *ref;
595
596 while (n) {
597 ref = rb_entry(n, struct binder_ref, rb_node_desc);
598
0a3ffab9 599 if (desc < ref->desc) {
355b0502 600 n = n->rb_left;
0a3ffab9 601 } else if (desc > ref->desc) {
355b0502 602 n = n->rb_right;
0a3ffab9
AH
603 } else if (need_strong_ref && !ref->strong) {
604 binder_user_error("tried to use weak ref as strong ref\n");
605 return NULL;
606 } else {
355b0502 607 return ref;
0a3ffab9 608 }
355b0502
GKH
609 }
610 return NULL;
611}
612
/*
 * Return @proc's ref to @node, creating one if none exists yet.  A new
 * ref gets the lowest free descriptor (0 is reserved for the context
 * manager node) and is inserted into both per-proc ref trees and the
 * node's ref list.  Returns NULL on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;

	/* fast path: an existing ref keyed by node address */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* pick the smallest unused desc; 0 only for the context manager */
	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* insert into the desc-keyed tree; desc was just proven unique */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
679
/*
 * Tear down @ref: unlink it from both per-proc trees and the node's
 * ref list, drop the node counts it held (a strong count if any, plus
 * the weak count every ref implies), and discard any pending death
 * notification before freeing the ref itself.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
704
705static int binder_inc_ref(struct binder_ref *ref, int strong,
706 struct list_head *target_list)
707{
708 int ret;
10f62861 709
355b0502
GKH
710 if (strong) {
711 if (ref->strong == 0) {
712 ret = binder_inc_node(ref->node, 1, 1, target_list);
713 if (ret)
714 return ret;
715 }
716 ref->strong++;
717 } else {
718 if (ref->weak == 0) {
719 ret = binder_inc_node(ref->node, 0, 1, target_list);
720 if (ret)
721 return ret;
722 }
723 ref->weak++;
724 }
725 return 0;
726}
727
728
/*
 * Decrement @ref's strong or weak count, dropping the matching node
 * reference on the 1 -> 0 transition and deleting the ref entirely
 * once both counts reach zero.  Returns -EINVAL (as a user error) on
 * an underflowing decrement.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
759
/*
 * Unwind transaction @t: pop it off @target_thread's transaction stack
 * (when the sender still exists), detach it from its buffer, and free
 * it.  @target_thread may be NULL when the sender is already gone.
 */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		/* t must be the top of the sender's stack */
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
776
/*
 * Deliver failure code @error_code for transaction @t back toward the
 * original caller.  If the immediate sender thread is dead, walk up
 * the from_parent chain and fail the nearest live ancestor instead,
 * popping each dead link on the way.  Must not be called for one-way
 * transactions (they have no reply path).
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/* preserve an earlier pending error in return_error2 */
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
829
feba3900
MC
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *	   size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	/*
	 * data_size < sizeof(*hdr) also guards the unsigned subtraction
	 * on the line above it from wrapping around.
	 */
	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	/* the full object must fit inside the buffer's data area */
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
876
7980240b
MC
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	/* the indexed offset itself was validated earlier (see above) */
	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
912
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	/* climb last_obj's parent chain until we reach @buffer (if ever) */
	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
976
355b0502
GKH
977static void binder_transaction_buffer_release(struct binder_proc *proc,
978 struct binder_buffer *buffer,
da49889d 979 binder_size_t *failed_at)
355b0502 980{
7980240b 981 binder_size_t *offp, *off_start, *off_end;
355b0502
GKH
982 int debug_id = buffer->debug_id;
983
984 binder_debug(BINDER_DEBUG_TRANSACTION,
56b468fc 985 "%d buffer release %d, size %zd-%zd, failed at %p\n",
355b0502
GKH
986 proc->pid, buffer->debug_id,
987 buffer->data_size, buffer->offsets_size, failed_at);
988
989 if (buffer->target_node)
990 binder_dec_node(buffer->target_node, 1, 0);
991
7980240b
MC
992 off_start = (binder_size_t *)(buffer->data +
993 ALIGN(buffer->data_size, sizeof(void *)));
355b0502
GKH
994 if (failed_at)
995 off_end = failed_at;
996 else
7980240b
MC
997 off_end = (void *)off_start + buffer->offsets_size;
998 for (offp = off_start; offp < off_end; offp++) {
feba3900
MC
999 struct binder_object_header *hdr;
1000 size_t object_size = binder_validate_object(buffer, *offp);
10f62861 1001
feba3900
MC
1002 if (object_size == 0) {
1003 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
da49889d 1004 debug_id, (u64)*offp, buffer->data_size);
355b0502
GKH
1005 continue;
1006 }
feba3900
MC
1007 hdr = (struct binder_object_header *)(buffer->data + *offp);
1008 switch (hdr->type) {
355b0502
GKH
1009 case BINDER_TYPE_BINDER:
1010 case BINDER_TYPE_WEAK_BINDER: {
feba3900
MC
1011 struct flat_binder_object *fp;
1012 struct binder_node *node;
10f62861 1013
feba3900
MC
1014 fp = to_flat_binder_object(hdr);
1015 node = binder_get_node(proc, fp->binder);
355b0502 1016 if (node == NULL) {
da49889d
AH
1017 pr_err("transaction release %d bad node %016llx\n",
1018 debug_id, (u64)fp->binder);
355b0502
GKH
1019 break;
1020 }
1021 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d
AH
1022 " node %d u%016llx\n",
1023 node->debug_id, (u64)node->ptr);
feba3900
MC
1024 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1025 0);
355b0502
GKH
1026 } break;
1027 case BINDER_TYPE_HANDLE:
1028 case BINDER_TYPE_WEAK_HANDLE: {
feba3900 1029 struct flat_binder_object *fp;
0a3ffab9
AH
1030 struct binder_ref *ref;
1031
feba3900 1032 fp = to_flat_binder_object(hdr);
0a3ffab9 1033 ref = binder_get_ref(proc, fp->handle,
feba3900 1034 hdr->type == BINDER_TYPE_HANDLE);
355b0502 1035 if (ref == NULL) {
64dcfe6b 1036 pr_err("transaction release %d bad handle %d\n",
56b468fc 1037 debug_id, fp->handle);
355b0502
GKH
1038 break;
1039 }
1040 binder_debug(BINDER_DEBUG_TRANSACTION,
1041 " ref %d desc %d (node %d)\n",
1042 ref->debug_id, ref->desc, ref->node->debug_id);
feba3900 1043 binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
355b0502
GKH
1044 } break;
1045
feba3900
MC
1046 case BINDER_TYPE_FD: {
1047 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1048
355b0502 1049 binder_debug(BINDER_DEBUG_TRANSACTION,
feba3900 1050 " fd %d\n", fp->fd);
355b0502 1051 if (failed_at)
feba3900
MC
1052 task_close_fd(proc, fp->fd);
1053 } break;
7980240b
MC
1054 case BINDER_TYPE_PTR:
1055 /*
1056 * Nothing to do here, this will get cleaned up when the
1057 * transaction buffer gets freed
1058 */
1059 break;
def95c73
MC
1060 case BINDER_TYPE_FDA: {
1061 struct binder_fd_array_object *fda;
1062 struct binder_buffer_object *parent;
1063 uintptr_t parent_buffer;
1064 u32 *fd_array;
1065 size_t fd_index;
1066 binder_size_t fd_buf_size;
1067
1068 fda = to_binder_fd_array_object(hdr);
1069 parent = binder_validate_ptr(buffer, fda->parent,
1070 off_start,
1071 offp - off_start);
1072 if (!parent) {
1073 pr_err("transaction release %d bad parent offset",
1074 debug_id);
1075 continue;
1076 }
1077 /*
1078 * Since the parent was already fixed up, convert it
1079 * back to kernel address space to access it
1080 */
1081 parent_buffer = parent->buffer -
19c98724
TK
1082 binder_alloc_get_user_buffer_offset(
1083 &proc->alloc);
def95c73
MC
1084
1085 fd_buf_size = sizeof(u32) * fda->num_fds;
1086 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1087 pr_err("transaction release %d invalid number of fds (%lld)\n",
1088 debug_id, (u64)fda->num_fds);
1089 continue;
1090 }
1091 if (fd_buf_size > parent->length ||
1092 fda->parent_offset > parent->length - fd_buf_size) {
1093 /* No space for all file descriptors here. */
1094 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1095 debug_id, (u64)fda->num_fds);
1096 continue;
1097 }
1098 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1099 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1100 task_close_fd(proc, fd_array[fd_index]);
1101 } break;
355b0502 1102 default:
64dcfe6b 1103 pr_err("transaction release %d bad object type %x\n",
feba3900 1104 debug_id, hdr->type);
355b0502
GKH
1105 break;
1106 }
1107 }
1108}
1109
a056af42
MC
1110static int binder_translate_binder(struct flat_binder_object *fp,
1111 struct binder_transaction *t,
1112 struct binder_thread *thread)
1113{
1114 struct binder_node *node;
1115 struct binder_ref *ref;
1116 struct binder_proc *proc = thread->proc;
1117 struct binder_proc *target_proc = t->to_proc;
1118
1119 node = binder_get_node(proc, fp->binder);
1120 if (!node) {
1121 node = binder_new_node(proc, fp->binder, fp->cookie);
1122 if (!node)
1123 return -ENOMEM;
1124
1125 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1126 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1127 }
1128 if (fp->cookie != node->cookie) {
1129 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1130 proc->pid, thread->pid, (u64)fp->binder,
1131 node->debug_id, (u64)fp->cookie,
1132 (u64)node->cookie);
1133 return -EINVAL;
1134 }
1135 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1136 return -EPERM;
1137
1138 ref = binder_get_ref_for_node(target_proc, node);
1139 if (!ref)
1140 return -EINVAL;
1141
1142 if (fp->hdr.type == BINDER_TYPE_BINDER)
1143 fp->hdr.type = BINDER_TYPE_HANDLE;
1144 else
1145 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1146 fp->binder = 0;
1147 fp->handle = ref->desc;
1148 fp->cookie = 0;
1149 binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
1150
1151 trace_binder_transaction_node_to_ref(t, node, ref);
1152 binder_debug(BINDER_DEBUG_TRANSACTION,
1153 " node %d u%016llx -> ref %d desc %d\n",
1154 node->debug_id, (u64)node->ptr,
1155 ref->debug_id, ref->desc);
1156
1157 return 0;
1158}
1159
1160static int binder_translate_handle(struct flat_binder_object *fp,
1161 struct binder_transaction *t,
1162 struct binder_thread *thread)
1163{
1164 struct binder_ref *ref;
1165 struct binder_proc *proc = thread->proc;
1166 struct binder_proc *target_proc = t->to_proc;
1167
1168 ref = binder_get_ref(proc, fp->handle,
1169 fp->hdr.type == BINDER_TYPE_HANDLE);
1170 if (!ref) {
1171 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1172 proc->pid, thread->pid, fp->handle);
1173 return -EINVAL;
1174 }
1175 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1176 return -EPERM;
1177
1178 if (ref->node->proc == target_proc) {
1179 if (fp->hdr.type == BINDER_TYPE_HANDLE)
1180 fp->hdr.type = BINDER_TYPE_BINDER;
1181 else
1182 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
1183 fp->binder = ref->node->ptr;
1184 fp->cookie = ref->node->cookie;
1185 binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
1186 0, NULL);
1187 trace_binder_transaction_ref_to_node(t, ref);
1188 binder_debug(BINDER_DEBUG_TRANSACTION,
1189 " ref %d desc %d -> node %d u%016llx\n",
1190 ref->debug_id, ref->desc, ref->node->debug_id,
1191 (u64)ref->node->ptr);
1192 } else {
1193 struct binder_ref *new_ref;
1194
1195 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1196 if (!new_ref)
1197 return -EINVAL;
1198
1199 fp->binder = 0;
1200 fp->handle = new_ref->desc;
1201 fp->cookie = 0;
1202 binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
1203 NULL);
1204 trace_binder_transaction_ref_to_ref(t, ref, new_ref);
1205 binder_debug(BINDER_DEBUG_TRANSACTION,
1206 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1207 ref->debug_id, ref->desc, new_ref->debug_id,
1208 new_ref->desc, ref->node->debug_id);
1209 }
1210 return 0;
1211}
1212
1213static int binder_translate_fd(int fd,
1214 struct binder_transaction *t,
1215 struct binder_thread *thread,
1216 struct binder_transaction *in_reply_to)
1217{
1218 struct binder_proc *proc = thread->proc;
1219 struct binder_proc *target_proc = t->to_proc;
1220 int target_fd;
1221 struct file *file;
1222 int ret;
1223 bool target_allows_fd;
1224
1225 if (in_reply_to)
1226 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1227 else
1228 target_allows_fd = t->buffer->target_node->accept_fds;
1229 if (!target_allows_fd) {
1230 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1231 proc->pid, thread->pid,
1232 in_reply_to ? "reply" : "transaction",
1233 fd);
1234 ret = -EPERM;
1235 goto err_fd_not_accepted;
1236 }
1237
1238 file = fget(fd);
1239 if (!file) {
1240 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1241 proc->pid, thread->pid, fd);
1242 ret = -EBADF;
1243 goto err_fget;
1244 }
1245 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1246 if (ret < 0) {
1247 ret = -EPERM;
1248 goto err_security;
1249 }
1250
1251 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1252 if (target_fd < 0) {
1253 ret = -ENOMEM;
1254 goto err_get_unused_fd;
1255 }
1256 task_fd_install(target_proc, target_fd, file);
1257 trace_binder_transaction_fd(t, fd, target_fd);
1258 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
1259 fd, target_fd);
1260
1261 return target_fd;
1262
1263err_get_unused_fd:
1264err_security:
1265 fput(file);
1266err_fget:
1267err_fd_not_accepted:
1268 return ret;
1269}
1270
/*
 * binder_translate_fd_array() - translate every fd in a BINDER_TYPE_FDA
 * @fda:	the fd-array object from the transaction buffer
 * @parent:	the (already validated and fixed-up) buffer object that
 *		actually contains the u32 fd values
 * @t:		transaction carrying the array
 * @thread:	sending thread
 * @in_reply_to: original transaction if this is a reply, NULL otherwise
 *
 * Validates the array bounds against @parent, then translates each fd
 * in place via binder_translate_fd().  On any failure, the fds already
 * installed in the target are closed again before returning.
 *
 * Return: 0 on success, -EINVAL on bad bounds, or the negative error
 * from the failing binder_translate_fd() call.
 */
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	/* Reject counts whose byte size would overflow size_t */
	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/* Array must fit entirely inside the parent buffer */
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	/* Translate each fd in place; stop on the first failure */
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	/* target_fd holds the negative errno from the failing translate */
	return target_fd;
}
/*
 * binder_fixup_parent() - write a translated buffer pointer into its parent
 * @t:			transaction being processed
 * @thread:		sending thread
 * @bp:			buffer object whose (already translated) address must
 *			be patched into its parent buffer
 * @off_start:		start of the transaction's offsets array
 * @num_valid:		number of offsets already validated (objects before @bp)
 * @last_fixup_obj:	last buffer object that had a pointer fixup applied
 * @last_fixup_min_off:	minimum allowed fixup offset within that object
 *
 * No-op unless @bp declares BINDER_BUFFER_FLAG_HAS_PARENT.  Otherwise
 * validates the parent reference and the fixup ordering/bounds, then
 * stores @bp->buffer (target-process address) at the requested offset
 * inside the parent.
 *
 * Return: 0 on success, -EINVAL on any validation failure.
 */
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	/* Fixups must be applied in strictly increasing buffer order */
	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	/*
	 * parent->buffer already holds a target-process user address;
	 * convert it back to a kernel address before writing through it.
	 */
	parent_buffer = (u8 *)(parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
/*
 * binder_transaction() - core send path for BC_TRANSACTION/BC_REPLY
 * @proc:	sending process
 * @thread:	sending thread
 * @tr:		userspace-supplied transaction descriptor
 * @reply:	nonzero if this is a BC_REPLY(_SG)
 * @extra_buffers_size: size of trailing scatter-gather buffer space
 *			(0 for the non-SG commands)
 *
 * Resolves the target, allocates and fills the target-side buffer,
 * translates every embedded object (binders, handles, fds, fd arrays,
 * sg buffers), and queues the work on the target's todo list.  Errors
 * are reported back to the sender via thread->return_error (and, for a
 * failed reply, binder_send_failed_reply()); hence the void return.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;

	/* Record the attempt in the global transaction log (debugfs) */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		/*
		 * Replies go back to the thread that sent the transaction
		 * sitting on top of our transaction stack.
		 */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		/* Outbound transaction: resolve the target node */
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			/* handle 0 addresses the context manager */
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			/*
			 * For synchronous calls, walk our call stack; if the
			 * target process already waits on us somewhere in the
			 * chain, deliver to that thread to avoid deadlock.
			 */
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	/* One-way transactions record no sender (no reply expected) */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	/* Buffer lives in the TARGET's address space */
	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	/* Offsets array sits just past the (aligned) data payload */
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	/* Scatter-gather space follows the offsets array */
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	/*
	 * Walk every object offset; offsets must be strictly increasing
	 * (enforced via off_min) and reference valid, whole objects.
	 */
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	/* All objects translated: hook into the call stacks and queue */
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/*
		 * Async transactions to a node serialize: while one is in
		 * flight, later ones queue on the node's async_todo and no
		 * wakeup is issued here.
		 */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait) {
		if (reply || !(t->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	return;

/*
 * Error unwinding: labels are ordered so each stage frees exactly what
 * was acquired before its failure point.
 */
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	/* offp marks how far translation got; release only those objects */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		/* Failed reply: notify the original caller it is dead */
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}
1802
fb07ebc3
BP
1803static int binder_thread_write(struct binder_proc *proc,
1804 struct binder_thread *thread,
da49889d
AH
1805 binder_uintptr_t binder_buffer, size_t size,
1806 binder_size_t *consumed)
355b0502
GKH
1807{
1808 uint32_t cmd;
342e5c90 1809 struct binder_context *context = proc->context;
da49889d 1810 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
1811 void __user *ptr = buffer + *consumed;
1812 void __user *end = buffer + size;
1813
1814 while (ptr < end && thread->return_error == BR_OK) {
1815 if (get_user(cmd, (uint32_t __user *)ptr))
1816 return -EFAULT;
1817 ptr += sizeof(uint32_t);
975a1ac9 1818 trace_binder_command(cmd);
355b0502
GKH
1819 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1820 binder_stats.bc[_IOC_NR(cmd)]++;
1821 proc->stats.bc[_IOC_NR(cmd)]++;
1822 thread->stats.bc[_IOC_NR(cmd)]++;
1823 }
1824 switch (cmd) {
1825 case BC_INCREFS:
1826 case BC_ACQUIRE:
1827 case BC_RELEASE:
1828 case BC_DECREFS: {
1829 uint32_t target;
1830 struct binder_ref *ref;
1831 const char *debug_string;
1832
1833 if (get_user(target, (uint32_t __user *)ptr))
1834 return -EFAULT;
1835 ptr += sizeof(uint32_t);
342e5c90 1836 if (target == 0 && context->binder_context_mgr_node &&
355b0502
GKH
1837 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1838 ref = binder_get_ref_for_node(proc,
342e5c90 1839 context->binder_context_mgr_node);
355b0502 1840 if (ref->desc != target) {
56b468fc 1841 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
355b0502
GKH
1842 proc->pid, thread->pid,
1843 ref->desc);
1844 }
1845 } else
0a3ffab9
AH
1846 ref = binder_get_ref(proc, target,
1847 cmd == BC_ACQUIRE ||
1848 cmd == BC_RELEASE);
355b0502 1849 if (ref == NULL) {
56b468fc 1850 binder_user_error("%d:%d refcount change on invalid ref %d\n",
355b0502
GKH
1851 proc->pid, thread->pid, target);
1852 break;
1853 }
1854 switch (cmd) {
1855 case BC_INCREFS:
1856 debug_string = "IncRefs";
1857 binder_inc_ref(ref, 0, NULL);
1858 break;
1859 case BC_ACQUIRE:
1860 debug_string = "Acquire";
1861 binder_inc_ref(ref, 1, NULL);
1862 break;
1863 case BC_RELEASE:
1864 debug_string = "Release";
1865 binder_dec_ref(ref, 1);
1866 break;
1867 case BC_DECREFS:
1868 default:
1869 debug_string = "DecRefs";
1870 binder_dec_ref(ref, 0);
1871 break;
1872 }
1873 binder_debug(BINDER_DEBUG_USER_REFS,
56b468fc 1874 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
1875 proc->pid, thread->pid, debug_string, ref->debug_id,
1876 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1877 break;
1878 }
1879 case BC_INCREFS_DONE:
1880 case BC_ACQUIRE_DONE: {
da49889d
AH
1881 binder_uintptr_t node_ptr;
1882 binder_uintptr_t cookie;
355b0502
GKH
1883 struct binder_node *node;
1884
da49889d 1885 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
355b0502 1886 return -EFAULT;
da49889d
AH
1887 ptr += sizeof(binder_uintptr_t);
1888 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 1889 return -EFAULT;
da49889d 1890 ptr += sizeof(binder_uintptr_t);
355b0502
GKH
1891 node = binder_get_node(proc, node_ptr);
1892 if (node == NULL) {
da49889d 1893 binder_user_error("%d:%d %s u%016llx no match\n",
355b0502
GKH
1894 proc->pid, thread->pid,
1895 cmd == BC_INCREFS_DONE ?
1896 "BC_INCREFS_DONE" :
1897 "BC_ACQUIRE_DONE",
da49889d 1898 (u64)node_ptr);
355b0502
GKH
1899 break;
1900 }
1901 if (cookie != node->cookie) {
da49889d 1902 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
355b0502
GKH
1903 proc->pid, thread->pid,
1904 cmd == BC_INCREFS_DONE ?
1905 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
da49889d
AH
1906 (u64)node_ptr, node->debug_id,
1907 (u64)cookie, (u64)node->cookie);
355b0502
GKH
1908 break;
1909 }
1910 if (cmd == BC_ACQUIRE_DONE) {
1911 if (node->pending_strong_ref == 0) {
56b468fc 1912 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
355b0502
GKH
1913 proc->pid, thread->pid,
1914 node->debug_id);
1915 break;
1916 }
1917 node->pending_strong_ref = 0;
1918 } else {
1919 if (node->pending_weak_ref == 0) {
56b468fc 1920 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
355b0502
GKH
1921 proc->pid, thread->pid,
1922 node->debug_id);
1923 break;
1924 }
1925 node->pending_weak_ref = 0;
1926 }
1927 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1928 binder_debug(BINDER_DEBUG_USER_REFS,
56b468fc 1929 "%d:%d %s node %d ls %d lw %d\n",
355b0502
GKH
1930 proc->pid, thread->pid,
1931 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1932 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1933 break;
1934 }
1935 case BC_ATTEMPT_ACQUIRE:
56b468fc 1936 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
355b0502
GKH
1937 return -EINVAL;
1938 case BC_ACQUIRE_RESULT:
56b468fc 1939 pr_err("BC_ACQUIRE_RESULT not supported\n");
355b0502
GKH
1940 return -EINVAL;
1941
1942 case BC_FREE_BUFFER: {
da49889d 1943 binder_uintptr_t data_ptr;
355b0502
GKH
1944 struct binder_buffer *buffer;
1945
da49889d 1946 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
355b0502 1947 return -EFAULT;
da49889d 1948 ptr += sizeof(binder_uintptr_t);
355b0502 1949
19c98724
TK
1950 buffer = binder_alloc_buffer_lookup(&proc->alloc,
1951 data_ptr);
355b0502 1952 if (buffer == NULL) {
da49889d
AH
1953 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1954 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
1955 break;
1956 }
1957 if (!buffer->allow_user_free) {
da49889d
AH
1958 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1959 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
1960 break;
1961 }
1962 binder_debug(BINDER_DEBUG_FREE_BUFFER,
da49889d
AH
1963 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1964 proc->pid, thread->pid, (u64)data_ptr,
1965 buffer->debug_id,
355b0502
GKH
1966 buffer->transaction ? "active" : "finished");
1967
1968 if (buffer->transaction) {
1969 buffer->transaction->buffer = NULL;
1970 buffer->transaction = NULL;
1971 }
1972 if (buffer->async_transaction && buffer->target_node) {
1973 BUG_ON(!buffer->target_node->has_async_transaction);
1974 if (list_empty(&buffer->target_node->async_todo))
1975 buffer->target_node->has_async_transaction = 0;
1976 else
1977 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1978 }
975a1ac9 1979 trace_binder_transaction_buffer_release(buffer);
355b0502 1980 binder_transaction_buffer_release(proc, buffer, NULL);
19c98724 1981 binder_alloc_free_buf(&proc->alloc, buffer);
355b0502
GKH
1982 break;
1983 }
1984
7980240b
MC
1985 case BC_TRANSACTION_SG:
1986 case BC_REPLY_SG: {
1987 struct binder_transaction_data_sg tr;
1988
1989 if (copy_from_user(&tr, ptr, sizeof(tr)))
1990 return -EFAULT;
1991 ptr += sizeof(tr);
1992 binder_transaction(proc, thread, &tr.transaction_data,
1993 cmd == BC_REPLY_SG, tr.buffers_size);
1994 break;
1995 }
355b0502
GKH
1996 case BC_TRANSACTION:
1997 case BC_REPLY: {
1998 struct binder_transaction_data tr;
1999
2000 if (copy_from_user(&tr, ptr, sizeof(tr)))
2001 return -EFAULT;
2002 ptr += sizeof(tr);
4bfac80a
MC
2003 binder_transaction(proc, thread, &tr,
2004 cmd == BC_REPLY, 0);
355b0502
GKH
2005 break;
2006 }
2007
2008 case BC_REGISTER_LOOPER:
2009 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2010 "%d:%d BC_REGISTER_LOOPER\n",
355b0502
GKH
2011 proc->pid, thread->pid);
2012 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2013 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2014 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
355b0502
GKH
2015 proc->pid, thread->pid);
2016 } else if (proc->requested_threads == 0) {
2017 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2018 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
355b0502
GKH
2019 proc->pid, thread->pid);
2020 } else {
2021 proc->requested_threads--;
2022 proc->requested_threads_started++;
2023 }
2024 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2025 break;
2026 case BC_ENTER_LOOPER:
2027 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2028 "%d:%d BC_ENTER_LOOPER\n",
355b0502
GKH
2029 proc->pid, thread->pid);
2030 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2031 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2032 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
355b0502
GKH
2033 proc->pid, thread->pid);
2034 }
2035 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2036 break;
2037 case BC_EXIT_LOOPER:
2038 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2039 "%d:%d BC_EXIT_LOOPER\n",
355b0502
GKH
2040 proc->pid, thread->pid);
2041 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2042 break;
2043
2044 case BC_REQUEST_DEATH_NOTIFICATION:
2045 case BC_CLEAR_DEATH_NOTIFICATION: {
2046 uint32_t target;
da49889d 2047 binder_uintptr_t cookie;
355b0502
GKH
2048 struct binder_ref *ref;
2049 struct binder_ref_death *death;
2050
2051 if (get_user(target, (uint32_t __user *)ptr))
2052 return -EFAULT;
2053 ptr += sizeof(uint32_t);
da49889d 2054 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 2055 return -EFAULT;
da49889d 2056 ptr += sizeof(binder_uintptr_t);
0a3ffab9 2057 ref = binder_get_ref(proc, target, false);
355b0502 2058 if (ref == NULL) {
56b468fc 2059 binder_user_error("%d:%d %s invalid ref %d\n",
355b0502
GKH
2060 proc->pid, thread->pid,
2061 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2062 "BC_REQUEST_DEATH_NOTIFICATION" :
2063 "BC_CLEAR_DEATH_NOTIFICATION",
2064 target);
2065 break;
2066 }
2067
2068 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 2069 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
2070 proc->pid, thread->pid,
2071 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2072 "BC_REQUEST_DEATH_NOTIFICATION" :
2073 "BC_CLEAR_DEATH_NOTIFICATION",
da49889d 2074 (u64)cookie, ref->debug_id, ref->desc,
355b0502
GKH
2075 ref->strong, ref->weak, ref->node->debug_id);
2076
2077 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2078 if (ref->death) {
56b468fc 2079 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
355b0502
GKH
2080 proc->pid, thread->pid);
2081 break;
2082 }
2083 death = kzalloc(sizeof(*death), GFP_KERNEL);
2084 if (death == NULL) {
2085 thread->return_error = BR_ERROR;
2086 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
56b468fc 2087 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
355b0502
GKH
2088 proc->pid, thread->pid);
2089 break;
2090 }
2091 binder_stats_created(BINDER_STAT_DEATH);
2092 INIT_LIST_HEAD(&death->work.entry);
2093 death->cookie = cookie;
2094 ref->death = death;
2095 if (ref->node->proc == NULL) {
2096 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2097 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2098 list_add_tail(&ref->death->work.entry, &thread->todo);
2099 } else {
2100 list_add_tail(&ref->death->work.entry, &proc->todo);
2101 wake_up_interruptible(&proc->wait);
2102 }
2103 }
2104 } else {
2105 if (ref->death == NULL) {
56b468fc 2106 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
355b0502
GKH
2107 proc->pid, thread->pid);
2108 break;
2109 }
2110 death = ref->death;
2111 if (death->cookie != cookie) {
da49889d 2112 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
355b0502 2113 proc->pid, thread->pid,
da49889d
AH
2114 (u64)death->cookie,
2115 (u64)cookie);
355b0502
GKH
2116 break;
2117 }
2118 ref->death = NULL;
2119 if (list_empty(&death->work.entry)) {
2120 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2121 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2122 list_add_tail(&death->work.entry, &thread->todo);
2123 } else {
2124 list_add_tail(&death->work.entry, &proc->todo);
2125 wake_up_interruptible(&proc->wait);
2126 }
2127 } else {
2128 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2129 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2130 }
2131 }
2132 } break;
2133 case BC_DEAD_BINDER_DONE: {
2134 struct binder_work *w;
da49889d 2135 binder_uintptr_t cookie;
355b0502 2136 struct binder_ref_death *death = NULL;
10f62861 2137
da49889d 2138 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502
GKH
2139 return -EFAULT;
2140
7a64cd88 2141 ptr += sizeof(cookie);
355b0502
GKH
2142 list_for_each_entry(w, &proc->delivered_death, entry) {
2143 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
10f62861 2144
355b0502
GKH
2145 if (tmp_death->cookie == cookie) {
2146 death = tmp_death;
2147 break;
2148 }
2149 }
2150 binder_debug(BINDER_DEBUG_DEAD_BINDER,
da49889d
AH
2151 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2152 proc->pid, thread->pid, (u64)cookie,
2153 death);
355b0502 2154 if (death == NULL) {
da49889d
AH
2155 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2156 proc->pid, thread->pid, (u64)cookie);
355b0502
GKH
2157 break;
2158 }
2159
2160 list_del_init(&death->work.entry);
2161 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2162 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2163 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2164 list_add_tail(&death->work.entry, &thread->todo);
2165 } else {
2166 list_add_tail(&death->work.entry, &proc->todo);
2167 wake_up_interruptible(&proc->wait);
2168 }
2169 }
2170 } break;
2171
2172 default:
56b468fc 2173 pr_err("%d:%d unknown command %d\n",
355b0502
GKH
2174 proc->pid, thread->pid, cmd);
2175 return -EINVAL;
2176 }
2177 *consumed = ptr - buffer;
2178 }
2179 return 0;
2180}
2181
fb07ebc3
BP
2182static void binder_stat_br(struct binder_proc *proc,
2183 struct binder_thread *thread, uint32_t cmd)
355b0502 2184{
975a1ac9 2185 trace_binder_return(cmd);
355b0502
GKH
2186 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2187 binder_stats.br[_IOC_NR(cmd)]++;
2188 proc->stats.br[_IOC_NR(cmd)]++;
2189 thread->stats.br[_IOC_NR(cmd)]++;
2190 }
2191}
2192
2193static int binder_has_proc_work(struct binder_proc *proc,
2194 struct binder_thread *thread)
2195{
2196 return !list_empty(&proc->todo) ||
2197 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2198}
2199
2200static int binder_has_thread_work(struct binder_thread *thread)
2201{
2202 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2203 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2204}
2205
/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 * @proc:          calling process
 * @thread:        calling thread
 * @binder_buffer: user address of the read buffer
 * @size:          size of the read buffer
 * @consumed:      in/out: bytes already consumed / bytes written
 * @non_block:     nonzero if the fd is O_NONBLOCK (don't sleep)
 *
 * Waits (unless @non_block) for work on the thread or process todo
 * list, then translates queued binder_work items into BR_* commands
 * copied to user space.  Returns 0 on success or a negative errno;
 * on -EFAULT the buffer contents are undefined.
 *
 * Called with the global binder lock held; the lock is dropped around
 * the sleep and re-taken afterwards.
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Start every fresh read with a BR_NOOP so user space can always
	 * skip the first word; also guarantees the buffer is non-empty. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* A thread with no transaction in flight and nothing queued on
	 * its own todo list may service the shared process todo list. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Deliver pending error codes (return_error2 first, e.g. a
	 * BR_TRANSACTION_COMPLETE error) before any other work. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	/* Drop the global lock while (possibly) sleeping. */
	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		/* Only registered/entered looper threads may wait for
		 * process work; anything else is a user-space bug. */
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Drain work items, thread todo list first, into the buffer. */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			/* ptr - buffer == 4 means only the initial BR_NOOP
			 * was written, so go back to sleep instead of
			 * returning an empty read. */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		/* Stop once there is no room for a command word plus a
		 * full binder_transaction_data. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			/* Handled after the switch: t != NULL triggers the
			 * transaction-delivery path below. */
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			/* Derive the desired user-space ref state from the
			 * node's current kernel-side reference counts. */
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				/* Command is followed by the node's user
				 * pointer and cookie. */
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				/* State already consistent: drop the work
				 * item, and free the node if unreferenced. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep the item until user space acks with
				 * BC_DEAD_BINDER_DONE. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Deliver the transaction as BR_TRANSACTION or BR_REPLY. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			/* Honor the node's minimum priority for synchronous
			 * transactions; restore in binder_transaction(). */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			/* Report the sender pid as seen from the receiver's
			 * pid namespace. */
			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Translate the kernel buffer address into the receiver's
		 * mmap'ed view of the same pages. */
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous: park the transaction on this thread's
			 * stack until the reply comes back. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way or reply: nothing more expected, free it. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask user space to spawn another looper thread if the pool is
	 * exhausted and the configured maximum is not yet reached. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* Overwrites the leading BR_NOOP written at *consumed == 0. */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
2531
/*
 * binder_release_work() - drain a work list that can no longer be served.
 * @list: thread or process todo list being torn down
 *
 * Each queued item is either answered (synchronous transactions get a
 * BR_DEAD_REPLY so the sender does not hang) or logged and freed.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				/* Synchronous: the sender is still blocked
				 * waiting; tell it the target died. */
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			/* Unknown items are leaked on purpose: freeing a
			 * type we do not understand would be worse. */
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}
2581
/*
 * binder_get_thread() - look up or create the binder_thread for current.
 * @proc: process the calling task belongs to
 *
 * Threads are kept in an rb-tree keyed by pid.  A newly created thread
 * starts with NEED_RETURN set so its first read returns immediately.
 * Returns NULL on allocation failure.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	/* Standard rb-tree walk; on a miss, p/parent give the link site. */
	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}
2616
/*
 * binder_free_thread() - tear down a binder_thread on exit/release.
 * @proc:   owning process
 * @thread: thread being destroyed
 *
 * Unhooks the thread from every transaction on its stack, fails the
 * innermost transaction awaiting a reply from this thread, drains the
 * thread's todo list and frees it.  Returns the number of transactions
 * that were still active (for debug accounting by the caller).
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* If the top of the stack targets this thread, a sender is blocked
	 * waiting for our reply; it must get BR_DEAD_REPLY below. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming transaction: detach target side. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing transaction: detach sender side. */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
2657
/*
 * binder_poll() - poll/select support for the binder fd.
 *
 * Decides under the global lock whether this thread would wait on the
 * process or its own wait queue, then registers with the matching queue.
 * Returns POLLIN when work is already available.
 */
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	thread = binder_get_thread(proc);

	/* Same idle test binder_thread_read() uses, plus no pending error. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		/* Re-check: work may have arrived while registering. */
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}
2689
/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl.
 * @filp:   binder file
 * @cmd:    ioctl command (size is validated against it)
 * @arg:    user pointer to a struct binder_write_read
 * @thread: calling binder thread
 *
 * Processes the write buffer first, then fills the read buffer.  The
 * (possibly updated) binder_write_read is copied back to user space on
 * both success and failure so consumed counts are never lost.
 * Returns 0 or a negative errno.
 */
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			/* Write failed: report zero read progress but still
			 * return the write_consumed value. */
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* This thread consumed an item; wake another waiter if the
		 * process todo list is still non-empty. */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
2753
/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR.
 * @filp: binder file of the process claiming context-manager status
 *
 * Installs the calling process as the context manager (node 0) for its
 * binder context.  Fails with -EBUSY if one is already registered, and
 * enforces both the LSM hook and, once a manager uid has been recorded,
 * that the same euid reclaims it.  Returns 0 or a negative errno.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;

	kuid_t curr_euid = current_euid();

	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		/* A manager uid survives a dead manager; only the same
		 * euid may re-register. */
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	/* The context manager is always node 0 (ptr == cookie == 0). */
	context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (!context->binder_context_mgr_node) {
		ret = -ENOMEM;
		goto out;
	}
	/* Pin the node so it cannot be freed while it is the manager. */
	context->binder_context_mgr_node->local_weak_refs++;
	context->binder_context_mgr_node->local_strong_refs++;
	context->binder_context_mgr_node->has_strong_ref = 1;
	context->binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
2794
/*
 * binder_ioctl() - top-level ioctl dispatcher for /dev/binder.
 *
 * Takes the global binder lock for the duration of each command and
 * clears the thread's NEED_RETURN flag on the way out.  Entry and exit
 * also park the caller while binder_stop_on_user_error >= 2 (debug aid).
 * Returns 0 or a negative errno.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread is freed; NULL prevents the looper update at err:. */
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
2872
2873static void binder_vma_open(struct vm_area_struct *vma)
2874{
2875 struct binder_proc *proc = vma->vm_private_data;
10f62861 2876
355b0502 2877 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 2878 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
2879 proc->pid, vma->vm_start, vma->vm_end,
2880 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2881 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
2882}
2883
/*
 * binder_vma_close() - VMA close callback for the binder mapping.
 * @vma: the VMA being torn down.
 *
 * Notifies the allocator that the userspace mapping is going away, then
 * defers dropping the cached files_struct to the workqueue (we may be in
 * a context where taking the locks needed for that is not safe).
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	/* let the allocator drop its reference to the vma first */
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
2896
/*
 * binder_vm_fault() - fault handler for the binder mapping.
 *
 * Userspace must never fault pages into this mapping on its own; all pages
 * are installed explicitly by the driver. Any fault is therefore an error
 * and is answered with SIGBUS.
 */
static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
2901
/* VMA callbacks for the per-process binder buffer mapping. */
static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
2907
19c98724
TK
2908static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2909{
2910 int ret;
2911 struct binder_proc *proc = filp->private_data;
2912 const char *failure_string;
2913
2914 if (proc->tsk != current->group_leader)
2915 return -EINVAL;
2916
2917 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2918 vma->vm_end = vma->vm_start + SZ_4M;
2919
2920 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2921 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2922 __func__, proc->pid, vma->vm_start, vma->vm_end,
2923 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2924 (unsigned long)pgprot_val(vma->vm_page_prot));
2925
2926 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2927 ret = -EPERM;
2928 failure_string = "bad vm_flags";
2929 goto err_bad_arg;
2930 }
2931 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2932 vma->vm_ops = &binder_vm_ops;
2933 vma->vm_private_data = proc;
2934
2935 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
2936 if (ret)
2937 return ret;
2938 proc->files = get_files_struct(current);
2939 return 0;
2940
355b0502 2941err_bad_arg:
258767fe 2942 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
355b0502
GKH
2943 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2944 return ret;
2945}
2946
/*
 * binder_open() - create the per-process binder state on first open.
 * @nodp: inode of the binder device node (unused beyond the VFS contract).
 * @filp: file being opened; private_data initially points at the miscdev
 *        and is replaced with the new binder_proc.
 *
 * Allocates and initializes a binder_proc bound to the opening thread
 * group, links it onto the global proc list under the main binder lock,
 * and creates its debugfs entry.
 *
 * Return: 0 on success, -ENOMEM if the proc cannot be allocated.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* the proc holds a reference on the group leader until release */
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	/* recover which binder device (context) this open is for */
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
2997
2998static int binder_flush(struct file *filp, fl_owner_t id)
2999{
3000 struct binder_proc *proc = filp->private_data;
3001
3002 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3003
3004 return 0;
3005}
3006
/*
 * binder_deferred_flush() - deferred part of binder_flush().
 * @proc: process whose threads should be kicked out of the driver.
 *
 * Marks every thread of @proc with NEED_RETURN so the next ioctl returns
 * to userspace, and wakes any thread currently blocked waiting for work.
 * Runs from binder_deferred_func() under the main binder lock.
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	/* also wake anyone sleeping on the proc-wide wait queue */
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
3027
3028static int binder_release(struct inode *nodp, struct file *filp)
3029{
3030 struct binder_proc *proc = filp->private_data;
10f62861 3031
16b66554 3032 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
3033 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3034
3035 return 0;
3036}
3037
008fa749
ME
3038static int binder_node_release(struct binder_node *node, int refs)
3039{
3040 struct binder_ref *ref;
3041 int death = 0;
3042
3043 list_del_init(&node->work.entry);
3044 binder_release_work(&node->async_todo);
3045
3046 if (hlist_empty(&node->refs)) {
3047 kfree(node);
3048 binder_stats_deleted(BINDER_STAT_NODE);
3049
3050 return refs;
3051 }
3052
3053 node->proc = NULL;
3054 node->local_strong_refs = 0;
3055 node->local_weak_refs = 0;
3056 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3057
3058 hlist_for_each_entry(ref, &node->refs, node_entry) {
3059 refs++;
3060
3061 if (!ref->death)
e194fd8a 3062 continue;
008fa749
ME
3063
3064 death++;
3065
3066 if (list_empty(&ref->death->work.entry)) {
3067 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3068 list_add_tail(&ref->death->work.entry,
3069 &ref->proc->todo);
3070 wake_up_interruptible(&ref->proc->wait);
3071 } else
3072 BUG();
3073 }
3074
008fa749
ME
3075 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3076 "node %d now dead, refs %d, death %d\n",
3077 node->debug_id, refs, death);
3078
3079 return refs;
3080}
3081
355b0502
GKH
/*
 * binder_deferred_release() - final teardown of a binder_proc.
 * @proc: process being destroyed; freed before return.
 *
 * Runs from the deferred workqueue under the main binder lock, after the
 * last close of the device file. Unlinks the proc globally, clears the
 * context-manager node if this proc owned it, then releases in order:
 * threads (counting still-active transactions), nodes (queuing death
 * notifications via binder_node_release()), outgoing refs, pending work,
 * and finally the allocator state and the proc itself.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	/* files must already have been put via BINDER_DEFERRED_PUT_FILES */
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	binder_alloc_deferred_release(&proc->alloc);
	binder_stats_deleted(BINDER_STAT_PROC);

	/* drop the group-leader reference taken in binder_open() */
	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	kfree(proc);
}
3145
/*
 * binder_deferred_func() - workqueue handler draining deferred binder work.
 *
 * Pops one proc at a time off binder_deferred_list (under
 * binder_deferred_lock, nested inside the main binder lock) and performs
 * whatever deferred actions were flagged on it: putting the cached
 * files_struct, flushing, and/or full release. put_files_struct() is
 * called only after dropping the binder lock, since it may sleep/recurse.
 * Loops until the deferred list is empty.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		/* may sleep; must happen outside the binder lock */
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3187
/*
 * binder_defer_work() - flag deferred work for a proc and kick the worker.
 * @proc:  process the work applies to.
 * @defer: BINDER_DEFERRED_* bit(s) to set.
 *
 * Bits accumulate; the proc is only queued (and the work item scheduled)
 * if it is not already on the deferred list.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
3200
5249f488
AH
3201static void print_binder_transaction(struct seq_file *m, const char *prefix,
3202 struct binder_transaction *t)
3203{
3204 seq_printf(m,
3205 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3206 prefix, t->debug_id, t,
3207 t->from ? t->from->proc->pid : 0,
3208 t->from ? t->from->pid : 0,
3209 t->to_proc ? t->to_proc->pid : 0,
3210 t->to_thread ? t->to_thread->pid : 0,
3211 t->code, t->flags, t->priority, t->need_reply);
355b0502 3212 if (t->buffer == NULL) {
5249f488
AH
3213 seq_puts(m, " buffer free\n");
3214 return;
355b0502 3215 }
5249f488
AH
3216 if (t->buffer->target_node)
3217 seq_printf(m, " node %d",
3218 t->buffer->target_node->debug_id);
3219 seq_printf(m, " size %zd:%zd data %p\n",
3220 t->buffer->data_size, t->buffer->offsets_size,
3221 t->buffer->data);
355b0502
GKH
3222}
3223
5249f488
AH
/*
 * print_binder_work() - dump one queued binder_work item to a seq_file.
 * @m:                  output seq_file.
 * @prefix:             prefix for non-transaction work lines.
 * @transaction_prefix: prefix used when the work wraps a transaction.
 * @w:                  work item to print.
 *
 * Dispatches on w->type; transactions are recovered via container_of()
 * and forwarded to print_binder_transaction().
 */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
3259
5249f488
AH
3260static void print_binder_thread(struct seq_file *m,
3261 struct binder_thread *thread,
3262 int print_always)
355b0502
GKH
3263{
3264 struct binder_transaction *t;
3265 struct binder_work *w;
5249f488
AH
3266 size_t start_pos = m->count;
3267 size_t header_pos;
355b0502 3268
5249f488
AH
3269 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3270 header_pos = m->count;
355b0502
GKH
3271 t = thread->transaction_stack;
3272 while (t) {
355b0502 3273 if (t->from == thread) {
5249f488
AH
3274 print_binder_transaction(m,
3275 " outgoing transaction", t);
355b0502
GKH
3276 t = t->from_parent;
3277 } else if (t->to_thread == thread) {
5249f488
AH
3278 print_binder_transaction(m,
3279 " incoming transaction", t);
355b0502
GKH
3280 t = t->to_parent;
3281 } else {
5249f488 3282 print_binder_transaction(m, " bad transaction", t);
355b0502
GKH
3283 t = NULL;
3284 }
3285 }
3286 list_for_each_entry(w, &thread->todo, entry) {
5249f488 3287 print_binder_work(m, " ", " pending transaction", w);
355b0502 3288 }
5249f488
AH
3289 if (!print_always && m->count == header_pos)
3290 m->count = start_pos;
355b0502
GKH
3291}
3292
5249f488 3293static void print_binder_node(struct seq_file *m, struct binder_node *node)
355b0502
GKH
3294{
3295 struct binder_ref *ref;
355b0502
GKH
3296 struct binder_work *w;
3297 int count;
3298
3299 count = 0;
b67bfe0d 3300 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
3301 count++;
3302
da49889d
AH
3303 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3304 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5249f488
AH
3305 node->has_strong_ref, node->has_weak_ref,
3306 node->local_strong_refs, node->local_weak_refs,
3307 node->internal_strong_refs, count);
355b0502 3308 if (count) {
5249f488 3309 seq_puts(m, " proc");
b67bfe0d 3310 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 3311 seq_printf(m, " %d", ref->proc->pid);
355b0502 3312 }
5249f488
AH
3313 seq_puts(m, "\n");
3314 list_for_each_entry(w, &node->async_todo, entry)
3315 print_binder_work(m, " ",
3316 " pending async transaction", w);
355b0502
GKH
3317}
3318
5249f488 3319static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
355b0502 3320{
5249f488
AH
3321 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3322 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3323 ref->node->debug_id, ref->strong, ref->weak, ref->death);
355b0502
GKH
3324}
3325
5249f488
AH
3326static void print_binder_proc(struct seq_file *m,
3327 struct binder_proc *proc, int print_all)
355b0502
GKH
3328{
3329 struct binder_work *w;
3330 struct rb_node *n;
5249f488
AH
3331 size_t start_pos = m->count;
3332 size_t header_pos;
3333
3334 seq_printf(m, "proc %d\n", proc->pid);
14db3181 3335 seq_printf(m, "context %s\n", proc->context->name);
5249f488
AH
3336 header_pos = m->count;
3337
3338 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3339 print_binder_thread(m, rb_entry(n, struct binder_thread,
3340 rb_node), print_all);
3341 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
355b0502
GKH
3342 struct binder_node *node = rb_entry(n, struct binder_node,
3343 rb_node);
3344 if (print_all || node->has_async_transaction)
5249f488 3345 print_binder_node(m, node);
355b0502
GKH
3346 }
3347 if (print_all) {
3348 for (n = rb_first(&proc->refs_by_desc);
5249f488 3349 n != NULL;
355b0502 3350 n = rb_next(n))
5249f488
AH
3351 print_binder_ref(m, rb_entry(n, struct binder_ref,
3352 rb_node_desc));
355b0502 3353 }
19c98724 3354 binder_alloc_print_allocated(m, &proc->alloc);
5249f488
AH
3355 list_for_each_entry(w, &proc->todo, entry)
3356 print_binder_work(m, " ", " pending transaction", w);
355b0502 3357 list_for_each_entry(w, &proc->delivered_death, entry) {
5249f488 3358 seq_puts(m, " has delivered dead binder\n");
355b0502
GKH
3359 break;
3360 }
5249f488
AH
3361 if (!print_all && m->count == header_pos)
3362 m->count = start_pos;
355b0502
GKH
3363}
3364
/*
 * Names for the BR_* return commands, indexed by command ordinal; must
 * stay in sync with binder_stats.br (print_binder_stats() BUILD_BUG_ONs
 * the sizes against each other).
 */
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
3385
/*
 * Names for the BC_* userspace commands, indexed by command ordinal; must
 * stay in sync with binder_stats.bc (size-checked in print_binder_stats()).
 */
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};
3407
/*
 * Names for the object-lifetime statistics buckets; order must match
 * binder_stats.obj_created/obj_deleted (size-checked in
 * print_binder_stats()).
 */
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3417
5249f488
AH
3418static void print_binder_stats(struct seq_file *m, const char *prefix,
3419 struct binder_stats *stats)
355b0502
GKH
3420{
3421 int i;
3422
3423 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5249f488 3424 ARRAY_SIZE(binder_command_strings));
355b0502
GKH
3425 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3426 if (stats->bc[i])
5249f488
AH
3427 seq_printf(m, "%s%s: %d\n", prefix,
3428 binder_command_strings[i], stats->bc[i]);
355b0502
GKH
3429 }
3430
3431 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5249f488 3432 ARRAY_SIZE(binder_return_strings));
355b0502
GKH
3433 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3434 if (stats->br[i])
5249f488
AH
3435 seq_printf(m, "%s%s: %d\n", prefix,
3436 binder_return_strings[i], stats->br[i]);
355b0502
GKH
3437 }
3438
3439 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 3440 ARRAY_SIZE(binder_objstat_strings));
355b0502 3441 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 3442 ARRAY_SIZE(stats->obj_deleted));
355b0502
GKH
3443 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3444 if (stats->obj_created[i] || stats->obj_deleted[i])
5249f488
AH
3445 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3446 binder_objstat_strings[i],
3447 stats->obj_created[i] - stats->obj_deleted[i],
3448 stats->obj_created[i]);
355b0502 3449 }
355b0502
GKH
3450}
3451
5249f488
AH
3452static void print_binder_proc_stats(struct seq_file *m,
3453 struct binder_proc *proc)
355b0502
GKH
3454{
3455 struct binder_work *w;
3456 struct rb_node *n;
3457 int count, strong, weak;
3458
5249f488 3459 seq_printf(m, "proc %d\n", proc->pid);
14db3181 3460 seq_printf(m, "context %s\n", proc->context->name);
355b0502
GKH
3461 count = 0;
3462 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3463 count++;
5249f488
AH
3464 seq_printf(m, " threads: %d\n", count);
3465 seq_printf(m, " requested threads: %d+%d/%d\n"
355b0502
GKH
3466 " ready threads %d\n"
3467 " free async space %zd\n", proc->requested_threads,
3468 proc->requested_threads_started, proc->max_threads,
19c98724
TK
3469 proc->ready_threads,
3470 binder_alloc_get_free_async_space(&proc->alloc));
355b0502
GKH
3471 count = 0;
3472 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3473 count++;
5249f488 3474 seq_printf(m, " nodes: %d\n", count);
355b0502
GKH
3475 count = 0;
3476 strong = 0;
3477 weak = 0;
3478 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3479 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3480 rb_node_desc);
3481 count++;
3482 strong += ref->strong;
3483 weak += ref->weak;
3484 }
5249f488 3485 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
355b0502 3486
19c98724 3487 count = binder_alloc_get_allocated_count(&proc->alloc);
5249f488 3488 seq_printf(m, " buffers: %d\n", count);
355b0502
GKH
3489
3490 count = 0;
3491 list_for_each_entry(w, &proc->todo, entry) {
3492 switch (w->type) {
3493 case BINDER_WORK_TRANSACTION:
3494 count++;
3495 break;
3496 default:
3497 break;
3498 }
3499 }
5249f488 3500 seq_printf(m, " pending transactions: %d\n", count);
355b0502 3501
5249f488 3502 print_binder_stats(m, " ", &proc->stats);
355b0502
GKH
3503}
3504
3505
/*
 * binder_state_show() - debugfs "state" file: full dump of all procs.
 *
 * Prints every dead node and then a full (print_all=1) dump of every
 * registered proc. Locking can be bypassed via binder_debug_no_lock for
 * debugging a wedged driver.
 */
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3528
/*
 * binder_stats_show() - debugfs "stats" file.
 *
 * Prints the global counters followed by the per-proc statistics of every
 * registered proc. Locking can be bypassed via binder_debug_no_lock.
 */
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3547
/*
 * binder_transactions_show() - debugfs "transactions" file.
 *
 * Selective (print_all=0) dump of every proc: only threads/nodes with
 * live transaction state produce output.
 */
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3563
/*
 * binder_proc_show() - debugfs per-proc file.
 *
 * The target PID was stashed in m->private at debugfs_create_file() time.
 * All procs with a matching PID are dumped (a PID can have one proc per
 * binder context).
 */
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3583
/*
 * print_binder_transaction_log_entry() - print one log ring entry.
 *
 * call_type encoding: 2 = reply, 1 = async, otherwise a synchronous call.
 */
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size);
}
3594
5249f488 3595static int binder_transaction_log_show(struct seq_file *m, void *unused)
355b0502 3596{
5249f488 3597 struct binder_transaction_log *log = m->private;
355b0502 3598 int i;
355b0502
GKH
3599
3600 if (log->full) {
5249f488
AH
3601 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3602 print_binder_transaction_log_entry(m, &log->entry[i]);
355b0502 3603 }
5249f488
AH
3604 for (i = 0; i < log->next; i++)
3605 print_binder_transaction_log_entry(m, &log->entry[i]);
3606 return 0;
355b0502
GKH
3607}
3608
/* File operations for /dev/binder* device nodes. */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	/* the binder ABI is compat-clean, so the same handler serves both */
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
3619
5249f488
AH
/* Generate the seq_file open/fops boilerplate for each debugfs show fn. */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
3624
ac4812c5
MC
3625static int __init init_binder_device(const char *name)
3626{
3627 int ret;
3628 struct binder_device *binder_device;
3629
3630 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
3631 if (!binder_device)
3632 return -ENOMEM;
3633
3634 binder_device->miscdev.fops = &binder_fops;
3635 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
3636 binder_device->miscdev.name = name;
3637
3638 binder_device->context.binder_context_mgr_uid = INVALID_UID;
3639 binder_device->context.name = name;
3640
3641 ret = misc_register(&binder_device->miscdev);
3642 if (ret < 0) {
3643 kfree(binder_device);
3644 return ret;
3645 }
3646
3647 hlist_add_head(&binder_device->hlist, &binder_devices);
3648
3649 return ret;
3650}
3651
355b0502
GKH
3652static int __init binder_init(void)
3653{
3654 int ret;
ac4812c5
MC
3655 char *device_name, *device_names;
3656 struct binder_device *device;
3657 struct hlist_node *tmp;
355b0502 3658
16b66554
AH
3659 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3660 if (binder_debugfs_dir_entry_root)
3661 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3662 binder_debugfs_dir_entry_root);
ac4812c5 3663
16b66554
AH
3664 if (binder_debugfs_dir_entry_root) {
3665 debugfs_create_file("state",
3666 S_IRUGO,
3667 binder_debugfs_dir_entry_root,
3668 NULL,
3669 &binder_state_fops);
3670 debugfs_create_file("stats",
3671 S_IRUGO,
3672 binder_debugfs_dir_entry_root,
3673 NULL,
3674 &binder_stats_fops);
3675 debugfs_create_file("transactions",
3676 S_IRUGO,
3677 binder_debugfs_dir_entry_root,
3678 NULL,
3679 &binder_transactions_fops);
3680 debugfs_create_file("transaction_log",
3681 S_IRUGO,
3682 binder_debugfs_dir_entry_root,
3683 &binder_transaction_log,
3684 &binder_transaction_log_fops);
3685 debugfs_create_file("failed_transaction_log",
3686 S_IRUGO,
3687 binder_debugfs_dir_entry_root,
3688 &binder_transaction_log_failed,
3689 &binder_transaction_log_fops);
355b0502 3690 }
ac4812c5
MC
3691
3692 /*
3693 * Copy the module_parameter string, because we don't want to
3694 * tokenize it in-place.
3695 */
3696 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
3697 if (!device_names) {
3698 ret = -ENOMEM;
3699 goto err_alloc_device_names_failed;
3700 }
3701 strcpy(device_names, binder_devices_param);
3702
3703 while ((device_name = strsep(&device_names, ","))) {
3704 ret = init_binder_device(device_name);
3705 if (ret)
3706 goto err_init_binder_device_failed;
3707 }
3708
3709 return ret;
3710
3711err_init_binder_device_failed:
3712 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
3713 misc_deregister(&device->miscdev);
3714 hlist_del(&device->hlist);
3715 kfree(device);
3716 }
3717err_alloc_device_names_failed:
3718 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
3719
355b0502
GKH
3720 return ret;
3721}
3722
3723device_initcall(binder_init);
3724
975a1ac9
AH
3725#define CREATE_TRACE_POINTS
3726#include "binder_trace.h"
3727
355b0502 3728MODULE_LICENSE("GPL v2");
This page took 1.294392 seconds and 4 git commands to generate.