/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */

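/*
 * Example of the suffix convention in practice (illustrative sketch
 * only; foo() and foo_ilocked() are hypothetical functions, not part
 * of this driver). The unsuffixed wrapper takes proc->inner_lock and
 * delegates to the _ilocked variant, which expects it to be held:
 *
 *	static void foo_ilocked(struct binder_proc *proc)
 *	{
 *		assert_spin_locked(&proc->inner_lock);
 *		// ... operate on proc->todo ...
 *	}
 *
 *	static void foo(struct binder_proc *proc)
 *	{
 *		binder_inner_proc_lock(proc);
 *		foo_ilocked(proc);
 *		binder_inner_proc_unlock(proc);
 *	}
 */
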
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

/* These are normally only defined by ARM's asm/sizes.h; provide fallbacks. */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

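/*
 * Usage sketch (illustrative): both macros take printk-style arguments
 * and are rate-limited, e.g.
 *
 *	binder_debug(BINDER_DEBUG_THREADS, "%d exited\n", thread->pid);
 *	binder_user_error("%d sent bad handle\n", proc->pid);
 */
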
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * Write barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

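/*
 * Sketch of the intended publish protocol (illustrative, based on how
 * the log is filled in elsewhere in this file): the writer fills the
 * entry and stores a nonzero debug_id_done last, so a reader seeing
 * debug_id_done == 0 knows the entry is still being updated.
 *
 *	struct binder_transaction_log_entry *e;
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->debug_id = t_debug_id;
 *	// ... fill in the remaining fields ...
 *	smp_wmb();	// order the fills before the publish
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 */
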
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 * (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 * (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 * (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 * (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 * (invariant after initialized)
 * @refs: list of references on this node
 * (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 * initiating a transaction
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @local_weak_refs: weak user refs from local process
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @local_strong_refs: strong user refs from local process
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @tmp_refs: temporary kernel refs
 * (protected by @proc->inner_lock while @proc
 * is valid, and by binder_dead_nodes_lock
 * if @proc is NULL. During inc/dec and node release
 * it is also protected by @lock to provide safety
 * as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 * (invariant, no lock needed)
 * @cookie: userspace cookie for node
 * (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @has_async_transaction: async transaction to node in progress
 * (protected by @lock)
 * @accept_fds: file descriptor operations supported for node
 * (invariant after initialized)
 * @min_priority: minimum scheduling priority
 * (invariant after initialized)
 * @async_todo: list of async work items
 * (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 * (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 * ref for deletion in binder_cleanup_ref, a non-NULL
 * @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 * (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 * (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 * this proc ordered by node->ptr
 * (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 * (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 * (protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 * (protected by @inner_lock)
 * @pid: PID of group_leader of process
 * (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 * (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 * (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 * (protected by binder_deferred_lock)
 * @is_dead: process is dead and awaiting free
 * when outstanding transactions are cleaned up
 * (protected by @inner_lock)
 * @todo: list of work for this process
 * (protected by @inner_lock)
 * @stats: per-process binder statistics
 * (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 * (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 * (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 * yet started. In current implementation, can
 * only be 0 or 1.
 * (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 * (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 * (protected by @inner_lock)
 * @default_priority: default scheduler priority
 * (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 * (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 * Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

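/*
 * Lock-order sketch (illustrative): when several of these locks are
 * needed at once, they must be taken in the documented order --
 * outer, then node, then inner:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	// ... critical section ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */
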
enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 * (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 * (protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 * (protected by @proc->inner_lock)
 * @pid: PID for this thread
 * (invariant after initialization)
 * @looper: bitmap of looping state
 * (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 * (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 * (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 * (protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 * (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 * (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 * (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 * (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 * (atomic since @proc->inner_lock cannot
 * always be acquired)
 * @is_dead: thread is dead and awaiting free
 * when outstanding transactions are cleaned up
 * (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

/**
 * struct binder_txn_fd_fixup - transaction fd fixup list element
 * @fixup_entry: list entry
 * @file: struct file to be associated with new fd
 * @offset: offset in buffer data to this fixup
 *
 * List element for fd fixups in a transaction. Since file
 * descriptors need to be allocated in the context of the
 * target process, we pass each fd to be processed in this
 * struct.
 */
struct binder_txn_fd_fixup {
	struct list_head fixup_entry;
	struct file *file;
	size_t offset;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	struct list_head fd_fixups;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the work.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the work.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

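/*
 * Illustrative contrast between the two enqueue flavors (w1 and w2 are
 * hypothetical binder_work items): the normal variant marks the
 * thread's todo list as processable, while the deferred variant queues
 * work without causing an idle thread to handle it on its next read.
 *
 *	binder_inner_proc_lock(thread->proc);
 *	// thread may sleep without handling this one:
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &w1);
 *	// thread should process this promptly (sets process_todo):
 *	binder_enqueue_thread_work_ilocked(thread, &w2);
 *	binder_inner_proc_unlock(thread->proc);
 */
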
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc: binder_proc associated with list
 * @list: list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, or NULL if the list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

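/*
 * Typical wakeup pattern (illustrative sketch), following the note in
 * binder_wakeup_thread_ilocked(): select a waiting thread first so it
 * leaves proc->waiting_threads, queue the work, then wake it:
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_enqueue_work_ilocked(work,
 *				    thread ? &thread->todo : &proc->todo);
 *	binder_wakeup_thread_ilocked(proc, thread, false);	// sync = false
 *	binder_inner_proc_unlock(proc);
 */
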
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

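/*
 * Illustrative lifetime pattern: lookups such as binder_get_node()
 * return with a temporary reference held (see
 * binder_inc_node_tmpref_ilocked() above), which the caller must drop
 * with binder_put_node() once it is done with the node:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		// ... use node ...
 *		binder_put_node(node);
 *	}
 */
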
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

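/*
 * Illustrative use (hypothetical caller): a userspace BC_RELEASE on a
 * handle maps to a strong decrement through these helpers, with the
 * resulting counts reported back in rdata:
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	ret = binder_dec_ref_for_handle(proc, desc, true, &rdata);
 *	if (ret)
 *		binder_user_error("%d bad handle %u\n", proc->pid, desc);
 */
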
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

1798 struct binder_transaction *t)
1800 BUG_ON(!target_thread);
1801 assert_spin_locked(&target_thread->proc->inner_lock);
1802 BUG_ON(target_thread->transaction_stack != t);
1803 BUG_ON(target_thread->transaction_stack->from != target_thread);
1804 target_thread->transaction_stack =
1805 target_thread->transaction_stack->from_parent;
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

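/*
 * Illustrative pattern for safely using t->from (mirrors
 * binder_send_failed_reply() below):
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		// ... operate on target_thread under proc->inner_lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */
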
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction whose fd fixups are to be freed
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t: transaction that needs to be cleaned up
 * @reason: reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 * size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (buffer->data_size < sizeof(*hdr) ||
	    offset > buffer->data_size - sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

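/*
 * Illustrative caller loop (simplified from the transaction path):
 * every entry of the offsets array must name a valid object, otherwise
 * the whole transaction is rejected.
 *
 *	for (; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(t->buffer, *offp);
 *
 *		if (object_size == 0)
 *			goto err_bad_offset;	// reject the transaction
 *	}
 */
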
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b: binder_buffer containing the object
 * @index: index in offset array at which the binder_buffer_object is
 * located
 * @start: points to the start of the offset array
 * @num_valid: the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 * described by @start and @num_valid, and if there's a valid
 * binder_buffer_object at the offset found in index @index
 * of the offset array, that object is returned. Otherwise,
 * %NULL is returned.
 * Note that the offset found in index @index itself is not
 * verified; this function assumes that @num_valid elements
 * from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

2111 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2112 * @b: transaction buffer
2113 * @objects_start: start of objects buffer
2114 * @buffer: binder_buffer_object in which to fix up
2115 * @fixup_offset: start offset in @buffer to fix up
2116 * @last_obj: last binder_buffer_object that we fixed up in
2117 * @last_min_offset: minimum fixup offset in @last_obj
2119 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is allowed.
2122 * For safety reasons, we only allow fixups inside a buffer to happen
2123 * at increasing offsets; additionally, we only allow fixup on the last
2124 * buffer object that was verified, or one of its parents.
2126 * Example of what is allowed:
2129 * B (parent = A, offset = 0)
2130 * C (parent = A, offset = 16)
2131 * D (parent = C, offset = 0)
2132 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2134 * Examples of what is not allowed:
2136 * Decreasing offsets within the same parent:
2138 * C (parent = A, offset = 16)
2139 * B (parent = A, offset = 0) // decreasing offset within A
2141 * Referring to a parent that wasn't the last object or any of its parents:
2143 * B (parent = A, offset = 0)
2144 * C (parent = A, offset = 0)
2145 * C (parent = A, offset = 16)
2146 * D (parent = B, offset = 0) // B is not A or any of A's parents
2148 static bool binder_validate_fixup(struct binder_buffer *b,
2149 binder_size_t *objects_start,
2150 struct binder_buffer_object *buffer,
2151 binder_size_t fixup_offset,
2152 struct binder_buffer_object *last_obj,
2153 binder_size_t last_min_offset)
2156 /* No previously-verified object: nothing to fix up in */
2160 while (last_obj != buffer) {
2162 * Safe to retrieve the parent of last_obj, since it
2163 * was already previously verified by the driver.
2165 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2167 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2168 last_obj = (struct binder_buffer_object *)
2169 (b->data + *(objects_start + last_obj->parent));
2171 return (fixup_offset >= last_min_offset);
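/*
 * Illustrative trace (not part of the driver) of the loop above, using
 * the "allowed" example from the comment and assuming a 64-bit
 * uintptr_t. Validating E's fixup in A (fixup_offset = 32) with
 * last_obj == D:
 *
 *	D != A: last_min_offset = D.parent_offset + 8 = 8  (inside C)
 *	C != A: last_min_offset = C.parent_offset + 8 = 24 (inside A)
 *	A == A: loop ends
 *	return 32 >= 24;	// true, the fixup is in order
 */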
2175 * struct binder_task_work_cb - for deferred close
2177 * @twork: callback_head for task work
2178 * @file: file to close
2180 * Structure to pass task work to be handled after
2181 * returning from binder_ioctl() via task_work_add().
2183 struct binder_task_work_cb {
2184 struct callback_head twork;
2185 struct file *file;
2189 * binder_do_fd_close() - close list of file descriptors
2190 * @twork: callback head for task work
2192 * It is not safe to call ksys_close() during the binder_ioctl()
2193 * function if there is a chance that binder's own file descriptor
2194 * might be closed. This is to meet the requirements for using
2195 * fdget() (see comments for __fget_light()). Therefore use
2196 * task_work_add() to schedule the close operation once we have
2197 * returned from binder_ioctl(). This function is a callback
2198 * for that mechanism and does the actual ksys_close() on the
2199 * given file descriptor.
2201 static void binder_do_fd_close(struct callback_head *twork)
2203 struct binder_task_work_cb *twcb = container_of(twork,
2204 struct binder_task_work_cb, twork);
2211 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2212 * @fd: file-descriptor to close
2214 * See comments in binder_do_fd_close(). This function is used to schedule
2215 * a file-descriptor to be closed after returning from binder_ioctl().
2217 static void binder_deferred_fd_close(int fd)
2219 struct binder_task_work_cb *twcb;
2221 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2224 init_task_work(&twcb->twork, binder_do_fd_close);
2225 __close_fd_get_file(fd, &twcb->file);
2227 task_work_add(current, &twcb->twork, true);
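/*
 * Illustrative timeline (not part of the driver) for one deferred close
 * scheduled by the function above:
 *
 *	binder_ioctl()
 *	  binder_deferred_fd_close(fd)
 *	    __close_fd_get_file(fd, &twcb->file)  // fd table entry gone now
 *	    task_work_add(current, ...)           // release deferred
 *	  ...remaining ioctl work; fdget() users stay safe...
 *	return to user space
 *	  binder_do_fd_close()                    // task work runs here
 *	    fput(twcb->file), kfree(twcb)
 */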
2232 static void binder_transaction_buffer_release(struct binder_proc *proc,
2233 struct binder_buffer *buffer,
2234 binder_size_t *failed_at)
2236 binder_size_t *offp, *off_start, *off_end;
2237 int debug_id = buffer->debug_id;
2239 binder_debug(BINDER_DEBUG_TRANSACTION,
2240 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2241 proc->pid, buffer->debug_id,
2242 buffer->data_size, buffer->offsets_size, failed_at);
2244 if (buffer->target_node)
2245 binder_dec_node(buffer->target_node, 1, 0);
2247 off_start = (binder_size_t *)(buffer->data +
2248 ALIGN(buffer->data_size, sizeof(void *)));
2250 off_end = failed_at;
2252 off_end = (void *)off_start + buffer->offsets_size;
2253 for (offp = off_start; offp < off_end; offp++) {
2254 struct binder_object_header *hdr;
2255 size_t object_size = binder_validate_object(buffer, *offp);
2257 if (object_size == 0) {
2258 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2259 debug_id, (u64)*offp, buffer->data_size);
2262 hdr = (struct binder_object_header *)(buffer->data + *offp);
2263 switch (hdr->type) {
2264 case BINDER_TYPE_BINDER:
2265 case BINDER_TYPE_WEAK_BINDER: {
2266 struct flat_binder_object *fp;
2267 struct binder_node *node;
2269 fp = to_flat_binder_object(hdr);
2270 node = binder_get_node(proc, fp->binder);
2272 pr_err("transaction release %d bad node %016llx\n",
2273 debug_id, (u64)fp->binder);
2276 binder_debug(BINDER_DEBUG_TRANSACTION,
2277 " node %d u%016llx\n",
2278 node->debug_id, (u64)node->ptr);
2279 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2281 binder_put_node(node);
2283 case BINDER_TYPE_HANDLE:
2284 case BINDER_TYPE_WEAK_HANDLE: {
2285 struct flat_binder_object *fp;
2286 struct binder_ref_data rdata;
2289 fp = to_flat_binder_object(hdr);
2290 ret = binder_dec_ref_for_handle(proc, fp->handle,
2291 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2294 pr_err("transaction release %d bad handle %d, ret = %d\n",
2295 debug_id, fp->handle, ret);
2298 binder_debug(BINDER_DEBUG_TRANSACTION,
2299 " ref %d desc %d\n",
2300 rdata.debug_id, rdata.desc);
2303 case BINDER_TYPE_FD: {
2305 * No need to close the file here since user-space
2306 * closes it for successfully delivered
2307 * transactions. For transactions that weren't
2308 * delivered, the new fd was never allocated so
2309 * there is no need to close and the fput on the
2310 * file is done when the transaction is torn down.
2313 WARN_ON(failed_at &&
2314 proc->tsk == current->group_leader);
2316 case BINDER_TYPE_PTR:
2318 * Nothing to do here, this will get cleaned up when the
2319 * transaction buffer gets freed
2322 case BINDER_TYPE_FDA: {
2323 struct binder_fd_array_object *fda;
2324 struct binder_buffer_object *parent;
2325 uintptr_t parent_buffer;
2328 binder_size_t fd_buf_size;
2330 if (proc->tsk != current->group_leader) {
2332 * Nothing to do if running in sender context:
2333 * the fd fixups have not been applied, so no
2334 * fds need to be closed.
2339 fda = to_binder_fd_array_object(hdr);
2340 parent = binder_validate_ptr(buffer, fda->parent,
2344 pr_err("transaction release %d bad parent offset\n",
2349 * Since the parent was already fixed up, convert it
2350 * back to kernel address space to access it
2352 parent_buffer = parent->buffer -
2353 binder_alloc_get_user_buffer_offset(
2356 fd_buf_size = sizeof(u32) * fda->num_fds;
2357 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2358 pr_err("transaction release %d invalid number of fds (%lld)\n",
2359 debug_id, (u64)fda->num_fds);
2362 if (fd_buf_size > parent->length ||
2363 fda->parent_offset > parent->length - fd_buf_size) {
2364 /* No space for all file descriptors here. */
2365 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2366 debug_id, (u64)fda->num_fds);
2369 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2370 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2371 binder_deferred_fd_close(fd_array[fd_index]);
2374 pr_err("transaction release %d bad object type %x\n",
2375 debug_id, hdr->type);
2381 static int binder_translate_binder(struct flat_binder_object *fp,
2382 struct binder_transaction *t,
2383 struct binder_thread *thread)
2385 struct binder_node *node;
2386 struct binder_proc *proc = thread->proc;
2387 struct binder_proc *target_proc = t->to_proc;
2388 struct binder_ref_data rdata;
2391 node = binder_get_node(proc, fp->binder);
2393 node = binder_new_node(proc, fp);
2397 if (fp->cookie != node->cookie) {
2398 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2399 proc->pid, thread->pid, (u64)fp->binder,
2400 node->debug_id, (u64)fp->cookie,
2405 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2410 ret = binder_inc_ref_for_node(target_proc, node,
2411 fp->hdr.type == BINDER_TYPE_BINDER,
2412 &thread->todo, &rdata);
2416 if (fp->hdr.type == BINDER_TYPE_BINDER)
2417 fp->hdr.type = BINDER_TYPE_HANDLE;
2419 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2421 fp->handle = rdata.desc;
2424 trace_binder_transaction_node_to_ref(t, node, &rdata);
2425 binder_debug(BINDER_DEBUG_TRANSACTION,
2426 " node %d u%016llx -> ref %d desc %d\n",
2427 node->debug_id, (u64)node->ptr,
2428 rdata.debug_id, rdata.desc);
2430 binder_put_node(node);
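/*
 * Illustrative sketch (not part of the driver) of the rewrite performed
 * above, as seen from user space. The sender embeds its local object:
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder   = (binder_uintptr_t)local_obj,	// hypothetical
 *		.cookie   = (binder_uintptr_t)local_cookie,	// hypothetical
 *	};
 *
 * The target reads back the same bytes with the object replaced by a
 * reference that is only meaningful in the target process:
 *
 *	obj.hdr.type == BINDER_TYPE_HANDLE
 *	obj.handle   == rdata.desc
 */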
2434 static int binder_translate_handle(struct flat_binder_object *fp,
2435 struct binder_transaction *t,
2436 struct binder_thread *thread)
2438 struct binder_proc *proc = thread->proc;
2439 struct binder_proc *target_proc = t->to_proc;
2440 struct binder_node *node;
2441 struct binder_ref_data src_rdata;
2444 node = binder_get_node_from_ref(proc, fp->handle,
2445 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2447 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2448 proc->pid, thread->pid, fp->handle);
2451 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2456 binder_node_lock(node);
2457 if (node->proc == target_proc) {
2458 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2459 fp->hdr.type = BINDER_TYPE_BINDER;
2461 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2462 fp->binder = node->ptr;
2463 fp->cookie = node->cookie;
2465 binder_inner_proc_lock(node->proc);
2467 __acquire(&node->proc->inner_lock);
2468 binder_inc_node_nilocked(node,
2469 fp->hdr.type == BINDER_TYPE_BINDER,
2472 binder_inner_proc_unlock(node->proc);
2474 __release(&node->proc->inner_lock);
2475 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2476 binder_debug(BINDER_DEBUG_TRANSACTION,
2477 " ref %d desc %d -> node %d u%016llx\n",
2478 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2480 binder_node_unlock(node);
2482 struct binder_ref_data dest_rdata;
2484 binder_node_unlock(node);
2485 ret = binder_inc_ref_for_node(target_proc, node,
2486 fp->hdr.type == BINDER_TYPE_HANDLE,
2492 fp->handle = dest_rdata.desc;
2494 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2496 binder_debug(BINDER_DEBUG_TRANSACTION,
2497 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2498 src_rdata.debug_id, src_rdata.desc,
2499 dest_rdata.debug_id, dest_rdata.desc,
2503 binder_put_node(node);
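/*
 * Illustrative summary (not part of the driver) of the two branches
 * above. If P1 sends P2 a handle whose node is owned by P2, the target
 * gets its original object back:
 *
 *	P1 -> P2: { .hdr.type = BINDER_TYPE_HANDLE, .handle = h }
 *	P2 sees:  { .hdr.type = BINDER_TYPE_BINDER,
 *	            .binder = node->ptr, .cookie = node->cookie }
 *
 * Otherwise the target gets (or reuses) its own descriptor for the
 * node, which need not equal the sender's:
 *
 *	P1 -> P3: { .hdr.type = BINDER_TYPE_HANDLE, .handle = h }
 *	P3 sees:  { .hdr.type = BINDER_TYPE_HANDLE, .handle = h' }
 */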
2507 static int binder_translate_fd(u32 *fdp,
2508 struct binder_transaction *t,
2509 struct binder_thread *thread,
2510 struct binder_transaction *in_reply_to)
2512 struct binder_proc *proc = thread->proc;
2513 struct binder_proc *target_proc = t->to_proc;
2514 struct binder_txn_fd_fixup *fixup;
2517 bool target_allows_fd;
2521 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2523 target_allows_fd = t->buffer->target_node->accept_fds;
2524 if (!target_allows_fd) {
2525 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2526 proc->pid, thread->pid,
2527 in_reply_to ? "reply" : "transaction",
2530 goto err_fd_not_accepted;
2535 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2536 proc->pid, thread->pid, fd);
2540 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2547 * Add fixup record for this transaction. The allocation
2548 * of the fd in the target needs to be done from a target thread.
2551 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2557 fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
2558 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2559 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2567 err_fd_not_accepted:
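/*
 * Illustrative sketch (not part of the driver): sending an fd through a
 * transaction. The sender embeds a binder_fd_object; the fd is resolved
 * to a struct file here, in the sender's context, but the target-side
 * fd number is only allocated later, by binder_apply_fd_fixups() in the
 * target's context:
 *
 *	struct binder_fd_object fdo = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd       = pipefd[0],	// hypothetical sender-side fd
 *	};
 *
 * On delivery, the target reads the same object with .fd replaced by a
 * descriptor valid in the target process.
 */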
2571 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2572 struct binder_buffer_object *parent,
2573 struct binder_transaction *t,
2574 struct binder_thread *thread,
2575 struct binder_transaction *in_reply_to)
2577 binder_size_t fdi, fd_buf_size;
2578 uintptr_t parent_buffer;
2580 struct binder_proc *proc = thread->proc;
2581 struct binder_proc *target_proc = t->to_proc;
2583 fd_buf_size = sizeof(u32) * fda->num_fds;
2584 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2585 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2586 proc->pid, thread->pid, (u64)fda->num_fds);
2589 if (fd_buf_size > parent->length ||
2590 fda->parent_offset > parent->length - fd_buf_size) {
2591 /* No space for all file descriptors here. */
2592 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2593 proc->pid, thread->pid, (u64)fda->num_fds);
2597 * Since the parent was already fixed up, convert it
2598 * back to the kernel address space to access it
2600 parent_buffer = parent->buffer -
2601 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2602 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2603 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2604 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2605 proc->pid, thread->pid);
2608 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2609 int ret = binder_translate_fd(&fd_array[fdi], t, thread,
2617 static int binder_fixup_parent(struct binder_transaction *t,
2618 struct binder_thread *thread,
2619 struct binder_buffer_object *bp,
2620 binder_size_t *off_start,
2621 binder_size_t num_valid,
2622 struct binder_buffer_object *last_fixup_obj,
2623 binder_size_t last_fixup_min_off)
2625 struct binder_buffer_object *parent;
2627 struct binder_buffer *b = t->buffer;
2628 struct binder_proc *proc = thread->proc;
2629 struct binder_proc *target_proc = t->to_proc;
2631 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2634 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2636 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2637 proc->pid, thread->pid);
2641 if (!binder_validate_fixup(b, off_start,
2642 parent, bp->parent_offset,
2644 last_fixup_min_off)) {
2645 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2646 proc->pid, thread->pid);
2650 if (parent->length < sizeof(binder_uintptr_t) ||
2651 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2652 /* No space for a pointer here! */
2653 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2654 proc->pid, thread->pid);
2657 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2658 binder_alloc_get_user_buffer_offset(
2659 &target_proc->alloc));
2660 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
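/*
 * Illustrative sketch (not part of the driver) of the store above. If
 * @bp is a buffer whose address is embedded at @bp->parent_offset
 * inside @parent (say, a struct containing a pointer member), then
 * after the fixup:
 *
 *	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) == bp->buffer
 *
 * where bp->buffer was already rebased to the target's address space by
 * the BINDER_TYPE_PTR case in binder_transaction(), so the embedded
 * pointer is valid in the target's mapping.
 */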
2666 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2667 * @t: transaction to send
2668 * @proc: process to send the transaction to
2669 * @thread: thread in @proc to send the transaction to (may be NULL)
2671 * This function queues a transaction to the specified process. It will try
2672 * to find a thread in the target process to handle the transaction and
2673 * wake it up. If no thread is found, the work is queued to the proc waitqueue.
2676 * If the @thread parameter is not NULL, the transaction is always queued
2677 * to the waitlist of that specific thread.
2679 * Return: true if the transaction was successfully queued
2680 * false if the target process or thread is dead
2682 static bool binder_proc_transaction(struct binder_transaction *t,
2683 struct binder_proc *proc,
2684 struct binder_thread *thread)
2686 struct binder_node *node = t->buffer->target_node;
2687 bool oneway = !!(t->flags & TF_ONE_WAY);
2688 bool pending_async = false;
2691 binder_node_lock(node);
2694 if (node->has_async_transaction) {
2695 pending_async = true;
2697 node->has_async_transaction = true;
2701 binder_inner_proc_lock(proc);
2703 if (proc->is_dead || (thread && thread->is_dead)) {
2704 binder_inner_proc_unlock(proc);
2705 binder_node_unlock(node);
2709 if (!thread && !pending_async)
2710 thread = binder_select_thread_ilocked(proc);
2713 binder_enqueue_thread_work_ilocked(thread, &t->work);
2714 else if (!pending_async)
2715 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2717 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2720 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2722 binder_inner_proc_unlock(proc);
2723 binder_node_unlock(node);
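/*
 * Illustrative summary (not part of the driver) of the queueing choice
 * made above:
 *
 *	@thread given (reply path)    -> thread->todo; wake that thread
 *	no @thread, !pending_async    -> a waiting thread's todo if one can
 *	                                 be selected, else proc->todo; wake
 *	                                 one thread
 *	no @thread, pending_async     -> node->async_todo; no wakeup until
 *	                                 the previous async buffer is freed
 */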
2729 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2730 * @node: struct binder_node for which to get refs
2731 * @procp: returns @node->proc if valid
2732 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2734 * User-space normally keeps the node alive when creating a transaction
2735 * since it has a reference to the target. The local strong ref keeps it
2736 * alive if the sending process dies before the target process processes
2737 * the transaction. If the source process is malicious or has a reference
2738 * counting bug, relying on the local strong ref can fail.
2740 * Since user-space can cause the local strong ref to go away, we also take
2741 * a tmpref on the node to ensure it survives while we are constructing
2742 * the transaction. We also need a tmpref on the proc while we are
2743 * constructing the transaction, so we take that here as well.
2745 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2746 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2747 * target proc has died, @error is set to BR_DEAD_REPLY.
2749 static struct binder_node *binder_get_node_refs_for_txn(
2750 struct binder_node *node,
2751 struct binder_proc **procp,
2754 struct binder_node *target_node = NULL;
2756 binder_node_inner_lock(node);
2759 binder_inc_node_nilocked(node, 1, 0, NULL);
2760 binder_inc_node_tmpref_ilocked(node);
2761 node->proc->tmp_ref++;
2762 *procp = node->proc;
2764 *error = BR_DEAD_REPLY;
2765 binder_node_inner_unlock(node);
2770 static void binder_transaction(struct binder_proc *proc,
2771 struct binder_thread *thread,
2772 struct binder_transaction_data *tr, int reply,
2773 binder_size_t extra_buffers_size)
2776 struct binder_transaction *t;
2777 struct binder_work *w;
2778 struct binder_work *tcomplete;
2779 binder_size_t *offp, *off_end, *off_start;
2780 binder_size_t off_min;
2781 u8 *sg_bufp, *sg_buf_end;
2782 struct binder_proc *target_proc = NULL;
2783 struct binder_thread *target_thread = NULL;
2784 struct binder_node *target_node = NULL;
2785 struct binder_transaction *in_reply_to = NULL;
2786 struct binder_transaction_log_entry *e;
2787 uint32_t return_error = 0;
2788 uint32_t return_error_param = 0;
2789 uint32_t return_error_line = 0;
2790 struct binder_buffer_object *last_fixup_obj = NULL;
2791 binder_size_t last_fixup_min_off = 0;
2792 struct binder_context *context = proc->context;
2793 int t_debug_id = atomic_inc_return(&binder_last_id);
2795 e = binder_transaction_log_add(&binder_transaction_log);
2796 e->debug_id = t_debug_id;
2797 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2798 e->from_proc = proc->pid;
2799 e->from_thread = thread->pid;
2800 e->target_handle = tr->target.handle;
2801 e->data_size = tr->data_size;
2802 e->offsets_size = tr->offsets_size;
2803 e->context_name = proc->context->name;
2806 binder_inner_proc_lock(proc);
2807 in_reply_to = thread->transaction_stack;
2808 if (in_reply_to == NULL) {
2809 binder_inner_proc_unlock(proc);
2810 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2811 proc->pid, thread->pid);
2812 return_error = BR_FAILED_REPLY;
2813 return_error_param = -EPROTO;
2814 return_error_line = __LINE__;
2815 goto err_empty_call_stack;
2817 if (in_reply_to->to_thread != thread) {
2818 spin_lock(&in_reply_to->lock);
2819 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2820 proc->pid, thread->pid, in_reply_to->debug_id,
2821 in_reply_to->to_proc ?
2822 in_reply_to->to_proc->pid : 0,
2823 in_reply_to->to_thread ?
2824 in_reply_to->to_thread->pid : 0);
2825 spin_unlock(&in_reply_to->lock);
2826 binder_inner_proc_unlock(proc);
2827 return_error = BR_FAILED_REPLY;
2828 return_error_param = -EPROTO;
2829 return_error_line = __LINE__;
2831 goto err_bad_call_stack;
2833 thread->transaction_stack = in_reply_to->to_parent;
2834 binder_inner_proc_unlock(proc);
2835 binder_set_nice(in_reply_to->saved_priority);
2836 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2837 if (target_thread == NULL) {
2838 /* annotation for sparse */
2839 __release(&target_thread->proc->inner_lock);
2840 return_error = BR_DEAD_REPLY;
2841 return_error_line = __LINE__;
2842 goto err_dead_binder;
2844 if (target_thread->transaction_stack != in_reply_to) {
2845 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2846 proc->pid, thread->pid,
2847 target_thread->transaction_stack ?
2848 target_thread->transaction_stack->debug_id : 0,
2849 in_reply_to->debug_id);
2850 binder_inner_proc_unlock(target_thread->proc);
2851 return_error = BR_FAILED_REPLY;
2852 return_error_param = -EPROTO;
2853 return_error_line = __LINE__;
2855 target_thread = NULL;
2856 goto err_dead_binder;
2858 target_proc = target_thread->proc;
2859 target_proc->tmp_ref++;
2860 binder_inner_proc_unlock(target_thread->proc);
2862 if (tr->target.handle) {
2863 struct binder_ref *ref;
2866 * There must already be a strong ref
2867 * on this node. If so, do a strong
2868 * increment on the node to ensure it
2869 * stays alive until the transaction is complete.
2872 binder_proc_lock(proc);
2873 ref = binder_get_ref_olocked(proc, tr->target.handle,
2876 target_node = binder_get_node_refs_for_txn(
2877 ref->node, &target_proc,
2880 binder_user_error("%d:%d got transaction to invalid handle\n",
2881 proc->pid, thread->pid);
2882 return_error = BR_FAILED_REPLY;
2884 binder_proc_unlock(proc);
2886 mutex_lock(&context->context_mgr_node_lock);
2887 target_node = context->binder_context_mgr_node;
2889 target_node = binder_get_node_refs_for_txn(
2890 target_node, &target_proc,
2893 return_error = BR_DEAD_REPLY;
2894 mutex_unlock(&context->context_mgr_node_lock);
2895 if (target_node && target_proc == proc) {
2896 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2897 proc->pid, thread->pid);
2898 return_error = BR_FAILED_REPLY;
2899 return_error_param = -EINVAL;
2900 return_error_line = __LINE__;
2901 goto err_invalid_target_handle;
2906 * return_error is set above
2908 return_error_param = -EINVAL;
2909 return_error_line = __LINE__;
2910 goto err_dead_binder;
2912 e->to_node = target_node->debug_id;
2913 if (security_binder_transaction(proc->tsk,
2914 target_proc->tsk) < 0) {
2915 return_error = BR_FAILED_REPLY;
2916 return_error_param = -EPERM;
2917 return_error_line = __LINE__;
2918 goto err_invalid_target_handle;
2920 binder_inner_proc_lock(proc);
2922 w = list_first_entry_or_null(&thread->todo,
2923 struct binder_work, entry);
2924 if (!(tr->flags & TF_ONE_WAY) && w &&
2925 w->type == BINDER_WORK_TRANSACTION) {
2927 * Do not allow new outgoing transaction from a
2928 * thread that has a transaction at the head of
2929 * its todo list. Only need to check the head
2930 * because binder_select_thread_ilocked picks a
2931 * thread from proc->waiting_threads to enqueue
2932 * the transaction, and nothing is queued to the
2933 * todo list while the thread is on waiting_threads.
2935 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2936 proc->pid, thread->pid);
2937 binder_inner_proc_unlock(proc);
2938 return_error = BR_FAILED_REPLY;
2939 return_error_param = -EPROTO;
2940 return_error_line = __LINE__;
2941 goto err_bad_todo_list;
2944 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2945 struct binder_transaction *tmp;
2947 tmp = thread->transaction_stack;
2948 if (tmp->to_thread != thread) {
2949 spin_lock(&tmp->lock);
2950 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2951 proc->pid, thread->pid, tmp->debug_id,
2952 tmp->to_proc ? tmp->to_proc->pid : 0,
2954 tmp->to_thread->pid : 0);
2955 spin_unlock(&tmp->lock);
2956 binder_inner_proc_unlock(proc);
2957 return_error = BR_FAILED_REPLY;
2958 return_error_param = -EPROTO;
2959 return_error_line = __LINE__;
2960 goto err_bad_call_stack;
2963 struct binder_thread *from;
2965 spin_lock(&tmp->lock);
2967 if (from && from->proc == target_proc) {
2968 atomic_inc(&from->tmp_ref);
2969 target_thread = from;
2970 spin_unlock(&tmp->lock);
2973 spin_unlock(&tmp->lock);
2974 tmp = tmp->from_parent;
2977 binder_inner_proc_unlock(proc);
2980 e->to_thread = target_thread->pid;
2981 e->to_proc = target_proc->pid;
2983 /* TODO: reuse incoming transaction for reply */
2984 t = kzalloc(sizeof(*t), GFP_KERNEL);
2986 return_error = BR_FAILED_REPLY;
2987 return_error_param = -ENOMEM;
2988 return_error_line = __LINE__;
2989 goto err_alloc_t_failed;
2991 INIT_LIST_HEAD(&t->fd_fixups);
2992 binder_stats_created(BINDER_STAT_TRANSACTION);
2993 spin_lock_init(&t->lock);
2995 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2996 if (tcomplete == NULL) {
2997 return_error = BR_FAILED_REPLY;
2998 return_error_param = -ENOMEM;
2999 return_error_line = __LINE__;
3000 goto err_alloc_tcomplete_failed;
3002 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3004 t->debug_id = t_debug_id;
3007 binder_debug(BINDER_DEBUG_TRANSACTION,
3008 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3009 proc->pid, thread->pid, t->debug_id,
3010 target_proc->pid, target_thread->pid,
3011 (u64)tr->data.ptr.buffer,
3012 (u64)tr->data.ptr.offsets,
3013 (u64)tr->data_size, (u64)tr->offsets_size,
3014 (u64)extra_buffers_size);
3016 binder_debug(BINDER_DEBUG_TRANSACTION,
3017 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3018 proc->pid, thread->pid, t->debug_id,
3019 target_proc->pid, target_node->debug_id,
3020 (u64)tr->data.ptr.buffer,
3021 (u64)tr->data.ptr.offsets,
3022 (u64)tr->data_size, (u64)tr->offsets_size,
3023 (u64)extra_buffers_size);
3025 if (!reply && !(tr->flags & TF_ONE_WAY))
3029 t->sender_euid = task_euid(proc->tsk);
3030 t->to_proc = target_proc;
3031 t->to_thread = target_thread;
3033 t->flags = tr->flags;
3034 t->priority = task_nice(current);
3036 trace_binder_transaction(reply, t, target_node);
3038 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3039 tr->offsets_size, extra_buffers_size,
3040 !reply && (t->flags & TF_ONE_WAY));
3041 if (IS_ERR(t->buffer)) {
3043 * -ESRCH indicates VMA cleared. The target is dying.
3045 return_error_param = PTR_ERR(t->buffer);
3046 return_error = return_error_param == -ESRCH ?
3047 BR_DEAD_REPLY : BR_FAILED_REPLY;
3048 return_error_line = __LINE__;
3050 goto err_binder_alloc_buf_failed;
3052 t->buffer->debug_id = t->debug_id;
3053 t->buffer->transaction = t;
3054 t->buffer->target_node = target_node;
3055 trace_binder_transaction_alloc_buf(t->buffer);
3056 off_start = (binder_size_t *)(t->buffer->data +
3057 ALIGN(tr->data_size, sizeof(void *)));
3060 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3061 tr->data.ptr.buffer, tr->data_size)) {
3062 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3063 proc->pid, thread->pid);
3064 return_error = BR_FAILED_REPLY;
3065 return_error_param = -EFAULT;
3066 return_error_line = __LINE__;
3067 goto err_copy_data_failed;
3069 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3070 tr->data.ptr.offsets, tr->offsets_size)) {
3071 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3072 proc->pid, thread->pid);
3073 return_error = BR_FAILED_REPLY;
3074 return_error_param = -EFAULT;
3075 return_error_line = __LINE__;
3076 goto err_copy_data_failed;
3078 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3079 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3080 proc->pid, thread->pid, (u64)tr->offsets_size);
3081 return_error = BR_FAILED_REPLY;
3082 return_error_param = -EINVAL;
3083 return_error_line = __LINE__;
3084 goto err_bad_offset;
3086 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3087 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3088 proc->pid, thread->pid,
3089 (u64)extra_buffers_size);
3090 return_error = BR_FAILED_REPLY;
3091 return_error_param = -EINVAL;
3092 return_error_line = __LINE__;
3093 goto err_bad_offset;
3095 off_end = (void *)off_start + tr->offsets_size;
3096 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3097 sg_buf_end = sg_bufp + extra_buffers_size;
3099 for (; offp < off_end; offp++) {
3100 struct binder_object_header *hdr;
3101 size_t object_size = binder_validate_object(t->buffer, *offp);
3103 if (object_size == 0 || *offp < off_min) {
3104 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3105 proc->pid, thread->pid, (u64)*offp,
3107 (u64)t->buffer->data_size);
3108 return_error = BR_FAILED_REPLY;
3109 return_error_param = -EINVAL;
3110 return_error_line = __LINE__;
3111 goto err_bad_offset;
3114 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3115 off_min = *offp + object_size;
3116 switch (hdr->type) {
3117 case BINDER_TYPE_BINDER:
3118 case BINDER_TYPE_WEAK_BINDER: {
3119 struct flat_binder_object *fp;
3121 fp = to_flat_binder_object(hdr);
3122 ret = binder_translate_binder(fp, t, thread);
3124 return_error = BR_FAILED_REPLY;
3125 return_error_param = ret;
3126 return_error_line = __LINE__;
3127 goto err_translate_failed;
3130 case BINDER_TYPE_HANDLE:
3131 case BINDER_TYPE_WEAK_HANDLE: {
3132 struct flat_binder_object *fp;
3134 fp = to_flat_binder_object(hdr);
3135 ret = binder_translate_handle(fp, t, thread);
3137 return_error = BR_FAILED_REPLY;
3138 return_error_param = ret;
3139 return_error_line = __LINE__;
3140 goto err_translate_failed;
3144 case BINDER_TYPE_FD: {
3145 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3146 int ret = binder_translate_fd(&fp->fd, t, thread,
3150 return_error = BR_FAILED_REPLY;
3151 return_error_param = ret;
3152 return_error_line = __LINE__;
3153 goto err_translate_failed;
3157 case BINDER_TYPE_FDA: {
3158 struct binder_fd_array_object *fda =
3159 to_binder_fd_array_object(hdr);
3160 struct binder_buffer_object *parent =
3161 binder_validate_ptr(t->buffer, fda->parent,
3165 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3166 proc->pid, thread->pid);
3167 return_error = BR_FAILED_REPLY;
3168 return_error_param = -EINVAL;
3169 return_error_line = __LINE__;
3170 goto err_bad_parent;
3172 if (!binder_validate_fixup(t->buffer, off_start,
3173 parent, fda->parent_offset,
3175 last_fixup_min_off)) {
3176 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3177 proc->pid, thread->pid);
3178 return_error = BR_FAILED_REPLY;
3179 return_error_param = -EINVAL;
3180 return_error_line = __LINE__;
3181 goto err_bad_parent;
3183 ret = binder_translate_fd_array(fda, parent, t, thread,
3186 return_error = BR_FAILED_REPLY;
3187 return_error_param = ret;
3188 return_error_line = __LINE__;
3189 goto err_translate_failed;
3191 last_fixup_obj = parent;
3192 last_fixup_min_off =
3193 fda->parent_offset + sizeof(u32) * fda->num_fds;
3195 case BINDER_TYPE_PTR: {
3196 struct binder_buffer_object *bp =
3197 to_binder_buffer_object(hdr);
3198 size_t buf_left = sg_buf_end - sg_bufp;
3200 if (bp->length > buf_left) {
3201 binder_user_error("%d:%d got transaction with too large buffer\n",
3202 proc->pid, thread->pid);
3203 return_error = BR_FAILED_REPLY;
3204 return_error_param = -EINVAL;
3205 return_error_line = __LINE__;
3206 goto err_bad_offset;
3208 if (copy_from_user(sg_bufp,
3209 (const void __user *)(uintptr_t)
3210 bp->buffer, bp->length)) {
3211 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3212 proc->pid, thread->pid);
3213 return_error_param = -EFAULT;
3214 return_error = BR_FAILED_REPLY;
3215 return_error_line = __LINE__;
3216 goto err_copy_data_failed;
3218 /* Fixup buffer pointer to target proc address space */
3219 bp->buffer = (uintptr_t)sg_bufp +
3220 binder_alloc_get_user_buffer_offset(
3221 &target_proc->alloc);
3222 sg_bufp += ALIGN(bp->length, sizeof(u64));
3224 ret = binder_fixup_parent(t, thread, bp, off_start,
3227 last_fixup_min_off);
3229 return_error = BR_FAILED_REPLY;
3230 return_error_param = ret;
3231 return_error_line = __LINE__;
3232 goto err_translate_failed;
3234 last_fixup_obj = bp;
3235 last_fixup_min_off = 0;
3238 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3239 proc->pid, thread->pid, hdr->type);
3240 return_error = BR_FAILED_REPLY;
3241 return_error_param = -EINVAL;
3242 return_error_line = __LINE__;
3243 goto err_bad_object_type;
3246 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3247 t->work.type = BINDER_WORK_TRANSACTION;
3250 binder_enqueue_thread_work(thread, tcomplete);
3251 binder_inner_proc_lock(target_proc);
3252 if (target_thread->is_dead) {
3253 binder_inner_proc_unlock(target_proc);
3254 goto err_dead_proc_or_thread;
3256 BUG_ON(t->buffer->async_transaction != 0);
3257 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3258 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3259 binder_inner_proc_unlock(target_proc);
3260 wake_up_interruptible_sync(&target_thread->wait);
3261 binder_free_transaction(in_reply_to);
3262 } else if (!(t->flags & TF_ONE_WAY)) {
3263 BUG_ON(t->buffer->async_transaction != 0);
3264 binder_inner_proc_lock(proc);
3266 * Defer the TRANSACTION_COMPLETE, so we don't return to
3267 * userspace immediately; this allows the target process to
3268 * immediately start processing this transaction, reducing
3269 * latency. We will then return the TRANSACTION_COMPLETE when
3270 * the target replies (or there is an error).
3272 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3274 t->from_parent = thread->transaction_stack;
3275 thread->transaction_stack = t;
3276 binder_inner_proc_unlock(proc);
3277 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3278 binder_inner_proc_lock(proc);
3279 binder_pop_transaction_ilocked(thread, t);
3280 binder_inner_proc_unlock(proc);
3281 goto err_dead_proc_or_thread;
3284 BUG_ON(target_node == NULL);
3285 BUG_ON(t->buffer->async_transaction != 1);
3286 binder_enqueue_thread_work(thread, tcomplete);
3287 if (!binder_proc_transaction(t, target_proc, NULL))
3288 goto err_dead_proc_or_thread;
3291 binder_thread_dec_tmpref(target_thread);
3292 binder_proc_dec_tmpref(target_proc);
3294 binder_dec_node_tmpref(target_node);
3296 * write barrier to synchronize with initialization of the log entry
3300 WRITE_ONCE(e->debug_id_done, t_debug_id);
3303 err_dead_proc_or_thread:
3304 return_error = BR_DEAD_REPLY;
3305 return_error_line = __LINE__;
3306 binder_dequeue_work(proc, tcomplete);
3307 err_translate_failed:
3308 err_bad_object_type:
3311 err_copy_data_failed:
3312 binder_free_txn_fixups(t);
3313 trace_binder_transaction_failed_buffer_release(t->buffer);
3314 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3316 binder_dec_node_tmpref(target_node);
3318 t->buffer->transaction = NULL;
3319 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3320 err_binder_alloc_buf_failed:
3322 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3323 err_alloc_tcomplete_failed:
3325 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3329 err_empty_call_stack:
3331 err_invalid_target_handle:
3333 binder_thread_dec_tmpref(target_thread);
3335 binder_proc_dec_tmpref(target_proc);
3337 binder_dec_node(target_node, 1, 0);
3338 binder_dec_node_tmpref(target_node);
3341 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3342 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3343 proc->pid, thread->pid, return_error, return_error_param,
3344 (u64)tr->data_size, (u64)tr->offsets_size,
3348 struct binder_transaction_log_entry *fe;
3350 e->return_error = return_error;
3351 e->return_error_param = return_error_param;
3352 e->return_error_line = return_error_line;
3353 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3356 * write barrier to synchronize with initialization of the log entry
3360 WRITE_ONCE(e->debug_id_done, t_debug_id);
3361 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3364 BUG_ON(thread->return_error.cmd != BR_OK);
3366 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3367 binder_enqueue_thread_work(thread, &thread->return_error.work);
3368 binder_send_failed_reply(in_reply_to, return_error);
3370 thread->return_error.cmd = return_error;
3371 binder_enqueue_thread_work(thread, &thread->return_error.work);
3376 * binder_free_buf() - free the specified buffer
3377 * @proc: binder proc that owns buffer
3378 * @buffer: buffer to be freed
3380 * If the buffer is for an async transaction, enqueue the next async
3381 * transaction from the node.
3383 * Cleanup buffer and free it.
3386 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3388 if (buffer->transaction) {
3389 buffer->transaction->buffer = NULL;
3390 buffer->transaction = NULL;
3392 if (buffer->async_transaction && buffer->target_node) {
3393 struct binder_node *buf_node;
3394 struct binder_work *w;
3396 buf_node = buffer->target_node;
3397 binder_node_inner_lock(buf_node);
3398 BUG_ON(!buf_node->has_async_transaction);
3399 BUG_ON(buf_node->proc != proc);
3400 w = binder_dequeue_work_head_ilocked(
3401 &buf_node->async_todo);
3403 buf_node->has_async_transaction = false;
3405 binder_enqueue_work_ilocked(
3407 binder_wakeup_proc_ilocked(proc);
3409 binder_node_inner_unlock(buf_node);
3411 trace_binder_transaction_buffer_release(buffer);
3412 binder_transaction_buffer_release(proc, buffer, NULL);
3413 binder_alloc_free_buf(&proc->alloc, buffer);
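/*
 * Illustrative sketch (not part of the driver): the user-space side of
 * the BC_FREE_BUFFER command that ultimately reaches binder_free_buf().
 * The buffer pointer is the tr.data.ptr.buffer value the kernel handed
 * out with BR_TRANSACTION or BR_REPLY; binder_fd is hypothetical:
 *
 *	struct {
 *		uint32_t cmd;
 *		binder_uintptr_t buffer;
 *	} __attribute__((packed)) wr = {
 *		BC_FREE_BUFFER, tr.data.ptr.buffer
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size   = sizeof(wr),
 *		.write_buffer = (binder_uintptr_t)&wr,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */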
3416 static int binder_thread_write(struct binder_proc *proc,
3417 struct binder_thread *thread,
3418 binder_uintptr_t binder_buffer, size_t size,
3419 binder_size_t *consumed)
3422 struct binder_context *context = proc->context;
3423 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3424 void __user *ptr = buffer + *consumed;
3425 void __user *end = buffer + size;
3427 while (ptr < end && thread->return_error.cmd == BR_OK) {
3430 if (get_user(cmd, (uint32_t __user *)ptr))
3432 ptr += sizeof(uint32_t);
3433 trace_binder_command(cmd);
3434 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3435 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3436 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3437 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3445 const char *debug_string;
3446 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3447 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3448 struct binder_ref_data rdata;
3450 if (get_user(target, (uint32_t __user *)ptr))
3453 ptr += sizeof(uint32_t);
3455 if (increment && !target) {
3456 struct binder_node *ctx_mgr_node;
3457 mutex_lock(&context->context_mgr_node_lock);
3458 ctx_mgr_node = context->binder_context_mgr_node;
3460 ret = binder_inc_ref_for_node(
3462 strong, NULL, &rdata);
3463 mutex_unlock(&context->context_mgr_node_lock);
3466 ret = binder_update_ref_for_handle(
3467 proc, target, increment, strong,
3469 if (!ret && rdata.desc != target) {
3470 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3471 proc->pid, thread->pid,
3472 target, rdata.desc);
3476 debug_string = "IncRefs";
3479 debug_string = "Acquire";
3482 debug_string = "Release";
3486 debug_string = "DecRefs";
3490 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3491 proc->pid, thread->pid, debug_string,
3492 strong, target, ret);
3495 binder_debug(BINDER_DEBUG_USER_REFS,
3496 "%d:%d %s ref %d desc %d s %d w %d\n",
3497 proc->pid, thread->pid, debug_string,
3498 rdata.debug_id, rdata.desc, rdata.strong,
3502 case BC_INCREFS_DONE:
3503 case BC_ACQUIRE_DONE: {
3504 binder_uintptr_t node_ptr;
3505 binder_uintptr_t cookie;
3506 struct binder_node *node;
3509 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3511 ptr += sizeof(binder_uintptr_t);
3512 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3514 ptr += sizeof(binder_uintptr_t);
3515 node = binder_get_node(proc, node_ptr);
3517 binder_user_error("%d:%d %s u%016llx no match\n",
3518 proc->pid, thread->pid,
3519 cmd == BC_INCREFS_DONE ?
3525 if (cookie != node->cookie) {
3526 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3527 proc->pid, thread->pid,
3528 cmd == BC_INCREFS_DONE ?
3529 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3530 (u64)node_ptr, node->debug_id,
3531 (u64)cookie, (u64)node->cookie);
3532 binder_put_node(node);
3535 binder_node_inner_lock(node);
3536 if (cmd == BC_ACQUIRE_DONE) {
3537 if (node->pending_strong_ref == 0) {
3538 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3539 proc->pid, thread->pid,
3541 binder_node_inner_unlock(node);
3542 binder_put_node(node);
3545 node->pending_strong_ref = 0;
3547 if (node->pending_weak_ref == 0) {
3548 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3549 proc->pid, thread->pid,
3551 binder_node_inner_unlock(node);
3552 binder_put_node(node);
3555 node->pending_weak_ref = 0;
3557 free_node = binder_dec_node_nilocked(node,
3558 cmd == BC_ACQUIRE_DONE, 0);
3560 binder_debug(BINDER_DEBUG_USER_REFS,
3561 "%d:%d %s node %d ls %d lw %d tr %d\n",
3562 proc->pid, thread->pid,
3563 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3564 node->debug_id, node->local_strong_refs,
3565 node->local_weak_refs, node->tmp_refs);
3566 binder_node_inner_unlock(node);
3567 binder_put_node(node);
3570 case BC_ATTEMPT_ACQUIRE:
3571 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3573 case BC_ACQUIRE_RESULT:
3574 pr_err("BC_ACQUIRE_RESULT not supported\n");
3577 case BC_FREE_BUFFER: {
3578 binder_uintptr_t data_ptr;
3579 struct binder_buffer *buffer;
3581 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3583 ptr += sizeof(binder_uintptr_t);
3585 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3587 if (IS_ERR_OR_NULL(buffer)) {
3588 if (PTR_ERR(buffer) == -EPERM) {
3590 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3591 proc->pid, thread->pid,
3595 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3596 proc->pid, thread->pid,
3601 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3602 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3603 proc->pid, thread->pid, (u64)data_ptr,
3605 buffer->transaction ? "active" : "finished");
3606 binder_free_buf(proc, buffer);
3610 case BC_TRANSACTION_SG:
3612 struct binder_transaction_data_sg tr;
3614 if (copy_from_user(&tr, ptr, sizeof(tr)))
3617 binder_transaction(proc, thread, &tr.transaction_data,
3618 cmd == BC_REPLY_SG, tr.buffers_size);
3621 case BC_TRANSACTION:
3623 struct binder_transaction_data tr;
3625 if (copy_from_user(&tr, ptr, sizeof(tr)))
3628 binder_transaction(proc, thread, &tr,
3629 cmd == BC_REPLY, 0);
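/*
 * Illustrative sketch (not part of the driver): a minimal user-space
 * caller of the two commands above, assuming an already opened and
 * mmap'ed binder fd and a hypothetical target handle and payload:
 *
 *	struct binder_transaction_data td = {
 *		.target.handle   = handle,
 *		.code            = 1,		// hypothetical method code
 *		.data_size       = sizeof(args),
 *		.data.ptr.buffer = (binder_uintptr_t)&args,
 *	};
 *	struct {
 *		uint32_t cmd;
 *		struct binder_transaction_data td;
 *	} __attribute__((packed)) wr = { BC_TRANSACTION, td };
 *	struct binder_write_read bwr = {
 *		.write_size   = sizeof(wr),
 *		.write_buffer = (binder_uintptr_t)&wr,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */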
3633 case BC_REGISTER_LOOPER:
3634 binder_debug(BINDER_DEBUG_THREADS,
3635 "%d:%d BC_REGISTER_LOOPER\n",
3636 proc->pid, thread->pid);
3637 binder_inner_proc_lock(proc);
3638 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3639 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3640 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3641 proc->pid, thread->pid);
3642 } else if (proc->requested_threads == 0) {
3643 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3644 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3645 proc->pid, thread->pid);
3647 proc->requested_threads--;
3648 proc->requested_threads_started++;
3650 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3651 binder_inner_proc_unlock(proc);
3653 case BC_ENTER_LOOPER:
3654 binder_debug(BINDER_DEBUG_THREADS,
3655 "%d:%d BC_ENTER_LOOPER\n",
3656 proc->pid, thread->pid);
3657 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3658 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3659 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3660 proc->pid, thread->pid);
3662 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3664 case BC_EXIT_LOOPER:
3665 binder_debug(BINDER_DEBUG_THREADS,
3666 "%d:%d BC_EXIT_LOOPER\n",
3667 proc->pid, thread->pid);
3668 thread->looper |= BINDER_LOOPER_STATE_EXITED;
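/*
 * Illustrative summary (not part of the driver) of the looper protocol
 * enforced by the three cases above: the main thread enters the loop on
 * its own with BC_ENTER_LOOPER, while spawned threads answer a
 * BR_SPAWN_LOOPER request with BC_REGISTER_LOOPER; the two commands are
 * mutually exclusive for any given thread:
 *
 *	main thread:    BC_ENTER_LOOPER    -> read loop -> BC_EXIT_LOOPER
 *	spawned thread: BC_REGISTER_LOOPER -> read loop -> BC_EXIT_LOOPER
 */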
3671 case BC_REQUEST_DEATH_NOTIFICATION:
3672 case BC_CLEAR_DEATH_NOTIFICATION: {
3674 binder_uintptr_t cookie;
3675 struct binder_ref *ref;
3676 struct binder_ref_death *death = NULL;
3678 if (get_user(target, (uint32_t __user *)ptr))
3680 ptr += sizeof(uint32_t);
3681 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3683 ptr += sizeof(binder_uintptr_t);
3684 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3686 * Allocate memory for death notification
3687 * before taking the lock
3689 death = kzalloc(sizeof(*death), GFP_KERNEL);
3690 if (death == NULL) {
3691 WARN_ON(thread->return_error.cmd !=
3693 thread->return_error.cmd = BR_ERROR;
3694 binder_enqueue_thread_work(
3696 &thread->return_error.work);
3698 BINDER_DEBUG_FAILED_TRANSACTION,
3699 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3700 proc->pid, thread->pid);
3704 binder_proc_lock(proc);
3705 ref = binder_get_ref_olocked(proc, target, false);
3707 binder_user_error("%d:%d %s invalid ref %d\n",
3708 proc->pid, thread->pid,
3709 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3710 "BC_REQUEST_DEATH_NOTIFICATION" :
3711 "BC_CLEAR_DEATH_NOTIFICATION",
3713 binder_proc_unlock(proc);
3718 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3719 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3720 proc->pid, thread->pid,
3721 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3722 "BC_REQUEST_DEATH_NOTIFICATION" :
3723 "BC_CLEAR_DEATH_NOTIFICATION",
3724 (u64)cookie, ref->data.debug_id,
3725 ref->data.desc, ref->data.strong,
3726 ref->data.weak, ref->node->debug_id);
3728 binder_node_lock(ref->node);
3729 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3731 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3732 proc->pid, thread->pid);
3733 binder_node_unlock(ref->node);
3734 binder_proc_unlock(proc);
3738 binder_stats_created(BINDER_STAT_DEATH);
3739 INIT_LIST_HEAD(&death->work.entry);
3740 death->cookie = cookie;
3742 if (ref->node->proc == NULL) {
3743 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3745 binder_inner_proc_lock(proc);
3746 binder_enqueue_work_ilocked(
3747 &ref->death->work, &proc->todo);
3748 binder_wakeup_proc_ilocked(proc);
3749 binder_inner_proc_unlock(proc);
3752 if (ref->death == NULL) {
3753 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3754 proc->pid, thread->pid);
3755 binder_node_unlock(ref->node);
3756 binder_proc_unlock(proc);
3760 if (death->cookie != cookie) {
3761 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3762 proc->pid, thread->pid,
3765 binder_node_unlock(ref->node);
3766 binder_proc_unlock(proc);
3770 binder_inner_proc_lock(proc);
3771 if (list_empty(&death->work.entry)) {
3772 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3773 if (thread->looper &
3774 (BINDER_LOOPER_STATE_REGISTERED |
3775 BINDER_LOOPER_STATE_ENTERED))
3776 binder_enqueue_thread_work_ilocked(
3780 binder_enqueue_work_ilocked(
3783 binder_wakeup_proc_ilocked(
3787 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3788 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3790 binder_inner_proc_unlock(proc);
3792 binder_node_unlock(ref->node);
3793 binder_proc_unlock(proc);
3795 case BC_DEAD_BINDER_DONE: {
3796 struct binder_work *w;
3797 binder_uintptr_t cookie;
3798 struct binder_ref_death *death = NULL;
3800 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3803 ptr += sizeof(cookie);
3804 binder_inner_proc_lock(proc);
3805 list_for_each_entry(w, &proc->delivered_death,
3807 struct binder_ref_death *tmp_death =
3809 struct binder_ref_death,
3812 if (tmp_death->cookie == cookie) {
3817 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3818 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3819 proc->pid, thread->pid, (u64)cookie,
3821 if (death == NULL) {
3822 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3823 proc->pid, thread->pid, (u64)cookie);
3824 binder_inner_proc_unlock(proc);
3827 binder_dequeue_work_ilocked(&death->work);
3828 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3829 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3830 if (thread->looper &
3831 (BINDER_LOOPER_STATE_REGISTERED |
3832 BINDER_LOOPER_STATE_ENTERED))
3833 binder_enqueue_thread_work_ilocked(
3834 thread, &death->work);
3836 binder_enqueue_work_ilocked(
3839 binder_wakeup_proc_ilocked(proc);
3842 binder_inner_proc_unlock(proc);
3846 pr_err("%d:%d unknown command %d\n",
3847 proc->pid, thread->pid, cmd);
3850 *consumed = ptr - buffer;
3855 static void binder_stat_br(struct binder_proc *proc,
3856 struct binder_thread *thread, uint32_t cmd)
3858 trace_binder_return(cmd);
3859 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3860 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3861 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3862 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3866 static int binder_put_node_cmd(struct binder_proc *proc,
3867 struct binder_thread *thread,
3869 binder_uintptr_t node_ptr,
3870 binder_uintptr_t node_cookie,
3872 uint32_t cmd, const char *cmd_name)
3874 void __user *ptr = *ptrp;
3876 if (put_user(cmd, (uint32_t __user *)ptr))
3878 ptr += sizeof(uint32_t);
3880 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3882 ptr += sizeof(binder_uintptr_t);
3884 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3886 ptr += sizeof(binder_uintptr_t);
3888 binder_stat_br(proc, thread, cmd);
3889 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3890 proc->pid, thread->pid, cmd_name, node_debug_id,
3891 (u64)node_ptr, (u64)node_cookie);
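/*
 * Illustrative sketch (not part of the driver): the matching user-space
 * parse of the commands written above. The read buffer is a stream of
 * u32 command codes, each followed by its payload; ptr is hypothetical:
 *
 *	uint32_t cmd = *(uint32_t *)ptr;
 *	ptr += sizeof(uint32_t);
 *	switch (cmd) {
 *	case BR_INCREFS: case BR_ACQUIRE:
 *	case BR_RELEASE: case BR_DECREFS: {
 *		binder_uintptr_t obj = *(binder_uintptr_t *)ptr;
 *		ptr += sizeof(binder_uintptr_t);
 *		binder_uintptr_t cookie = *(binder_uintptr_t *)ptr;
 *		ptr += sizeof(binder_uintptr_t);
 *		// adjust local refcounts, then acknowledge with
 *		// BC_INCREFS_DONE / BC_ACQUIRE_DONE where required
 *		break;
 *	}
 *	}
 */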
3897 static int binder_wait_for_work(struct binder_thread *thread,
3901 struct binder_proc *proc = thread->proc;
3904 freezer_do_not_count();
3905 binder_inner_proc_lock(proc);
3907 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3908 if (binder_has_work_ilocked(thread, do_proc_work))
3911 list_add(&thread->waiting_thread_node,
3912 &proc->waiting_threads);
3913 binder_inner_proc_unlock(proc);
3915 binder_inner_proc_lock(proc);
3916 list_del_init(&thread->waiting_thread_node);
3917 if (signal_pending(current)) {
3922 finish_wait(&thread->wait, &wait);
3923 binder_inner_proc_unlock(proc);
3930 * binder_apply_fd_fixups() - finish fd translation
3931 * @t: binder transaction with list of fd fixups
3933 * Now that we are in the context of the transaction target
3934 * process, we can allocate and install fds. Process the
3935 * list of fds to translate and fix up the buffer with the new fds.
3938 * If we fail to allocate an fd, then free the resources by
3939 * fput'ing files that have not been processed and ksys_close'ing
3940 * any fds that have already been allocated.
3942 static int binder_apply_fd_fixups(struct binder_transaction *t)
3944 struct binder_txn_fd_fixup *fixup, *tmp;
3947 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3948 int fd = get_unused_fd_flags(O_CLOEXEC);
3952 binder_debug(BINDER_DEBUG_TRANSACTION,
3953 "failed fd fixup txn %d fd %d\n",
3958 binder_debug(BINDER_DEBUG_TRANSACTION,
3959 "fd fixup txn %d fd %d\n",
3961 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3962 fd_install(fd, fixup->file);
3964 fdp = (u32 *)(t->buffer->data + fixup->offset);
3966 * This store can cause problems for CPUs with a
3967 * VIVT cache (e.g. ARMv5) since the cache cannot
3968 * detect virtual aliases to the same physical cacheline.
3969 * To support VIVT, this address and the user-space VA
3970 * would both need to be flushed. Since this kernel
3971 * VA is not constructed via page_to_virt(), we can't
3972 * use flush_dcache_page() on it, so we'd have to use
3973 * an internal function. If devices with VIVT ever
3974 * need to run Android, we'll either need to go back
3975 * to patching the translated fd from the sender side
3976 * (using the non-standard kernel functions), or rework
3977 * how the kernel uses the buffer to use page_to_virt()
3978 * addresses instead of allocating in our own vm area.
3980 * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
3984 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3988 u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
3990 binder_deferred_fd_close(*fdp);
3992 list_del(&fixup->fixup_entry);
3999 static int binder_thread_read(struct binder_proc *proc,
4000 struct binder_thread *thread,
4001 binder_uintptr_t binder_buffer, size_t size,
4002 binder_size_t *consumed, int non_block)
4004 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4005 void __user *ptr = buffer + *consumed;
4006 void __user *end = buffer + size;
4009 int wait_for_proc_work;
4011 if (*consumed == 0) {
4012 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4014 ptr += sizeof(uint32_t);
4018 binder_inner_proc_lock(proc);
4019 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4020 binder_inner_proc_unlock(proc);
4022 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4024 trace_binder_wait_for_work(wait_for_proc_work,
4025 !!thread->transaction_stack,
4026 !binder_worklist_empty(proc, &thread->todo));
4027 if (wait_for_proc_work) {
4028 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4029 BINDER_LOOPER_STATE_ENTERED))) {
4030 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4031 proc->pid, thread->pid, thread->looper);
4032 wait_event_interruptible(binder_user_error_wait,
4033 binder_stop_on_user_error < 2);
4035 binder_set_nice(proc->default_priority);
4039 if (!binder_has_work(thread, wait_for_proc_work))
4042 ret = binder_wait_for_work(thread, wait_for_proc_work);
4045 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4052 struct binder_transaction_data tr;
4053 struct binder_work *w = NULL;
4054 struct list_head *list = NULL;
4055 struct binder_transaction *t = NULL;
4056 struct binder_thread *t_from;
4058 binder_inner_proc_lock(proc);
4059 if (!binder_worklist_empty_ilocked(&thread->todo))
4060 list = &thread->todo;
4061 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4065 binder_inner_proc_unlock(proc);
4068 if (ptr - buffer == 4 && !thread->looper_need_return)
4073 if (end - ptr < sizeof(tr) + 4) {
4074 binder_inner_proc_unlock(proc);
4077 w = binder_dequeue_work_head_ilocked(list);
4078 if (binder_worklist_empty_ilocked(&thread->todo))
4079 thread->process_todo = false;
4082 case BINDER_WORK_TRANSACTION: {
4083 binder_inner_proc_unlock(proc);
4084 t = container_of(w, struct binder_transaction, work);
4086 case BINDER_WORK_RETURN_ERROR: {
4087 struct binder_error *e = container_of(
4088 w, struct binder_error, work);
4090 WARN_ON(e->cmd == BR_OK);
4091 binder_inner_proc_unlock(proc);
4092 if (put_user(e->cmd, (uint32_t __user *)ptr))
4096 ptr += sizeof(uint32_t);
4098 binder_stat_br(proc, thread, cmd);
4100 case BINDER_WORK_TRANSACTION_COMPLETE: {
4101 binder_inner_proc_unlock(proc);
4102 cmd = BR_TRANSACTION_COMPLETE;
4103 if (put_user(cmd, (uint32_t __user *)ptr))
4105 ptr += sizeof(uint32_t);
4107 binder_stat_br(proc, thread, cmd);
4108 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4109 "%d:%d BR_TRANSACTION_COMPLETE\n",
4110 proc->pid, thread->pid);
4112 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4114 case BINDER_WORK_NODE: {
4115 struct binder_node *node = container_of(w, struct binder_node, work);
4117 binder_uintptr_t node_ptr = node->ptr;
4118 binder_uintptr_t node_cookie = node->cookie;
4119 int node_debug_id = node->debug_id;
4122 void __user *orig_ptr = ptr;
4124 BUG_ON(proc != node->proc);
4125 strong = node->internal_strong_refs ||
4126 node->local_strong_refs;
4127 weak = !hlist_empty(&node->refs) ||
4128 node->local_weak_refs ||
4129 node->tmp_refs || strong;
4130 has_strong_ref = node->has_strong_ref;
4131 has_weak_ref = node->has_weak_ref;
4133 if (weak && !has_weak_ref) {
4134 node->has_weak_ref = 1;
4135 node->pending_weak_ref = 1;
4136 node->local_weak_refs++;
4138 if (strong && !has_strong_ref) {
4139 node->has_strong_ref = 1;
4140 node->pending_strong_ref = 1;
4141 node->local_strong_refs++;
4143 if (!strong && has_strong_ref)
4144 node->has_strong_ref = 0;
4145 if (!weak && has_weak_ref)
4146 node->has_weak_ref = 0;
4147 if (!weak && !strong) {
4148 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4149 "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
4154 rb_erase(&node->rb_node, &proc->nodes);
4155 binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);
4171 if (weak && !has_weak_ref)
4172 ret = binder_put_node_cmd(
4173 proc, thread, &ptr, node_ptr,
4174 node_cookie, node_debug_id,
4175 BR_INCREFS, "BR_INCREFS");
4176 if (!ret && strong && !has_strong_ref)
4177 ret = binder_put_node_cmd(
4178 proc, thread, &ptr, node_ptr,
4179 node_cookie, node_debug_id,
4180 BR_ACQUIRE, "BR_ACQUIRE");
4181 if (!ret && !strong && has_strong_ref)
4182 ret = binder_put_node_cmd(
4183 proc, thread, &ptr, node_ptr,
4184 node_cookie, node_debug_id,
4185 BR_RELEASE, "BR_RELEASE");
4186 if (!ret && !weak && has_weak_ref)
4187 ret = binder_put_node_cmd(
4188 proc, thread, &ptr, node_ptr,
4189 node_cookie, node_debug_id,
4190 BR_DECREFS, "BR_DECREFS");
4191 if (orig_ptr == ptr)
4192 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4193 "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
4201 case BINDER_WORK_DEAD_BINDER:
4202 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4203 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
4213 cookie = death->cookie;
4215 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4216 "%d:%d %s %016llx\n",
4217 proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
4222 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
4238 binder_stat_br(proc, thread, cmd);
4239 if (cmd == BR_DEAD_BINDER)
4240 goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		default:
			binder_inner_proc_unlock(proc);
			pr_err("%d:%d: bad work type %d\n",
			       proc->pid, thread->pid, w->type);
			break;
		}

		if (!t)
			continue;
4252 BUG_ON(t->buffer == NULL);
4253 if (t->buffer->target_node) {
4254 struct binder_node *target_node = t->buffer->target_node;
4256 tr.target.ptr = target_node->ptr;
4257 tr.cookie = target_node->cookie;
4258 t->saved_priority = task_nice(current);
4259 if (t->priority < target_node->min_priority &&
4260 !(t->flags & TF_ONE_WAY))
4261 binder_set_nice(t->priority);
4262 else if (!(t->flags & TF_ONE_WAY) ||
4263 t->saved_priority > target_node->min_priority)
4264 binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
4272 tr.flags = t->flags;
4273 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}
		ret = binder_apply_fd_fixups(t);
		if (ret) {
			struct binder_buffer *buffer = t->buffer;
4288 bool oneway = !!(t->flags & TF_ONE_WAY);
4289 int tid = t->debug_id;
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			buffer->transaction = NULL;
			binder_cleanup_transaction(t, "fd fixups failed",
						   BR_FAILED_REPLY);
			binder_free_buf(proc, buffer);
4297 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4298 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4299 proc->pid, thread->pid,
4301 (cmd == BR_REPLY ? "reply " : ""),
4302 tid, BR_FAILED_REPLY, ret, __LINE__);
4303 if (cmd == BR_REPLY) {
4304 cmd = BR_FAILED_REPLY;
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				binder_stat_br(proc, thread, cmd);
				break;
			}
			continue;
		}
4313 tr.data_size = t->buffer->data_size;
4314 tr.offsets_size = t->buffer->offsets_size;
4315 tr.data.ptr.buffer = (binder_uintptr_t)
4316 ((uintptr_t)t->buffer->data +
4317 binder_alloc_get_user_buffer_offset(&proc->alloc));
4318 tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
4344 binder_stat_br(proc, thread, cmd);
4345 binder_debug(BINDER_DEBUG_TRANSACTION,
4346 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4347 proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
4350 t->debug_id, t_from ? t_from->proc->pid : 0,
4351 t_from ? t_from->pid : 0, cmd,
4352 t->buffer->data_size, t->buffer->offsets_size,
4353 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
		if (t_from)
			binder_thread_dec_tmpref(t_from);
4357 t->buffer->allow_user_free = 1;
4358 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4359 binder_inner_proc_lock(thread->proc);
4360 t->to_parent = thread->transaction_stack;
4361 t->to_thread = thread;
4362 thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
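	/*
	 * At most one BR_SPAWN_LOOPER is outstanding at a time:
	 * proc->requested_threads is only decremented when userspace
	 * answers with BC_REGISTER_LOOPER.
	 */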
4373 binder_inner_proc_lock(proc);
4374 if (proc->requested_threads == 0 &&
4375 list_empty(&thread->proc->waiting_threads) &&
4376 proc->requested_threads_started < proc->max_threads &&
4377 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out */) {
4380 proc->requested_threads++;
4381 binder_inner_proc_unlock(proc);
4382 binder_debug(BINDER_DEBUG_THREADS,
4383 "%d:%d BR_SPAWN_LOOPER\n",
4384 proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
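/*
 * binder_release_work() - flush a work list that can no longer be delivered,
 * typically because the owning thread or process is dying. Transactions are
 * cleaned up via binder_cleanup_transaction(), which sends a failed reply to
 * the sender where one is still expected.
 */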
4393 static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
4405 struct binder_transaction *t;
4407 t = container_of(w, struct binder_transaction, work);
			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
4413 struct binder_error *e = container_of(
4414 w, struct binder_error, work);
4416 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4417 "undelivered TRANSACTION_ERROR: %u\n",
4420 case BINDER_WORK_TRANSACTION_COMPLETE: {
4421 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4422 "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
4426 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4427 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4428 struct binder_ref_death *death;
4430 death = container_of(w, struct binder_ref_death, work);
4431 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4432 "undelivered death notification, %016llx\n",
4433 (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
4446 static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
4467 binder_stats_created(BINDER_STAT_THREAD);
4468 thread->proc = proc;
4469 thread->pid = current->pid;
4470 atomic_set(&thread->tmp_ref, 0);
4471 init_waitqueue_head(&thread->wait);
4472 INIT_LIST_HEAD(&thread->todo);
4473 rb_link_node(&thread->rb_node, parent, p);
4474 rb_insert_color(&thread->rb_node, &proc->threads);
4475 thread->looper_need_return = true;
4476 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4477 thread->return_error.cmd = BR_OK;
4478 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4479 thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
4523 static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
4526 struct binder_transaction *t;
4527 struct binder_transaction *send_reply = NULL;
4528 int active_transactions = 0;
4529 struct binder_transaction *last_t = NULL;
4531 binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
4544 rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	} else {
		__acquire(&t->lock);
	}
	thread->is_dead = true;
	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");
		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
		else
			__acquire(&t->lock);
	}
	/* annotation for sparse, lock not acquired in last iteration above */
	__release(&t->lock);
	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
	}
4597 binder_inner_proc_unlock(thread->proc);
	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
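/*
 * Poll support: the thread is marked BINDER_LOOPER_STATE_POLL so that
 * binder_thread_release() knows to wake the waitqueue with POLLFREE and
 * synchronize against epoll before the thread struct is freed.
 */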
4615 static __poll_t binder_poll(struct file *filp,
			    struct poll_table_struct *wait)
{
4618 struct binder_proc *proc = filp->private_data;
4619 struct binder_thread *thread = NULL;
4620 bool wait_for_proc_work;
	thread = binder_get_thread(proc);
	if (!thread)
		return EPOLLERR;
4626 binder_inner_proc_lock(thread->proc);
4627 thread->looper |= BINDER_LOOPER_STATE_POLL;
4628 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4630 binder_inner_proc_unlock(thread->proc);
4632 poll_wait(filp, &thread->wait, wait);
	if (binder_has_work(thread, wait_for_proc_work))
		return EPOLLIN;

	return 0;
}
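/*
 * Userspace reaches this through the BINDER_WRITE_READ ioctl. A minimal,
 * illustrative sketch of the calling convention (not part of this file):
 *
 *	struct binder_write_read bwr = {0};
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *
 *	bwr.write_buffer = (binder_uintptr_t)&cmd;
 *	bwr.write_size = sizeof(cmd);
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * The write buffer is consumed first, then the read buffer is filled;
 * bwr.write_consumed and bwr.read_consumed report how far each side got.
 */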
4640 static int binder_ioctl_write_read(struct file *filp,
4641 unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
4646 unsigned int size = _IOC_SIZE(cmd);
4647 void __user *ubuf = (void __user *)arg;
4648 struct binder_write_read bwr;
	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
4658 binder_debug(BINDER_DEBUG_READ_WRITE,
4659 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4660 proc->pid, thread->pid,
4661 (u64)bwr.write_size, (u64)bwr.write_buffer,
4662 (u64)bwr.read_size, (u64)bwr.read_buffer);
4664 if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
4677 if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
4693 binder_debug(BINDER_DEBUG_READ_WRITE,
4694 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4695 proc->pid, thread->pid,
4696 (u64)bwr.write_consumed, (u64)bwr.write_size,
4697 (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
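/*
 * Only one context manager (the owner of handle 0) may exist per binder
 * context. If binder_context_mgr_uid is already valid, only that euid may
 * re-register; the security_binder_set_context_mgr() LSM hook is also
 * consulted.
 */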
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
4710 struct binder_context *context = proc->context;
4711 struct binder_node *new_node;
4712 kuid_t curr_euid = current_euid();
4714 mutex_lock(&context->context_mgr_node_lock);
4715 if (context->binder_context_mgr_node) {
4716 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4720 ret = security_binder_set_context_mgr(proc->tsk);
4723 if (uid_valid(context->binder_context_mgr_uid)) {
4724 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4725 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4726 from_kuid(&init_user_ns, curr_euid),
4727 from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
4740 binder_node_lock(new_node);
4741 new_node->local_weak_refs++;
4742 new_node->local_strong_refs++;
4743 new_node->has_strong_ref = 1;
4744 new_node->has_weak_ref = 1;
4745 context->binder_context_mgr_node = new_node;
4746 binder_node_unlock(new_node);
4747 binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
4753 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
			struct binder_node_info_for_ref *info)
{
4756 struct binder_node *node;
4757 struct binder_context *context = proc->context;
4758 __u32 handle = info->handle;
4760 if (info->strong_count || info->weak_count || info->reserved1 ||
4761 info->reserved2 || info->reserved3) {
4762 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4767 /* This ioctl may only be used by the context manager */
4768 mutex_lock(&context->context_mgr_node_lock);
4769 if (!context->binder_context_mgr_node ||
4770 context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}
4789 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;
4795 memset(info, 0, sizeof(*info));
4797 binder_inner_proc_lock(proc);
4798 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
4818 struct binder_thread *thread;
4819 unsigned int size = _IOC_SIZE(cmd);
4820 void __user *ubuf = (void __user *)arg;
4822 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4823 proc->pid, current->pid, cmd, arg);*/
4825 binder_selftest_alloc(&proc->alloc);
4827 trace_binder_ioctl(cmd, arg);
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
4840 case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
4858 case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
4863 case BINDER_THREAD_EXIT:
4864 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4865 proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
4869 case BINDER_VERSION: {
4870 struct binder_version __user *ver = ubuf;
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
4883 case BINDER_GET_NODE_INFO_FOR_REF: {
4884 struct binder_node_info_for_ref info;
		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		break;
	}
4902 case BINDER_GET_NODE_DEBUG_INFO: {
4903 struct binder_node_debug_info info;
		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
4928 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4929 if (ret && ret != -ERESTARTSYS)
4930 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4932 trace_binder_ioctl_done(ret);
static void binder_vma_open(struct vm_area_struct *vma)
{
4938 struct binder_proc *proc = vma->vm_private_data;
4940 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4941 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4942 proc->pid, vma->vm_start, vma->vm_end,
4943 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
static void binder_vma_close(struct vm_area_struct *vma)
{
4949 struct binder_proc *proc = vma->vm_private_data;
4951 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4952 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4953 proc->pid, vma->vm_start, vma->vm_end,
4954 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4955 (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
}
static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
4964 static const struct vm_operations_struct binder_vm_ops = {
4965 .open = binder_vma_open,
4966 .close = binder_vma_close,
	.fault = binder_vm_fault,
};
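/*
 * The binder buffer area is mapped read-only into userspace (VM_WRITE is
 * forbidden and VM_MAYWRITE is cleared) and capped at 4MB. A typical,
 * purely illustrative open/mmap sequence from userspace looks like:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * where map_size is whatever size the process chooses (historically
 * libbinder has used just under 1MB).
 */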
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;
4979 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4980 vma->vm_end = vma->vm_start + SZ_4M;
4982 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4983 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4984 __func__, proc->pid, vma->vm_start, vma->vm_end,
4985 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4986 (unsigned long)pgprot_val(vma->vm_page_prot));
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
5013 struct binder_device *binder_dev;
5015 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5016 current->group_leader->pid, current->pid);
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
5021 spin_lock_init(&proc->inner_lock);
5022 spin_lock_init(&proc->outer_lock);
5023 get_task_struct(current->group_leader);
5024 proc->tsk = current->group_leader;
5025 INIT_LIST_HEAD(&proc->todo);
5026 proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
5029 proc->context = &binder_dev->context;
5030 binder_alloc_init(&proc->alloc);
5032 binder_stats_created(BINDER_STAT_PROC);
5033 proc->pid = current->group_leader->pid;
5034 INIT_LIST_HEAD(&proc->delivered_death);
5035 INIT_LIST_HEAD(&proc->waiting_threads);
5036 filp->private_data = proc;
5038 mutex_lock(&binder_procs_lock);
5039 hlist_add_head(&proc->proc_node, &binder_procs);
5040 mutex_unlock(&binder_procs_lock);
5042 if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
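/*
 * Wake every thread blocked in binder_thread_read() so it notices
 * looper_need_return and drops back to userspace.
 */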
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
5077 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5078 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5080 thread->looper_need_return = true;
5081 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);
5088 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5089 "binder_flush: %d woke %d threads\n", proc->pid,
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;
5109 binder_release_work(proc, &node->async_todo);
5111 binder_node_lock(node);
5112 binder_inner_proc_lock(proc);
5113 binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}
	node->proc = NULL;
	node->local_strong_refs = 0;
5128 node->local_weak_refs = 0;
5129 binder_inner_proc_unlock(proc);
5131 spin_lock(&binder_dead_nodes_lock);
5132 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5133 spin_unlock(&binder_dead_nodes_lock);
	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;
5151 BUG_ON(!list_empty(&ref->death->work.entry));
5152 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}
5159 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5160 "node %d now dead, refs %d, death %d\n",
5161 node->debug_id, refs, death);
5162 binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
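/*
 * Tear down a dying process in a fixed order: unhash the proc, drop the
 * context manager node if this proc owned it, release all threads, then
 * nodes, then outgoing refs, and finally any undelivered work.
 */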
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5174 mutex_lock(&binder_procs_lock);
5175 hlist_del(&proc->proc_node);
5176 mutex_unlock(&binder_procs_lock);
5178 mutex_lock(&context->context_mgr_node_lock);
5179 if (context->binder_context_mgr_node &&
5180 context->binder_context_mgr_node->proc == proc) {
5181 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5182 "%s: %d context_mgr_node gone\n",
5183 __func__, proc->pid);
5184 context->binder_context_mgr_node = NULL;
5186 mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
5198 struct binder_thread *thread;
5200 thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
5210 struct binder_node *node;
		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	outgoing_refs = 0;
	binder_proc_lock(proc);
5229 while ((n = rb_first(&proc->refs_by_desc))) {
5230 struct binder_ref *ref;
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
5235 binder_proc_unlock(proc);
5236 binder_free_ref(ref);
5237 binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);
5241 binder_release_work(proc, &proc->todo);
5242 binder_release_work(proc, &proc->delivered_death);
5244 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5245 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5246 __func__, proc->pid, threads, nodes, incoming_refs,
5247 outgoing_refs, active_transactions);
5249 binder_proc_dec_tmpref(proc);
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
5260 if (!hlist_empty(&binder_deferred_list)) {
5261 proc = hlist_entry(binder_deferred_list.first,
5262 struct binder_proc, deferred_work_node);
5263 hlist_del_init(&proc->deferred_work_node);
5264 defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);
5272 if (defer & BINDER_DEFERRED_FLUSH)
5273 binder_deferred_flush(proc);
5275 if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */
	} while (proc);
}
5279 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
5285 proc->deferred_work |= defer;
5286 if (hlist_unhashed(&proc->deferred_work_node)) {
5287 hlist_add_head(&proc->deferred_work_node,
5288 &binder_deferred_list);
5289 schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
5294 static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
5299 struct binder_proc *to_proc;
5300 struct binder_buffer *buffer = t->buffer;
5302 spin_lock(&t->lock);
5303 to_proc = t->to_proc;
5305 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5306 prefix, t->debug_id, t,
5307 t->from ? t->from->proc->pid : 0,
5308 t->from ? t->from->pid : 0,
5309 to_proc ? to_proc->pid : 0,
5310 t->to_thread ? t->to_thread->pid : 0,
5311 t->code, t->flags, t->priority, t->need_reply);
5312 spin_unlock(&t->lock);
	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
5327 if (buffer->target_node)
5328 seq_printf(m, " node %d", buffer->target_node->debug_id);
5329 seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
5334 static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
5340 struct binder_node *node;
5341 struct binder_transaction *t;
	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
5345 t = container_of(w, struct binder_transaction, work);
5346 print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
5349 case BINDER_WORK_RETURN_ERROR: {
5350 struct binder_error *e = container_of(
5351 w, struct binder_error, work);
5353 seq_printf(m, "%stransaction error: %u\n",
5356 case BINDER_WORK_TRANSACTION_COMPLETE:
5357 seq_printf(m, "%stransaction complete\n", prefix);
5359 case BINDER_WORK_NODE:
5360 node = container_of(w, struct binder_node, work);
5361 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5362 prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
5365 case BINDER_WORK_DEAD_BINDER:
5366 seq_printf(m, "%shas dead binder\n", prefix);
5368 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5369 seq_printf(m, "%shas cleared dead binder\n", prefix);
5371 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5372 seq_printf(m, "%shas cleared death notification\n", prefix);
5375 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5380 static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;
5389 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5390 thread->pid, thread->looper,
5391 thread->looper_need_return,
5392 atomic_read(&thread->tmp_ref));
5393 header_pos = m->count;
5394 t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
5397 print_binder_transaction_ilocked(m, thread->proc,
5398 " outgoing transaction", t);
5400 } else if (t->to_thread == thread) {
5401 print_binder_transaction_ilocked(m, thread->proc,
5402 " incoming transaction", t);
5405 print_binder_transaction_ilocked(m, thread->proc,
5406 " bad transaction", t);
5410 list_for_each_entry(w, &thread->todo, entry) {
5411 print_binder_work_ilocked(m, thread->proc, " ",
5412 " pending transaction", w);
5414 if (!print_always && m->count == header_pos)
5415 m->count = start_pos;
5418 static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;
5429 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5430 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5431 node->has_strong_ref, node->has_weak_ref,
5432 node->local_strong_refs, node->local_weak_refs,
5433 node->internal_strong_refs, count, node->tmp_refs);
5435 seq_puts(m, " proc");
5436 hlist_for_each_entry(ref, &node->refs, node_entry)
5437 seq_printf(m, " %d", ref->proc->pid);
5441 list_for_each_entry(w, &node->async_todo, entry)
5442 print_binder_work_ilocked(m, node->proc, " ",
5443 " pending async transaction", w);
5447 static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
5450 binder_node_lock(ref->node);
5451 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5452 ref->data.debug_id, ref->data.desc,
5453 ref->node->proc ? "" : "dead ",
5454 ref->node->debug_id, ref->data.strong,
5455 ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
5459 static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;
5468 seq_printf(m, "proc %d\n", proc->pid);
5469 seq_printf(m, "context %s\n", proc->context->name);
5470 header_pos = m->count;
5472 binder_inner_proc_lock(proc);
5473 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5474 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5475 rb_node), print_all);
5477 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
5488 binder_inc_node_tmpref_ilocked(node);
5489 /* Need to drop inner lock to take node lock */
5490 binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
5493 binder_node_inner_lock(node);
5494 print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);
	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
5513 binder_alloc_print_allocated(m, &proc->alloc);
5514 binder_inner_proc_lock(proc);
5515 list_for_each_entry(w, &proc->todo, entry)
5516 print_binder_work_ilocked(m, proc, " ",
5517 " pending transaction", w);
5518 list_for_each_entry(w, &proc->delivered_death, entry) {
5519 seq_puts(m, " has delivered dead binder\n");
5522 binder_inner_proc_unlock(proc);
5523 if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
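/*
 * These tables are indexed by command ordinal and must stay in the same
 * order as the BC_ and BR_ enums in uapi/linux/android/binder.h; the
 * BUILD_BUG_ON()s in print_binder_stats() only catch size mismatches.
 */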
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
5580 static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5586 ARRAY_SIZE(binder_command_strings));
5587 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}
5595 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5596 ARRAY_SIZE(binder_return_strings));
5597 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}
5605 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5606 ARRAY_SIZE(binder_objstat_strings));
5607 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5608 ARRAY_SIZE(stats->obj_deleted));
5609 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5610 int created = atomic_read(&stats->obj_created[i]);
5611 int deleted = atomic_read(&stats->obj_deleted[i]);
5613 if (created || deleted)
5614 seq_printf(m, "%s%s: active %d total %d\n",
5616 binder_objstat_strings[i],
5622 static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
5629 size_t free_async_space =
5630 binder_alloc_get_free_async_space(&proc->alloc);
5632 seq_printf(m, "proc %d\n", proc->pid);
5633 seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;
5643 seq_printf(m, " threads: %d\n", count);
5644 seq_printf(m, " requested threads: %d+%d/%d\n"
5645 " ready threads %d\n"
5646 " free async space %zd\n", proc->requested_threads,
		   proc->requested_threads_started, proc->max_threads,
		   ready_threads,
		   free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
5653 binder_inner_proc_unlock(proc);
5654 seq_printf(m, " nodes: %d\n", count);
5658 binder_proc_lock(proc);
5659 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
5664 weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
5667 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5669 count = binder_alloc_get_allocated_count(&proc->alloc);
5670 seq_printf(m, " buffers: %d\n", count);
5672 binder_alloc_print_pages(m, &proc->alloc);
	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
5681 seq_printf(m, " pending transactions: %d\n", count);
5683 print_binder_stats(m, " ", &proc->stats);
static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
5690 struct binder_node *node;
5691 struct binder_node *last_node = NULL;
5693 seq_puts(m, "binder state:\n");
5695 spin_lock(&binder_dead_nodes_lock);
5696 if (!hlist_empty(&binder_dead_nodes))
5697 seq_puts(m, "dead nodes:\n");
5698 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
5708 binder_node_lock(node);
5709 print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);
5718 mutex_lock(&binder_procs_lock);
5719 hlist_for_each_entry(proc, &binder_procs, proc_node)
5720 print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
5730 seq_puts(m, "binder stats:\n");
5732 print_binder_stats(m, "", &binder_stats);
5734 mutex_lock(&binder_procs_lock);
5735 hlist_for_each_entry(proc, &binder_procs, proc_node)
5736 print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
5746 seq_puts(m, "binder transactions:\n");
5747 mutex_lock(&binder_procs_lock);
5748 hlist_for_each_entry(proc, &binder_procs, proc_node)
5749 print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
5758 int pid = (unsigned long)m->private;
5760 mutex_lock(&binder_procs_lock);
5761 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5762 if (itr->pid == pid) {
5763 seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
5772 static void print_binder_transaction_log_entry(struct seq_file *m,
		struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5783 e->debug_id, (e->call_type == 2) ? "reply" :
5784 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5785 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5786 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5787 e->return_error, e->return_error_param,
5788 e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
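	/*
	 * The log is a fixed-size ring buffer: before it first wraps, print
	 * entries 0..log_cur in order; afterwards (log->full), start at the
	 * oldest slot and print all ARRAY_SIZE(log->entry) records.
	 */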
5807 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5808 0 : count % ARRAY_SIZE(log->entry);
5809 if (count > ARRAY_SIZE(log->entry) || log->full)
5810 count = ARRAY_SIZE(log->entry);
5811 for (i = 0; i < count; i++) {
5812 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
5819 static const struct file_operations binder_fops = {
5820 .owner = THIS_MODULE,
5821 .poll = binder_poll,
5822 .unlocked_ioctl = binder_ioctl,
5823 .compat_ioctl = binder_ioctl,
5824 .mmap = binder_mmap,
5825 .open = binder_open,
5826 .flush = binder_flush,
	.release = binder_release,
};
5830 DEFINE_SHOW_ATTRIBUTE(state);
5831 DEFINE_SHOW_ATTRIBUTE(stats);
5832 DEFINE_SHOW_ATTRIBUTE(transactions);
5833 DEFINE_SHOW_ATTRIBUTE(transaction_log);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;
5844 binder_device->miscdev.fops = &binder_fops;
5845 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5846 binder_device->miscdev.name = name;
5848 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5849 binder_device->context.name = name;
5850 mutex_init(&binder_device->context.context_mgr_node_lock);
	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;
5874 atomic_set(&binder_transaction_log.cur, ~0U);
5875 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5877 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5878 if (binder_debugfs_dir_entry_root)
5879 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5880 binder_debugfs_dir_entry_root);
5882 if (binder_debugfs_dir_entry_root) {
5883 debugfs_create_file("state",
5885 binder_debugfs_dir_entry_root,
5888 debugfs_create_file("stats",
5890 binder_debugfs_dir_entry_root,
5893 debugfs_create_file("transactions",
5895 binder_debugfs_dir_entry_root,
5897 &transactions_fops);
5898 debugfs_create_file("transaction_log",
5900 binder_debugfs_dir_entry_root,
5901 &binder_transaction_log,
5902 &transaction_log_fops);
5903 debugfs_create_file("failed_transaction_log",
5905 binder_debugfs_dir_entry_root,
5906 &binder_transaction_log_failed,
5907 &transaction_log_fops);
5911 * Copy the module_parameter string, because we don't want to
5912 * tokenize it in-place.
5914 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
5920 device_tmp = device_names;
5921 while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;
5929 err_init_binder_device_failed:
5930 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5931 misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);
5938 err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}
5944 device_initcall(binder_init);
5946 #define CREATE_TRACE_POINTS
5947 #include "binder_trace.h"
5949 MODULE_LICENSE("GPL v2");