// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
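
/*
 * Illustrative sketch, not driver code: the nesting order described above,
 * expressed with the lock helpers defined later in this file. Acquiring in
 * any other order, or nesting a lock of one proc under a lock of another
 * proc at the same level, risks deadlock.
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */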
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
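
/*
 * Example, not driver code: because the parameter above is registered with
 * mode 0644, the mask can be changed at runtime (assuming the driver is
 * built with module name "binder") from a root shell, e.g.:
 *
 *	echo 0x200 > /sys/module/binder/parameters/debug_mask
 *
 * which selects BINDER_DEBUG_TRANSACTION (1U << 9) alone, or at boot via
 * binder.debug_mask=0x200 on the kernel command line.
 */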
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
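
/*
 * Illustrative sketch, not driver code: with ARRAY_SIZE(log->entry) == 32,
 * the unsigned counter above selects slots round-robin, e.g.:
 *
 *	cur = atomic_inc_return(&log->cur);	// say 33
 *	e = &log->entry[33 % 32];		// reuses slot 1
 *
 * log->full only records that the counter has wrapped at least once;
 * debug_id_done is cleared here so a reader can detect an entry that was
 * recycled while it was being read.
 */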
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list:	list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
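
/*
 * Illustrative sketch, not driver code: the only difference between the
 * two *_thread_work_ilocked() variants above is the process_todo flag,
 * which is checked later by binder_has_work_ilocked():
 *
 *	binder_enqueue_thread_work_ilocked(thread, work);
 *		// queued and process_todo set; a reader will handle it
 *	binder_enqueue_deferred_thread_work_ilocked(thread, work);
 *		// queued, but the thread may still go to sleep in read
 *		// until something else sets process_todo
 */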
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
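
/*
 * Example, not driver code: rlimit_to_nice() maps an RLIMIT_NICE value of
 * "20 - nice" back to a nice level, so a task with rlim_cur == 15 has
 * min_nice == 20 - 15 == 5; a requested nice of -10 is then clamped to 5
 * by the fallback path above, and the RLIMIT_NICE error only fires when
 * min_nice exceeds MAX_NICE (19).
 */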
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
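
/*
 * Illustrative sketch, not driver code: the descriptor loop above hands out
 * the lowest unused handle. If the proc already holds descs {0, 1, 2, 5},
 * a new ref starts at 1 (or 0 for the context manager node) and is bumped
 * past each taken value in rb-tree order:
 *
 *	desc = 1;	// ref 0: not greater, desc = 1
 *			// ref 1: not greater, desc = 2
 *			// ref 2: not greater, desc = 3
 *			// ref 5: greater, stop; new desc is 3
 */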
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list:	list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	proc containing the ref
 * @node:	target node
 * @strong:	true=strong reference, false=weak reference
 * @target_list:	worklist to use if node is incremented
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
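
/*
 * Illustrative sketch, not driver code: the lookup/alloc/retry dance above
 * is the usual pattern for allocating under a spinlock that must not be
 * held across a GFP_KERNEL allocation:
 *
 *	lock(); obj = lookup(); unlock();	// fast path, no allocation
 *	if (!obj) {
 *		new = kzalloc(...);		// may sleep, so outside lock
 *		lock();
 *		obj = lookup_or_insert(new);	// recheck under the lock
 *	}
 *	// if another thread won the race, the loser frees its copy
 */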
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	transaction to clean up
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}

static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
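
/*
 * Example, not driver code: with buffer->data_size == 64 and offset == 40,
 * read_size == min(sizeof(struct binder_object), 24), so a 24-byte
 * binder_fd_object at offset 40 passes the final
 * "offset <= data_size - object_size" check, while the same object at
 * offset 48 (only 16 bytes left in the buffer) fails that check and 0 is
 * returned.
 */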
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};

/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}

/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = close_fd_get_file(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
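
/*
 * Illustrative sketch, not driver code: the deferred-close sequence for a
 * file descriptor that must be closed as part of releasing a transaction:
 *
 *	binder_ioctl()
 *	  -> binder_deferred_fd_close(fd)	// close_fd_get_file() + queue
 *	  <- returns toward userspace
 *	binder_do_fd_close()			// task work, TWA_RESUME
 *	  -> fput(file)				// pinned reference dropped
 */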
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure && failed_at ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}
2114 static int binder_translate_binder(struct flat_binder_object *fp,
2115 struct binder_transaction *t,
2116 struct binder_thread *thread)
2118 struct binder_node *node;
2119 struct binder_proc *proc = thread->proc;
2120 struct binder_proc *target_proc = t->to_proc;
2121 struct binder_ref_data rdata;
2124 node = binder_get_node(proc, fp->binder);
2126 node = binder_new_node(proc, fp);
2130 if (fp->cookie != node->cookie) {
2131 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2132 proc->pid, thread->pid, (u64)fp->binder,
2133 node->debug_id, (u64)fp->cookie,
2134 (u64)node->cookie);
2135 ret = -EINVAL;
2136 goto done;
2137 }
2138 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2139 ret = -EPERM;
2140 goto done;
2141 }
2143 ret = binder_inc_ref_for_node(target_proc, node,
2144 fp->hdr.type == BINDER_TYPE_BINDER,
2145 &thread->todo, &rdata);
2146 if (ret)
2147 goto done;
2149 if (fp->hdr.type == BINDER_TYPE_BINDER)
2150 fp->hdr.type = BINDER_TYPE_HANDLE;
2151 else
2152 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2153 fp->binder = 0;
2154 fp->handle = rdata.desc;
2155 fp->cookie = 0;
2157 trace_binder_transaction_node_to_ref(t, node, &rdata);
2158 binder_debug(BINDER_DEBUG_TRANSACTION,
2159 " node %d u%016llx -> ref %d desc %d\n",
2160 node->debug_id, (u64)node->ptr,
2161 rdata.debug_id, rdata.desc);
2162 done:
2163 binder_put_node(node);
2164 return ret;
2165 }
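/*
 * Editor's illustrative sketch (not driver code): the effect of
 * binder_translate_binder() on the object embedded in the payload,
 * with hypothetical values:
 *
 *   sender copy (owner of the node):     receiver copy:
 *     .hdr.type = BINDER_TYPE_BINDER  ->   .hdr.type = BINDER_TYPE_HANDLE
 *     .binder   = 0x7fa3c0de0000      ->   .handle   = rdata.desc (e.g. 5)
 *
 * The receiver never sees the sender's raw pointer or cookie; it gets
 * a descriptor backed by a binder_ref created or strengthened on the
 * target proc by binder_inc_ref_for_node().
 */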
2167 static int binder_translate_handle(struct flat_binder_object *fp,
2168 struct binder_transaction *t,
2169 struct binder_thread *thread)
2171 struct binder_proc *proc = thread->proc;
2172 struct binder_proc *target_proc = t->to_proc;
2173 struct binder_node *node;
2174 struct binder_ref_data src_rdata;
2175 int ret = 0;
2177 node = binder_get_node_from_ref(proc, fp->handle,
2178 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2179 if (!node) {
2180 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2181 proc->pid, thread->pid, fp->handle);
2182 return -EINVAL;
2183 }
2184 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2185 ret = -EPERM;
2186 goto done;
2187 }
2189 binder_node_lock(node);
2190 if (node->proc == target_proc) {
2191 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2192 fp->hdr.type = BINDER_TYPE_BINDER;
2193 else
2194 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2195 fp->binder = node->ptr;
2196 fp->cookie = node->cookie;
2197 if (node->proc)
2198 binder_inner_proc_lock(node->proc);
2199 else
2200 __acquire(&node->proc->inner_lock);
2201 binder_inc_node_nilocked(node,
2202 fp->hdr.type == BINDER_TYPE_BINDER,
2203 0, NULL);
2204 if (node->proc)
2205 binder_inner_proc_unlock(node->proc);
2206 else
2207 __release(&node->proc->inner_lock);
2208 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2209 binder_debug(BINDER_DEBUG_TRANSACTION,
2210 " ref %d desc %d -> node %d u%016llx\n",
2211 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2212 (u64)node->ptr);
2213 binder_node_unlock(node);
2214 } else {
2215 struct binder_ref_data dest_rdata;
2217 binder_node_unlock(node);
2218 ret = binder_inc_ref_for_node(target_proc, node,
2219 fp->hdr.type == BINDER_TYPE_HANDLE,
2220 NULL, &dest_rdata);
2221 if (ret)
2222 goto done;
2224 fp->binder = 0;
2225 fp->handle = dest_rdata.desc;
2226 fp->cookie = 0;
2227 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2228 &dest_rdata);
2229 binder_debug(BINDER_DEBUG_TRANSACTION,
2230 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2231 src_rdata.debug_id, src_rdata.desc,
2232 dest_rdata.debug_id, dest_rdata.desc,
2233 node->debug_id);
2234 }
2235 done:
2236 binder_put_node(node);
2237 return ret;
2238 }
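/*
 * Editor's note: binder_translate_handle() has two outcomes. If the
 * node behind the handle lives in the target process itself, the
 * object is rewritten back into a BINDER_TYPE_(WEAK_)BINDER carrying
 * the owner's ptr/cookie; otherwise a reference is acquired in the
 * target and the handle is rewritten to the target's own descriptor
 * (dest_rdata.desc). Hypothetical example: proc A sends handle 3,
 * whose node is owned by B, to B itself - B receives its own binder
 * pointer rather than a handle.
 */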
2240 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2241 struct binder_transaction *t,
2242 struct binder_thread *thread,
2243 struct binder_transaction *in_reply_to)
2245 struct binder_proc *proc = thread->proc;
2246 struct binder_proc *target_proc = t->to_proc;
2247 struct binder_txn_fd_fixup *fixup;
2248 struct file *file;
2249 int ret = 0;
2250 bool target_allows_fd;
2252 if (in_reply_to)
2253 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2254 else
2255 target_allows_fd = t->buffer->target_node->accept_fds;
2256 if (!target_allows_fd) {
2257 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2258 proc->pid, thread->pid,
2259 in_reply_to ? "reply" : "transaction",
2260 fd);
2261 ret = -EPERM;
2262 goto err_fd_not_accepted;
2263 }
2265 file = fget(fd);
2266 if (!file) {
2267 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2268 proc->pid, thread->pid, fd);
2269 ret = -EBADF;
2270 goto err_fget;
2271 }
2272 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2273 if (ret < 0) {
2274 ret = -EPERM;
2275 goto err_security;
2276 }
2279 * Add fixup record for this transaction. The allocation
2280 * of the fd in the target needs to be done from a
2281 * target thread.
2283 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2284 if (!fixup) {
2285 ret = -ENOMEM;
2286 goto err_alloc;
2287 }
2288 fixup->file = file;
2289 fixup->offset = fd_offset;
2290 fixup->target_fd = -1;
2291 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2292 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2294 return ret;
2296 err_alloc:
2297 err_security:
2298 fput(file);
2299 err_fget:
2300 err_fd_not_accepted:
2301 return ret;
2302 }
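/*
 * Editor's note: fds are not installed here. binder_translate_fd()
 * only records a fixup (target_fd == -1 meaning "not yet allocated");
 * the actual fd allocation and install happen later in the *target*
 * process context when it consumes the transaction, because fd tables
 * are per-task. A hypothetical view of one queued record:
 *
 *   fixup = { .file = <sender's struct file>,
 *             .offset = <where the u32 fd lives in t->buffer>,
 *             .target_fd = -1 };
 */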
2305 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2306 * @offset offset in target buffer to fixup
2307 * @skip_size bytes to skip in copy (fixup will be written later)
2308 * @fixup_data data to write at fixup offset
2311 * This is used for the pointer fixup list (pf) which is created and consumed
2312 * during binder_transaction() and is only accessed locally. No
2313 * locking is necessary.
2315 * The list is ordered by @offset.
2317 struct binder_ptr_fixup {
2318 binder_size_t offset;
2319 size_t skip_size;
2320 binder_uintptr_t fixup_data;
2321 struct list_head node;
2325 * struct binder_sg_copy - scatter-gather data to be copied
2326 * @offset offset in target buffer
2327 * @sender_uaddr user address in source buffer
2328 * @length bytes to copy
2331 * This is used for the sg copy list (sgc) which is created and consumed
2332 * during binder_transaction() and is only accessed locally. No
2333 * locking is necessary.
2335 * The list is ordered by @offset.
2337 struct binder_sg_copy {
2338 binder_size_t offset;
2339 const void __user *sender_uaddr;
2340 size_t length;
2341 struct list_head node;
2345 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2346 * @alloc: binder_alloc associated with @buffer
2347 * @buffer: binder buffer in target process
2348 * @sgc_head: list_head of scatter-gather copy list
2349 * @pf_head: list_head of pointer fixup list
2351 * Processes all elements of @sgc_head, applying fixups from @pf_head
2352 * and copying the scatter-gather data from the source process' user
2353 * buffer to the target's buffer. It is expected that the list creation
2354 * and processing all occurs during binder_transaction() so these lists
2355 * are only accessed in local context.
2357 * Return: 0=success, else -errno
2359 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2360 struct binder_buffer *buffer,
2361 struct list_head *sgc_head,
2362 struct list_head *pf_head)
2365 struct binder_sg_copy *sgc, *tmpsgc;
2366 struct binder_ptr_fixup *tmppf;
2367 struct binder_ptr_fixup *pf =
2368 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2369 node);
2371 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2372 size_t bytes_copied = 0;
2374 while (bytes_copied < sgc->length) {
2376 size_t bytes_left = sgc->length - bytes_copied;
2377 size_t offset = sgc->offset + bytes_copied;
2380 * We copy up to the fixup (pointed to by pf)
2382 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2383 : bytes_left;
2384 if (!ret && copy_size)
2385 ret = binder_alloc_copy_user_to_buffer(
2386 alloc, buffer, offset,
2388 sgc->sender_uaddr + bytes_copied,
2389 copy_size);
2390 bytes_copied += copy_size;
2391 if (copy_size != bytes_left) {
2393 /* we stopped at a fixup offset */
2394 if (pf->skip_size) {
2396 * we are just skipping. This is for
2397 * BINDER_TYPE_FDA where the translated
2398 * fds will be fixed up when we get
2399 * to target context.
2401 bytes_copied += pf->skip_size;
2403 /* apply the fixup indicated by pf */
2404 if (!ret)
2405 ret = binder_alloc_copy_to_buffer(
2406 alloc, buffer, pf->offset,
2408 &pf->fixup_data,
2409 sizeof(pf->fixup_data));
2410 bytes_copied += sizeof(pf->fixup_data);
2412 list_del(&pf->node);
2414 pf = list_first_entry_or_null(pf_head,
2415 struct binder_ptr_fixup, node);
2418 list_del(&sgc->node);
2421 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2422 BUG_ON(pf->skip_size == 0);
2423 list_del(&pf->node);
2426 BUG_ON(!list_empty(sgc_head));
2428 return ret > 0 ? -EINVAL : ret;
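/*
 * Editor's worked example (hypothetical values): one sg block with
 * sgc->offset 0x40, length 0x30, and a single fixup at pf->offset
 * 0x50 carrying 8 bytes of fixup_data:
 *
 *   copy  [0x40, 0x50) from sgc->sender_uaddr       (copy_size 0x10)
 *   write 8 bytes of pf->fixup_data at 0x50         (or skip, for FDA)
 *   copy  [0x58, 0x70) from sender_uaddr + 0x18     (the remainder)
 *
 * so untranslated sender values never land in the target buffer.
 */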
2432 * binder_cleanup_deferred_txn_lists() - free specified lists
2433 * @sgc_head: list_head of scatter-gather copy list
2434 * @pf_head: list_head of pointer fixup list
2436 * Called to clean up @sgc_head and @pf_head if there is an
2437 * error.
2439 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2440 struct list_head *pf_head)
2442 struct binder_sg_copy *sgc, *tmpsgc;
2443 struct binder_ptr_fixup *pf, *tmppf;
2445 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2446 list_del(&sgc->node);
2447 kfree(sgc);
2449 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2450 list_del(&pf->node);
2451 kfree(pf);
2456 * binder_defer_copy() - queue a scatter-gather buffer for copy
2457 * @sgc_head: list_head of scatter-gather copy list
2458 * @offset: binder buffer offset in target process
2459 * @sender_uaddr: user address in source process
2460 * @length: bytes to copy
2462 * Specify a scatter-gather block to be copied. The actual copy must
2463 * be deferred until all the needed fixups are identified and queued.
2464 * Then the copy and fixups are done together so un-translated values
2465 * from the source are never visible in the target buffer.
2467 * We are guaranteed that repeated calls to this function will have
2468 * monotonically increasing @offset values so the list will naturally
2469 * be ordered.
2471 * Return: 0=success, else -errno
2473 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2474 const void __user *sender_uaddr, size_t length)
2476 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2478 if (!bc)
2479 return -ENOMEM;
2481 bc->offset = offset;
2482 bc->sender_uaddr = sender_uaddr;
2483 bc->length = length;
2484 INIT_LIST_HEAD(&bc->node);
2487 * We are guaranteed that the deferred copies are in-order
2488 * so just add to the tail.
2490 list_add_tail(&bc->node, sgc_head);
2492 return 0;
2493 }
2496 * binder_add_fixup() - queue a fixup to be applied to sg copy
2497 * @pf_head: list_head of binder ptr fixup list
2498 * @offset: binder buffer offset in target process
2499 * @fixup: bytes to be copied for fixup
2500 * @skip_size: bytes to skip when copying (fixup will be applied later)
2502 * Add the specified fixup to a list ordered by @offset. When copying
2503 * the scatter-gather buffers, the fixup will be copied instead of
2504 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2505 * will be applied later (in target process context), so we just skip
2506 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2507 * fixup data specified by @fixup.
2509 * This function is called *mostly* in @offset order, but there are
2510 * exceptions. Since out-of-order inserts are relatively uncommon,
2511 * we insert the new element by searching backward from the tail of
2512 * the list.
2514 * Return: 0=success, else -errno
2516 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2517 binder_uintptr_t fixup, size_t skip_size)
2519 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2520 struct binder_ptr_fixup *tmppf;
2522 if (!pf)
2523 return -ENOMEM;
2525 pf->offset = offset;
2526 pf->fixup_data = fixup;
2527 pf->skip_size = skip_size;
2528 INIT_LIST_HEAD(&pf->node);
2530 /* Fixups are *mostly* added in-order, but there are some
2531 * exceptions. Look backwards through list for insertion point.
2533 list_for_each_entry_reverse(tmppf, pf_head, node) {
2534 if (tmppf->offset < pf->offset) {
2535 list_add(&pf->node, &tmppf->node);
2536 return 0;
2537 }
2538 }
2540 * if we get here, then the new offset is the lowest so
2541 * insert at the head
2543 list_add(&pf->node, pf_head);
2544 return 0;
2545 }
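/*
 * Editor's example (hypothetical offsets): with fixups already queued
 * at offsets 0x10 and 0x30, binder_add_fixup(pf_head, 0x20, ...) walks
 * backward from the tail, passes 0x30, stops at 0x10 and inserts after
 * it, keeping the list ordered: 0x10 -> 0x20 -> 0x30.
 */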
2547 static int binder_translate_fd_array(struct list_head *pf_head,
2548 struct binder_fd_array_object *fda,
2549 const void __user *sender_ubuffer,
2550 struct binder_buffer_object *parent,
2551 struct binder_buffer_object *sender_uparent,
2552 struct binder_transaction *t,
2553 struct binder_thread *thread,
2554 struct binder_transaction *in_reply_to)
2556 binder_size_t fdi, fd_buf_size;
2557 binder_size_t fda_offset;
2558 const void __user *sender_ufda_base;
2559 struct binder_proc *proc = thread->proc;
2560 int ret;
2562 if (fda->num_fds == 0)
2563 return 0;
2565 fd_buf_size = sizeof(u32) * fda->num_fds;
2566 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2567 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2568 proc->pid, thread->pid, (u64)fda->num_fds);
2569 return -EINVAL;
2570 }
2571 if (fd_buf_size > parent->length ||
2572 fda->parent_offset > parent->length - fd_buf_size) {
2573 /* No space for all file descriptors here. */
2574 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2575 proc->pid, thread->pid, (u64)fda->num_fds);
2576 return -EINVAL;
2577 }
2579 * the source data for binder_buffer_object is visible
2580 * to user-space and the @buffer element is the user
2581 * pointer to the buffer_object containing the fd_array.
2582 * Convert the address to an offset relative to
2583 * the base of the transaction buffer.
2585 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2586 fda->parent_offset;
2587 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2588 fda->parent_offset;
2590 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2591 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2592 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2593 proc->pid, thread->pid);
2594 return -EINVAL;
2595 }
2596 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2597 if (ret)
2598 return ret;
2600 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2601 u32 fd;
2602 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2603 binder_size_t sender_uoffset = fdi * sizeof(fd);
2605 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2606 if (!ret)
2607 ret = binder_translate_fd(fd, offset, t, thread,
2608 in_reply_to);
2609 if (ret)
2610 return ret > 0 ? -EINVAL : ret;
2611 }
2612 return 0;
2613 }
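/*
 * Editor's note: for BINDER_TYPE_FDA the parent buffer object carries
 * a plain array of u32 fds at fda->parent_offset. A skip fixup of
 * num_fds * sizeof(u32) bytes is registered first so the raw sender
 * fds are never copied into the target; each fd is then read from the
 * sender with copy_from_user() and queued for per-fd translation via
 * binder_translate_fd().
 */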
2615 static int binder_fixup_parent(struct list_head *pf_head,
2616 struct binder_transaction *t,
2617 struct binder_thread *thread,
2618 struct binder_buffer_object *bp,
2619 binder_size_t off_start_offset,
2620 binder_size_t num_valid,
2621 binder_size_t last_fixup_obj_off,
2622 binder_size_t last_fixup_min_off)
2624 struct binder_buffer_object *parent;
2625 struct binder_buffer *b = t->buffer;
2626 struct binder_proc *proc = thread->proc;
2627 struct binder_proc *target_proc = t->to_proc;
2628 struct binder_object object;
2629 binder_size_t buffer_offset;
2630 binder_size_t parent_offset;
2632 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2633 return 0;
2635 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2636 off_start_offset, &parent_offset,
2637 num_valid);
2638 if (!parent) {
2639 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2640 proc->pid, thread->pid);
2641 return -EINVAL;
2642 }
2644 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2645 parent_offset, bp->parent_offset,
2646 last_fixup_obj_off,
2647 last_fixup_min_off)) {
2648 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2649 proc->pid, thread->pid);
2650 return -EINVAL;
2651 }
2653 if (parent->length < sizeof(binder_uintptr_t) ||
2654 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2655 /* No space for a pointer here! */
2656 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2657 proc->pid, thread->pid);
2658 return -EINVAL;
2659 }
2660 buffer_offset = bp->parent_offset +
2661 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2662 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2663 }
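/*
 * Editor's sketch (hypothetical layout): a child buffer object B with
 * BINDER_BUFFER_FLAG_HAS_PARENT points at parent object P. If P's
 * payload lands at target offset 0x100 and bp->parent_offset is 0x18,
 * the fixup writes B's target address (bp->buffer, already rewritten)
 * into the target buffer at offset 0x118, so P's embedded pointer is
 * valid in the receiver's address space.
 */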
2666 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2667 * @t1: the pending async txn in the frozen process
2668 * @t2: the new async txn to supersede the outdated pending one
2670 * Return: true if t2 can supersede t1
2671 * false if t2 can not supersede t1
2673 static bool binder_can_update_transaction(struct binder_transaction *t1,
2674 struct binder_transaction *t2)
2676 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2677 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2678 return false;
2679 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2680 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2681 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2682 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2683 return true;
2684 return false;
2685 }
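/*
 * Editor's example: two oneway transactions sent with
 * TF_ONE_WAY | TF_UPDATE_TXN, same code, same flags and same target
 * node, while the receiver is frozen - the queued one may be
 * superseded by the newer one. Any mismatch (code, flags, node
 * ptr/cookie, or sending pid) makes them unrelated and both are kept.
 */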
2688 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2689 * @t: new async transaction
2690 * @target_list: list to find outdated transaction
2692 * Return: the outdated transaction if found
2693 * NULL if no outdated transaction can be found
2695 * Requires the proc->inner_lock to be held.
2697 static struct binder_transaction *
2698 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2699 struct list_head *target_list)
2701 struct binder_work *w;
2703 list_for_each_entry(w, target_list, entry) {
2704 struct binder_transaction *t_queued;
2706 if (w->type != BINDER_WORK_TRANSACTION)
2707 continue;
2708 t_queued = container_of(w, struct binder_transaction, work);
2709 if (binder_can_update_transaction(t_queued, t))
2710 return t_queued;
2711 }
2712 return NULL;
2713 }
2716 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2717 * @t: transaction to send
2718 * @proc: process to send the transaction to
2719 * @thread: thread in @proc to send the transaction to (may be NULL)
2721 * This function queues a transaction to the specified process. It will try
2722 * to find a thread in the target process to handle the transaction and
2723 * wake it up. If no thread is found, the work is queued to the proc
2724 * waitqueue.
2726 * If the @thread parameter is not NULL, the transaction is always queued
2727 * to the waitlist of that specific thread.
2729 * Return: 0 if the transaction was successfully queued
2730 * BR_DEAD_REPLY if the target process or thread is dead
2731 * BR_FROZEN_REPLY if the target process or thread is frozen and
2732 * the sync transaction was rejected
2733 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2734 * and the async transaction was successfully queued
2736 static int binder_proc_transaction(struct binder_transaction *t,
2737 struct binder_proc *proc,
2738 struct binder_thread *thread)
2740 struct binder_node *node = t->buffer->target_node;
2741 bool oneway = !!(t->flags & TF_ONE_WAY);
2742 bool pending_async = false;
2743 struct binder_transaction *t_outdated = NULL;
2744 bool frozen = false;
2747 binder_node_lock(node);
2748 if (oneway) {
2749 BUG_ON(thread);
2750 if (node->has_async_transaction)
2751 pending_async = true;
2752 else
2753 node->has_async_transaction = true;
2754 }
2756 binder_inner_proc_lock(proc);
2757 if (proc->is_frozen) {
2758 frozen = true;
2759 proc->sync_recv |= !oneway;
2760 proc->async_recv |= oneway;
2761 }
2763 if ((frozen && !oneway) || proc->is_dead ||
2764 (thread && thread->is_dead)) {
2765 binder_inner_proc_unlock(proc);
2766 binder_node_unlock(node);
2767 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2770 if (!thread && !pending_async)
2771 thread = binder_select_thread_ilocked(proc);
2773 if (thread) {
2774 binder_enqueue_thread_work_ilocked(thread, &t->work);
2775 } else if (!pending_async) {
2776 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2777 } else {
2778 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2779 t_outdated = binder_find_outdated_transaction_ilocked(t,
2780 &node->async_todo);
2781 if (t_outdated) {
2782 binder_debug(BINDER_DEBUG_TRANSACTION,
2783 "txn %d supersedes %d\n",
2784 t->debug_id, t_outdated->debug_id);
2785 list_del_init(&t_outdated->work.entry);
2786 proc->outstanding_txns--;
2787 }
2788 }
2789 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2790 }
2792 if (!pending_async)
2793 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2795 proc->outstanding_txns++;
2796 binder_inner_proc_unlock(proc);
2797 binder_node_unlock(node);
2800 * To reduce potential contention, free the outdated transaction and
2801 * buffer after releasing the locks.
2803 if (t_outdated) {
2804 struct binder_buffer *buffer = t_outdated->buffer;
2806 t_outdated->buffer = NULL;
2807 buffer->transaction = NULL;
2808 trace_binder_transaction_update_buffer_release(buffer);
2809 binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
2810 binder_alloc_free_buf(&proc->alloc, buffer);
2811 kfree(t_outdated);
2812 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2813 }
2815 if (oneway && frozen)
2816 return BR_TRANSACTION_PENDING_FROZEN;
2818 return 0;
2819 }
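/*
 * Editor's note: BR_TRANSACTION_PENDING_FROZEN is not an error; the
 * async transaction *was* queued, but to a frozen process. The caller
 * (binder_transaction()) turns this into a
 * BINDER_WORK_TRANSACTION_PENDING completion so the sender can tell
 * "delivered" apart from "parked until the target thaws". Sync
 * transactions to a frozen proc still fail with BR_FROZEN_REPLY above.
 */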
2822 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2823 * @node: struct binder_node for which to get refs
2824 * @procp: returns @node->proc if valid
2825 * @error: if no @procp then returns BR_DEAD_REPLY
2827 * User-space normally keeps the node alive when creating a transaction
2828 * since it has a reference to the target. The local strong ref keeps it
2829 * alive if the sending process dies before the target process processes
2830 * the transaction. If the source process is malicious or has a reference
2831 * counting bug, relying on the local strong ref can fail.
2833 * Since user-space can cause the local strong ref to go away, we also take
2834 * a tmpref on the node to ensure it survives while we are constructing
2835 * the transaction. We also need a tmpref on the proc while we are
2836 * constructing the transaction, so we take that here as well.
2838 * Return: the target_node with refs taken, or NULL if @node->proc is NULL.
2839 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2840 * target proc has died, @error is set to BR_DEAD_REPLY.
2842 static struct binder_node *binder_get_node_refs_for_txn(
2843 struct binder_node *node,
2844 struct binder_proc **procp,
2847 struct binder_node *target_node = NULL;
2849 binder_node_inner_lock(node);
2850 if (node->proc) {
2851 target_node = node;
2852 binder_inc_node_nilocked(node, 1, 0, NULL);
2853 binder_inc_node_tmpref_ilocked(node);
2854 node->proc->tmp_ref++;
2855 *procp = node->proc;
2856 } else
2857 *error = BR_DEAD_REPLY;
2858 binder_node_inner_unlock(node);
2860 return target_node;
2861 }
2863 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2864 uint32_t command, int32_t param)
2866 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2868 if (!from) {
2869 /* annotation for sparse */
2870 __release(&from->proc->inner_lock);
2871 return;
2872 }
2874 /* don't override existing errors */
2875 if (from->ee.command == BR_OK)
2876 binder_set_extended_error(&from->ee, id, command, param);
2877 binder_inner_proc_unlock(from->proc);
2878 binder_thread_dec_tmpref(from);
2881 static void binder_transaction(struct binder_proc *proc,
2882 struct binder_thread *thread,
2883 struct binder_transaction_data *tr, int reply,
2884 binder_size_t extra_buffers_size)
2887 struct binder_transaction *t;
2888 struct binder_work *w;
2889 struct binder_work *tcomplete;
2890 binder_size_t buffer_offset = 0;
2891 binder_size_t off_start_offset, off_end_offset;
2892 binder_size_t off_min;
2893 binder_size_t sg_buf_offset, sg_buf_end_offset;
2894 binder_size_t user_offset = 0;
2895 struct binder_proc *target_proc = NULL;
2896 struct binder_thread *target_thread = NULL;
2897 struct binder_node *target_node = NULL;
2898 struct binder_transaction *in_reply_to = NULL;
2899 struct binder_transaction_log_entry *e;
2900 uint32_t return_error = 0;
2901 uint32_t return_error_param = 0;
2902 uint32_t return_error_line = 0;
2903 binder_size_t last_fixup_obj_off = 0;
2904 binder_size_t last_fixup_min_off = 0;
2905 struct binder_context *context = proc->context;
2906 int t_debug_id = atomic_inc_return(&binder_last_id);
2907 char *secctx = NULL;
2908 u32 secctx_sz = 0;
2909 struct list_head sgc_head;
2910 struct list_head pf_head;
2911 const void __user *user_buffer = (const void __user *)
2912 (uintptr_t)tr->data.ptr.buffer;
2913 INIT_LIST_HEAD(&sgc_head);
2914 INIT_LIST_HEAD(&pf_head);
2916 e = binder_transaction_log_add(&binder_transaction_log);
2917 e->debug_id = t_debug_id;
2918 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2919 e->from_proc = proc->pid;
2920 e->from_thread = thread->pid;
2921 e->target_handle = tr->target.handle;
2922 e->data_size = tr->data_size;
2923 e->offsets_size = tr->offsets_size;
2924 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2926 binder_inner_proc_lock(proc);
2927 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2928 binder_inner_proc_unlock(proc);
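/*
 * Editor's overview of the phases below (a summary, not new behavior):
 *  1) resolve the target: for replies, pop in_reply_to off the
 *     transaction stack; otherwise look up the handle (or the context
 *     manager node for handle 0) and pin node/proc with tmprefs;
 *  2) allocate t, tcomplete and the target-side buffer;
 *  3) copy the payload, translating each offset-indexed object
 *     (binder/handle/fd/fda/ptr) and deferring sg copies and fixups;
 *  4) queue the work on the target and the completion on the sender,
 *     unwinding through the err_* labels on any failure.
 */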
2930 if (reply) {
2931 binder_inner_proc_lock(proc);
2932 in_reply_to = thread->transaction_stack;
2933 if (in_reply_to == NULL) {
2934 binder_inner_proc_unlock(proc);
2935 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2936 proc->pid, thread->pid);
2937 return_error = BR_FAILED_REPLY;
2938 return_error_param = -EPROTO;
2939 return_error_line = __LINE__;
2940 goto err_empty_call_stack;
2942 if (in_reply_to->to_thread != thread) {
2943 spin_lock(&in_reply_to->lock);
2944 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2945 proc->pid, thread->pid, in_reply_to->debug_id,
2946 in_reply_to->to_proc ?
2947 in_reply_to->to_proc->pid : 0,
2948 in_reply_to->to_thread ?
2949 in_reply_to->to_thread->pid : 0);
2950 spin_unlock(&in_reply_to->lock);
2951 binder_inner_proc_unlock(proc);
2952 return_error = BR_FAILED_REPLY;
2953 return_error_param = -EPROTO;
2954 return_error_line = __LINE__;
2956 goto err_bad_call_stack;
2958 thread->transaction_stack = in_reply_to->to_parent;
2959 binder_inner_proc_unlock(proc);
2960 binder_set_nice(in_reply_to->saved_priority);
2961 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2962 if (target_thread == NULL) {
2963 /* annotation for sparse */
2964 __release(&target_thread->proc->inner_lock);
2965 binder_txn_error("%d:%d reply target not found\n",
2966 thread->pid, proc->pid);
2967 return_error = BR_DEAD_REPLY;
2968 return_error_line = __LINE__;
2969 goto err_dead_binder;
2971 if (target_thread->transaction_stack != in_reply_to) {
2972 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2973 proc->pid, thread->pid,
2974 target_thread->transaction_stack ?
2975 target_thread->transaction_stack->debug_id : 0,
2976 in_reply_to->debug_id);
2977 binder_inner_proc_unlock(target_thread->proc);
2978 return_error = BR_FAILED_REPLY;
2979 return_error_param = -EPROTO;
2980 return_error_line = __LINE__;
2982 target_thread = NULL;
2983 goto err_dead_binder;
2985 target_proc = target_thread->proc;
2986 target_proc->tmp_ref++;
2987 binder_inner_proc_unlock(target_thread->proc);
2988 } else {
2989 if (tr->target.handle) {
2990 struct binder_ref *ref;
2993 * There must already be a strong ref
2994 * on this node. If so, do a strong
2995 * increment on the node to ensure it
2996 * stays alive until the transaction is
2999 binder_proc_lock(proc);
3000 ref = binder_get_ref_olocked(proc, tr->target.handle,
3001 true);
3002 if (ref) {
3003 target_node = binder_get_node_refs_for_txn(
3004 ref->node, &target_proc,
3005 &return_error);
3006 } else {
3007 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3008 proc->pid, thread->pid, tr->target.handle);
3009 return_error = BR_FAILED_REPLY;
3010 }
3011 binder_proc_unlock(proc);
3012 } else {
3013 mutex_lock(&context->context_mgr_node_lock);
3014 target_node = context->binder_context_mgr_node;
3015 if (target_node)
3016 target_node = binder_get_node_refs_for_txn(
3017 target_node, &target_proc,
3018 &return_error);
3019 else
3020 return_error = BR_DEAD_REPLY;
3021 mutex_unlock(&context->context_mgr_node_lock);
3022 if (target_node && target_proc->pid == proc->pid) {
3023 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3024 proc->pid, thread->pid);
3025 return_error = BR_FAILED_REPLY;
3026 return_error_param = -EINVAL;
3027 return_error_line = __LINE__;
3028 goto err_invalid_target_handle;
3029 }
3030 }
3031 if (!target_node) {
3032 binder_txn_error("%d:%d cannot find target node\n",
3033 thread->pid, proc->pid);
3035 * return_error is set above
3037 return_error_param = -EINVAL;
3038 return_error_line = __LINE__;
3039 goto err_dead_binder;
3041 e->to_node = target_node->debug_id;
3042 if (WARN_ON(proc == target_proc)) {
3043 binder_txn_error("%d:%d self transactions not allowed\n",
3044 thread->pid, proc->pid);
3045 return_error = BR_FAILED_REPLY;
3046 return_error_param = -EINVAL;
3047 return_error_line = __LINE__;
3048 goto err_invalid_target_handle;
3050 if (security_binder_transaction(proc->cred,
3051 target_proc->cred) < 0) {
3052 binder_txn_error("%d:%d transaction credentials failed\n",
3053 thread->pid, proc->pid);
3054 return_error = BR_FAILED_REPLY;
3055 return_error_param = -EPERM;
3056 return_error_line = __LINE__;
3057 goto err_invalid_target_handle;
3059 binder_inner_proc_lock(proc);
3061 w = list_first_entry_or_null(&thread->todo,
3062 struct binder_work, entry);
3063 if (!(tr->flags & TF_ONE_WAY) && w &&
3064 w->type == BINDER_WORK_TRANSACTION) {
3066 * Do not allow new outgoing transaction from a
3067 * thread that has a transaction at the head of
3068 * its todo list. Only need to check the head
3069 * because binder_select_thread_ilocked picks a
3070 * thread from proc->waiting_threads to enqueue
3071 * the transaction, and nothing is queued to the
3072 * todo list while the thread is on waiting_threads.
3074 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3075 proc->pid, thread->pid);
3076 binder_inner_proc_unlock(proc);
3077 return_error = BR_FAILED_REPLY;
3078 return_error_param = -EPROTO;
3079 return_error_line = __LINE__;
3080 goto err_bad_todo_list;
3083 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3084 struct binder_transaction *tmp;
3086 tmp = thread->transaction_stack;
3087 if (tmp->to_thread != thread) {
3088 spin_lock(&tmp->lock);
3089 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3090 proc->pid, thread->pid, tmp->debug_id,
3091 tmp->to_proc ? tmp->to_proc->pid : 0,
3092 tmp->to_thread ?
3093 tmp->to_thread->pid : 0);
3094 spin_unlock(&tmp->lock);
3095 binder_inner_proc_unlock(proc);
3096 return_error = BR_FAILED_REPLY;
3097 return_error_param = -EPROTO;
3098 return_error_line = __LINE__;
3099 goto err_bad_call_stack;
3100 }
3101 while (tmp) {
3102 struct binder_thread *from;
3104 spin_lock(&tmp->lock);
3105 from = tmp->from;
3106 if (from && from->proc == target_proc) {
3107 atomic_inc(&from->tmp_ref);
3108 target_thread = from;
3109 spin_unlock(&tmp->lock);
3110 break;
3111 }
3112 spin_unlock(&tmp->lock);
3113 tmp = tmp->from_parent;
3116 binder_inner_proc_unlock(proc);
3118 if (target_thread)
3119 e->to_thread = target_thread->pid;
3120 e->to_proc = target_proc->pid;
3122 /* TODO: reuse incoming transaction for reply */
3123 t = kzalloc(sizeof(*t), GFP_KERNEL);
3124 if (t == NULL) {
3125 binder_txn_error("%d:%d cannot allocate transaction\n",
3126 thread->pid, proc->pid);
3127 return_error = BR_FAILED_REPLY;
3128 return_error_param = -ENOMEM;
3129 return_error_line = __LINE__;
3130 goto err_alloc_t_failed;
3132 INIT_LIST_HEAD(&t->fd_fixups);
3133 binder_stats_created(BINDER_STAT_TRANSACTION);
3134 spin_lock_init(&t->lock);
3136 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3137 if (tcomplete == NULL) {
3138 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3139 thread->pid, proc->pid);
3140 return_error = BR_FAILED_REPLY;
3141 return_error_param = -ENOMEM;
3142 return_error_line = __LINE__;
3143 goto err_alloc_tcomplete_failed;
3145 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3147 t->debug_id = t_debug_id;
3149 if (reply)
3150 binder_debug(BINDER_DEBUG_TRANSACTION,
3151 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3152 proc->pid, thread->pid, t->debug_id,
3153 target_proc->pid, target_thread->pid,
3154 (u64)tr->data.ptr.buffer,
3155 (u64)tr->data.ptr.offsets,
3156 (u64)tr->data_size, (u64)tr->offsets_size,
3157 (u64)extra_buffers_size);
3158 else
3159 binder_debug(BINDER_DEBUG_TRANSACTION,
3160 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3161 proc->pid, thread->pid, t->debug_id,
3162 target_proc->pid, target_node->debug_id,
3163 (u64)tr->data.ptr.buffer,
3164 (u64)tr->data.ptr.offsets,
3165 (u64)tr->data_size, (u64)tr->offsets_size,
3166 (u64)extra_buffers_size);
3168 if (!reply && !(tr->flags & TF_ONE_WAY))
3169 t->from = thread;
3170 else
3171 t->from = NULL;
3172 t->sender_euid = task_euid(proc->tsk);
3173 t->to_proc = target_proc;
3174 t->to_thread = target_thread;
3175 t->code = tr->code;
3176 t->flags = tr->flags;
3177 t->priority = task_nice(current);
3179 if (target_node && target_node->txn_security_ctx) {
3180 u32 secid;
3181 binder_size_t added_size;
3183 security_cred_getsecid(proc->cred, &secid);
3184 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3185 if (ret) {
3186 binder_txn_error("%d:%d failed to get security context\n",
3187 thread->pid, proc->pid);
3188 return_error = BR_FAILED_REPLY;
3189 return_error_param = ret;
3190 return_error_line = __LINE__;
3191 goto err_get_secctx_failed;
3193 added_size = ALIGN(secctx_sz, sizeof(u64));
3194 extra_buffers_size += added_size;
3195 if (extra_buffers_size < added_size) {
3196 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3197 thread->pid, proc->pid);
3198 return_error = BR_FAILED_REPLY;
3199 return_error_param = -EINVAL;
3200 return_error_line = __LINE__;
3201 goto err_bad_extra_size;
3205 trace_binder_transaction(reply, t, target_node);
3207 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3208 tr->offsets_size, extra_buffers_size,
3209 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3210 if (IS_ERR(t->buffer)) {
3211 char *s;
3213 ret = PTR_ERR(t->buffer);
3214 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3215 : (ret == -ENOSPC) ? ": no space left"
3216 : (ret == -ENOMEM) ? ": memory allocation failed"
3217 : "";
3218 binder_txn_error("cannot allocate buffer%s", s);
3220 return_error_param = PTR_ERR(t->buffer);
3221 return_error = return_error_param == -ESRCH ?
3222 BR_DEAD_REPLY : BR_FAILED_REPLY;
3223 return_error_line = __LINE__;
3225 goto err_binder_alloc_buf_failed;
3227 if (secctx) {
3228 int err;
3229 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3230 ALIGN(tr->offsets_size, sizeof(void *)) +
3231 ALIGN(extra_buffers_size, sizeof(void *)) -
3232 ALIGN(secctx_sz, sizeof(u64));
3234 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3235 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3236 t->buffer, buf_offset,
3237 secctx, secctx_sz);
3238 if (err) {
3239 t->security_ctx = 0;
3240 WARN_ON(1);
3241 }
3242 security_release_secctx(secctx, secctx_sz);
3243 secctx = NULL;
3244 }
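/*
 * Editor's sketch of the target buffer layout at this point
 * (offsets hypothetical, regions pointer/u64 aligned):
 *
 *   0              +data_size      +offsets_size       +sg space
 *   | transaction data | offsets array | sg buffer objects | secctx |
 *
 * buf_offset above points at the trailing secctx slot, which is why
 * it is computed back from the end of extra_buffers_size.
 */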
3245 t->buffer->debug_id = t->debug_id;
3246 t->buffer->transaction = t;
3247 t->buffer->target_node = target_node;
3248 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3249 trace_binder_transaction_alloc_buf(t->buffer);
3251 if (binder_alloc_copy_user_to_buffer(
3252 &target_proc->alloc,
3253 t->buffer,
3254 ALIGN(tr->data_size, sizeof(void *)),
3255 (const void __user *)
3256 (uintptr_t)tr->data.ptr.offsets,
3257 tr->offsets_size)) {
3258 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3259 proc->pid, thread->pid);
3260 return_error = BR_FAILED_REPLY;
3261 return_error_param = -EFAULT;
3262 return_error_line = __LINE__;
3263 goto err_copy_data_failed;
3265 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3266 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3267 proc->pid, thread->pid, (u64)tr->offsets_size);
3268 return_error = BR_FAILED_REPLY;
3269 return_error_param = -EINVAL;
3270 return_error_line = __LINE__;
3271 goto err_bad_offset;
3273 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3274 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3275 proc->pid, thread->pid,
3276 (u64)extra_buffers_size);
3277 return_error = BR_FAILED_REPLY;
3278 return_error_param = -EINVAL;
3279 return_error_line = __LINE__;
3280 goto err_bad_offset;
3282 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3283 buffer_offset = off_start_offset;
3284 off_end_offset = off_start_offset + tr->offsets_size;
3285 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3286 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3287 ALIGN(secctx_sz, sizeof(u64));
3289 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3290 buffer_offset += sizeof(binder_size_t)) {
3291 struct binder_object_header *hdr;
3292 size_t object_size;
3293 struct binder_object object;
3294 binder_size_t object_offset;
3295 binder_size_t copy_size;
3297 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3298 &object_offset,
3299 t->buffer,
3300 buffer_offset,
3301 sizeof(object_offset))) {
3302 binder_txn_error("%d:%d copy offset from buffer failed\n",
3303 thread->pid, proc->pid);
3304 return_error = BR_FAILED_REPLY;
3305 return_error_param = -EINVAL;
3306 return_error_line = __LINE__;
3307 goto err_bad_offset;
3311 * Copy the source user buffer up to the next object
3312 * that will be processed.
3314 copy_size = object_offset - user_offset;
3315 if (copy_size && (user_offset > object_offset ||
3316 binder_alloc_copy_user_to_buffer(
3317 &target_proc->alloc,
3318 t->buffer, user_offset,
3319 user_buffer + user_offset,
3320 copy_size))) {
3321 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3322 proc->pid, thread->pid);
3323 return_error = BR_FAILED_REPLY;
3324 return_error_param = -EFAULT;
3325 return_error_line = __LINE__;
3326 goto err_copy_data_failed;
3328 object_size = binder_get_object(target_proc, user_buffer,
3329 t->buffer, object_offset, &object);
3330 if (object_size == 0 || object_offset < off_min) {
3331 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3332 proc->pid, thread->pid,
3333 (u64)object_offset,
3334 (u64)off_min,
3335 (u64)t->buffer->data_size);
3336 return_error = BR_FAILED_REPLY;
3337 return_error_param = -EINVAL;
3338 return_error_line = __LINE__;
3339 goto err_bad_offset;
3342 * Set offset to the next buffer fragment to be
3343 * copied
3345 user_offset = object_offset + object_size;
3347 hdr = &object.hdr;
3348 off_min = object_offset + object_size;
3349 switch (hdr->type) {
3350 case BINDER_TYPE_BINDER:
3351 case BINDER_TYPE_WEAK_BINDER: {
3352 struct flat_binder_object *fp;
3354 fp = to_flat_binder_object(hdr);
3355 ret = binder_translate_binder(fp, t, thread);
3357 if (ret < 0 ||
3358 binder_alloc_copy_to_buffer(&target_proc->alloc,
3359 t->buffer,
3360 object_offset,
3361 fp, sizeof(*fp))) {
3362 binder_txn_error("%d:%d translate binder failed\n",
3363 thread->pid, proc->pid);
3364 return_error = BR_FAILED_REPLY;
3365 return_error_param = ret;
3366 return_error_line = __LINE__;
3367 goto err_translate_failed;
3370 case BINDER_TYPE_HANDLE:
3371 case BINDER_TYPE_WEAK_HANDLE: {
3372 struct flat_binder_object *fp;
3374 fp = to_flat_binder_object(hdr);
3375 ret = binder_translate_handle(fp, t, thread);
3376 if (ret < 0 ||
3377 binder_alloc_copy_to_buffer(&target_proc->alloc,
3378 t->buffer,
3379 object_offset,
3380 fp, sizeof(*fp))) {
3381 binder_txn_error("%d:%d translate handle failed\n",
3382 thread->pid, proc->pid);
3383 return_error = BR_FAILED_REPLY;
3384 return_error_param = ret;
3385 return_error_line = __LINE__;
3386 goto err_translate_failed;
3390 case BINDER_TYPE_FD: {
3391 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3392 binder_size_t fd_offset = object_offset +
3393 (uintptr_t)&fp->fd - (uintptr_t)fp;
3394 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3395 thread, in_reply_to);
3397 fp->pad_binder = 0;
3398 if (ret < 0 ||
3399 binder_alloc_copy_to_buffer(&target_proc->alloc,
3400 t->buffer,
3401 object_offset,
3402 fp, sizeof(*fp))) {
3403 binder_txn_error("%d:%d translate fd failed\n",
3404 thread->pid, proc->pid);
3405 return_error = BR_FAILED_REPLY;
3406 return_error_param = ret;
3407 return_error_line = __LINE__;
3408 goto err_translate_failed;
3411 case BINDER_TYPE_FDA: {
3412 struct binder_object ptr_object;
3413 binder_size_t parent_offset;
3414 struct binder_object user_object;
3415 size_t user_parent_size;
3416 struct binder_fd_array_object *fda =
3417 to_binder_fd_array_object(hdr);
3418 size_t num_valid = (buffer_offset - off_start_offset) /
3419 sizeof(binder_size_t);
3420 struct binder_buffer_object *parent =
3421 binder_validate_ptr(target_proc, t->buffer,
3422 &ptr_object, fda->parent,
3423 off_start_offset,
3424 &parent_offset,
3425 num_valid);
3426 if (!parent) {
3427 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3428 proc->pid, thread->pid);
3429 return_error = BR_FAILED_REPLY;
3430 return_error_param = -EINVAL;
3431 return_error_line = __LINE__;
3432 goto err_bad_parent;
3434 if (!binder_validate_fixup(target_proc, t->buffer,
3435 off_start_offset,
3436 parent_offset,
3437 fda->parent_offset,
3438 last_fixup_obj_off,
3439 last_fixup_min_off)) {
3440 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3441 proc->pid, thread->pid);
3442 return_error = BR_FAILED_REPLY;
3443 return_error_param = -EINVAL;
3444 return_error_line = __LINE__;
3445 goto err_bad_parent;
3448 * We need to read the user version of the parent
3449 * object to get the original user offset
3451 user_parent_size =
3452 binder_get_object(proc, user_buffer, t->buffer,
3453 parent_offset, &user_object);
3454 if (user_parent_size != sizeof(user_object.bbo)) {
3455 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3456 proc->pid, thread->pid,
3457 user_parent_size,
3458 sizeof(user_object.bbo));
3459 return_error = BR_FAILED_REPLY;
3460 return_error_param = -EINVAL;
3461 return_error_line = __LINE__;
3462 goto err_bad_parent;
3464 ret = binder_translate_fd_array(&pf_head, fda,
3465 user_buffer, parent,
3466 &user_object.bbo, t,
3467 thread, in_reply_to);
3468 if (!ret)
3469 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3470 t->buffer,
3471 object_offset,
3472 fda, sizeof(*fda));
3473 if (ret) {
3474 binder_txn_error("%d:%d translate fd array failed\n",
3475 thread->pid, proc->pid);
3476 return_error = BR_FAILED_REPLY;
3477 return_error_param = ret > 0 ? -EINVAL : ret;
3478 return_error_line = __LINE__;
3479 goto err_translate_failed;
3481 last_fixup_obj_off = parent_offset;
3482 last_fixup_min_off =
3483 fda->parent_offset + sizeof(u32) * fda->num_fds;
3485 case BINDER_TYPE_PTR: {
3486 struct binder_buffer_object *bp =
3487 to_binder_buffer_object(hdr);
3488 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3489 size_t num_valid;
3491 if (bp->length > buf_left) {
3492 binder_user_error("%d:%d got transaction with too large buffer\n",
3493 proc->pid, thread->pid);
3494 return_error = BR_FAILED_REPLY;
3495 return_error_param = -EINVAL;
3496 return_error_line = __LINE__;
3497 goto err_bad_offset;
3499 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3500 (const void __user *)(uintptr_t)bp->buffer,
3501 bp->length);
3502 if (ret) {
3503 binder_txn_error("%d:%d deferred copy failed\n",
3504 thread->pid, proc->pid);
3505 return_error = BR_FAILED_REPLY;
3506 return_error_param = ret;
3507 return_error_line = __LINE__;
3508 goto err_translate_failed;
3510 /* Fixup buffer pointer to target proc address space */
3511 bp->buffer = (uintptr_t)
3512 t->buffer->user_data + sg_buf_offset;
3513 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3515 num_valid = (buffer_offset - off_start_offset) /
3516 sizeof(binder_size_t);
3517 ret = binder_fixup_parent(&pf_head, t,
3518 thread, bp,
3519 off_start_offset,
3520 num_valid,
3521 last_fixup_obj_off,
3522 last_fixup_min_off);
3523 if (ret < 0 ||
3524 binder_alloc_copy_to_buffer(&target_proc->alloc,
3525 t->buffer,
3526 object_offset,
3527 bp, sizeof(*bp))) {
3528 binder_txn_error("%d:%d failed to fixup parent\n",
3529 thread->pid, proc->pid);
3530 return_error = BR_FAILED_REPLY;
3531 return_error_param = ret;
3532 return_error_line = __LINE__;
3533 goto err_translate_failed;
3535 last_fixup_obj_off = object_offset;
3536 last_fixup_min_off = 0;
3537 } break;
3538 default:
3539 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3540 proc->pid, thread->pid, hdr->type);
3541 return_error = BR_FAILED_REPLY;
3542 return_error_param = -EINVAL;
3543 return_error_line = __LINE__;
3544 goto err_bad_object_type;
3547 /* Done processing objects, copy the rest of the buffer */
3548 if (binder_alloc_copy_user_to_buffer(
3549 &target_proc->alloc,
3550 t->buffer, user_offset,
3551 user_buffer + user_offset,
3552 tr->data_size - user_offset)) {
3553 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3554 proc->pid, thread->pid);
3555 return_error = BR_FAILED_REPLY;
3556 return_error_param = -EFAULT;
3557 return_error_line = __LINE__;
3558 goto err_copy_data_failed;
3561 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3562 &sgc_head, &pf_head);
3564 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3565 proc->pid, thread->pid);
3566 return_error = BR_FAILED_REPLY;
3567 return_error_param = ret;
3568 return_error_line = __LINE__;
3569 goto err_copy_data_failed;
3571 if (t->buffer->oneway_spam_suspect)
3572 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3573 else
3574 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3575 t->work.type = BINDER_WORK_TRANSACTION;
3577 if (reply) {
3578 binder_enqueue_thread_work(thread, tcomplete);
3579 binder_inner_proc_lock(target_proc);
3580 if (target_thread->is_dead) {
3581 return_error = BR_DEAD_REPLY;
3582 binder_inner_proc_unlock(target_proc);
3583 goto err_dead_proc_or_thread;
3585 BUG_ON(t->buffer->async_transaction != 0);
3586 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3587 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3588 target_proc->outstanding_txns++;
3589 binder_inner_proc_unlock(target_proc);
3590 wake_up_interruptible_sync(&target_thread->wait);
3591 binder_free_transaction(in_reply_to);
3592 } else if (!(t->flags & TF_ONE_WAY)) {
3593 BUG_ON(t->buffer->async_transaction != 0);
3594 binder_inner_proc_lock(proc);
3596 * Defer the TRANSACTION_COMPLETE, so we don't return to
3597 * userspace immediately; this allows the target process to
3598 * immediately start processing this transaction, reducing
3599 * latency. We will then return the TRANSACTION_COMPLETE when
3600 * the target replies (or there is an error).
3602 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3603 t->need_reply = 1;
3604 t->from_parent = thread->transaction_stack;
3605 thread->transaction_stack = t;
3606 binder_inner_proc_unlock(proc);
3607 return_error = binder_proc_transaction(t,
3608 target_proc, target_thread);
3609 if (return_error) {
3610 binder_inner_proc_lock(proc);
3611 binder_pop_transaction_ilocked(thread, t);
3612 binder_inner_proc_unlock(proc);
3613 goto err_dead_proc_or_thread;
3614 }
3615 } else {
3616 BUG_ON(target_node == NULL);
3617 BUG_ON(t->buffer->async_transaction != 1);
3618 return_error = binder_proc_transaction(t, target_proc, NULL);
3620 * Let the caller know when async transaction reaches a frozen
3621 * process and is put in a pending queue, waiting for the target
3622 * process to be unfrozen.
3624 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3625 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3626 binder_enqueue_thread_work(thread, tcomplete);
3627 if (return_error &&
3628 return_error != BR_TRANSACTION_PENDING_FROZEN)
3629 goto err_dead_proc_or_thread;
3630 }
3631 if (target_thread)
3632 binder_thread_dec_tmpref(target_thread);
3633 binder_proc_dec_tmpref(target_proc);
3634 if (target_node)
3635 binder_dec_node_tmpref(target_node);
3637 * write barrier to synchronize with initialization
3638 * of log entry
3640 smp_wmb();
3641 WRITE_ONCE(e->debug_id_done, t_debug_id);
3642 return;
3644 err_dead_proc_or_thread:
3645 binder_txn_error("%d:%d dead process or thread\n",
3646 thread->pid, proc->pid);
3647 return_error_line = __LINE__;
3648 binder_dequeue_work(proc, tcomplete);
3649 err_translate_failed:
3650 err_bad_object_type:
3651 err_bad_offset:
3652 err_bad_parent:
3653 err_copy_data_failed:
3654 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3655 binder_free_txn_fixups(t);
3656 trace_binder_transaction_failed_buffer_release(t->buffer);
3657 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3658 buffer_offset, true);
3659 if (target_node)
3660 binder_dec_node_tmpref(target_node);
3661 target_node = NULL;
3662 t->buffer->transaction = NULL;
3663 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3664 err_binder_alloc_buf_failed:
3665 err_bad_extra_size:
3666 if (secctx)
3667 security_release_secctx(secctx, secctx_sz);
3668 err_get_secctx_failed:
3669 kfree(tcomplete);
3670 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3671 err_alloc_tcomplete_failed:
3672 if (trace_binder_txn_latency_free_enabled())
3673 binder_txn_latency_free(t);
3674 kfree(t);
3675 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3676 err_alloc_t_failed:
3677 err_bad_todo_list:
3678 err_bad_call_stack:
3679 err_empty_call_stack:
3680 err_dead_binder:
3681 err_invalid_target_handle:
3682 if (target_node) {
3683 binder_dec_node(target_node, 1, 0);
3684 binder_dec_node_tmpref(target_node);
3685 }
3687 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3688 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3689 proc->pid, thread->pid, reply ? "reply" :
3690 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3691 target_proc ? target_proc->pid : 0,
3692 target_thread ? target_thread->pid : 0,
3693 t_debug_id, return_error, return_error_param,
3694 (u64)tr->data_size, (u64)tr->offsets_size,
3695 return_error_line);
3697 if (target_thread)
3698 binder_thread_dec_tmpref(target_thread);
3699 if (target_proc)
3700 binder_proc_dec_tmpref(target_proc);
3703 struct binder_transaction_log_entry *fe;
3705 e->return_error = return_error;
3706 e->return_error_param = return_error_param;
3707 e->return_error_line = return_error_line;
3708 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3711 * write barrier to synchronize with initialization
3712 * of log entry
3714 smp_wmb();
3715 WRITE_ONCE(e->debug_id_done, t_debug_id);
3716 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3719 BUG_ON(thread->return_error.cmd != BR_OK);
3720 if (in_reply_to) {
3721 binder_set_txn_from_error(in_reply_to, t_debug_id,
3722 return_error, return_error_param);
3723 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3724 binder_enqueue_thread_work(thread, &thread->return_error.work);
3725 binder_send_failed_reply(in_reply_to, return_error);
3726 } else {
3727 binder_inner_proc_lock(proc);
3728 binder_set_extended_error(&thread->ee, t_debug_id,
3729 return_error, return_error_param);
3730 binder_inner_proc_unlock(proc);
3731 thread->return_error.cmd = return_error;
3732 binder_enqueue_thread_work(thread, &thread->return_error.work);
3733 }
3734 }
3737 * binder_free_buf() - free the specified buffer
3738 * @proc: binder proc that owns buffer
3739 * @buffer: buffer to be freed
3740 * @is_failure: failed to send transaction
3742 * If the buffer is for an async transaction, enqueue the next async
3743 * transaction from the node.
3745 * Cleanup buffer and free it.
3747 static void
3748 binder_free_buf(struct binder_proc *proc,
3749 struct binder_thread *thread,
3750 struct binder_buffer *buffer, bool is_failure)
3752 binder_inner_proc_lock(proc);
3753 if (buffer->transaction) {
3754 buffer->transaction->buffer = NULL;
3755 buffer->transaction = NULL;
3757 binder_inner_proc_unlock(proc);
3758 if (buffer->async_transaction && buffer->target_node) {
3759 struct binder_node *buf_node;
3760 struct binder_work *w;
3762 buf_node = buffer->target_node;
3763 binder_node_inner_lock(buf_node);
3764 BUG_ON(!buf_node->has_async_transaction);
3765 BUG_ON(buf_node->proc != proc);
3766 w = binder_dequeue_work_head_ilocked(
3767 &buf_node->async_todo);
3768 if (!w) {
3769 buf_node->has_async_transaction = false;
3770 } else {
3771 binder_enqueue_work_ilocked(
3772 w, &proc->todo);
3773 binder_wakeup_proc_ilocked(proc);
3774 }
3775 binder_node_inner_unlock(buf_node);
3777 trace_binder_transaction_buffer_release(buffer);
3778 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3779 binder_alloc_free_buf(&proc->alloc, buffer);
3780 }
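/*
 * Editor's note: freeing an async buffer is what un-throttles a node.
 * Only one async transaction per node is in flight at a time; when its
 * buffer is freed, the next queued binder_work (if any) moves from
 * node->async_todo onto proc->todo and the proc is woken. See the
 * pending_async handling in binder_proc_transaction().
 */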
3782 static int binder_thread_write(struct binder_proc *proc,
3783 struct binder_thread *thread,
3784 binder_uintptr_t binder_buffer, size_t size,
3785 binder_size_t *consumed)
3787 uint32_t cmd;
3788 struct binder_context *context = proc->context;
3789 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3790 void __user *ptr = buffer + *consumed;
3791 void __user *end = buffer + size;
3793 while (ptr < end && thread->return_error.cmd == BR_OK) {
3794 int ret;
3796 if (get_user(cmd, (uint32_t __user *)ptr))
3797 return -EFAULT;
3798 ptr += sizeof(uint32_t);
3799 trace_binder_command(cmd);
3800 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3801 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3802 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3803 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3804 }
3805 switch (cmd) {
3806 case BC_INCREFS:
3807 case BC_ACQUIRE:
3808 case BC_RELEASE:
3809 case BC_DECREFS: {
3810 uint32_t target;
3811 const char *debug_string;
3812 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3813 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3814 struct binder_ref_data rdata;
3816 if (get_user(target, (uint32_t __user *)ptr))
3817 return -EFAULT;
3819 ptr += sizeof(uint32_t);
3820 ret = -1;
3821 if (increment && !target) {
3822 struct binder_node *ctx_mgr_node;
3824 mutex_lock(&context->context_mgr_node_lock);
3825 ctx_mgr_node = context->binder_context_mgr_node;
3826 if (ctx_mgr_node) {
3827 if (ctx_mgr_node->proc == proc) {
3828 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3829 proc->pid, thread->pid);
3830 mutex_unlock(&context->context_mgr_node_lock);
3831 return -EINVAL;
3832 }
3833 ret = binder_inc_ref_for_node(
3834 ctx_mgr_node,
3835 strong, NULL, &rdata);
3836 }
3837 mutex_unlock(&context->context_mgr_node_lock);
3838 }
3839 if (ret)
3840 ret = binder_update_ref_for_handle(
3841 proc, target, increment, strong,
3843 if (!ret && rdata.desc != target) {
3844 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3845 proc->pid, thread->pid,
3846 target, rdata.desc);
3848 switch (cmd) {
3849 case BC_INCREFS:
3850 debug_string = "IncRefs";
3851 break;
3852 case BC_ACQUIRE:
3853 debug_string = "Acquire";
3854 break;
3855 case BC_RELEASE:
3856 debug_string = "Release";
3857 break;
3858 case BC_DECREFS:
3859 default:
3860 debug_string = "DecRefs";
3861 break;
3862 }
3863 if (ret) {
3864 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3865 proc->pid, thread->pid, debug_string,
3866 strong, target, ret);
3867 break;
3868 }
3869 binder_debug(BINDER_DEBUG_USER_REFS,
3870 "%d:%d %s ref %d desc %d s %d w %d\n",
3871 proc->pid, thread->pid, debug_string,
3872 rdata.debug_id, rdata.desc, rdata.strong,
3873 rdata.weak);
3874 break;
3875 }
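/*
 * Editor's summary of the four commands handled above:
 *
 *   BC_INCREFS -> weak   increment    BC_DECREFS -> weak   decrement
 *   BC_ACQUIRE -> strong increment    BC_RELEASE -> strong decrement
 *
 * i.e. strong = (BC_ACQUIRE || BC_RELEASE) and increment =
 * (BC_INCREFS || BC_ACQUIRE), matching the two bools above; desc 0
 * is special-cased to the context manager node.
 */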
3876 case BC_INCREFS_DONE:
3877 case BC_ACQUIRE_DONE: {
3878 binder_uintptr_t node_ptr;
3879 binder_uintptr_t cookie;
3880 struct binder_node *node;
3881 bool free_node;
3883 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3884 return -EFAULT;
3885 ptr += sizeof(binder_uintptr_t);
3886 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3887 return -EFAULT;
3888 ptr += sizeof(binder_uintptr_t);
3889 node = binder_get_node(proc, node_ptr);
3890 if (!node) {
3891 binder_user_error("%d:%d %s u%016llx no match\n",
3892 proc->pid, thread->pid,
3893 cmd == BC_INCREFS_DONE ?
3894 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3895 (u64)node_ptr);
3896 break;
3897 }
3899 if (cookie != node->cookie) {
3900 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3901 proc->pid, thread->pid,
3902 cmd == BC_INCREFS_DONE ?
3903 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3904 (u64)node_ptr, node->debug_id,
3905 (u64)cookie, (u64)node->cookie);
3906 binder_put_node(node);
3907 break;
3908 }
3909 binder_node_inner_lock(node);
3910 if (cmd == BC_ACQUIRE_DONE) {
3911 if (node->pending_strong_ref == 0) {
3912 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3913 proc->pid, thread->pid,
3914 node->debug_id);
3915 binder_node_inner_unlock(node);
3916 binder_put_node(node);
3917 break;
3918 }
3919 node->pending_strong_ref = 0;
3920 } else {
3921 if (node->pending_weak_ref == 0) {
3922 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3923 proc->pid, thread->pid,
3924 node->debug_id);
3925 binder_node_inner_unlock(node);
3926 binder_put_node(node);
3927 break;
3928 }
3929 node->pending_weak_ref = 0;
3930 }
3931 free_node = binder_dec_node_nilocked(node,
3932 cmd == BC_ACQUIRE_DONE, 0);
3933 WARN_ON(free_node);
3934 binder_debug(BINDER_DEBUG_USER_REFS,
3935 "%d:%d %s node %d ls %d lw %d tr %d\n",
3936 proc->pid, thread->pid,
3937 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3938 node->debug_id, node->local_strong_refs,
3939 node->local_weak_refs, node->tmp_refs);
3940 binder_node_inner_unlock(node);
3941 binder_put_node(node);
3942 break;
3943 }
3944 case BC_ATTEMPT_ACQUIRE:
3945 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3946 return -EINVAL;
3947 case BC_ACQUIRE_RESULT:
3948 pr_err("BC_ACQUIRE_RESULT not supported\n");
3949 return -EINVAL;
3951 case BC_FREE_BUFFER: {
3952 binder_uintptr_t data_ptr;
3953 struct binder_buffer *buffer;
3955 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3956 return -EFAULT;
3957 ptr += sizeof(binder_uintptr_t);
3959 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3960 data_ptr);
3961 if (IS_ERR_OR_NULL(buffer)) {
3962 if (PTR_ERR(buffer) == -EPERM) {
3963 binder_user_error(
3964 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3965 proc->pid, thread->pid, (u64)data_ptr);
3967 } else {
3968 binder_user_error(
3969 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3970 proc->pid, thread->pid, (u64)data_ptr);
3972 }
3973 break;
3974 }
3975 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3976 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3977 proc->pid, thread->pid, (u64)data_ptr,
3978 buffer->debug_id,
3979 buffer->transaction ? "active" : "finished");
3980 binder_free_buf(proc, thread, buffer, false);
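/*
 * Editor's note: a minimal userspace sequence reaching this point
 * (hypothetical; libbinder normally does this): after consuming a
 * BR_TRANSACTION whose binder_transaction_data carries
 * data.ptr.buffer, the process must return the buffer:
 *
 *   struct { uint32_t cmd; binder_uintptr_t ptr; } wr = {
 *           BC_FREE_BUFFER, txn.data.ptr.buffer };
 *   // submitted through the BINDER_WRITE_READ ioctl write buffer
 *
 * which lands here and releases the allocation (and any async quota).
 */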
3981 break;
3982 }
3984 case BC_TRANSACTION_SG:
3985 case BC_REPLY_SG: {
3986 struct binder_transaction_data_sg tr;
3988 if (copy_from_user(&tr, ptr, sizeof(tr)))
3989 return -EFAULT;
3990 ptr += sizeof(tr);
3991 binder_transaction(proc, thread, &tr.transaction_data,
3992 cmd == BC_REPLY_SG, tr.buffers_size);
3993 break;
3994 }
3995 case BC_TRANSACTION:
3996 case BC_REPLY: {
3997 struct binder_transaction_data tr;
3999 if (copy_from_user(&tr, ptr, sizeof(tr)))
4000 return -EFAULT;
4001 ptr += sizeof(tr);
4002 binder_transaction(proc, thread, &tr,
4003 cmd == BC_REPLY, 0);
4004 break;
4005 }
4007 case BC_REGISTER_LOOPER:
4008 binder_debug(BINDER_DEBUG_THREADS,
4009 "%d:%d BC_REGISTER_LOOPER\n",
4010 proc->pid, thread->pid);
4011 binder_inner_proc_lock(proc);
4012 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4013 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4014 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4015 proc->pid, thread->pid);
4016 } else if (proc->requested_threads == 0) {
4017 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4018 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4019 proc->pid, thread->pid);
4020 } else {
4021 proc->requested_threads--;
4022 proc->requested_threads_started++;
4023 }
4024 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4025 binder_inner_proc_unlock(proc);
4026 break;
4027 case BC_ENTER_LOOPER:
4028 binder_debug(BINDER_DEBUG_THREADS,
4029 "%d:%d BC_ENTER_LOOPER\n",
4030 proc->pid, thread->pid);
4031 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4032 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4033 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4034 proc->pid, thread->pid);
4035 }
4036 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4037 break;
4038 case BC_EXIT_LOOPER:
4039 binder_debug(BINDER_DEBUG_THREADS,
4040 "%d:%d BC_EXIT_LOOPER\n",
4041 proc->pid, thread->pid);
4042 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4043 break;
4045 case BC_REQUEST_DEATH_NOTIFICATION:
4046 case BC_CLEAR_DEATH_NOTIFICATION: {
4047 uint32_t target;
4048 binder_uintptr_t cookie;
4049 struct binder_ref *ref;
4050 struct binder_ref_death *death = NULL;
4052 if (get_user(target, (uint32_t __user *)ptr))
4053 return -EFAULT;
4054 ptr += sizeof(uint32_t);
4055 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4056 return -EFAULT;
4057 ptr += sizeof(binder_uintptr_t);
4058 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4060 * Allocate memory for death notification
4061 * before taking lock
4063 death = kzalloc(sizeof(*death), GFP_KERNEL);
4064 if (death == NULL) {
4065 WARN_ON(thread->return_error.cmd !=
4066 BR_OK);
4067 thread->return_error.cmd = BR_ERROR;
4068 binder_enqueue_thread_work(
4069 thread,
4070 &thread->return_error.work);
4071 binder_debug(
4072 BINDER_DEBUG_FAILED_TRANSACTION,
4073 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4074 proc->pid, thread->pid);
4075 break;
4076 }
4077 }
4078 binder_proc_lock(proc);
4079 ref = binder_get_ref_olocked(proc, target, false);
4080 if (ref == NULL) {
4081 binder_user_error("%d:%d %s invalid ref %d\n",
4082 proc->pid, thread->pid,
4083 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4084 "BC_REQUEST_DEATH_NOTIFICATION" :
4085 "BC_CLEAR_DEATH_NOTIFICATION",
4086 target);
4087 binder_proc_unlock(proc);
4088 kfree(death);
4089 break;
4090 }
4092 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4093 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4094 proc->pid, thread->pid,
4095 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4096 "BC_REQUEST_DEATH_NOTIFICATION" :
4097 "BC_CLEAR_DEATH_NOTIFICATION",
4098 (u64)cookie, ref->data.debug_id,
4099 ref->data.desc, ref->data.strong,
4100 ref->data.weak, ref->node->debug_id);
4102 binder_node_lock(ref->node);
4103 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4105 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4106 proc->pid, thread->pid);
4107 binder_node_unlock(ref->node);
4108 binder_proc_unlock(proc);
4112 binder_stats_created(BINDER_STAT_DEATH);
4113 INIT_LIST_HEAD(&death->work.entry);
4114 death->cookie = cookie;
4116 if (ref->node->proc == NULL) {
4117 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4119 binder_inner_proc_lock(proc);
4120 binder_enqueue_work_ilocked(
4121 &ref->death->work, &proc->todo);
4122 binder_wakeup_proc_ilocked(proc);
4123 binder_inner_proc_unlock(proc);
4126 if (ref->death == NULL) {
4127 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4128 proc->pid, thread->pid);
4129 binder_node_unlock(ref->node);
4130 binder_proc_unlock(proc);
4134 if (death->cookie != cookie) {
4135 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4136 proc->pid, thread->pid,
4139 binder_node_unlock(ref->node);
4140 binder_proc_unlock(proc);
4144 binder_inner_proc_lock(proc);
4145 if (list_empty(&death->work.entry)) {
4146 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4147 if (thread->looper &
4148 (BINDER_LOOPER_STATE_REGISTERED |
4149 BINDER_LOOPER_STATE_ENTERED))
4150 binder_enqueue_thread_work_ilocked(
4154 binder_enqueue_work_ilocked(
4157 binder_wakeup_proc_ilocked(
4161 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4162 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4164 binder_inner_proc_unlock(proc);
4166 binder_node_unlock(ref->node);
4167 binder_proc_unlock(proc);
4169 case BC_DEAD_BINDER_DONE: {
4170 struct binder_work *w;
4171 binder_uintptr_t cookie;
4172 struct binder_ref_death *death = NULL;
4174 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4177 ptr += sizeof(cookie);
4178 binder_inner_proc_lock(proc);
4179 list_for_each_entry(w, &proc->delivered_death,
4181 struct binder_ref_death *tmp_death =
4183 struct binder_ref_death,
4186 if (tmp_death->cookie == cookie) {
4191 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4192 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4193 proc->pid, thread->pid, (u64)cookie,
4195 if (death == NULL) {
4196 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4197 proc->pid, thread->pid, (u64)cookie);
4198 binder_inner_proc_unlock(proc);
4201 binder_dequeue_work_ilocked(&death->work);
4202 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4203 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4204 if (thread->looper &
4205 (BINDER_LOOPER_STATE_REGISTERED |
4206 BINDER_LOOPER_STATE_ENTERED))
4207 binder_enqueue_thread_work_ilocked(
4208 thread, &death->work);
4210 binder_enqueue_work_ilocked(
4213 binder_wakeup_proc_ilocked(proc);
4216 binder_inner_proc_unlock(proc);
4220 pr_err("%d:%d unknown command %u\n",
4221 proc->pid, thread->pid, cmd);
4224 *consumed = ptr - buffer;
4229 static void binder_stat_br(struct binder_proc *proc,
4230 struct binder_thread *thread, uint32_t cmd)
4232 trace_binder_return(cmd);
4233 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4234 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4235 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4236 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4240 static int binder_put_node_cmd(struct binder_proc *proc,
4241 struct binder_thread *thread,
4243 binder_uintptr_t node_ptr,
4244 binder_uintptr_t node_cookie,
4246 uint32_t cmd, const char *cmd_name)
4248 void __user *ptr = *ptrp;
4250 if (put_user(cmd, (uint32_t __user *)ptr))
4252 ptr += sizeof(uint32_t);
4254 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4256 ptr += sizeof(binder_uintptr_t);
4258 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4260 ptr += sizeof(binder_uintptr_t);
4262 binder_stat_br(proc, thread, cmd);
4263 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4264 proc->pid, thread->pid, cmd_name, node_debug_id,
4265 (u64)node_ptr, (u64)node_cookie);
4271 static int binder_wait_for_work(struct binder_thread *thread,
4275 struct binder_proc *proc = thread->proc;
4278 binder_inner_proc_lock(proc);
4280 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4281 if (binder_has_work_ilocked(thread, do_proc_work))
4284 list_add(&thread->waiting_thread_node,
4285 &proc->waiting_threads);
4286 binder_inner_proc_unlock(proc);
4288 binder_inner_proc_lock(proc);
4289 list_del_init(&thread->waiting_thread_node);
4290 if (signal_pending(current)) {
4295 finish_wait(&thread->wait, &wait);
4296 binder_inner_proc_unlock(proc);
4302 * binder_apply_fd_fixups() - finish fd translation
4303 * @proc: binder_proc associated @t->buffer
4304 * @t: binder transaction with list of fd fixups
4306 * Now that we are in the context of the transaction target
4307 * process, we can allocate and install fds. Process the
4308 * list of fds to translate and fixup the buffer with the
4309 * new fds first and only then install the files.
4311 * If we fail to allocate an fd, skip the install and release
4312 * any fds that have already been allocated.
4314 static int binder_apply_fd_fixups(struct binder_proc *proc,
4315 struct binder_transaction *t)
4317 struct binder_txn_fd_fixup *fixup, *tmp;
4320 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4321 int fd = get_unused_fd_flags(O_CLOEXEC);
4324 binder_debug(BINDER_DEBUG_TRANSACTION,
4325 "failed fd fixup txn %d fd %d\n",
4330 binder_debug(BINDER_DEBUG_TRANSACTION,
4331 "fd fixup txn %d fd %d\n",
4333 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4334 fixup->target_fd = fd;
4335 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4342 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4343 fd_install(fixup->target_fd, fixup->file);
4344 list_del(&fixup->fixup_entry);
4351 binder_free_txn_fixups(t);
4355 static int binder_thread_read(struct binder_proc *proc,
4356 struct binder_thread *thread,
4357 binder_uintptr_t binder_buffer, size_t size,
4358 binder_size_t *consumed, int non_block)
4360 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4361 void __user *ptr = buffer + *consumed;
4362 void __user *end = buffer + size;
4365 int wait_for_proc_work;
4367 if (*consumed == 0) {
4368 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4370 ptr += sizeof(uint32_t);
4374 binder_inner_proc_lock(proc);
4375 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4376 binder_inner_proc_unlock(proc);
4378 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4380 trace_binder_wait_for_work(wait_for_proc_work,
4381 !!thread->transaction_stack,
4382 !binder_worklist_empty(proc, &thread->todo));
4383 if (wait_for_proc_work) {
4384 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4385 BINDER_LOOPER_STATE_ENTERED))) {
4386 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4387 proc->pid, thread->pid, thread->looper);
4388 wait_event_interruptible(binder_user_error_wait,
4389 binder_stop_on_user_error < 2);
4391 binder_set_nice(proc->default_priority);
4395 if (!binder_has_work(thread, wait_for_proc_work))
4398 ret = binder_wait_for_work(thread, wait_for_proc_work);
4401 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4408 struct binder_transaction_data_secctx tr;
4409 struct binder_transaction_data *trd = &tr.transaction_data;
4410 struct binder_work *w = NULL;
4411 struct list_head *list = NULL;
4412 struct binder_transaction *t = NULL;
4413 struct binder_thread *t_from;
4414 size_t trsize = sizeof(*trd);
4416 binder_inner_proc_lock(proc);
4417 if (!binder_worklist_empty_ilocked(&thread->todo))
4418 list = &thread->todo;
4419 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4423 binder_inner_proc_unlock(proc);
4426 if (ptr - buffer == 4 && !thread->looper_need_return)
4431 if (end - ptr < sizeof(tr) + 4) {
4432 binder_inner_proc_unlock(proc);
4435 w = binder_dequeue_work_head_ilocked(list);
4436 if (binder_worklist_empty_ilocked(&thread->todo))
4437 thread->process_todo = false;
4440 case BINDER_WORK_TRANSACTION: {
4441 binder_inner_proc_unlock(proc);
4442 t = container_of(w, struct binder_transaction, work);
4444 case BINDER_WORK_RETURN_ERROR: {
4445 struct binder_error *e = container_of(
4446 w, struct binder_error, work);
4448 WARN_ON(e->cmd == BR_OK);
4449 binder_inner_proc_unlock(proc);
4450 if (put_user(e->cmd, (uint32_t __user *)ptr))
4454 ptr += sizeof(uint32_t);
4456 binder_stat_br(proc, thread, cmd);
4458 case BINDER_WORK_TRANSACTION_COMPLETE:
4459 case BINDER_WORK_TRANSACTION_PENDING:
4460 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4461 if (proc->oneway_spam_detection_enabled &&
4462 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4463 cmd = BR_ONEWAY_SPAM_SUSPECT;
4464 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4465 cmd = BR_TRANSACTION_PENDING_FROZEN;
4467 cmd = BR_TRANSACTION_COMPLETE;
4468 binder_inner_proc_unlock(proc);
4470 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4471 if (put_user(cmd, (uint32_t __user *)ptr))
4473 ptr += sizeof(uint32_t);
4475 binder_stat_br(proc, thread, cmd);
4476 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4477 "%d:%d BR_TRANSACTION_COMPLETE\n",
4478 proc->pid, thread->pid);
4480 case BINDER_WORK_NODE: {
4481 struct binder_node *node = container_of(w, struct binder_node, work);
4483 binder_uintptr_t node_ptr = node->ptr;
4484 binder_uintptr_t node_cookie = node->cookie;
4485 int node_debug_id = node->debug_id;
4488 void __user *orig_ptr = ptr;
4490 BUG_ON(proc != node->proc);
4491 strong = node->internal_strong_refs ||
4492 node->local_strong_refs;
4493 weak = !hlist_empty(&node->refs) ||
4494 node->local_weak_refs ||
4495 node->tmp_refs || strong;
4496 has_strong_ref = node->has_strong_ref;
4497 has_weak_ref = node->has_weak_ref;
4499 if (weak && !has_weak_ref) {
4500 node->has_weak_ref = 1;
4501 node->pending_weak_ref = 1;
4502 node->local_weak_refs++;
4504 if (strong && !has_strong_ref) {
4505 node->has_strong_ref = 1;
4506 node->pending_strong_ref = 1;
4507 node->local_strong_refs++;
4509 if (!strong && has_strong_ref)
4510 node->has_strong_ref = 0;
4511 if (!weak && has_weak_ref)
4512 node->has_weak_ref = 0;
4513 if (!weak && !strong) {
4514 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4515 "%d:%d node %d u%016llx c%016llx deleted\n",
4516 proc->pid, thread->pid,
4520 rb_erase(&node->rb_node, &proc->nodes);
4521 binder_inner_proc_unlock(proc);
4522 binder_node_lock(node);
4524 * Acquire the node lock before freeing the
4525 * node to serialize with other threads that
4526 * may have been holding the node lock while
4527 * decrementing this node (avoids race where
4528 * this thread frees while the other thread
4529 * is unlocking the node after the final
4532 binder_node_unlock(node);
4533 binder_free_node(node);
4535 binder_inner_proc_unlock(proc);
4537 if (weak && !has_weak_ref)
4538 ret = binder_put_node_cmd(
4539 proc, thread, &ptr, node_ptr,
4540 node_cookie, node_debug_id,
4541 BR_INCREFS, "BR_INCREFS");
4542 if (!ret && strong && !has_strong_ref)
4543 ret = binder_put_node_cmd(
4544 proc, thread, &ptr, node_ptr,
4545 node_cookie, node_debug_id,
4546 BR_ACQUIRE, "BR_ACQUIRE");
4547 if (!ret && !strong && has_strong_ref)
4548 ret = binder_put_node_cmd(
4549 proc, thread, &ptr, node_ptr,
4550 node_cookie, node_debug_id,
4551 BR_RELEASE, "BR_RELEASE");
4552 if (!ret && !weak && has_weak_ref)
4553 ret = binder_put_node_cmd(
4554 proc, thread, &ptr, node_ptr,
4555 node_cookie, node_debug_id,
4556 BR_DECREFS, "BR_DECREFS");
4557 if (orig_ptr == ptr)
4558 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4559 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4560 proc->pid, thread->pid,
4567 case BINDER_WORK_DEAD_BINDER:
4568 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4569 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4570 struct binder_ref_death *death;
4572 binder_uintptr_t cookie;
4574 death = container_of(w, struct binder_ref_death, work);
4575 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4576 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4578 cmd = BR_DEAD_BINDER;
4579 cookie = death->cookie;
4581 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4582 "%d:%d %s %016llx\n",
4583 proc->pid, thread->pid,
4584 cmd == BR_DEAD_BINDER ?
4586 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4588 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4589 binder_inner_proc_unlock(proc);
4591 binder_stats_deleted(BINDER_STAT_DEATH);
4593 binder_enqueue_work_ilocked(
4594 w, &proc->delivered_death);
4595 binder_inner_proc_unlock(proc);
4597 if (put_user(cmd, (uint32_t __user *)ptr))
4599 ptr += sizeof(uint32_t);
4600 if (put_user(cookie,
4601 (binder_uintptr_t __user *)ptr))
4603 ptr += sizeof(binder_uintptr_t);
4604 binder_stat_br(proc, thread, cmd);
4605 if (cmd == BR_DEAD_BINDER)
4606 goto done; /* DEAD_BINDER notifications can cause transactions */
4609 binder_inner_proc_unlock(proc);
4610 pr_err("%d:%d: bad work type %d\n",
4611 proc->pid, thread->pid, w->type);
4618 BUG_ON(t->buffer == NULL);
4619 if (t->buffer->target_node) {
4620 struct binder_node *target_node = t->buffer->target_node;
4622 trd->target.ptr = target_node->ptr;
4623 trd->cookie = target_node->cookie;
4624 t->saved_priority = task_nice(current);
4625 if (t->priority < target_node->min_priority &&
4626 !(t->flags & TF_ONE_WAY))
4627 binder_set_nice(t->priority);
4628 else if (!(t->flags & TF_ONE_WAY) ||
4629 t->saved_priority > target_node->min_priority)
4630 binder_set_nice(target_node->min_priority);
4631 cmd = BR_TRANSACTION;
4633 trd->target.ptr = 0;
4637 trd->code = t->code;
4638 trd->flags = t->flags;
4639 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4641 t_from = binder_get_txn_from(t);
4643 struct task_struct *sender = t_from->proc->tsk;
4646 task_tgid_nr_ns(sender,
4647 task_active_pid_ns(current));
4649 trd->sender_pid = 0;
4652 ret = binder_apply_fd_fixups(proc, t);
4654 struct binder_buffer *buffer = t->buffer;
4655 bool oneway = !!(t->flags & TF_ONE_WAY);
4656 int tid = t->debug_id;
4659 binder_thread_dec_tmpref(t_from);
4660 buffer->transaction = NULL;
4661 binder_cleanup_transaction(t, "fd fixups failed",
4663 binder_free_buf(proc, thread, buffer, true);
4664 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4665 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4666 proc->pid, thread->pid,
4668 (cmd == BR_REPLY ? "reply " : ""),
4669 tid, BR_FAILED_REPLY, ret, __LINE__);
4670 if (cmd == BR_REPLY) {
4671 cmd = BR_FAILED_REPLY;
4672 if (put_user(cmd, (uint32_t __user *)ptr))
4674 ptr += sizeof(uint32_t);
4675 binder_stat_br(proc, thread, cmd);
4680 trd->data_size = t->buffer->data_size;
4681 trd->offsets_size = t->buffer->offsets_size;
4682 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4683 trd->data.ptr.offsets = trd->data.ptr.buffer +
4684 ALIGN(t->buffer->data_size,
4687 tr.secctx = t->security_ctx;
4688 if (t->security_ctx) {
4689 cmd = BR_TRANSACTION_SEC_CTX;
4690 trsize = sizeof(tr);
4692 if (put_user(cmd, (uint32_t __user *)ptr)) {
4694 binder_thread_dec_tmpref(t_from);
4696 binder_cleanup_transaction(t, "put_user failed",
4701 ptr += sizeof(uint32_t);
4702 if (copy_to_user(ptr, &tr, trsize)) {
4704 binder_thread_dec_tmpref(t_from);
4706 binder_cleanup_transaction(t, "copy_to_user failed",
4713 trace_binder_transaction_received(t);
4714 binder_stat_br(proc, thread, cmd);
4715 binder_debug(BINDER_DEBUG_TRANSACTION,
4716 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4717 proc->pid, thread->pid,
4718 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4719 (cmd == BR_TRANSACTION_SEC_CTX) ?
4720 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4721 t->debug_id, t_from ? t_from->proc->pid : 0,
4722 t_from ? t_from->pid : 0, cmd,
4723 t->buffer->data_size, t->buffer->offsets_size,
4724 (u64)trd->data.ptr.buffer,
4725 (u64)trd->data.ptr.offsets);
4728 binder_thread_dec_tmpref(t_from);
4729 t->buffer->allow_user_free = 1;
4730 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4731 binder_inner_proc_lock(thread->proc);
4732 t->to_parent = thread->transaction_stack;
4733 t->to_thread = thread;
4734 thread->transaction_stack = t;
4735 binder_inner_proc_unlock(thread->proc);
4737 binder_free_transaction(t);
4744 *consumed = ptr - buffer;
4745 binder_inner_proc_lock(proc);
4746 if (proc->requested_threads == 0 &&
4747 list_empty(&thread->proc->waiting_threads) &&
4748 proc->requested_threads_started < proc->max_threads &&
4749 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4750 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4751 /*spawn a new thread if we leave this out */) {
4752 proc->requested_threads++;
4753 binder_inner_proc_unlock(proc);
4754 binder_debug(BINDER_DEBUG_THREADS,
4755 "%d:%d BR_SPAWN_LOOPER\n",
4756 proc->pid, thread->pid);
4757 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4759 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4761 binder_inner_proc_unlock(proc);
4765 static void binder_release_work(struct binder_proc *proc,
4766 struct list_head *list)
4768 struct binder_work *w;
4769 enum binder_work_type wtype;
4772 binder_inner_proc_lock(proc);
4773 w = binder_dequeue_work_head_ilocked(list);
4774 wtype = w ? w->type : 0;
4775 binder_inner_proc_unlock(proc);
4780 case BINDER_WORK_TRANSACTION: {
4781 struct binder_transaction *t;
4783 t = container_of(w, struct binder_transaction, work);
4785 binder_cleanup_transaction(t, "process died.",
4788 case BINDER_WORK_RETURN_ERROR: {
4789 struct binder_error *e = container_of(
4790 w, struct binder_error, work);
4792 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4793 "undelivered TRANSACTION_ERROR: %u\n",
4796 case BINDER_WORK_TRANSACTION_COMPLETE: {
4797 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4798 "undelivered TRANSACTION_COMPLETE\n");
4800 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4802 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4803 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4804 struct binder_ref_death *death;
4806 death = container_of(w, struct binder_ref_death, work);
4807 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4808 "undelivered death notification, %016llx\n",
4809 (u64)death->cookie);
4811 binder_stats_deleted(BINDER_STAT_DEATH);
4813 case BINDER_WORK_NODE:
4816 pr_err("unexpected work type, %d, not freed\n",
4824 static struct binder_thread *binder_get_thread_ilocked(
4825 struct binder_proc *proc, struct binder_thread *new_thread)
4827 struct binder_thread *thread = NULL;
4828 struct rb_node *parent = NULL;
4829 struct rb_node **p = &proc->threads.rb_node;
4833 thread = rb_entry(parent, struct binder_thread, rb_node);
4835 if (current->pid < thread->pid)
4837 else if (current->pid > thread->pid)
4838 p = &(*p)->rb_right;
4844 thread = new_thread;
4845 binder_stats_created(BINDER_STAT_THREAD);
4846 thread->proc = proc;
4847 thread->pid = current->pid;
4848 atomic_set(&thread->tmp_ref, 0);
4849 init_waitqueue_head(&thread->wait);
4850 INIT_LIST_HEAD(&thread->todo);
4851 rb_link_node(&thread->rb_node, parent, p);
4852 rb_insert_color(&thread->rb_node, &proc->threads);
4853 thread->looper_need_return = true;
4854 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4855 thread->return_error.cmd = BR_OK;
4856 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4857 thread->reply_error.cmd = BR_OK;
4858 thread->ee.command = BR_OK;
4859 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4863 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4865 struct binder_thread *thread;
4866 struct binder_thread *new_thread;
4868 binder_inner_proc_lock(proc);
4869 thread = binder_get_thread_ilocked(proc, NULL);
4870 binder_inner_proc_unlock(proc);
4872 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4873 if (new_thread == NULL)
4875 binder_inner_proc_lock(proc);
4876 thread = binder_get_thread_ilocked(proc, new_thread);
4877 binder_inner_proc_unlock(proc);
4878 if (thread != new_thread)
4884 static void binder_free_proc(struct binder_proc *proc)
4886 struct binder_device *device;
4888 BUG_ON(!list_empty(&proc->todo));
4889 BUG_ON(!list_empty(&proc->delivered_death));
4890 if (proc->outstanding_txns)
4891 pr_warn("%s: Unexpected outstanding_txns %d\n",
4892 __func__, proc->outstanding_txns);
4893 device = container_of(proc->context, struct binder_device, context);
4894 if (refcount_dec_and_test(&device->ref)) {
4895 kfree(proc->context->name);
4898 binder_alloc_deferred_release(&proc->alloc);
4899 put_task_struct(proc->tsk);
4900 put_cred(proc->cred);
4901 binder_stats_deleted(BINDER_STAT_PROC);
4905 static void binder_free_thread(struct binder_thread *thread)
4907 BUG_ON(!list_empty(&thread->todo));
4908 binder_stats_deleted(BINDER_STAT_THREAD);
4909 binder_proc_dec_tmpref(thread->proc);
4913 static int binder_thread_release(struct binder_proc *proc,
4914 struct binder_thread *thread)
4916 struct binder_transaction *t;
4917 struct binder_transaction *send_reply = NULL;
4918 int active_transactions = 0;
4919 struct binder_transaction *last_t = NULL;
4921 binder_inner_proc_lock(thread->proc);
4923 * take a ref on the proc so it survives
4924 * after we remove this thread from proc->threads.
4925 * The corresponding dec is when we actually
4926 * free the thread in binder_free_thread()
4930 * take a ref on this thread to ensure it
4931 * survives while we are releasing it
4933 atomic_inc(&thread->tmp_ref);
4934 rb_erase(&thread->rb_node, &proc->threads);
4935 t = thread->transaction_stack;
4937 spin_lock(&t->lock);
4938 if (t->to_thread == thread)
4941 __acquire(&t->lock);
4943 thread->is_dead = true;
4947 active_transactions++;
4948 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4949 "release %d:%d transaction %d %s, still active\n",
4950 proc->pid, thread->pid,
4952 (t->to_thread == thread) ? "in" : "out");
4954 if (t->to_thread == thread) {
4955 thread->proc->outstanding_txns--;
4957 t->to_thread = NULL;
4959 t->buffer->transaction = NULL;
4963 } else if (t->from == thread) {
4968 spin_unlock(&last_t->lock);
4970 spin_lock(&t->lock);
4972 __acquire(&t->lock);
4974 /* annotation for sparse, lock not acquired in last iteration above */
4975 __release(&t->lock);
4978 * If this thread used poll, make sure we remove the waitqueue from any
4979 * poll data structures holding it.
4981 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4982 wake_up_pollfree(&thread->wait);
4984 binder_inner_proc_unlock(thread->proc);
4987 * This is needed to avoid races between wake_up_pollfree() above and
4988 * someone else removing the last entry from the queue for other reasons
4989 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4990 * descriptor being closed). Such other users hold an RCU read lock, so
4991 * we can be sure they're done after we call synchronize_rcu().
4993 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4997 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4998 binder_release_work(proc, &thread->todo);
4999 binder_thread_dec_tmpref(thread);
5000 return active_transactions;
5003 static __poll_t binder_poll(struct file *filp,
5004 struct poll_table_struct *wait)
5006 struct binder_proc *proc = filp->private_data;
5007 struct binder_thread *thread = NULL;
5008 bool wait_for_proc_work;
5010 thread = binder_get_thread(proc);
5014 binder_inner_proc_lock(thread->proc);
5015 thread->looper |= BINDER_LOOPER_STATE_POLL;
5016 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5018 binder_inner_proc_unlock(thread->proc);
5020 poll_wait(filp, &thread->wait, wait);
5022 if (binder_has_work(thread, wait_for_proc_work))
5028 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5029 struct binder_thread *thread)
5032 struct binder_proc *proc = filp->private_data;
5033 void __user *ubuf = (void __user *)arg;
5034 struct binder_write_read bwr;
5036 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5040 binder_debug(BINDER_DEBUG_READ_WRITE,
5041 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5042 proc->pid, thread->pid,
5043 (u64)bwr.write_size, (u64)bwr.write_buffer,
5044 (u64)bwr.read_size, (u64)bwr.read_buffer);
5046 if (bwr.write_size > 0) {
5047 ret = binder_thread_write(proc, thread,
5050 &bwr.write_consumed);
5051 trace_binder_write_done(ret);
5053 bwr.read_consumed = 0;
5054 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5059 if (bwr.read_size > 0) {
5060 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5063 filp->f_flags & O_NONBLOCK);
5064 trace_binder_read_done(ret);
5065 binder_inner_proc_lock(proc);
5066 if (!binder_worklist_empty_ilocked(&proc->todo))
5067 binder_wakeup_proc_ilocked(proc);
5068 binder_inner_proc_unlock(proc);
5070 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5075 binder_debug(BINDER_DEBUG_READ_WRITE,
5076 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5077 proc->pid, thread->pid,
5078 (u64)bwr.write_consumed, (u64)bwr.write_size,
5079 (u64)bwr.read_consumed, (u64)bwr.read_size);
5080 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5088 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5089 struct flat_binder_object *fbo)
5092 struct binder_proc *proc = filp->private_data;
5093 struct binder_context *context = proc->context;
5094 struct binder_node *new_node;
5095 kuid_t curr_euid = current_euid();
5097 mutex_lock(&context->context_mgr_node_lock);
5098 if (context->binder_context_mgr_node) {
5099 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5103 ret = security_binder_set_context_mgr(proc->cred);
5106 if (uid_valid(context->binder_context_mgr_uid)) {
5107 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5108 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5109 from_kuid(&init_user_ns, curr_euid),
5110 from_kuid(&init_user_ns,
5111 context->binder_context_mgr_uid));
5116 context->binder_context_mgr_uid = curr_euid;
5118 new_node = binder_new_node(proc, fbo);
5123 binder_node_lock(new_node);
5124 new_node->local_weak_refs++;
5125 new_node->local_strong_refs++;
5126 new_node->has_strong_ref = 1;
5127 new_node->has_weak_ref = 1;
5128 context->binder_context_mgr_node = new_node;
5129 binder_node_unlock(new_node);
5130 binder_put_node(new_node);
5132 mutex_unlock(&context->context_mgr_node_lock);
5136 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5137 struct binder_node_info_for_ref *info)
5139 struct binder_node *node;
5140 struct binder_context *context = proc->context;
5141 __u32 handle = info->handle;
5143 if (info->strong_count || info->weak_count || info->reserved1 ||
5144 info->reserved2 || info->reserved3) {
5145 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5150 /* This ioctl may only be used by the context manager */
5151 mutex_lock(&context->context_mgr_node_lock);
5152 if (!context->binder_context_mgr_node ||
5153 context->binder_context_mgr_node->proc != proc) {
5154 mutex_unlock(&context->context_mgr_node_lock);
5157 mutex_unlock(&context->context_mgr_node_lock);
5159 node = binder_get_node_from_ref(proc, handle, true, NULL);
5163 info->strong_count = node->local_strong_refs +
5164 node->internal_strong_refs;
5165 info->weak_count = node->local_weak_refs;
5167 binder_put_node(node);
5172 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5173 struct binder_node_debug_info *info)
5176 binder_uintptr_t ptr = info->ptr;
5178 memset(info, 0, sizeof(*info));
5180 binder_inner_proc_lock(proc);
5181 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5182 struct binder_node *node = rb_entry(n, struct binder_node,
5184 if (node->ptr > ptr) {
5185 info->ptr = node->ptr;
5186 info->cookie = node->cookie;
5187 info->has_strong_ref = node->has_strong_ref;
5188 info->has_weak_ref = node->has_weak_ref;
5192 binder_inner_proc_unlock(proc);
5197 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5200 struct binder_thread *thread;
5202 if (proc->outstanding_txns > 0)
5205 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5206 thread = rb_entry(n, struct binder_thread, rb_node);
5207 if (thread->transaction_stack)
5213 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5214 struct binder_proc *target_proc)
5218 if (!info->enable) {
5219 binder_inner_proc_lock(target_proc);
5220 target_proc->sync_recv = false;
5221 target_proc->async_recv = false;
5222 target_proc->is_frozen = false;
5223 binder_inner_proc_unlock(target_proc);
5228 * Freezing the target. Prevent new transactions by
5229 * setting frozen state. If timeout specified, wait
5230 * for transactions to drain.
5232 binder_inner_proc_lock(target_proc);
5233 target_proc->sync_recv = false;
5234 target_proc->async_recv = false;
5235 target_proc->is_frozen = true;
5236 binder_inner_proc_unlock(target_proc);
5238 if (info->timeout_ms > 0)
5239 ret = wait_event_interruptible_timeout(
5240 target_proc->freeze_wait,
5241 (!target_proc->outstanding_txns),
5242 msecs_to_jiffies(info->timeout_ms));
5244 /* Check pending transactions that wait for reply */
5246 binder_inner_proc_lock(target_proc);
5247 if (binder_txns_pending_ilocked(target_proc))
5249 binder_inner_proc_unlock(target_proc);
5253 binder_inner_proc_lock(target_proc);
5254 target_proc->is_frozen = false;
5255 binder_inner_proc_unlock(target_proc);
5261 static int binder_ioctl_get_freezer_info(
5262 struct binder_frozen_status_info *info)
5264 struct binder_proc *target_proc;
5268 info->sync_recv = 0;
5269 info->async_recv = 0;
5271 mutex_lock(&binder_procs_lock);
5272 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5273 if (target_proc->pid == info->pid) {
5275 binder_inner_proc_lock(target_proc);
5276 txns_pending = binder_txns_pending_ilocked(target_proc);
5277 info->sync_recv |= target_proc->sync_recv |
5278 (txns_pending << 1);
5279 info->async_recv |= target_proc->async_recv;
5280 binder_inner_proc_unlock(target_proc);
5283 mutex_unlock(&binder_procs_lock);
5291 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5294 struct binder_extended_error ee;
5296 binder_inner_proc_lock(thread->proc);
5298 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5299 binder_inner_proc_unlock(thread->proc);
5301 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5307 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5310 struct binder_proc *proc = filp->private_data;
5311 struct binder_thread *thread;
5312 void __user *ubuf = (void __user *)arg;
5314 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5315 proc->pid, current->pid, cmd, arg);*/
5317 binder_selftest_alloc(&proc->alloc);
5319 trace_binder_ioctl(cmd, arg);
5321 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5325 thread = binder_get_thread(proc);
5326 if (thread == NULL) {
5332 case BINDER_WRITE_READ:
5333 ret = binder_ioctl_write_read(filp, arg, thread);
5337 case BINDER_SET_MAX_THREADS: {
5340 if (copy_from_user(&max_threads, ubuf,
5341 sizeof(max_threads))) {
5345 binder_inner_proc_lock(proc);
5346 proc->max_threads = max_threads;
5347 binder_inner_proc_unlock(proc);
5350 case BINDER_SET_CONTEXT_MGR_EXT: {
5351 struct flat_binder_object fbo;
5353 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5357 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5362 case BINDER_SET_CONTEXT_MGR:
5363 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5367 case BINDER_THREAD_EXIT:
5368 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5369 proc->pid, thread->pid);
5370 binder_thread_release(proc, thread);
5373 case BINDER_VERSION: {
5374 struct binder_version __user *ver = ubuf;
5376 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5377 &ver->protocol_version)) {
5383 case BINDER_GET_NODE_INFO_FOR_REF: {
5384 struct binder_node_info_for_ref info;
5386 if (copy_from_user(&info, ubuf, sizeof(info))) {
5391 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5395 if (copy_to_user(ubuf, &info, sizeof(info))) {
5402 case BINDER_GET_NODE_DEBUG_INFO: {
5403 struct binder_node_debug_info info;
5405 if (copy_from_user(&info, ubuf, sizeof(info))) {
5410 ret = binder_ioctl_get_node_debug_info(proc, &info);
5414 if (copy_to_user(ubuf, &info, sizeof(info))) {
5420 case BINDER_FREEZE: {
5421 struct binder_freeze_info info;
5422 struct binder_proc **target_procs = NULL, *target_proc;
5423 int target_procs_count = 0, i = 0;
5427 if (copy_from_user(&info, ubuf, sizeof(info))) {
5432 mutex_lock(&binder_procs_lock);
5433 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5434 if (target_proc->pid == info.pid)
5435 target_procs_count++;
5438 if (target_procs_count == 0) {
5439 mutex_unlock(&binder_procs_lock);
5444 target_procs = kcalloc(target_procs_count,
5445 sizeof(struct binder_proc *),
5448 if (!target_procs) {
5449 mutex_unlock(&binder_procs_lock);
5454 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5455 if (target_proc->pid != info.pid)
5458 binder_inner_proc_lock(target_proc);
5459 target_proc->tmp_ref++;
5460 binder_inner_proc_unlock(target_proc);
5462 target_procs[i++] = target_proc;
5464 mutex_unlock(&binder_procs_lock);
5466 for (i = 0; i < target_procs_count; i++) {
5468 ret = binder_ioctl_freeze(&info,
5471 binder_proc_dec_tmpref(target_procs[i]);
5474 kfree(target_procs);
5480 case BINDER_GET_FROZEN_INFO: {
5481 struct binder_frozen_status_info info;
5483 if (copy_from_user(&info, ubuf, sizeof(info))) {
5488 ret = binder_ioctl_get_freezer_info(&info);
5492 if (copy_to_user(ubuf, &info, sizeof(info))) {
5498 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5501 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5505 binder_inner_proc_lock(proc);
5506 proc->oneway_spam_detection_enabled = (bool)enable;
5507 binder_inner_proc_unlock(proc);
5510 case BINDER_GET_EXTENDED_ERROR:
5511 ret = binder_ioctl_get_extended_error(thread, ubuf);
5522 thread->looper_need_return = false;
5523 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5524 if (ret && ret != -EINTR)
5525 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5527 trace_binder_ioctl_done(ret);
5531 static void binder_vma_open(struct vm_area_struct *vma)
5533 struct binder_proc *proc = vma->vm_private_data;
5535 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5536 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5537 proc->pid, vma->vm_start, vma->vm_end,
5538 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5539 (unsigned long)pgprot_val(vma->vm_page_prot));
5542 static void binder_vma_close(struct vm_area_struct *vma)
5544 struct binder_proc *proc = vma->vm_private_data;
5546 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5547 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5548 proc->pid, vma->vm_start, vma->vm_end,
5549 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5550 (unsigned long)pgprot_val(vma->vm_page_prot));
5551 binder_alloc_vma_close(&proc->alloc);
5554 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5556 return VM_FAULT_SIGBUS;
5559 static const struct vm_operations_struct binder_vm_ops = {
5560 .open = binder_vma_open,
5561 .close = binder_vma_close,
5562 .fault = binder_vm_fault,
5565 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5567 struct binder_proc *proc = filp->private_data;
5569 if (proc->tsk != current->group_leader)
5572 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5573 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5574 __func__, proc->pid, vma->vm_start, vma->vm_end,
5575 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5576 (unsigned long)pgprot_val(vma->vm_page_prot));
5578 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5579 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5580 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5583 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5585 vma->vm_ops = &binder_vm_ops;
5586 vma->vm_private_data = proc;
5588 return binder_alloc_mmap_handler(&proc->alloc, vma);
5591 static int binder_open(struct inode *nodp, struct file *filp)
5593 struct binder_proc *proc, *itr;
5594 struct binder_device *binder_dev;
5595 struct binderfs_info *info;
5596 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5597 bool existing_pid = false;
5599 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5600 current->group_leader->pid, current->pid);
5602 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5605 spin_lock_init(&proc->inner_lock);
5606 spin_lock_init(&proc->outer_lock);
5607 get_task_struct(current->group_leader);
5608 proc->tsk = current->group_leader;
5609 proc->cred = get_cred(filp->f_cred);
5610 INIT_LIST_HEAD(&proc->todo);
5611 init_waitqueue_head(&proc->freeze_wait);
5612 proc->default_priority = task_nice(current);
5613 /* binderfs stashes devices in i_private */
5614 if (is_binderfs_device(nodp)) {
5615 binder_dev = nodp->i_private;
5616 info = nodp->i_sb->s_fs_info;
5617 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5619 binder_dev = container_of(filp->private_data,
5620 struct binder_device, miscdev);
5622 refcount_inc(&binder_dev->ref);
5623 proc->context = &binder_dev->context;
5624 binder_alloc_init(&proc->alloc);
5626 binder_stats_created(BINDER_STAT_PROC);
5627 proc->pid = current->group_leader->pid;
5628 INIT_LIST_HEAD(&proc->delivered_death);
5629 INIT_LIST_HEAD(&proc->waiting_threads);
5630 filp->private_data = proc;
5632 mutex_lock(&binder_procs_lock);
5633 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5634 if (itr->pid == proc->pid) {
5635 existing_pid = true;
5639 hlist_add_head(&proc->proc_node, &binder_procs);
5640 mutex_unlock(&binder_procs_lock);
5642 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5645 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5647 * proc debug entries are shared between contexts.
5648 * Only create for the first PID to avoid debugfs log spamming
5649 * The printing code will anyway print all contexts for a given
5650 * PID so this is not a problem.
5652 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5653 binder_debugfs_dir_entry_proc,
5654 (void *)(unsigned long)proc->pid,
5658 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5660 struct dentry *binderfs_entry;
5662 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5664 * Similar to debugfs, the process specific log file is shared
5665 * between contexts. Only create for the first PID.
5666 * This is ok since same as debugfs, the log file will contain
5667 * information on all contexts of a given PID.
5669 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5670 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5671 if (!IS_ERR(binderfs_entry)) {
5672 proc->binderfs_entry = binderfs_entry;
5676 error = PTR_ERR(binderfs_entry);
5677 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5685 static int binder_flush(struct file *filp, fl_owner_t id)
5687 struct binder_proc *proc = filp->private_data;
5689 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5694 static void binder_deferred_flush(struct binder_proc *proc)
5699 binder_inner_proc_lock(proc);
5700 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5701 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5703 thread->looper_need_return = true;
5704 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5705 wake_up_interruptible(&thread->wait);
5709 binder_inner_proc_unlock(proc);
5711 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5712 "binder_flush: %d woke %d threads\n", proc->pid,
5716 static int binder_release(struct inode *nodp, struct file *filp)
5718 struct binder_proc *proc = filp->private_data;
5720 debugfs_remove(proc->debugfs_entry);
5722 if (proc->binderfs_entry) {
5723 binderfs_remove_file(proc->binderfs_entry);
5724 proc->binderfs_entry = NULL;
5727 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5732 static int binder_node_release(struct binder_node *node, int refs)
5734 struct binder_ref *ref;
5736 struct binder_proc *proc = node->proc;
5738 binder_release_work(proc, &node->async_todo);
5740 binder_node_lock(node);
5741 binder_inner_proc_lock(proc);
5742 binder_dequeue_work_ilocked(&node->work);
5744 * The caller must have taken a temporary ref on the node,
5746 BUG_ON(!node->tmp_refs);
5747 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5748 binder_inner_proc_unlock(proc);
5749 binder_node_unlock(node);
5750 binder_free_node(node);
5756 node->local_strong_refs = 0;
5757 node->local_weak_refs = 0;
5758 binder_inner_proc_unlock(proc);
5760 spin_lock(&binder_dead_nodes_lock);
5761 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5762 spin_unlock(&binder_dead_nodes_lock);
5764 hlist_for_each_entry(ref, &node->refs, node_entry) {
5767 * Need the node lock to synchronize
5768 * with new notification requests and the
5769 * inner lock to synchronize with queued
5770 * death notifications.
5772 binder_inner_proc_lock(ref->proc);
5774 binder_inner_proc_unlock(ref->proc);
5780 BUG_ON(!list_empty(&ref->death->work.entry));
5781 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5782 binder_enqueue_work_ilocked(&ref->death->work,
5784 binder_wakeup_proc_ilocked(ref->proc);
5785 binder_inner_proc_unlock(ref->proc);
5788 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5789 "node %d now dead, refs %d, death %d\n",
5790 node->debug_id, refs, death);
5791 binder_node_unlock(node);
5792 binder_put_node(node);
5797 static void binder_deferred_release(struct binder_proc *proc)
5799 struct binder_context *context = proc->context;
5801 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5803 mutex_lock(&binder_procs_lock);
5804 hlist_del(&proc->proc_node);
5805 mutex_unlock(&binder_procs_lock);
5807 mutex_lock(&context->context_mgr_node_lock);
5808 if (context->binder_context_mgr_node &&
5809 context->binder_context_mgr_node->proc == proc) {
5810 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5811 "%s: %d context_mgr_node gone\n",
5812 __func__, proc->pid);
5813 context->binder_context_mgr_node = NULL;
5815 mutex_unlock(&context->context_mgr_node_lock);
5816 binder_inner_proc_lock(proc);
5818 * Make sure proc stays alive after we
5819 * remove all the threads
5823 proc->is_dead = true;
5824 proc->is_frozen = false;
5825 proc->sync_recv = false;
5826 proc->async_recv = false;
5828 active_transactions = 0;
5829 while ((n = rb_first(&proc->threads))) {
5830 struct binder_thread *thread;
5832 thread = rb_entry(n, struct binder_thread, rb_node);
5833 binder_inner_proc_unlock(proc);
5835 active_transactions += binder_thread_release(proc, thread);
5836 binder_inner_proc_lock(proc);
5841 while ((n = rb_first(&proc->nodes))) {
5842 struct binder_node *node;
5844 node = rb_entry(n, struct binder_node, rb_node);
5847 * take a temporary ref on the node before
5848 * calling binder_node_release() which will either
5849 * kfree() the node or call binder_put_node()
5851 binder_inc_node_tmpref_ilocked(node);
5852 rb_erase(&node->rb_node, &proc->nodes);
5853 binder_inner_proc_unlock(proc);
5854 incoming_refs = binder_node_release(node, incoming_refs);
5855 binder_inner_proc_lock(proc);
5857 binder_inner_proc_unlock(proc);
5860 binder_proc_lock(proc);
5861 while ((n = rb_first(&proc->refs_by_desc))) {
5862 struct binder_ref *ref;
5864 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5866 binder_cleanup_ref_olocked(ref);
5867 binder_proc_unlock(proc);
5868 binder_free_ref(ref);
5869 binder_proc_lock(proc);
5871 binder_proc_unlock(proc);
5873 binder_release_work(proc, &proc->todo);
5874 binder_release_work(proc, &proc->delivered_death);
5876 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5877 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5878 __func__, proc->pid, threads, nodes, incoming_refs,
5879 outgoing_refs, active_transactions);
5881 binder_proc_dec_tmpref(proc);
5884 static void binder_deferred_func(struct work_struct *work)
5886 struct binder_proc *proc;
5891 mutex_lock(&binder_deferred_lock);
5892 if (!hlist_empty(&binder_deferred_list)) {
5893 proc = hlist_entry(binder_deferred_list.first,
5894 struct binder_proc, deferred_work_node);
5895 hlist_del_init(&proc->deferred_work_node);
5896 defer = proc->deferred_work;
5897 proc->deferred_work = 0;
5902 mutex_unlock(&binder_deferred_lock);
5904 if (defer & BINDER_DEFERRED_FLUSH)
5905 binder_deferred_flush(proc);
5907 if (defer & BINDER_DEFERRED_RELEASE)
5908 binder_deferred_release(proc); /* frees proc */
5911 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5914 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5916 mutex_lock(&binder_deferred_lock);
5917 proc->deferred_work |= defer;
5918 if (hlist_unhashed(&proc->deferred_work_node)) {
5919 hlist_add_head(&proc->deferred_work_node,
5920 &binder_deferred_list);
5921 schedule_work(&binder_deferred_work);
5923 mutex_unlock(&binder_deferred_lock);
5926 static void print_binder_transaction_ilocked(struct seq_file *m,
5927 struct binder_proc *proc,
5929 struct binder_transaction *t)
5931 struct binder_proc *to_proc;
5932 struct binder_buffer *buffer = t->buffer;
5934 spin_lock(&t->lock);
5935 to_proc = t->to_proc;
5937 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5938 prefix, t->debug_id, t,
5939 t->from ? t->from->proc->pid : 0,
5940 t->from ? t->from->pid : 0,
5941 to_proc ? to_proc->pid : 0,
5942 t->to_thread ? t->to_thread->pid : 0,
5943 t->code, t->flags, t->priority, t->need_reply);
5944 spin_unlock(&t->lock);
5946 if (proc != to_proc) {
5948 * Can only safely deref buffer if we are holding the
5949 * correct proc inner lock for this node
5955 if (buffer == NULL) {
5956 seq_puts(m, " buffer free\n");
5959 if (buffer->target_node)
5960 seq_printf(m, " node %d", buffer->target_node->debug_id);
5961 seq_printf(m, " size %zd:%zd data %pK\n",
5962 buffer->data_size, buffer->offsets_size,
5966 static void print_binder_work_ilocked(struct seq_file *m,
5967 struct binder_proc *proc,
5969 const char *transaction_prefix,
5970 struct binder_work *w)
5972 struct binder_node *node;
5973 struct binder_transaction *t;
5976 case BINDER_WORK_TRANSACTION:
5977 t = container_of(w, struct binder_transaction, work);
5978 print_binder_transaction_ilocked(
5979 m, proc, transaction_prefix, t);
5981 case BINDER_WORK_RETURN_ERROR: {
5982 struct binder_error *e = container_of(
5983 w, struct binder_error, work);
5985 seq_printf(m, "%stransaction error: %u\n",
5988 case BINDER_WORK_TRANSACTION_COMPLETE:
5989 seq_printf(m, "%stransaction complete\n", prefix);
5991 case BINDER_WORK_NODE:
5992 node = container_of(w, struct binder_node, work);
5993 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5994 prefix, node->debug_id,
5995 (u64)node->ptr, (u64)node->cookie);
5997 case BINDER_WORK_DEAD_BINDER:
5998 seq_printf(m, "%shas dead binder\n", prefix);
6000 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6001 seq_printf(m, "%shas cleared dead binder\n", prefix);
6003 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6004 seq_printf(m, "%shas cleared death notification\n", prefix);
6007 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6012 static void print_binder_thread_ilocked(struct seq_file *m,
6013 struct binder_thread *thread,
6016 struct binder_transaction *t;
6017 struct binder_work *w;
6018 size_t start_pos = m->count;
6021 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6022 thread->pid, thread->looper,
6023 thread->looper_need_return,
6024 atomic_read(&thread->tmp_ref));
6025 header_pos = m->count;
6026 t = thread->transaction_stack;
6028 if (t->from == thread) {
6029 print_binder_transaction_ilocked(m, thread->proc,
6030 " outgoing transaction", t);
6032 } else if (t->to_thread == thread) {
6033 print_binder_transaction_ilocked(m, thread->proc,
6034 " incoming transaction", t);
6037 print_binder_transaction_ilocked(m, thread->proc,
6038 " bad transaction", t);
6042 list_for_each_entry(w, &thread->todo, entry) {
6043 print_binder_work_ilocked(m, thread->proc, " ",
6044 " pending transaction", w);
6046 if (!print_always && m->count == header_pos)
6047 m->count = start_pos;
6050 static void print_binder_node_nilocked(struct seq_file *m,
6051 struct binder_node *node)
6053 struct binder_ref *ref;
6054 struct binder_work *w;
6058 hlist_for_each_entry(ref, &node->refs, node_entry)
6061 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6062 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6063 node->has_strong_ref, node->has_weak_ref,
6064 node->local_strong_refs, node->local_weak_refs,
6065 node->internal_strong_refs, count, node->tmp_refs);
6067 seq_puts(m, " proc");
6068 hlist_for_each_entry(ref, &node->refs, node_entry)
6069 seq_printf(m, " %d", ref->proc->pid);
6073 list_for_each_entry(w, &node->async_todo, entry)
6074 print_binder_work_ilocked(m, node->proc, " ",
6075 " pending async transaction", w);
6079 static void print_binder_ref_olocked(struct seq_file *m,
6080 struct binder_ref *ref)
6082 binder_node_lock(ref->node);
6083 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6084 ref->data.debug_id, ref->data.desc,
6085 ref->node->proc ? "" : "dead ",
6086 ref->node->debug_id, ref->data.strong,
6087 ref->data.weak, ref->death);
6088 binder_node_unlock(ref->node);
6091 static void print_binder_proc(struct seq_file *m,
6092 struct binder_proc *proc, int print_all)
6094 struct binder_work *w;
6096 size_t start_pos = m->count;
6098 struct binder_node *last_node = NULL;
6100 seq_printf(m, "proc %d\n", proc->pid);
6101 seq_printf(m, "context %s\n", proc->context->name);
6102 header_pos = m->count;
6104 binder_inner_proc_lock(proc);
6105 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6106 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6107 rb_node), print_all);
6109 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6110 struct binder_node *node = rb_entry(n, struct binder_node,
6112 if (!print_all && !node->has_async_transaction)
6116 * take a temporary reference on the node so it
6117 * survives and isn't removed from the tree
6118 * while we print it.
6120 binder_inc_node_tmpref_ilocked(node);
6121 /* Need to drop inner lock to take node lock */
6122 binder_inner_proc_unlock(proc);
6124 binder_put_node(last_node);
6125 binder_node_inner_lock(node);
6126 print_binder_node_nilocked(m, node);
6127 binder_node_inner_unlock(node);
6129 binder_inner_proc_lock(proc);
6131 binder_inner_proc_unlock(proc);
6133 binder_put_node(last_node);
6136 binder_proc_lock(proc);
6137 for (n = rb_first(&proc->refs_by_desc);
6140 print_binder_ref_olocked(m, rb_entry(n,
6143 binder_proc_unlock(proc);
6145 binder_alloc_print_allocated(m, &proc->alloc);
6146 binder_inner_proc_lock(proc);
6147 list_for_each_entry(w, &proc->todo, entry)
6148 print_binder_work_ilocked(m, proc, " ",
6149 " pending transaction", w);
6150 list_for_each_entry(w, &proc->delivered_death, entry) {
6151 seq_puts(m, " has delivered dead binder\n");
6154 binder_inner_proc_unlock(proc);
6155 if (!print_all && m->count == header_pos)
6156 m->count = start_pos;
6159 static const char * const binder_return_strings[] = {
6164 "BR_ACQUIRE_RESULT",
6166 "BR_TRANSACTION_COMPLETE",
6171 "BR_ATTEMPT_ACQUIRE",
6176 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6179 "BR_ONEWAY_SPAM_SUSPECT",
6180 "BR_TRANSACTION_PENDING_FROZEN"
6183 static const char * const binder_command_strings[] = {
6186 "BC_ACQUIRE_RESULT",
6194 "BC_ATTEMPT_ACQUIRE",
6195 "BC_REGISTER_LOOPER",
6198 "BC_REQUEST_DEATH_NOTIFICATION",
6199 "BC_CLEAR_DEATH_NOTIFICATION",
6200 "BC_DEAD_BINDER_DONE",
6201 "BC_TRANSACTION_SG",
6205 static const char * const binder_objstat_strings[] = {
6212 "transaction_complete"
6215 static void print_binder_stats(struct seq_file *m, const char *prefix,
6216 struct binder_stats *stats)
6220 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6221 ARRAY_SIZE(binder_command_strings));
6222 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6223 int temp = atomic_read(&stats->bc[i]);
6226 seq_printf(m, "%s%s: %d\n", prefix,
6227 binder_command_strings[i], temp);
6230 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6231 ARRAY_SIZE(binder_return_strings));
6232 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6233 int temp = atomic_read(&stats->br[i]);
6236 seq_printf(m, "%s%s: %d\n", prefix,
6237 binder_return_strings[i], temp);
6240 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6241 ARRAY_SIZE(binder_objstat_strings));
6242 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6243 ARRAY_SIZE(stats->obj_deleted));
6244 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6245 int created = atomic_read(&stats->obj_created[i]);
6246 int deleted = atomic_read(&stats->obj_deleted[i]);
6248 if (created || deleted)
6249 seq_printf(m, "%s%s: active %d total %d\n",
6251 binder_objstat_strings[i],
6257 static void print_binder_proc_stats(struct seq_file *m,
6258 struct binder_proc *proc)
6260 struct binder_work *w;
6261 struct binder_thread *thread;
6263 int count, strong, weak, ready_threads;
6264 size_t free_async_space =
6265 binder_alloc_get_free_async_space(&proc->alloc);
6267 seq_printf(m, "proc %d\n", proc->pid);
6268 seq_printf(m, "context %s\n", proc->context->name);
6271 binder_inner_proc_lock(proc);
6272 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6275 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6278 seq_printf(m, " threads: %d\n", count);
6279 seq_printf(m, " requested threads: %d+%d/%d\n"
6280 " ready threads %d\n"
6281 " free async space %zd\n", proc->requested_threads,
6282 proc->requested_threads_started, proc->max_threads,
6286 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6288 binder_inner_proc_unlock(proc);
6289 seq_printf(m, " nodes: %d\n", count);
6293 binder_proc_lock(proc);
6294 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6295 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6298 strong += ref->data.strong;
6299 weak += ref->data.weak;
6301 binder_proc_unlock(proc);
6302 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6304 count = binder_alloc_get_allocated_count(&proc->alloc);
6305 seq_printf(m, " buffers: %d\n", count);
6307 binder_alloc_print_pages(m, &proc->alloc);
6310 binder_inner_proc_lock(proc);
6311 list_for_each_entry(w, &proc->todo, entry) {
6312 if (w->type == BINDER_WORK_TRANSACTION)
6315 binder_inner_proc_unlock(proc);
6316 seq_printf(m, " pending transactions: %d\n", count);
6318 print_binder_stats(m, " ", &proc->stats);
6321 static int state_show(struct seq_file *m, void *unused)
6323 struct binder_proc *proc;
6324 struct binder_node *node;
6325 struct binder_node *last_node = NULL;
6327 seq_puts(m, "binder state:\n");
6329 spin_lock(&binder_dead_nodes_lock);
6330 if (!hlist_empty(&binder_dead_nodes))
6331 seq_puts(m, "dead nodes:\n");
6332 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6334 * take a temporary reference on the node so it
6335 * survives and isn't removed from the list
6336 * while we print it.
6339 spin_unlock(&binder_dead_nodes_lock);
6341 binder_put_node(last_node);
6342 binder_node_lock(node);
6343 print_binder_node_nilocked(m, node);
6344 binder_node_unlock(node);
6346 spin_lock(&binder_dead_nodes_lock);
6348 spin_unlock(&binder_dead_nodes_lock);
6350 binder_put_node(last_node);
6352 mutex_lock(&binder_procs_lock);
6353 hlist_for_each_entry(proc, &binder_procs, proc_node)
6354 print_binder_proc(m, proc, 1);
6355 mutex_unlock(&binder_procs_lock);
6360 static int stats_show(struct seq_file *m, void *unused)
6362 struct binder_proc *proc;
6364 seq_puts(m, "binder stats:\n");
6366 print_binder_stats(m, "", &binder_stats);
6368 mutex_lock(&binder_procs_lock);
6369 hlist_for_each_entry(proc, &binder_procs, proc_node)
6370 print_binder_proc_stats(m, proc);
6371 mutex_unlock(&binder_procs_lock);
6376 static int transactions_show(struct seq_file *m, void *unused)
6378 struct binder_proc *proc;
6380 seq_puts(m, "binder transactions:\n");
6381 mutex_lock(&binder_procs_lock);
6382 hlist_for_each_entry(proc, &binder_procs, proc_node)
6383 print_binder_proc(m, proc, 0);
6384 mutex_unlock(&binder_procs_lock);
6389 static int proc_show(struct seq_file *m, void *unused)
6391 struct binder_proc *itr;
6392 int pid = (unsigned long)m->private;
6394 mutex_lock(&binder_procs_lock);
6395 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6396 if (itr->pid == pid) {
6397 seq_puts(m, "binder proc state:\n");
6398 print_binder_proc(m, itr, 1);
6401 mutex_unlock(&binder_procs_lock);
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);

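/*
 * Read-only files created under the "binder" debugfs directory,
 * typically visible as /sys/kernel/debug/binder/ when debugfs is
 * mounted in its usual place (e.g. "cat /sys/kernel/debug/binder/state").
 * Both transaction logs share transaction_log_fops and differ only
 * in the log they point at through ->data.
 */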
const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{ .name = "state", .mode = 0444, .fops = &state_fops, .data = NULL },
	{ .name = "stats", .mode = 0444, .fops = &stats_fops, .data = NULL },
	{ .name = "transactions", .mode = 0444, .fops = &transactions_fops,
	  .data = NULL },
	{ .name = "transaction_log", .mode = 0444, .fops = &transaction_log_fops,
	  .data = &binder_transaction_log },
	{ .name = "failed_transaction_log", .mode = 0444,
	  .fops = &transaction_log_fops, .data = &binder_transaction_log_failed },
	{} /* terminator */
};

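/*
 * Allocate a binder_device, register its misc character device
 * under the given name and add it to the global binder_devices
 * list. Only used for the devices named in binder_devices_param;
 * binderfs instances create their devices separately.
 */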
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

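/*
 * Module init: set up the page shrinker for the allocator, reset
 * both transaction logs (cur = ~0U so the first add wraps to 0),
 * populate debugfs, register the devices listed in
 * binder_devices_param (skipped when binderfs provides them) and
 * initialize binderfs itself.
 */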
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root) {
		const struct binder_debugfs_entry *db_entry;

		binder_for_each_debugfs_entry(db_entry)
			debugfs_create_file(db_entry->name,
					    db_entry->mode,
					    binder_debugfs_dir_entry_root,
					    db_entry->data,
					    db_entry->fops);

		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						binder_debugfs_dir_entry_root);
	}

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");