// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
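/*
 * Illustrative nesting sketch (editor's example, not part of the
 * driver): when more than one of the locks above is needed, they must
 * be taken in the 1) -> 2) -> 3) order shown, e.g.:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * binder_node_inner_lock() below bundles steps 2) and 3) for the
 * common case.
 */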
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};

static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
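/*
 * Illustrative usage (editor's note, assuming binder is built in with
 * sysfs enabled): module_param_named()/module_param_call() expose the
 * knobs above under /sys/module/binder/parameters/, e.g.:
 *
 *	echo 0xffff > /sys/module/binder/parameters/debug_mask
 *	echo 1 > /sys/module/binder/parameters/stop_on_user_error
 */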
static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}
#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * memset() zeroes the other fields.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
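/*
 * Illustrative example (editor's sketch): with the 32-entry ring above,
 * the first call returns cur == 1 and hands out entry[1]; the 33rd call
 * returns cur == 33, sets log->full (cur >= 32), and reuses
 * entry[33 % 32] == entry[1], overwriting the oldest record.
 */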
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc whose outer lock is released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc whose inner lock is released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node whose lock is released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node whose locks are released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of this work.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without it, threads risk
	 * waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of this work.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo);
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
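/*
 * Illustrative example (editor's note): rlimit_to_nice() maps an
 * RLIMIT_NICE value v to the lowest permitted nice level as
 * MAX_NICE + 1 - v, so a soft limit of 25 caps the request above at
 * nice -5, while the default limit of 0 caps it at 20 (> MAX_NICE) and
 * the RLIMIT_NICE-not-set error above is reported.
 */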
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);

	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								   &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
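/*
 * Illustrative usage sketch (editor's example): node lookups pair with
 * binder_put_node() once the local pointer is no longer needed:
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp_ref
 *	if (node) {
 *		...use node...
 *		binder_put_node(node);		// drops the tmp_ref
 *	}
 */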
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/* Find the smallest unused descriptor the "slow way" */
static u32 slow_desc_lookup_olocked(struct binder_proc *proc, u32 offset)
{
	struct binder_ref *ref;
	struct rb_node *n;
	u32 desc;

	desc = offset;
	for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > desc)
			break;
		desc = ref->data.desc + 1;
	}

	return desc;
}
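/*
 * Illustrative example (editor's note): with live descriptors {1, 2, 4}
 * and offset == 1, the loop above advances desc to 2, then 3, stops at
 * the ref with desc 4 (4 > 3), and returns 3, the smallest unused
 * descriptor at or above the offset.
 */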
/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_ref *ref;
	struct rb_node *parent;
	struct rb_node **p;
	u32 desc;

retry:
	p = &proc->refs_by_node.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	/* might release the proc->outer_lock */
	if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
		goto retry;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = desc;
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	struct dbitmap *dmap = &ref->proc->dmap;
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	if (dbitmap_enabled(dmap))
		dbitmap_clear_bit(dmap, ref->data.desc);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}

	if (ref->freeze) {
		binder_dequeue_work(ref->proc, &ref->freeze->work);
		binder_stats_deleted(BINDER_STAT_FREEZE);
	}

	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
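/*
 * Illustrative walk-through (editor's example): for a ref with
 * data.strong == 1 and data.weak == 1, a strong decrement drops
 * data.strong to 0 and releases the node's strong count, but the ref
 * itself survives; only the subsequent weak decrement brings both
 * counters to zero, at which point binder_cleanup_ref_olocked() runs
 * and %true tells the caller to free the ref.
 */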
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}
/* shrink descriptor bitmap if needed */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);

	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_dec_thread_txn() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
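/*
 * Illustrative example (editor's note): an object claimed at offset 6
 * is rejected by the IS_ALIGNED() check above no matter its type, since
 * object offsets must be 4-byte aligned; likewise an offset too close
 * to the end of the buffer fails the trailing size check and zero is
 * returned.
 */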
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};
/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = file_close_fd(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t off_end_offset,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)off_end_offset);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));

	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset = parent->buffer - buffer->user_data +
				fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}
2210 /* Clean up all the objects in the buffer */
2211 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2212 struct binder_thread *thread,
2213 struct binder_buffer *buffer,
2216 binder_size_t off_end_offset;
2218 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2219 off_end_offset += buffer->offsets_size;
2221 binder_transaction_buffer_release(proc, thread, buffer,
2222 off_end_offset, is_failure);
2225 static int binder_translate_binder(struct flat_binder_object *fp,
2226 struct binder_transaction *t,
2227 struct binder_thread *thread)
2229 struct binder_node *node;
2230 struct binder_proc *proc = thread->proc;
2231 struct binder_proc *target_proc = t->to_proc;
2232 struct binder_ref_data rdata;
2235 node = binder_get_node(proc, fp->binder);
2237 node = binder_new_node(proc, fp);
2241 if (fp->cookie != node->cookie) {
2242 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2243 proc->pid, thread->pid, (u64)fp->binder,
2244 node->debug_id, (u64)fp->cookie,
2249 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2254 ret = binder_inc_ref_for_node(target_proc, node,
2255 fp->hdr.type == BINDER_TYPE_BINDER,
2256 &thread->todo, &rdata);
2260 if (fp->hdr.type == BINDER_TYPE_BINDER)
2261 fp->hdr.type = BINDER_TYPE_HANDLE;
2263 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2265 fp->handle = rdata.desc;
2268 trace_binder_transaction_node_to_ref(t, node, &rdata);
2269 binder_debug(BINDER_DEBUG_TRANSACTION,
2270 " node %d u%016llx -> ref %d desc %d\n",
2271 node->debug_id, (u64)node->ptr,
2272 rdata.debug_id, rdata.desc);
2274 binder_put_node(node);
2278 static int binder_translate_handle(struct flat_binder_object *fp,
2279 struct binder_transaction *t,
2280 struct binder_thread *thread)
2282 struct binder_proc *proc = thread->proc;
2283 struct binder_proc *target_proc = t->to_proc;
2284 struct binder_node *node;
2285 struct binder_ref_data src_rdata;
2288 node = binder_get_node_from_ref(proc, fp->handle,
2289 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2291 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2292 proc->pid, thread->pid, fp->handle);
2295 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2300 binder_node_lock(node);
2301 if (node->proc == target_proc) {
2302 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2303 fp->hdr.type = BINDER_TYPE_BINDER;
2305 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2306 fp->binder = node->ptr;
2307 fp->cookie = node->cookie;
2309 binder_inner_proc_lock(node->proc);
2311 __acquire(&node->proc->inner_lock);
2312 binder_inc_node_nilocked(node,
2313 fp->hdr.type == BINDER_TYPE_BINDER,
2316 binder_inner_proc_unlock(node->proc);
2318 __release(&node->proc->inner_lock);
2319 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2320 binder_debug(BINDER_DEBUG_TRANSACTION,
2321 " ref %d desc %d -> node %d u%016llx\n",
2322 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2324 binder_node_unlock(node);
2326 struct binder_ref_data dest_rdata;
2328 binder_node_unlock(node);
2329 ret = binder_inc_ref_for_node(target_proc, node,
2330 fp->hdr.type == BINDER_TYPE_HANDLE,
2336 fp->handle = dest_rdata.desc;
2338 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2340 binder_debug(BINDER_DEBUG_TRANSACTION,
2341 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2342 src_rdata.debug_id, src_rdata.desc,
2343 dest_rdata.debug_id, dest_rdata.desc,
2347 binder_put_node(node);
2351 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2352 struct binder_transaction *t,
2353 struct binder_thread *thread,
2354 struct binder_transaction *in_reply_to)
2356 struct binder_proc *proc = thread->proc;
2357 struct binder_proc *target_proc = t->to_proc;
2358 struct binder_txn_fd_fixup *fixup;
2361 bool target_allows_fd;
2364 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2366 target_allows_fd = t->buffer->target_node->accept_fds;
2367 if (!target_allows_fd) {
2368 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2369 proc->pid, thread->pid,
2370 in_reply_to ? "reply" : "transaction",
2373 goto err_fd_not_accepted;
2378 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2379 proc->pid, thread->pid, fd);
2383 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2390 * Add fixup record for this transaction. The allocation
2391 * of the fd in the target needs to be done from a
2394 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2400 fixup->offset = fd_offset;
2401 fixup->target_fd = -1;
2402 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2403 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2411 err_fd_not_accepted:
2416 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2417 * @offset offset in target buffer to fixup
2418 * @skip_size bytes to skip in copy (fixup will be written later)
2419 * @fixup_data data to write at fixup offset
2422 * This is used for the pointer fixup list (pf) which is created and consumed
2423 * during binder_transaction() and is only accessed locally. No
2424 * locking is necessary.
2426 * The list is ordered by @offset.
2428 struct binder_ptr_fixup {
2429 binder_size_t offset;
2431 binder_uintptr_t fixup_data;
2432 struct list_head node;
2436 * struct binder_sg_copy - scatter-gather data to be copied
2437 * @offset offset in target buffer
2438 * @sender_uaddr user address in source buffer
2439 * @length bytes to copy
2442 * This is used for the sg copy list (sgc) which is created and consumed
2443 * during binder_transaction() and is only accessed locally. No
2444 * locking is necessary.
2446 * The list is ordered by @offset.
2448 struct binder_sg_copy {
2449 binder_size_t offset;
2450 const void __user *sender_uaddr;
2452 struct list_head node;
2456 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2457 * @alloc: binder_alloc associated with @buffer
2458 * @buffer: binder buffer in target process
2459 * @sgc_head: list_head of scatter-gather copy list
2460 * @pf_head: list_head of pointer fixup list
2462 * Processes all elements of @sgc_head, applying fixups from @pf_head
2463 * and copying the scatter-gather data from the source process' user
2464 * buffer to the target's buffer. It is expected that the list creation
2465 * and processing all occurs during binder_transaction() so these lists
2466 * are only accessed in local context.
2468 * Return: 0=success, else -errno
2470 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2471 struct binder_buffer *buffer,
2472 struct list_head *sgc_head,
2473 struct list_head *pf_head)
2476 struct binder_sg_copy *sgc, *tmpsgc;
2477 struct binder_ptr_fixup *tmppf;
2478 struct binder_ptr_fixup *pf =
2479 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2482 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2483 size_t bytes_copied = 0;
2485 while (bytes_copied < sgc->length) {
2487 size_t bytes_left = sgc->length - bytes_copied;
2488 size_t offset = sgc->offset + bytes_copied;
2491 * We copy up to the fixup (pointed to by pf)
2493 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2495 if (!ret && copy_size)
2496 ret = binder_alloc_copy_user_to_buffer(
2499 sgc->sender_uaddr + bytes_copied,
2501 bytes_copied += copy_size;
2502 if (copy_size != bytes_left) {
2504 /* we stopped at a fixup offset */
2505 if (pf->skip_size) {
2507 * we are just skipping. This is for
2508 * BINDER_TYPE_FDA where the translated
2509 * fds will be fixed up when we get
2510 * to target context.
2512 bytes_copied += pf->skip_size;
2514 /* apply the fixup indicated by pf */
2516 ret = binder_alloc_copy_to_buffer(
2520 sizeof(pf->fixup_data));
2521 bytes_copied += sizeof(pf->fixup_data);
2523 list_del(&pf->node);
2525 pf = list_first_entry_or_null(pf_head,
2526 struct binder_ptr_fixup, node);
2529 list_del(&sgc->node);
2532 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2533 BUG_ON(pf->skip_size == 0);
2534 list_del(&pf->node);
2537 BUG_ON(!list_empty(sgc_head));
2539 return ret > 0 ? -EINVAL : ret;
2543 * binder_cleanup_deferred_txn_lists() - free specified lists
2544 * @sgc_head: list_head of scatter-gather copy list
2545 * @pf_head: list_head of pointer fixup list
2547 * Called to clean up @sgc_head and @pf_head if there is an
2550 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2551 struct list_head *pf_head)
2553 struct binder_sg_copy *sgc, *tmpsgc;
2554 struct binder_ptr_fixup *pf, *tmppf;
2556 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2557 list_del(&sgc->node);
2560 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2561 list_del(&pf->node);
2567 * binder_defer_copy() - queue a scatter-gather buffer for copy
2568 * @sgc_head: list_head of scatter-gather copy list
2569 * @offset: binder buffer offset in target process
2570 * @sender_uaddr: user address in source process
2571 * @length: bytes to copy
2573 * Specify a scatter-gather block to be copied. The actual copy must
2574 * be deferred until all the needed fixups are identified and queued.
2575 * Then the copy and fixups are done together so un-translated values
2576 * from the source are never visible in the target buffer.
2578 * We are guaranteed that repeated calls to this function will have
2579 * monotonically increasing @offset values so the list will naturally
2582 * Return: 0=success, else -errno
2584 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2585 const void __user *sender_uaddr, size_t length)
2587 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2592 bc->offset = offset;
2593 bc->sender_uaddr = sender_uaddr;
2594 bc->length = length;
2595 INIT_LIST_HEAD(&bc->node);
2598 * We are guaranteed that the deferred copies are in-order
2599 * so just add to the tail.
2601 list_add_tail(&bc->node, sgc_head);
2607 * binder_add_fixup() - queue a fixup to be applied to sg copy
2608 * @pf_head: list_head of binder ptr fixup list
2609 * @offset: binder buffer offset in target process
2610 * @fixup: bytes to be copied for fixup
2611 * @skip_size: bytes to skip when copying (fixup will be applied later)
2613 * Add the specified fixup to a list ordered by @offset. When copying
2614 * the scatter-gather buffers, the fixup will be copied instead of
2615 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2616 * will be applied later (in target process context), so we just skip
2617 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2620 * This function is called *mostly* in @offset order, but there are
2621 * exceptions. Since out-of-order inserts are relatively uncommon,
2622 * we insert the new element by searching backward from the tail of
2625 * Return: 0=success, else -errno
2627 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2628 binder_uintptr_t fixup, size_t skip_size)
2630 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2631 struct binder_ptr_fixup *tmppf;
2636 pf->offset = offset;
2637 pf->fixup_data = fixup;
2638 pf->skip_size = skip_size;
2639 INIT_LIST_HEAD(&pf->node);
2641 /* Fixups are *mostly* added in-order, but there are some
2642 * exceptions. Look backwards through list for insertion point.
2644 list_for_each_entry_reverse(tmppf, pf_head, node) {
2645 if (tmppf->offset < pf->offset) {
2646 list_add(&pf->node, &tmppf->node);
2651 * if we get here, then the new offset is the lowest so
2652 * insert at the head
2654 list_add(&pf->node, pf_head);
2658 static int binder_translate_fd_array(struct list_head *pf_head,
2659 struct binder_fd_array_object *fda,
2660 const void __user *sender_ubuffer,
2661 struct binder_buffer_object *parent,
2662 struct binder_buffer_object *sender_uparent,
2663 struct binder_transaction *t,
2664 struct binder_thread *thread,
2665 struct binder_transaction *in_reply_to)
2667 binder_size_t fdi, fd_buf_size;
2668 binder_size_t fda_offset;
2669 const void __user *sender_ufda_base;
2670 struct binder_proc *proc = thread->proc;
2673 if (fda->num_fds == 0)
2676 fd_buf_size = sizeof(u32) * fda->num_fds;
2677 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2678 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2679 proc->pid, thread->pid, (u64)fda->num_fds);
2682 if (fd_buf_size > parent->length ||
2683 fda->parent_offset > parent->length - fd_buf_size) {
2684 /* No space for all file descriptors here. */
2685 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2686 proc->pid, thread->pid, (u64)fda->num_fds);
2690 * the source data for binder_buffer_object is visible
2691 * to user-space and the @buffer element is the user
2692 * pointer to the buffer_object containing the fd_array.
2693 * Convert the address to an offset relative to
2694 * the base of the transaction buffer.
2696 fda_offset = parent->buffer - t->buffer->user_data +
2698 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2701 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2702 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2703 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2704 proc->pid, thread->pid);
2707 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2711 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2713 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2714 binder_size_t sender_uoffset = fdi * sizeof(fd);
2716 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2718 ret = binder_translate_fd(fd, offset, t, thread,
2721 return ret > 0 ? -EINVAL : ret;
2726 static int binder_fixup_parent(struct list_head *pf_head,
2727 struct binder_transaction *t,
2728 struct binder_thread *thread,
2729 struct binder_buffer_object *bp,
2730 binder_size_t off_start_offset,
2731 binder_size_t num_valid,
2732 binder_size_t last_fixup_obj_off,
2733 binder_size_t last_fixup_min_off)
2735 struct binder_buffer_object *parent;
2736 struct binder_buffer *b = t->buffer;
2737 struct binder_proc *proc = thread->proc;
2738 struct binder_proc *target_proc = t->to_proc;
2739 struct binder_object object;
2740 binder_size_t buffer_offset;
2741 binder_size_t parent_offset;
2743 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2746 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2747 off_start_offset, &parent_offset,
2750 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2751 proc->pid, thread->pid);
2755 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2756 parent_offset, bp->parent_offset,
2758 last_fixup_min_off)) {
2759 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2760 proc->pid, thread->pid);
2764 if (parent->length < sizeof(binder_uintptr_t) ||
2765 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2766 /* No space for a pointer here! */
2767 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2768 proc->pid, thread->pid);
2772 buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
2774 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2778 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2779 * @t1: the pending async txn in the frozen process
2780 * @t2: the new async txn to supersede the outdated pending one
2782 * Return: true if t2 can supersede t1
2783 * false if t2 can not supersede t1
2785 static bool binder_can_update_transaction(struct binder_transaction *t1,
2786 struct binder_transaction *t2)
2788 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2789 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2791 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2792 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2793 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2794 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2800 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2801 * @t: new async transaction
2802 * @target_list: list to find outdated transaction
2804 * Return: the outdated transaction if found
2805 * NULL if no outdated transacton can be found
2807 * Requires the proc->inner_lock to be held.
2809 static struct binder_transaction *
2810 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2811 struct list_head *target_list)
2813 struct binder_work *w;
2815 list_for_each_entry(w, target_list, entry) {
2816 struct binder_transaction *t_queued;
2818 if (w->type != BINDER_WORK_TRANSACTION)
2820 t_queued = container_of(w, struct binder_transaction, work);
2821 if (binder_can_update_transaction(t_queued, t))
2828 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2829 * @t: transaction to send
2830 * @proc: process to send the transaction to
2831 * @thread: thread in @proc to send the transaction to (may be NULL)
2833 * This function queues a transaction to the specified process. It will try
2834 * to find a thread in the target process to handle the transaction and
2835 * wake it up. If no thread is found, the work is queued to the proc
2838 * If the @thread parameter is not NULL, the transaction is always queued
2839 * to the waitlist of that specific thread.
2841 * Return: 0 if the transaction was successfully queued
2842 * BR_DEAD_REPLY if the target process or thread is dead
2843 * BR_FROZEN_REPLY if the target process or thread is frozen and
2844 * the sync transaction was rejected
2845 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2846 * and the async transaction was successfully queued
2848 static int binder_proc_transaction(struct binder_transaction *t,
2849 struct binder_proc *proc,
2850 struct binder_thread *thread)
2852 struct binder_node *node = t->buffer->target_node;
2853 bool oneway = !!(t->flags & TF_ONE_WAY);
2854 bool pending_async = false;
2855 struct binder_transaction *t_outdated = NULL;
2856 bool frozen = false;
2859 binder_node_lock(node);
2862 if (node->has_async_transaction)
2863 pending_async = true;
2865 node->has_async_transaction = true;
2868 binder_inner_proc_lock(proc);
2869 if (proc->is_frozen) {
2871 proc->sync_recv |= !oneway;
2872 proc->async_recv |= oneway;
2875 if ((frozen && !oneway) || proc->is_dead ||
2876 (thread && thread->is_dead)) {
2877 binder_inner_proc_unlock(proc);
2878 binder_node_unlock(node);
2879 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2882 if (!thread && !pending_async)
2883 thread = binder_select_thread_ilocked(proc);
2886 binder_enqueue_thread_work_ilocked(thread, &t->work);
2887 } else if (!pending_async) {
2888 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2890 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2891 t_outdated = binder_find_outdated_transaction_ilocked(t,
2894 binder_debug(BINDER_DEBUG_TRANSACTION,
2895 "txn %d supersedes %d\n",
2896 t->debug_id, t_outdated->debug_id);
2897 list_del_init(&t_outdated->work.entry);
2898 proc->outstanding_txns--;
2901 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2905 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2907 proc->outstanding_txns++;
2908 binder_inner_proc_unlock(proc);
2909 binder_node_unlock(node);
2912 * To reduce potential contention, free the outdated transaction and
2913 * buffer after releasing the locks.
2916 struct binder_buffer *buffer = t_outdated->buffer;
2918 t_outdated->buffer = NULL;
2919 buffer->transaction = NULL;
2920 trace_binder_transaction_update_buffer_release(buffer);
2921 binder_release_entire_buffer(proc, NULL, buffer, false);
2922 binder_alloc_free_buf(&proc->alloc, buffer);
2924 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2927 if (oneway && frozen)
2928 return BR_TRANSACTION_PENDING_FROZEN;
2934 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2935 * @node: struct binder_node for which to get refs
2936 * @procp: returns @node->proc if valid
2937 * @error: if no @procp then returns BR_DEAD_REPLY
2939 * User-space normally keeps the node alive when creating a transaction
2940 * since it has a reference to the target. The local strong ref keeps it
2941 * alive if the sending process dies before the target process processes
2942 * the transaction. If the source process is malicious or has a reference
2943 * counting bug, relying on the local strong ref can fail.
2945 * Since user-space can cause the local strong ref to go away, we also take
2946 * a tmpref on the node to ensure it survives while we are constructing
2947 * the transaction. We also need a tmpref on the proc while we are
2948 * constructing the transaction, so we take that here as well.
2950 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2951 * Also sets @procp if valid. If the @node->proc is NULL indicating that the
2952 * target proc has died, @error is set to BR_DEAD_REPLY.
2954 static struct binder_node *binder_get_node_refs_for_txn(
2955 struct binder_node *node,
2956 struct binder_proc **procp,
2959 struct binder_node *target_node = NULL;
2961 binder_node_inner_lock(node);
2964 binder_inc_node_nilocked(node, 1, 0, NULL);
2965 binder_inc_node_tmpref_ilocked(node);
2966 node->proc->tmp_ref++;
2967 *procp = node->proc;
2969 *error = BR_DEAD_REPLY;
2970 binder_node_inner_unlock(node);
2975 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2976 uint32_t command, int32_t param)
2978 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2981 /* annotation for sparse */
2982 __release(&from->proc->inner_lock);
2986 /* don't override existing errors */
2987 if (from->ee.command == BR_OK)
2988 binder_set_extended_error(&from->ee, id, command, param);
2989 binder_inner_proc_unlock(from->proc);
2990 binder_thread_dec_tmpref(from);
2993 static void binder_transaction(struct binder_proc *proc,
2994 struct binder_thread *thread,
2995 struct binder_transaction_data *tr, int reply,
2996 binder_size_t extra_buffers_size)
2999 struct binder_transaction *t;
3000 struct binder_work *w;
3001 struct binder_work *tcomplete;
3002 binder_size_t buffer_offset = 0;
3003 binder_size_t off_start_offset, off_end_offset;
3004 binder_size_t off_min;
3005 binder_size_t sg_buf_offset, sg_buf_end_offset;
3006 binder_size_t user_offset = 0;
3007 struct binder_proc *target_proc = NULL;
3008 struct binder_thread *target_thread = NULL;
3009 struct binder_node *target_node = NULL;
3010 struct binder_transaction *in_reply_to = NULL;
3011 struct binder_transaction_log_entry *e;
3012 uint32_t return_error = 0;
3013 uint32_t return_error_param = 0;
3014 uint32_t return_error_line = 0;
3015 binder_size_t last_fixup_obj_off = 0;
3016 binder_size_t last_fixup_min_off = 0;
3017 struct binder_context *context = proc->context;
3018 int t_debug_id = atomic_inc_return(&binder_last_id);
3019 ktime_t t_start_time = ktime_get();
3020 struct lsm_context lsmctx = { };
3021 struct list_head sgc_head;
3022 struct list_head pf_head;
3023 const void __user *user_buffer = (const void __user *)
3024 (uintptr_t)tr->data.ptr.buffer;
3025 INIT_LIST_HEAD(&sgc_head);
3026 INIT_LIST_HEAD(&pf_head);
3028 e = binder_transaction_log_add(&binder_transaction_log);
3029 e->debug_id = t_debug_id;
3030 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3031 e->from_proc = proc->pid;
3032 e->from_thread = thread->pid;
3033 e->target_handle = tr->target.handle;
3034 e->data_size = tr->data_size;
3035 e->offsets_size = tr->offsets_size;
3036 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3038 binder_inner_proc_lock(proc);
3039 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
3040 binder_inner_proc_unlock(proc);
3043 binder_inner_proc_lock(proc);
3044 in_reply_to = thread->transaction_stack;
3045 if (in_reply_to == NULL) {
3046 binder_inner_proc_unlock(proc);
3047 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3048 proc->pid, thread->pid);
3049 return_error = BR_FAILED_REPLY;
3050 return_error_param = -EPROTO;
3051 return_error_line = __LINE__;
3052 goto err_empty_call_stack;
3054 if (in_reply_to->to_thread != thread) {
3055 spin_lock(&in_reply_to->lock);
3056 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3057 proc->pid, thread->pid, in_reply_to->debug_id,
3058 in_reply_to->to_proc ?
3059 in_reply_to->to_proc->pid : 0,
3060 in_reply_to->to_thread ?
3061 in_reply_to->to_thread->pid : 0);
3062 spin_unlock(&in_reply_to->lock);
3063 binder_inner_proc_unlock(proc);
3064 return_error = BR_FAILED_REPLY;
3065 return_error_param = -EPROTO;
3066 return_error_line = __LINE__;
3068 goto err_bad_call_stack;
3070 thread->transaction_stack = in_reply_to->to_parent;
3071 binder_inner_proc_unlock(proc);
3072 binder_set_nice(in_reply_to->saved_priority);
3073 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3074 if (target_thread == NULL) {
3075 /* annotation for sparse */
3076 __release(&target_thread->proc->inner_lock);
3077 binder_txn_error("%d:%d reply target not found\n",
3078 thread->pid, proc->pid);
3079 return_error = BR_DEAD_REPLY;
3080 return_error_line = __LINE__;
3081 goto err_dead_binder;
3083 if (target_thread->transaction_stack != in_reply_to) {
3084 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3085 proc->pid, thread->pid,
3086 target_thread->transaction_stack ?
3087 target_thread->transaction_stack->debug_id : 0,
3088 in_reply_to->debug_id);
3089 binder_inner_proc_unlock(target_thread->proc);
3090 return_error = BR_FAILED_REPLY;
3091 return_error_param = -EPROTO;
3092 return_error_line = __LINE__;
3094 target_thread = NULL;
3095 goto err_dead_binder;
3097 target_proc = target_thread->proc;
3098 target_proc->tmp_ref++;
3099 binder_inner_proc_unlock(target_thread->proc);
3101 if (tr->target.handle) {
3102 struct binder_ref *ref;
3105 * There must already be a strong ref
3106 * on this node. If so, do a strong
3107 * increment on the node to ensure it
3108 * stays alive until the transaction is
3111 binder_proc_lock(proc);
3112 ref = binder_get_ref_olocked(proc, tr->target.handle,
3115 target_node = binder_get_node_refs_for_txn(
3116 ref->node, &target_proc,
3119 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3120 proc->pid, thread->pid, tr->target.handle);
3121 return_error = BR_FAILED_REPLY;
3123 binder_proc_unlock(proc);
3125 mutex_lock(&context->context_mgr_node_lock);
3126 target_node = context->binder_context_mgr_node;
3128 target_node = binder_get_node_refs_for_txn(
3129 target_node, &target_proc,
3132 return_error = BR_DEAD_REPLY;
3133 mutex_unlock(&context->context_mgr_node_lock);
3134 if (target_node && target_proc->pid == proc->pid) {
3135 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3136 proc->pid, thread->pid);
3137 return_error = BR_FAILED_REPLY;
3138 return_error_param = -EINVAL;
3139 return_error_line = __LINE__;
3140 goto err_invalid_target_handle;
3144 binder_txn_error("%d:%d cannot find target node\n",
3145 thread->pid, proc->pid);
3147 * return_error is set above
3149 return_error_param = -EINVAL;
3150 return_error_line = __LINE__;
3151 goto err_dead_binder;
3153 e->to_node = target_node->debug_id;
3154 if (WARN_ON(proc == target_proc)) {
3155 binder_txn_error("%d:%d self transactions not allowed\n",
3156 thread->pid, proc->pid);
3157 return_error = BR_FAILED_REPLY;
3158 return_error_param = -EINVAL;
3159 return_error_line = __LINE__;
3160 goto err_invalid_target_handle;
3162 if (security_binder_transaction(proc->cred,
3163 target_proc->cred) < 0) {
3164 binder_txn_error("%d:%d transaction credentials failed\n",
3165 thread->pid, proc->pid);
3166 return_error = BR_FAILED_REPLY;
3167 return_error_param = -EPERM;
3168 return_error_line = __LINE__;
3169 goto err_invalid_target_handle;
3171 binder_inner_proc_lock(proc);
3173 w = list_first_entry_or_null(&thread->todo,
3174 struct binder_work, entry);
3175 if (!(tr->flags & TF_ONE_WAY) && w &&
3176 w->type == BINDER_WORK_TRANSACTION) {
3178 * Do not allow new outgoing transaction from a
3179 * thread that has a transaction at the head of
3180 * its todo list. Only need to check the head
3181 * because binder_select_thread_ilocked picks a
3182 * thread from proc->waiting_threads to enqueue
3183 * the transaction, and nothing is queued to the
3184 * todo list while the thread is on waiting_threads.
3186 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3187 proc->pid, thread->pid);
3188 binder_inner_proc_unlock(proc);
3189 return_error = BR_FAILED_REPLY;
3190 return_error_param = -EPROTO;
3191 return_error_line = __LINE__;
3192 goto err_bad_todo_list;
3195 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3196 struct binder_transaction *tmp;
3198 tmp = thread->transaction_stack;
3199 if (tmp->to_thread != thread) {
3200 spin_lock(&tmp->lock);
3201 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3202 proc->pid, thread->pid, tmp->debug_id,
3203 tmp->to_proc ? tmp->to_proc->pid : 0,
3205 tmp->to_thread->pid : 0);
3206 spin_unlock(&tmp->lock);
3207 binder_inner_proc_unlock(proc);
3208 return_error = BR_FAILED_REPLY;
3209 return_error_param = -EPROTO;
3210 return_error_line = __LINE__;
3211 goto err_bad_call_stack;
3214 struct binder_thread *from;
3216 spin_lock(&tmp->lock);
3218 if (from && from->proc == target_proc) {
3219 atomic_inc(&from->tmp_ref);
3220 target_thread = from;
3221 spin_unlock(&tmp->lock);
3224 spin_unlock(&tmp->lock);
3225 tmp = tmp->from_parent;
3228 binder_inner_proc_unlock(proc);
3231 e->to_thread = target_thread->pid;
3232 e->to_proc = target_proc->pid;
3234 /* TODO: reuse incoming transaction for reply */
3235 t = kzalloc(sizeof(*t), GFP_KERNEL);
3237 binder_txn_error("%d:%d cannot allocate transaction\n",
3238 thread->pid, proc->pid);
3239 return_error = BR_FAILED_REPLY;
3240 return_error_param = -ENOMEM;
3241 return_error_line = __LINE__;
3242 goto err_alloc_t_failed;
3244 INIT_LIST_HEAD(&t->fd_fixups);
3245 binder_stats_created(BINDER_STAT_TRANSACTION);
3246 spin_lock_init(&t->lock);
3248 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3249 if (tcomplete == NULL) {
3250 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3251 thread->pid, proc->pid);
3252 return_error = BR_FAILED_REPLY;
3253 return_error_param = -ENOMEM;
3254 return_error_line = __LINE__;
3255 goto err_alloc_tcomplete_failed;
3257 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3259 t->debug_id = t_debug_id;
3260 t->start_time = t_start_time;
3263 binder_debug(BINDER_DEBUG_TRANSACTION,
3264 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3265 proc->pid, thread->pid, t->debug_id,
3266 target_proc->pid, target_thread->pid,
3267 (u64)tr->data.ptr.buffer,
3268 (u64)tr->data.ptr.offsets,
3269 (u64)tr->data_size, (u64)tr->offsets_size,
3270 (u64)extra_buffers_size);
3272 binder_debug(BINDER_DEBUG_TRANSACTION,
3273 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3274 proc->pid, thread->pid, t->debug_id,
3275 target_proc->pid, target_node->debug_id,
3276 (u64)tr->data.ptr.buffer,
3277 (u64)tr->data.ptr.offsets,
3278 (u64)tr->data_size, (u64)tr->offsets_size,
3279 (u64)extra_buffers_size);
3281 if (!reply && !(tr->flags & TF_ONE_WAY))
3285 t->from_pid = proc->pid;
3286 t->from_tid = thread->pid;
3287 t->sender_euid = task_euid(proc->tsk);
3288 t->to_proc = target_proc;
3289 t->to_thread = target_thread;
3291 t->flags = tr->flags;
3292 t->priority = task_nice(current);
3294 if (target_node && target_node->txn_security_ctx) {
3298 security_cred_getsecid(proc->cred, &secid);
3299 ret = security_secid_to_secctx(secid, &lsmctx);
3301 binder_txn_error("%d:%d failed to get security context\n",
3302 thread->pid, proc->pid);
3303 return_error = BR_FAILED_REPLY;
3304 return_error_param = ret;
3305 return_error_line = __LINE__;
3306 goto err_get_secctx_failed;
3308 added_size = ALIGN(lsmctx.len, sizeof(u64));
3309 extra_buffers_size += added_size;
3310 if (extra_buffers_size < added_size) {
3311 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3312 thread->pid, proc->pid);
3313 return_error = BR_FAILED_REPLY;
3314 return_error_param = -EINVAL;
3315 return_error_line = __LINE__;
3316 goto err_bad_extra_size;
3320 trace_binder_transaction(reply, t, target_node);
3322 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3323 tr->offsets_size, extra_buffers_size,
3324 !reply && (t->flags & TF_ONE_WAY));
3325 if (IS_ERR(t->buffer)) {
3328 ret = PTR_ERR(t->buffer);
3329 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3330 : (ret == -ENOSPC) ? ": no space left"
3331 : (ret == -ENOMEM) ? ": memory allocation failed"
3333 binder_txn_error("cannot allocate buffer%s", s);
3335 return_error_param = PTR_ERR(t->buffer);
3336 return_error = return_error_param == -ESRCH ?
3337 BR_DEAD_REPLY : BR_FAILED_REPLY;
3338 return_error_line = __LINE__;
3340 goto err_binder_alloc_buf_failed;
3342 if (lsmctx.context) {
3344 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3345 ALIGN(tr->offsets_size, sizeof(void *)) +
3346 ALIGN(extra_buffers_size, sizeof(void *)) -
3347 ALIGN(lsmctx.len, sizeof(u64));
3349 t->security_ctx = t->buffer->user_data + buf_offset;
3350 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3351 t->buffer, buf_offset,
3352 lsmctx.context, lsmctx.len);
3354 t->security_ctx = 0;
3357 security_release_secctx(&lsmctx);
3358 lsmctx.context = NULL;
3360 t->buffer->debug_id = t->debug_id;
3361 t->buffer->transaction = t;
3362 t->buffer->target_node = target_node;
3363 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3364 trace_binder_transaction_alloc_buf(t->buffer);
3366 if (binder_alloc_copy_user_to_buffer(
3367 &target_proc->alloc,
3369 ALIGN(tr->data_size, sizeof(void *)),
3370 (const void __user *)
3371 (uintptr_t)tr->data.ptr.offsets,
3372 tr->offsets_size)) {
3373 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3374 proc->pid, thread->pid);
3375 return_error = BR_FAILED_REPLY;
3376 return_error_param = -EFAULT;
3377 return_error_line = __LINE__;
3378 goto err_copy_data_failed;
3380 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3381 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3382 proc->pid, thread->pid, (u64)tr->offsets_size);
3383 return_error = BR_FAILED_REPLY;
3384 return_error_param = -EINVAL;
3385 return_error_line = __LINE__;
3386 goto err_bad_offset;
3388 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3389 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3390 proc->pid, thread->pid,
3391 (u64)extra_buffers_size);
3392 return_error = BR_FAILED_REPLY;
3393 return_error_param = -EINVAL;
3394 return_error_line = __LINE__;
3395 goto err_bad_offset;
3397 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3398 buffer_offset = off_start_offset;
3399 off_end_offset = off_start_offset + tr->offsets_size;
3400 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3401 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3402 ALIGN(lsmctx.len, sizeof(u64));
3404 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3405 buffer_offset += sizeof(binder_size_t)) {
3406 struct binder_object_header *hdr;
3408 struct binder_object object;
3409 binder_size_t object_offset;
3410 binder_size_t copy_size;
3412 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3416 sizeof(object_offset))) {
3417 binder_txn_error("%d:%d copy offset from buffer failed\n",
3418 thread->pid, proc->pid);
3419 return_error = BR_FAILED_REPLY;
3420 return_error_param = -EINVAL;
3421 return_error_line = __LINE__;
3422 goto err_bad_offset;
3426 * Copy the source user buffer up to the next object
3427 * that will be processed.
3429 copy_size = object_offset - user_offset;
3430 if (copy_size && (user_offset > object_offset ||
3431 object_offset > tr->data_size ||
3432 binder_alloc_copy_user_to_buffer(
3433 &target_proc->alloc,
3434 t->buffer, user_offset,
3435 user_buffer + user_offset,
3437 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3438 proc->pid, thread->pid);
3439 return_error = BR_FAILED_REPLY;
3440 return_error_param = -EFAULT;
3441 return_error_line = __LINE__;
3442 goto err_copy_data_failed;
3444 object_size = binder_get_object(target_proc, user_buffer,
3445 t->buffer, object_offset, &object);
3446 if (object_size == 0 || object_offset < off_min) {
3447 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3448 proc->pid, thread->pid,
3451 (u64)t->buffer->data_size);
3452 return_error = BR_FAILED_REPLY;
3453 return_error_param = -EINVAL;
3454 return_error_line = __LINE__;
3455 goto err_bad_offset;
3458 * Set offset to the next buffer fragment to be
3461 user_offset = object_offset + object_size;
3464 off_min = object_offset + object_size;
3465 switch (hdr->type) {
3466 case BINDER_TYPE_BINDER:
3467 case BINDER_TYPE_WEAK_BINDER: {
3468 struct flat_binder_object *fp;
3470 fp = to_flat_binder_object(hdr);
3471 ret = binder_translate_binder(fp, t, thread);
3474 binder_alloc_copy_to_buffer(&target_proc->alloc,
3478 binder_txn_error("%d:%d translate binder failed\n",
3479 thread->pid, proc->pid);
3480 return_error = BR_FAILED_REPLY;
3481 return_error_param = ret;
3482 return_error_line = __LINE__;
3483 goto err_translate_failed;
3486 case BINDER_TYPE_HANDLE:
3487 case BINDER_TYPE_WEAK_HANDLE: {
3488 struct flat_binder_object *fp;
3490 fp = to_flat_binder_object(hdr);
3491 ret = binder_translate_handle(fp, t, thread);
3493 binder_alloc_copy_to_buffer(&target_proc->alloc,
3497 binder_txn_error("%d:%d translate handle failed\n",
3498 thread->pid, proc->pid);
3499 return_error = BR_FAILED_REPLY;
3500 return_error_param = ret;
3501 return_error_line = __LINE__;
3502 goto err_translate_failed;
3506 case BINDER_TYPE_FD: {
3507 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3508 binder_size_t fd_offset = object_offset +
3509 (uintptr_t)&fp->fd - (uintptr_t)fp;
3510 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3511 thread, in_reply_to);
3515 binder_alloc_copy_to_buffer(&target_proc->alloc,
3519 binder_txn_error("%d:%d translate fd failed\n",
3520 thread->pid, proc->pid);
3521 return_error = BR_FAILED_REPLY;
3522 return_error_param = ret;
3523 return_error_line = __LINE__;
3524 goto err_translate_failed;
3527 case BINDER_TYPE_FDA: {
3528 struct binder_object ptr_object;
3529 binder_size_t parent_offset;
3530 struct binder_object user_object;
3531 size_t user_parent_size;
3532 struct binder_fd_array_object *fda =
3533 to_binder_fd_array_object(hdr);
3534 size_t num_valid = (buffer_offset - off_start_offset) /
3535 sizeof(binder_size_t);
3536 struct binder_buffer_object *parent =
3537 binder_validate_ptr(target_proc, t->buffer,
3538 &ptr_object, fda->parent,
3543 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3544 proc->pid, thread->pid);
3545 return_error = BR_FAILED_REPLY;
3546 return_error_param = -EINVAL;
3547 return_error_line = __LINE__;
3548 goto err_bad_parent;
3550 if (!binder_validate_fixup(target_proc, t->buffer,
3555 last_fixup_min_off)) {
3556 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3557 proc->pid, thread->pid);
3558 return_error = BR_FAILED_REPLY;
3559 return_error_param = -EINVAL;
3560 return_error_line = __LINE__;
3561 goto err_bad_parent;
3564 * We need to read the user version of the parent
3565 * object to get the original user offset
3568 binder_get_object(proc, user_buffer, t->buffer,
3569 parent_offset, &user_object);
3570 if (user_parent_size != sizeof(user_object.bbo)) {
3571 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3572 proc->pid, thread->pid,
3574 sizeof(user_object.bbo));
3575 return_error = BR_FAILED_REPLY;
3576 return_error_param = -EINVAL;
3577 return_error_line = __LINE__;
3578 goto err_bad_parent;
3580 ret = binder_translate_fd_array(&pf_head, fda,
3581 user_buffer, parent,
3582 &user_object.bbo, t,
3583 thread, in_reply_to);
3585 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3590 binder_txn_error("%d:%d translate fd array failed\n",
3591 thread->pid, proc->pid);
3592 return_error = BR_FAILED_REPLY;
3593 return_error_param = ret > 0 ? -EINVAL : ret;
3594 return_error_line = __LINE__;
3595 goto err_translate_failed;
3597 last_fixup_obj_off = parent_offset;
3598 last_fixup_min_off =
3599 fda->parent_offset + sizeof(u32) * fda->num_fds;
3601 case BINDER_TYPE_PTR: {
3602 struct binder_buffer_object *bp =
3603 to_binder_buffer_object(hdr);
3604 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3607 if (bp->length > buf_left) {
3608 binder_user_error("%d:%d got transaction with too large buffer\n",
3609 proc->pid, thread->pid);
3610 return_error = BR_FAILED_REPLY;
3611 return_error_param = -EINVAL;
3612 return_error_line = __LINE__;
3613 goto err_bad_offset;
3615 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3616 (const void __user *)(uintptr_t)bp->buffer,
3619 binder_txn_error("%d:%d deferred copy failed\n",
3620 thread->pid, proc->pid);
3621 return_error = BR_FAILED_REPLY;
3622 return_error_param = ret;
3623 return_error_line = __LINE__;
3624 goto err_translate_failed;
3626 /* Fixup buffer pointer to target proc address space */
3627 bp->buffer = t->buffer->user_data + sg_buf_offset;
3628 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3630 num_valid = (buffer_offset - off_start_offset) /
3631 sizeof(binder_size_t);
3632 ret = binder_fixup_parent(&pf_head, t,
3637 last_fixup_min_off);
3639 binder_alloc_copy_to_buffer(&target_proc->alloc,
3643 binder_txn_error("%d:%d failed to fixup parent\n",
3644 thread->pid, proc->pid);
3645 return_error = BR_FAILED_REPLY;
3646 return_error_param = ret;
3647 return_error_line = __LINE__;
3648 goto err_translate_failed;
3650 last_fixup_obj_off = object_offset;
3651 last_fixup_min_off = 0;
3654 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3655 proc->pid, thread->pid, hdr->type);
3656 return_error = BR_FAILED_REPLY;
3657 return_error_param = -EINVAL;
3658 return_error_line = __LINE__;
3659 goto err_bad_object_type;
3662 /* Done processing objects, copy the rest of the buffer */
3663 if (binder_alloc_copy_user_to_buffer(
3664 &target_proc->alloc,
3665 t->buffer, user_offset,
3666 user_buffer + user_offset,
3667 tr->data_size - user_offset)) {
3668 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3669 proc->pid, thread->pid);
3670 return_error = BR_FAILED_REPLY;
3671 return_error_param = -EFAULT;
3672 return_error_line = __LINE__;
3673 goto err_copy_data_failed;
3676 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3677 &sgc_head, &pf_head);
3679 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3680 proc->pid, thread->pid);
3681 return_error = BR_FAILED_REPLY;
3682 return_error_param = ret;
3683 return_error_line = __LINE__;
3684 goto err_copy_data_failed;
3686 if (t->buffer->oneway_spam_suspect)
3687 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3689 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3690 t->work.type = BINDER_WORK_TRANSACTION;
3693 binder_enqueue_thread_work(thread, tcomplete);
3694 binder_inner_proc_lock(target_proc);
3695 if (target_thread->is_dead) {
3696 return_error = BR_DEAD_REPLY;
3697 binder_inner_proc_unlock(target_proc);
3698 goto err_dead_proc_or_thread;
3700 BUG_ON(t->buffer->async_transaction != 0);
3701 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3702 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3703 target_proc->outstanding_txns++;
3704 binder_inner_proc_unlock(target_proc);
3705 wake_up_interruptible_sync(&target_thread->wait);
3706 binder_free_transaction(in_reply_to);
3707 } else if (!(t->flags & TF_ONE_WAY)) {
3708 BUG_ON(t->buffer->async_transaction != 0);
3709 binder_inner_proc_lock(proc);
3711 * Defer the TRANSACTION_COMPLETE, so we don't return to
3712 * userspace immediately; this allows the target process to
3713 * immediately start processing this transaction, reducing
3714 * latency. We will then return the TRANSACTION_COMPLETE when
3715 * the target replies (or there is an error).
3717 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3719 t->from_parent = thread->transaction_stack;
3720 thread->transaction_stack = t;
3721 binder_inner_proc_unlock(proc);
3722 return_error = binder_proc_transaction(t,
3723 target_proc, target_thread);
3725 binder_inner_proc_lock(proc);
3726 binder_pop_transaction_ilocked(thread, t);
3727 binder_inner_proc_unlock(proc);
3728 goto err_dead_proc_or_thread;
3731 BUG_ON(target_node == NULL);
3732 BUG_ON(t->buffer->async_transaction != 1);
3733 return_error = binder_proc_transaction(t, target_proc, NULL);
3735 * Let the caller know when async transaction reaches a frozen
3736 * process and is put in a pending queue, waiting for the target
3737 * process to be unfrozen.
3739 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3740 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3741 binder_enqueue_thread_work(thread, tcomplete);
3743 return_error != BR_TRANSACTION_PENDING_FROZEN)
3744 goto err_dead_proc_or_thread;
3747 binder_thread_dec_tmpref(target_thread);
3748 binder_proc_dec_tmpref(target_proc);
3750 binder_dec_node_tmpref(target_node);
3752 * write barrier to synchronize with initialization
3756 WRITE_ONCE(e->debug_id_done, t_debug_id);
3759 err_dead_proc_or_thread:
3760 binder_txn_error("%d:%d dead process or thread\n",
3761 thread->pid, proc->pid);
3762 return_error_line = __LINE__;
3763 binder_dequeue_work(proc, tcomplete);
3764 err_translate_failed:
3765 err_bad_object_type:
3768 err_copy_data_failed:
3769 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3770 binder_free_txn_fixups(t);
3771 trace_binder_transaction_failed_buffer_release(t->buffer);
3772 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3773 buffer_offset, true);
3775 binder_dec_node_tmpref(target_node);
3777 t->buffer->transaction = NULL;
3778 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3779 err_binder_alloc_buf_failed:
3782 security_release_secctx(&lsmctx);
3783 err_get_secctx_failed:
3785 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3786 err_alloc_tcomplete_failed:
3787 if (trace_binder_txn_latency_free_enabled())
3788 binder_txn_latency_free(t);
3790 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3794 err_empty_call_stack:
3796 err_invalid_target_handle:
3798 binder_dec_node(target_node, 1, 0);
3799 binder_dec_node_tmpref(target_node);
3802 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3803 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3804 proc->pid, thread->pid, reply ? "reply" :
3805 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3806 target_proc ? target_proc->pid : 0,
3807 target_thread ? target_thread->pid : 0,
3808 t_debug_id, return_error, return_error_param,
3809 (u64)tr->data_size, (u64)tr->offsets_size,
3813 binder_thread_dec_tmpref(target_thread);
3815 binder_proc_dec_tmpref(target_proc);
3818 struct binder_transaction_log_entry *fe;
3820 e->return_error = return_error;
3821 e->return_error_param = return_error_param;
3822 e->return_error_line = return_error_line;
3823 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3826 * write barrier to synchronize with initialization
3830 WRITE_ONCE(e->debug_id_done, t_debug_id);
3831 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3834 BUG_ON(thread->return_error.cmd != BR_OK);
3836 binder_set_txn_from_error(in_reply_to, t_debug_id,
3837 return_error, return_error_param);
3838 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3839 binder_enqueue_thread_work(thread, &thread->return_error.work);
3840 binder_send_failed_reply(in_reply_to, return_error);
3842 binder_inner_proc_lock(proc);
3843 binder_set_extended_error(&thread->ee, t_debug_id,
3844 return_error, return_error_param);
3845 binder_inner_proc_unlock(proc);
3846 thread->return_error.cmd = return_error;
3847 binder_enqueue_thread_work(thread, &thread->return_error.work);
3852 binder_request_freeze_notification(struct binder_proc *proc,
3853 struct binder_thread *thread,
3854 struct binder_handle_cookie *handle_cookie)
3856 struct binder_ref_freeze *freeze;
3857 struct binder_ref *ref;
3859 freeze = kzalloc(sizeof(*freeze), GFP_KERNEL);
3862 binder_proc_lock(proc);
3863 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3865 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION invalid ref %d\n",
3866 proc->pid, thread->pid, handle_cookie->handle);
3867 binder_proc_unlock(proc);
3872 binder_node_lock(ref->node);
3874 binder_user_error("%d:%d BC_REQUEST_FREEZE_NOTIFICATION already set\n",
3875 proc->pid, thread->pid);
3876 binder_node_unlock(ref->node);
3877 binder_proc_unlock(proc);
3882 binder_stats_created(BINDER_STAT_FREEZE);
3883 INIT_LIST_HEAD(&freeze->work.entry);
3884 freeze->cookie = handle_cookie->cookie;
3885 freeze->work.type = BINDER_WORK_FROZEN_BINDER;
3886 ref->freeze = freeze;
3888 if (ref->node->proc) {
3889 binder_inner_proc_lock(ref->node->proc);
3890 freeze->is_frozen = ref->node->proc->is_frozen;
3891 binder_inner_proc_unlock(ref->node->proc);
3893 binder_inner_proc_lock(proc);
3894 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3895 binder_wakeup_proc_ilocked(proc);
3896 binder_inner_proc_unlock(proc);
3899 binder_node_unlock(ref->node);
3900 binder_proc_unlock(proc);
3905 binder_clear_freeze_notification(struct binder_proc *proc,
3906 struct binder_thread *thread,
3907 struct binder_handle_cookie *handle_cookie)
3909 struct binder_ref_freeze *freeze;
3910 struct binder_ref *ref;
3912 binder_proc_lock(proc);
3913 ref = binder_get_ref_olocked(proc, handle_cookie->handle, false);
3915 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION invalid ref %d\n",
3916 proc->pid, thread->pid, handle_cookie->handle);
3917 binder_proc_unlock(proc);
3921 binder_node_lock(ref->node);
3924 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n",
3925 proc->pid, thread->pid);
3926 binder_node_unlock(ref->node);
3927 binder_proc_unlock(proc);
3930 freeze = ref->freeze;
3931 binder_inner_proc_lock(proc);
3932 if (freeze->cookie != handle_cookie->cookie) {
3933 binder_user_error("%d:%d BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch %016llx != %016llx\n",
3934 proc->pid, thread->pid, (u64)freeze->cookie,
3935 (u64)handle_cookie->cookie);
3936 binder_inner_proc_unlock(proc);
3937 binder_node_unlock(ref->node);
3938 binder_proc_unlock(proc);
3943 * Take the existing freeze object and overwrite its work type. There are three cases here:
3944 * 1. No pending notification. In this case just add the work to the queue.
3945 * 2. A notification was sent and is pending an ack from userspace. Once an ack arrives, we
3946 * should resend with the new work type.
3947 * 3. A notification is pending to be sent. Since the work is already in the queue, nothing
3948 * needs to be done here.
3950 freeze->work.type = BINDER_WORK_CLEAR_FREEZE_NOTIFICATION;
3951 if (list_empty(&freeze->work.entry)) {
3952 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3953 binder_wakeup_proc_ilocked(proc);
3954 } else if (freeze->sent) {
3955 freeze->resend = true;
3957 binder_inner_proc_unlock(proc);
3958 binder_node_unlock(ref->node);
3959 binder_proc_unlock(proc);
3964 binder_freeze_notification_done(struct binder_proc *proc,
3965 struct binder_thread *thread,
3966 binder_uintptr_t cookie)
3968 struct binder_ref_freeze *freeze = NULL;
3969 struct binder_work *w;
3971 binder_inner_proc_lock(proc);
3972 list_for_each_entry(w, &proc->delivered_freeze, entry) {
3973 struct binder_ref_freeze *tmp_freeze =
3974 container_of(w, struct binder_ref_freeze, work);
3976 if (tmp_freeze->cookie == cookie) {
3977 freeze = tmp_freeze;
3982 binder_user_error("%d:%d BC_FREEZE_NOTIFICATION_DONE %016llx not found\n",
3983 proc->pid, thread->pid, (u64)cookie);
3984 binder_inner_proc_unlock(proc);
3987 binder_dequeue_work_ilocked(&freeze->work);
3988 freeze->sent = false;
3989 if (freeze->resend) {
3990 freeze->resend = false;
3991 binder_enqueue_work_ilocked(&freeze->work, &proc->todo);
3992 binder_wakeup_proc_ilocked(proc);
3994 binder_inner_proc_unlock(proc);
3999 * binder_free_buf() - free the specified buffer
4000 * @proc: binder proc that owns buffer
4001 * @buffer: buffer to be freed
4002 * @is_failure: failed to send transaction
4004 * If buffer for an async transaction, enqueue the next async
4005 * transaction from the node.
4007 * Cleanup buffer and free it.
4010 binder_free_buf(struct binder_proc *proc,
4011 struct binder_thread *thread,
4012 struct binder_buffer *buffer, bool is_failure)
4014 binder_inner_proc_lock(proc);
4015 if (buffer->transaction) {
4016 buffer->transaction->buffer = NULL;
4017 buffer->transaction = NULL;
4019 binder_inner_proc_unlock(proc);
4020 if (buffer->async_transaction && buffer->target_node) {
4021 struct binder_node *buf_node;
4022 struct binder_work *w;
4024 buf_node = buffer->target_node;
4025 binder_node_inner_lock(buf_node);
4026 BUG_ON(!buf_node->has_async_transaction);
4027 BUG_ON(buf_node->proc != proc);
4028 w = binder_dequeue_work_head_ilocked(
4029 &buf_node->async_todo);
4031 buf_node->has_async_transaction = false;
4033 binder_enqueue_work_ilocked(
4035 binder_wakeup_proc_ilocked(proc);
4037 binder_node_inner_unlock(buf_node);
4039 trace_binder_transaction_buffer_release(buffer);
4040 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4041 binder_alloc_free_buf(&proc->alloc, buffer);
4044 static int binder_thread_write(struct binder_proc *proc,
4045 struct binder_thread *thread,
4046 binder_uintptr_t binder_buffer, size_t size,
4047 binder_size_t *consumed)
4050 struct binder_context *context = proc->context;
4051 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4052 void __user *ptr = buffer + *consumed;
4053 void __user *end = buffer + size;
4055 while (ptr < end && thread->return_error.cmd == BR_OK) {
4058 if (get_user(cmd, (uint32_t __user *)ptr))
4060 ptr += sizeof(uint32_t);
4061 trace_binder_command(cmd);
4062 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4063 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4064 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4065 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4073 const char *debug_string;
4074 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4075 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4076 struct binder_ref_data rdata;
4078 if (get_user(target, (uint32_t __user *)ptr))
4079 return -EFAULT;
4081 ptr += sizeof(uint32_t);
4082 ret = -1;
4083 if (increment && !target) {
4084 struct binder_node *ctx_mgr_node;
4086 mutex_lock(&context->context_mgr_node_lock);
4087 ctx_mgr_node = context->binder_context_mgr_node;
4088 if (ctx_mgr_node) {
4089 if (ctx_mgr_node->proc == proc) {
4090 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4091 proc->pid, thread->pid);
4092 mutex_unlock(&context->context_mgr_node_lock);
4093 return -EINVAL;
4094 }
4095 ret = binder_inc_ref_for_node(
4096 proc, ctx_mgr_node,
4097 strong, NULL, &rdata);
4098 }
4099 mutex_unlock(&context->context_mgr_node_lock);
4100 }
4101 if (ret)
4102 ret = binder_update_ref_for_handle(
4103 proc, target, increment, strong,
4104 &rdata);
4105 if (!ret && rdata.desc != target) {
4106 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4107 proc->pid, thread->pid,
4108 target, rdata.desc);
4109 }
4110 switch (cmd) {
4111 case BC_INCREFS:
4112 debug_string = "IncRefs";
4113 break;
4114 case BC_ACQUIRE:
4115 debug_string = "Acquire";
4116 break;
4117 case BC_RELEASE:
4118 debug_string = "Release";
4119 break;
4120 case BC_DECREFS:
4121 default:
4122 debug_string = "DecRefs";
4123 break;
4124 }
4125 if (ret) {
4126 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4127 proc->pid, thread->pid, debug_string,
4128 strong, target, ret);
4129 break;
4130 }
4131 binder_debug(BINDER_DEBUG_USER_REFS,
4132 "%d:%d %s ref %d desc %d s %d w %d\n",
4133 proc->pid, thread->pid, debug_string,
4134 rdata.debug_id, rdata.desc, rdata.strong,
4135 rdata.weak);
4136 break;
4137 }
4138 case BC_INCREFS_DONE:
4139 case BC_ACQUIRE_DONE: {
4140 binder_uintptr_t node_ptr;
4141 binder_uintptr_t cookie;
4142 struct binder_node *node;
4143 bool free_node;
4145 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4146 return -EFAULT;
4147 ptr += sizeof(binder_uintptr_t);
4148 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4149 return -EFAULT;
4150 ptr += sizeof(binder_uintptr_t);
4151 node = binder_get_node(proc, node_ptr);
4152 if (!node) {
4153 binder_user_error("%d:%d %s u%016llx no match\n",
4154 proc->pid, thread->pid,
4155 cmd == BC_INCREFS_DONE ?
4156 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4157 (u64)node_ptr);
4158 break;
4159 }
4161 if (cookie != node->cookie) {
4162 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4163 proc->pid, thread->pid,
4164 cmd == BC_INCREFS_DONE ?
4165 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4166 (u64)node_ptr, node->debug_id,
4167 (u64)cookie, (u64)node->cookie);
4168 binder_put_node(node);
4169 break;
4170 }
4171 binder_node_inner_lock(node);
4172 if (cmd == BC_ACQUIRE_DONE) {
4173 if (node->pending_strong_ref == 0) {
4174 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4175 proc->pid, thread->pid,
4176 node->debug_id);
4177 binder_node_inner_unlock(node);
4178 binder_put_node(node);
4179 break;
4180 }
4181 node->pending_strong_ref = 0;
4182 } else {
4183 if (node->pending_weak_ref == 0) {
4184 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4185 proc->pid, thread->pid,
4186 node->debug_id);
4187 binder_node_inner_unlock(node);
4188 binder_put_node(node);
4189 break;
4190 }
4191 node->pending_weak_ref = 0;
4192 }
4193 free_node = binder_dec_node_nilocked(node,
4194 cmd == BC_ACQUIRE_DONE, 0);
4195 WARN_ON(free_node);
4196 binder_debug(BINDER_DEBUG_USER_REFS,
4197 "%d:%d %s node %d ls %d lw %d tr %d\n",
4198 proc->pid, thread->pid,
4199 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4200 node->debug_id, node->local_strong_refs,
4201 node->local_weak_refs, node->tmp_refs);
4202 binder_node_inner_unlock(node);
4203 binder_put_node(node);
4204 break;
4205 }
4206 case BC_ATTEMPT_ACQUIRE:
4207 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4208 return -EINVAL;
4209 case BC_ACQUIRE_RESULT:
4210 pr_err("BC_ACQUIRE_RESULT not supported\n");
4211 return -EINVAL;
4213 case BC_FREE_BUFFER: {
4214 binder_uintptr_t data_ptr;
4215 struct binder_buffer *buffer;
4217 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4218 return -EFAULT;
4219 ptr += sizeof(binder_uintptr_t);
4221 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4222 data_ptr);
4223 if (IS_ERR_OR_NULL(buffer)) {
4224 if (PTR_ERR(buffer) == -EPERM) {
4225 binder_user_error(
4226 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4227 proc->pid, thread->pid,
4228 (u64)data_ptr);
4229 } else {
4230 binder_user_error(
4231 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4232 proc->pid, thread->pid,
4233 (u64)data_ptr);
4234 }
4235 break;
4236 }
4237 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4238 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4239 proc->pid, thread->pid, (u64)data_ptr,
4240 buffer->debug_id,
4241 buffer->transaction ? "active" : "finished");
4242 binder_free_buf(proc, thread, buffer, false);
4243 break;
4244 }
4246 case BC_TRANSACTION_SG:
4247 case BC_REPLY_SG: {
4248 struct binder_transaction_data_sg tr;
4250 if (copy_from_user(&tr, ptr, sizeof(tr)))
4251 return -EFAULT;
4252 ptr += sizeof(tr);
4253 binder_transaction(proc, thread, &tr.transaction_data,
4254 cmd == BC_REPLY_SG, tr.buffers_size);
4255 break;
4256 }
4257 case BC_TRANSACTION:
4258 case BC_REPLY: {
4259 struct binder_transaction_data tr;
4261 if (copy_from_user(&tr, ptr, sizeof(tr)))
4262 return -EFAULT;
4263 ptr += sizeof(tr);
4264 binder_transaction(proc, thread, &tr,
4265 cmd == BC_REPLY, 0);
4266 break;
4267 }
4269 case BC_REGISTER_LOOPER:
4270 binder_debug(BINDER_DEBUG_THREADS,
4271 "%d:%d BC_REGISTER_LOOPER\n",
4272 proc->pid, thread->pid);
4273 binder_inner_proc_lock(proc);
4274 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4275 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4276 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4277 proc->pid, thread->pid);
4278 } else if (proc->requested_threads == 0) {
4279 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4280 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4281 proc->pid, thread->pid);
4282 } else {
4283 proc->requested_threads--;
4284 proc->requested_threads_started++;
4285 }
4286 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4287 binder_inner_proc_unlock(proc);
4288 break;
4289 case BC_ENTER_LOOPER:
4290 binder_debug(BINDER_DEBUG_THREADS,
4291 "%d:%d BC_ENTER_LOOPER\n",
4292 proc->pid, thread->pid);
4293 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4294 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4295 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4296 proc->pid, thread->pid);
4297 }
4298 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4299 break;
4300 case BC_EXIT_LOOPER:
4301 binder_debug(BINDER_DEBUG_THREADS,
4302 "%d:%d BC_EXIT_LOOPER\n",
4303 proc->pid, thread->pid);
4304 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4305 break;
4307 case BC_REQUEST_DEATH_NOTIFICATION:
4308 case BC_CLEAR_DEATH_NOTIFICATION: {
4310 binder_uintptr_t cookie;
4311 struct binder_ref *ref;
4312 struct binder_ref_death *death = NULL;
4314 if (get_user(target, (uint32_t __user *)ptr))
4316 ptr += sizeof(uint32_t);
4317 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4319 ptr += sizeof(binder_uintptr_t);
4320 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4321 /*
4322 * Allocate memory for death notification
4323 * before taking lock
4324 */
4325 death = kzalloc(sizeof(*death), GFP_KERNEL);
4326 if (death == NULL) {
4327 WARN_ON(thread->return_error.cmd !=
4328 BR_OK);
4329 thread->return_error.cmd = BR_ERROR;
4330 binder_enqueue_thread_work(
4331 thread,
4332 &thread->return_error.work);
4333 binder_debug(
4334 BINDER_DEBUG_FAILED_TRANSACTION,
4335 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4336 proc->pid, thread->pid);
4337 break;
4338 }
4339 }
4340 binder_proc_lock(proc);
4341 ref = binder_get_ref_olocked(proc, target, false);
4342 if (ref == NULL) {
4343 binder_user_error("%d:%d %s invalid ref %d\n",
4344 proc->pid, thread->pid,
4345 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4346 "BC_REQUEST_DEATH_NOTIFICATION" :
4347 "BC_CLEAR_DEATH_NOTIFICATION",
4348 target);
4349 binder_proc_unlock(proc);
4350 kfree(death);
4351 break;
4352 }
4354 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4355 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4356 proc->pid, thread->pid,
4357 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4358 "BC_REQUEST_DEATH_NOTIFICATION" :
4359 "BC_CLEAR_DEATH_NOTIFICATION",
4360 (u64)cookie, ref->data.debug_id,
4361 ref->data.desc, ref->data.strong,
4362 ref->data.weak, ref->node->debug_id);
4364 binder_node_lock(ref->node);
4365 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4366 if (ref->death) {
4367 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4368 proc->pid, thread->pid);
4369 binder_node_unlock(ref->node);
4370 binder_proc_unlock(proc);
4371 kfree(death);
4372 break;
4373 }
4374 binder_stats_created(BINDER_STAT_DEATH);
4375 INIT_LIST_HEAD(&death->work.entry);
4376 death->cookie = cookie;
4377 ref->death = death;
4378 if (ref->node->proc == NULL) {
4379 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4381 binder_inner_proc_lock(proc);
4382 binder_enqueue_work_ilocked(
4383 &ref->death->work, &proc->todo);
4384 binder_wakeup_proc_ilocked(proc);
4385 binder_inner_proc_unlock(proc);
4386 }
4387 } else {
4388 if (ref->death == NULL) {
4389 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4390 proc->pid, thread->pid);
4391 binder_node_unlock(ref->node);
4392 binder_proc_unlock(proc);
4393 break;
4394 }
4395 death = ref->death;
4396 if (death->cookie != cookie) {
4397 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4398 proc->pid, thread->pid,
4399 (u64)death->cookie,
4400 (u64)cookie);
4401 binder_node_unlock(ref->node);
4402 binder_proc_unlock(proc);
4403 break;
4404 }
4405 ref->death = NULL;
4406 binder_inner_proc_lock(proc);
4407 if (list_empty(&death->work.entry)) {
4408 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4409 if (thread->looper &
4410 (BINDER_LOOPER_STATE_REGISTERED |
4411 BINDER_LOOPER_STATE_ENTERED))
4412 binder_enqueue_thread_work_ilocked(
4413 thread,
4414 &death->work);
4415 else {
4416 binder_enqueue_work_ilocked(
4417 &death->work,
4418 &proc->todo);
4419 binder_wakeup_proc_ilocked(
4420 proc);
4421 }
4422 } else {
4423 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4424 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4425 }
4426 binder_inner_proc_unlock(proc);
4427 }
4428 binder_node_unlock(ref->node);
4429 binder_proc_unlock(proc);
4430 } break;
4431 case BC_DEAD_BINDER_DONE: {
4432 struct binder_work *w;
4433 binder_uintptr_t cookie;
4434 struct binder_ref_death *death = NULL;
4436 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4439 ptr += sizeof(cookie);
4440 binder_inner_proc_lock(proc);
4441 list_for_each_entry(w, &proc->delivered_death,
4442 entry) {
4443 struct binder_ref_death *tmp_death =
4444 container_of(w,
4445 struct binder_ref_death,
4446 work);
4448 if (tmp_death->cookie == cookie) {
4449 death = tmp_death;
4450 break;
4451 }
4452 }
4453 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4454 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4455 proc->pid, thread->pid, (u64)cookie,
4457 if (death == NULL) {
4458 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4459 proc->pid, thread->pid, (u64)cookie);
4460 binder_inner_proc_unlock(proc);
4463 binder_dequeue_work_ilocked(&death->work);
4464 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4465 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4466 if (thread->looper &
4467 (BINDER_LOOPER_STATE_REGISTERED |
4468 BINDER_LOOPER_STATE_ENTERED))
4469 binder_enqueue_thread_work_ilocked(
4470 thread, &death->work);
4471 else {
4472 binder_enqueue_work_ilocked(
4473 &death->work,
4474 &proc->todo);
4475 binder_wakeup_proc_ilocked(proc);
4476 }
4477 }
4478 binder_inner_proc_unlock(proc);
4481 case BC_REQUEST_FREEZE_NOTIFICATION: {
4482 struct binder_handle_cookie handle_cookie;
4485 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4487 ptr += sizeof(handle_cookie);
4488 error = binder_request_freeze_notification(proc, thread,
4494 case BC_CLEAR_FREEZE_NOTIFICATION: {
4495 struct binder_handle_cookie handle_cookie;
4498 if (copy_from_user(&handle_cookie, ptr, sizeof(handle_cookie)))
4500 ptr += sizeof(handle_cookie);
4501 error = binder_clear_freeze_notification(proc, thread, &handle_cookie);
4506 case BC_FREEZE_NOTIFICATION_DONE: {
4507 binder_uintptr_t cookie;
4510 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4513 ptr += sizeof(cookie);
4514 error = binder_freeze_notification_done(proc, thread, cookie);
4519 default:
4520 pr_err("%d:%d unknown command %u\n",
4521 proc->pid, thread->pid, cmd);
4522 return -EINVAL;
4523 }
4524 *consumed = ptr - buffer;
4525 }
4526 return 0;
4527 }
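/*
 * The write buffer parsed by binder_thread_write() is a packed stream
 * of records, each a 32-bit BC_* code followed by that command's
 * payload. A userspace sketch queueing two commands back to back
 * (illustrative only; assumes desc 1 is a valid handle and binder_fd
 * is an open device fd):
 *
 *	unsigned char buf[64];
 *	size_t off = 0;
 *	uint32_t cmd = BC_INCREFS, desc = 1;
 *	memcpy(buf + off, &cmd, 4); off += 4;
 *	memcpy(buf + off, &desc, 4); off += 4;
 *	cmd = BC_ACQUIRE;
 *	memcpy(buf + off, &cmd, 4); off += 4;
 *	memcpy(buf + off, &desc, 4); off += 4;
 *	struct binder_write_read bwr = {
 *		.write_size = off,
 *		.write_buffer = (binder_uintptr_t)buf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed reports how much of the stream was parsed
 */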
4529 static void binder_stat_br(struct binder_proc *proc,
4530 struct binder_thread *thread, uint32_t cmd)
4532 trace_binder_return(cmd);
4533 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4534 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4535 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4536 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4540 static int binder_put_node_cmd(struct binder_proc *proc,
4541 struct binder_thread *thread,
4542 void __user **ptrp,
4543 binder_uintptr_t node_ptr,
4544 binder_uintptr_t node_cookie,
4545 int node_debug_id,
4546 uint32_t cmd, const char *cmd_name)
4547 {
4548 void __user *ptr = *ptrp;
4550 if (put_user(cmd, (uint32_t __user *)ptr))
4552 ptr += sizeof(uint32_t);
4554 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4556 ptr += sizeof(binder_uintptr_t);
4558 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4560 ptr += sizeof(binder_uintptr_t);
4562 binder_stat_br(proc, thread, cmd);
4563 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4564 proc->pid, thread->pid, cmd_name, node_debug_id,
4565 (u64)node_ptr, (u64)node_cookie);
4567 *ptrp = ptr;
4568 return 0;
4569 }
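/*
 * On a 64-bit ABI each record emitted by binder_put_node_cmd() is
 * 4 + 8 + 8 bytes: the BR_* code, then the node's ptr and cookie.
 * Userspace decode sketch (illustrative; p points into the read
 * buffer):
 *
 *	uint32_t cmd;
 *	binder_uintptr_t ptr, cookie;
 *	memcpy(&cmd, p, 4);
 *	memcpy(&ptr, p + 4, 8);
 *	memcpy(&cookie, p + 12, 8);
 */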
4571 static int binder_wait_for_work(struct binder_thread *thread,
4572 bool do_proc_work)
4573 {
4574 DEFINE_WAIT(wait);
4575 struct binder_proc *proc = thread->proc;
4576 int ret = 0;
4578 binder_inner_proc_lock(proc);
4579 for (;;) {
4580 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4581 if (binder_has_work_ilocked(thread, do_proc_work))
4582 break;
4583 if (do_proc_work)
4584 list_add(&thread->waiting_thread_node,
4585 &proc->waiting_threads);
4586 binder_inner_proc_unlock(proc);
4587 schedule();
4588 binder_inner_proc_lock(proc);
4589 list_del_init(&thread->waiting_thread_node);
4590 if (signal_pending(current)) {
4591 ret = -EINTR;
4592 break;
4593 }
4594 }
4595 finish_wait(&thread->wait, &wait);
4596 binder_inner_proc_unlock(proc);
4598 return ret;
4599 }
4602 * binder_apply_fd_fixups() - finish fd translation
4603 * @proc: binder_proc associated @t->buffer
4604 * @t: binder transaction with list of fd fixups
4606 * Now that we are in the context of the transaction target
4607 * process, we can allocate and install fds. Process the
4608 * list of fds to translate and fixup the buffer with the
4609 * new fds first and only then install the files.
4611 * If we fail to allocate an fd, skip the install and release
4612 * any fds that have already been allocated.
4614 static int binder_apply_fd_fixups(struct binder_proc *proc,
4615 struct binder_transaction *t)
4617 struct binder_txn_fd_fixup *fixup, *tmp;
4618 int ret = 0;
4620 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4621 int fd = get_unused_fd_flags(O_CLOEXEC);
4623 if (fd < 0) {
4624 binder_debug(BINDER_DEBUG_TRANSACTION,
4625 "failed fd fixup txn %d fd %d\n",
4626 t->debug_id, fd);
4627 ret = -ENOMEM;
4628 goto err;
4629 }
4630 binder_debug(BINDER_DEBUG_TRANSACTION,
4631 "fd fixup txn %d fd %d\n",
4632 t->debug_id, fd);
4633 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4634 fixup->target_fd = fd;
4635 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4636 fixup->offset, &fd,
4637 sizeof(fd))) {
4638 ret = -EINVAL;
4639 goto err;
4640 }
4641 }
4642 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4643 fd_install(fixup->target_fd, fixup->file);
4644 list_del(&fixup->fixup_entry);
4645 kfree(fixup);
4646 }
4648 return ret;
4650 err:
4651 binder_free_txn_fixups(t);
4652 return ret;
4653 }
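/*
 * The two-phase scheme above is deliberate: fds are only reserved and
 * patched into the buffer in the first loop; fd_install() runs in the
 * second loop once every reservation has succeeded, so a mid-list
 * failure can back out through err: without having published any file
 * to the target's fd table. Simplified shape of the pattern:
 *
 *	for each fixup:
 *		fd = get_unused_fd_flags(O_CLOEXEC);	// reserve only
 *		if (fd < 0)
 *			goto err;			// nothing installed yet
 *		patch fd into the transaction buffer;
 *	for each fixup:
 *		fd_install(fd, file);			// publish, cannot fail
 */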
4655 static int binder_thread_read(struct binder_proc *proc,
4656 struct binder_thread *thread,
4657 binder_uintptr_t binder_buffer, size_t size,
4658 binder_size_t *consumed, int non_block)
4660 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4661 void __user *ptr = buffer + *consumed;
4662 void __user *end = buffer + size;
4665 int wait_for_proc_work;
4667 if (*consumed == 0) {
4668 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4669 return -EFAULT;
4670 ptr += sizeof(uint32_t);
4671 }
4673 retry:
4674 binder_inner_proc_lock(proc);
4675 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4676 binder_inner_proc_unlock(proc);
4678 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4680 trace_binder_wait_for_work(wait_for_proc_work,
4681 !!thread->transaction_stack,
4682 !binder_worklist_empty(proc, &thread->todo));
4683 if (wait_for_proc_work) {
4684 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4685 BINDER_LOOPER_STATE_ENTERED))) {
4686 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4687 proc->pid, thread->pid, thread->looper);
4688 wait_event_interruptible(binder_user_error_wait,
4689 binder_stop_on_user_error < 2);
4690 }
4691 binder_set_nice(proc->default_priority);
4692 }
4694 if (non_block) {
4695 if (!binder_has_work(thread, wait_for_proc_work))
4696 ret = -EAGAIN;
4697 } else {
4698 ret = binder_wait_for_work(thread, wait_for_proc_work);
4699 }
4701 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4703 if (ret)
4704 return ret;
4706 while (1) {
4707 uint32_t cmd;
4708 struct binder_transaction_data_secctx tr;
4709 struct binder_transaction_data *trd = &tr.transaction_data;
4710 struct binder_work *w = NULL;
4711 struct list_head *list = NULL;
4712 struct binder_transaction *t = NULL;
4713 struct binder_thread *t_from;
4714 size_t trsize = sizeof(*trd);
4716 binder_inner_proc_lock(proc);
4717 if (!binder_worklist_empty_ilocked(&thread->todo))
4718 list = &thread->todo;
4719 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4720 wait_for_proc_work)
4721 list = &proc->todo;
4722 else {
4723 binder_inner_proc_unlock(proc);
4725 /* no data added */
4726 if (ptr - buffer == 4 && !thread->looper_need_return)
4727 goto retry;
4728 break;
4729 }
4731 if (end - ptr < sizeof(tr) + 4) {
4732 binder_inner_proc_unlock(proc);
4733 break;
4734 }
4735 w = binder_dequeue_work_head_ilocked(list);
4736 if (binder_worklist_empty_ilocked(&thread->todo))
4737 thread->process_todo = false;
4739 switch (w->type) {
4740 case BINDER_WORK_TRANSACTION: {
4741 binder_inner_proc_unlock(proc);
4742 t = container_of(w, struct binder_transaction, work);
4743 } break;
4744 case BINDER_WORK_RETURN_ERROR: {
4745 struct binder_error *e = container_of(
4746 w, struct binder_error, work);
4748 WARN_ON(e->cmd == BR_OK);
4749 binder_inner_proc_unlock(proc);
4750 if (put_user(e->cmd, (uint32_t __user *)ptr))
4751 return -EFAULT;
4752 cmd = e->cmd;
4753 e->cmd = BR_OK;
4754 ptr += sizeof(uint32_t);
4756 binder_stat_br(proc, thread, cmd);
4757 } break;
4758 case BINDER_WORK_TRANSACTION_COMPLETE:
4759 case BINDER_WORK_TRANSACTION_PENDING:
4760 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4761 if (proc->oneway_spam_detection_enabled &&
4762 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4763 cmd = BR_ONEWAY_SPAM_SUSPECT;
4764 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4765 cmd = BR_TRANSACTION_PENDING_FROZEN;
4767 cmd = BR_TRANSACTION_COMPLETE;
4768 binder_inner_proc_unlock(proc);
4769 kfree(w);
4770 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4771 if (put_user(cmd, (uint32_t __user *)ptr))
4772 return -EFAULT;
4773 ptr += sizeof(uint32_t);
4775 binder_stat_br(proc, thread, cmd);
4776 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4777 "%d:%d BR_TRANSACTION_COMPLETE\n",
4778 proc->pid, thread->pid);
4780 case BINDER_WORK_NODE: {
4781 struct binder_node *node = container_of(w, struct binder_node, work);
4783 binder_uintptr_t node_ptr = node->ptr;
4784 binder_uintptr_t node_cookie = node->cookie;
4785 int node_debug_id = node->debug_id;
4788 void __user *orig_ptr = ptr;
4790 BUG_ON(proc != node->proc);
4791 strong = node->internal_strong_refs ||
4792 node->local_strong_refs;
4793 weak = !hlist_empty(&node->refs) ||
4794 node->local_weak_refs ||
4795 node->tmp_refs || strong;
4796 has_strong_ref = node->has_strong_ref;
4797 has_weak_ref = node->has_weak_ref;
4799 if (weak && !has_weak_ref) {
4800 node->has_weak_ref = 1;
4801 node->pending_weak_ref = 1;
4802 node->local_weak_refs++;
4804 if (strong && !has_strong_ref) {
4805 node->has_strong_ref = 1;
4806 node->pending_strong_ref = 1;
4807 node->local_strong_refs++;
4809 if (!strong && has_strong_ref)
4810 node->has_strong_ref = 0;
4811 if (!weak && has_weak_ref)
4812 node->has_weak_ref = 0;
4813 if (!weak && !strong) {
4814 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4815 "%d:%d node %d u%016llx c%016llx deleted\n",
4816 proc->pid, thread->pid,
4820 rb_erase(&node->rb_node, &proc->nodes);
4821 binder_inner_proc_unlock(proc);
4822 binder_node_lock(node);
4823 /*
4824 * Acquire the node lock before freeing the
4825 * node to serialize with other threads that
4826 * may have been holding the node lock while
4827 * decrementing this node (avoids race where
4828 * this thread frees while the other thread
4829 * is unlocking the node after the final
4830 * decrement)
4831 */
4832 binder_node_unlock(node);
4833 binder_free_node(node);
4834 } else
4835 binder_inner_proc_unlock(proc);
4837 if (weak && !has_weak_ref)
4838 ret = binder_put_node_cmd(
4839 proc, thread, &ptr, node_ptr,
4840 node_cookie, node_debug_id,
4841 BR_INCREFS, "BR_INCREFS");
4842 if (!ret && strong && !has_strong_ref)
4843 ret = binder_put_node_cmd(
4844 proc, thread, &ptr, node_ptr,
4845 node_cookie, node_debug_id,
4846 BR_ACQUIRE, "BR_ACQUIRE");
4847 if (!ret && !strong && has_strong_ref)
4848 ret = binder_put_node_cmd(
4849 proc, thread, &ptr, node_ptr,
4850 node_cookie, node_debug_id,
4851 BR_RELEASE, "BR_RELEASE");
4852 if (!ret && !weak && has_weak_ref)
4853 ret = binder_put_node_cmd(
4854 proc, thread, &ptr, node_ptr,
4855 node_cookie, node_debug_id,
4856 BR_DECREFS, "BR_DECREFS");
4857 if (orig_ptr == ptr)
4858 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4859 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4860 proc->pid, thread->pid,
4867 case BINDER_WORK_DEAD_BINDER:
4868 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4869 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4870 struct binder_ref_death *death;
4872 binder_uintptr_t cookie;
4874 death = container_of(w, struct binder_ref_death, work);
4875 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4876 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4877 else
4878 cmd = BR_DEAD_BINDER;
4879 cookie = death->cookie;
4881 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4882 "%d:%d %s %016llx\n",
4883 proc->pid, thread->pid,
4884 cmd == BR_DEAD_BINDER ?
4886 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4888 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4889 binder_inner_proc_unlock(proc);
4890 kfree(death);
4891 binder_stats_deleted(BINDER_STAT_DEATH);
4892 } else
4893 binder_enqueue_work_ilocked(
4894 w, &proc->delivered_death);
4895 binder_inner_proc_unlock(proc);
4897 if (put_user(cmd, (uint32_t __user *)ptr))
4899 ptr += sizeof(uint32_t);
4900 if (put_user(cookie,
4901 (binder_uintptr_t __user *)ptr))
4903 ptr += sizeof(binder_uintptr_t);
4904 binder_stat_br(proc, thread, cmd);
4905 if (cmd == BR_DEAD_BINDER)
4906 goto done; /* DEAD_BINDER notifications can cause transactions */
4907 } break;
4909 case BINDER_WORK_FROZEN_BINDER: {
4910 struct binder_ref_freeze *freeze;
4911 struct binder_frozen_state_info info;
4913 memset(&info, 0, sizeof(info));
4914 freeze = container_of(w, struct binder_ref_freeze, work);
4915 info.is_frozen = freeze->is_frozen;
4916 info.cookie = freeze->cookie;
4917 freeze->sent = true;
4918 binder_enqueue_work_ilocked(w, &proc->delivered_freeze);
4919 binder_inner_proc_unlock(proc);
4921 if (put_user(BR_FROZEN_BINDER, (uint32_t __user *)ptr))
4923 ptr += sizeof(uint32_t);
4924 if (copy_to_user(ptr, &info, sizeof(info)))
4926 ptr += sizeof(info);
4927 binder_stat_br(proc, thread, BR_FROZEN_BINDER);
4928 goto done; /* BR_FROZEN_BINDER notifications can cause transactions */
4929 } break;
4931 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
4932 struct binder_ref_freeze *freeze =
4933 container_of(w, struct binder_ref_freeze, work);
4934 binder_uintptr_t cookie = freeze->cookie;
4936 binder_inner_proc_unlock(proc);
4937 kfree(freeze);
4938 binder_stats_deleted(BINDER_STAT_FREEZE);
4939 if (put_user(BR_CLEAR_FREEZE_NOTIFICATION_DONE, (uint32_t __user *)ptr))
4940 return -EFAULT;
4941 ptr += sizeof(uint32_t);
4942 if (put_user(cookie, (binder_uintptr_t __user *)ptr))
4943 return -EFAULT;
4944 ptr += sizeof(binder_uintptr_t);
4945 binder_stat_br(proc, thread, BR_CLEAR_FREEZE_NOTIFICATION_DONE);
4946 } break;
4948 default:
4949 binder_inner_proc_unlock(proc);
4950 pr_err("%d:%d: bad work type %d\n",
4951 proc->pid, thread->pid, w->type);
4952 break;
4953 }
4955 if (!t)
4956 continue;
4958 BUG_ON(t->buffer == NULL);
4959 if (t->buffer->target_node) {
4960 struct binder_node *target_node = t->buffer->target_node;
4962 trd->target.ptr = target_node->ptr;
4963 trd->cookie = target_node->cookie;
4964 t->saved_priority = task_nice(current);
4965 if (t->priority < target_node->min_priority &&
4966 !(t->flags & TF_ONE_WAY))
4967 binder_set_nice(t->priority);
4968 else if (!(t->flags & TF_ONE_WAY) ||
4969 t->saved_priority > target_node->min_priority)
4970 binder_set_nice(target_node->min_priority);
4971 cmd = BR_TRANSACTION;
4972 } else {
4973 trd->target.ptr = 0;
4974 trd->cookie = 0;
4975 cmd = BR_REPLY;
4976 }
4977 trd->code = t->code;
4978 trd->flags = t->flags;
4979 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4981 t_from = binder_get_txn_from(t);
4982 if (t_from) {
4983 struct task_struct *sender = t_from->proc->tsk;
4985 trd->sender_pid =
4986 task_tgid_nr_ns(sender,
4987 task_active_pid_ns(current));
4988 } else {
4989 trd->sender_pid = 0;
4990 }
4992 ret = binder_apply_fd_fixups(proc, t);
4993 if (ret) {
4994 struct binder_buffer *buffer = t->buffer;
4995 bool oneway = !!(t->flags & TF_ONE_WAY);
4996 int tid = t->debug_id;
4998 if (t_from)
4999 binder_thread_dec_tmpref(t_from);
5000 buffer->transaction = NULL;
5001 binder_cleanup_transaction(t, "fd fixups failed",
5002 BR_FAILED_REPLY);
5003 binder_free_buf(proc, thread, buffer, true);
5004 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
5005 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
5006 proc->pid, thread->pid,
5007 oneway ? "async " :
5008 (cmd == BR_REPLY ? "reply " : ""),
5009 tid, BR_FAILED_REPLY, ret, __LINE__);
5010 if (cmd == BR_REPLY) {
5011 cmd = BR_FAILED_REPLY;
5012 if (put_user(cmd, (uint32_t __user *)ptr))
5013 return -EFAULT;
5014 ptr += sizeof(uint32_t);
5015 binder_stat_br(proc, thread, cmd);
5016 break;
5017 }
5018 continue;
5019 }
5020 trd->data_size = t->buffer->data_size;
5021 trd->offsets_size = t->buffer->offsets_size;
5022 trd->data.ptr.buffer = t->buffer->user_data;
5023 trd->data.ptr.offsets = trd->data.ptr.buffer +
5024 ALIGN(t->buffer->data_size,
5027 tr.secctx = t->security_ctx;
5028 if (t->security_ctx) {
5029 cmd = BR_TRANSACTION_SEC_CTX;
5030 trsize = sizeof(tr);
5032 if (put_user(cmd, (uint32_t __user *)ptr)) {
5033 if (t_from)
5034 binder_thread_dec_tmpref(t_from);
5036 binder_cleanup_transaction(t, "put_user failed",
5037 BR_FAILED_REPLY);
5039 return -EFAULT;
5040 }
5041 ptr += sizeof(uint32_t);
5042 if (copy_to_user(ptr, &tr, trsize)) {
5043 if (t_from)
5044 binder_thread_dec_tmpref(t_from);
5046 binder_cleanup_transaction(t, "copy_to_user failed",
5047 BR_FAILED_REPLY);
5049 return -EFAULT;
5050 }
5051 ptr += trsize;
5053 trace_binder_transaction_received(t);
5054 binder_stat_br(proc, thread, cmd);
5055 binder_debug(BINDER_DEBUG_TRANSACTION,
5056 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
5057 proc->pid, thread->pid,
5058 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
5059 (cmd == BR_TRANSACTION_SEC_CTX) ?
5060 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
5061 t->debug_id, t_from ? t_from->proc->pid : 0,
5062 t_from ? t_from->pid : 0, cmd,
5063 t->buffer->data_size, t->buffer->offsets_size,
5064 (u64)trd->data.ptr.buffer,
5065 (u64)trd->data.ptr.offsets);
5068 binder_thread_dec_tmpref(t_from);
5069 t->buffer->allow_user_free = 1;
5070 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5071 binder_inner_proc_lock(thread->proc);
5072 t->to_parent = thread->transaction_stack;
5073 t->to_thread = thread;
5074 thread->transaction_stack = t;
5075 binder_inner_proc_unlock(thread->proc);
5076 } else {
5077 binder_free_transaction(t);
5078 }
5079 break;
5080 }
5082 done:
5084 *consumed = ptr - buffer;
5085 binder_inner_proc_lock(proc);
5086 if (proc->requested_threads == 0 &&
5087 list_empty(&thread->proc->waiting_threads) &&
5088 proc->requested_threads_started < proc->max_threads &&
5089 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5090 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
5091 /*spawn a new thread if we leave this out */) {
5092 proc->requested_threads++;
5093 binder_inner_proc_unlock(proc);
5094 binder_debug(BINDER_DEBUG_THREADS,
5095 "%d:%d BR_SPAWN_LOOPER\n",
5096 proc->pid, thread->pid);
5097 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5098 return -EFAULT;
5099 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5100 } else
5101 binder_inner_proc_unlock(proc);
5103 return 0;
5104 }
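/*
 * Read-side counterpart (userspace sketch, illustrative; error
 * handling omitted): the buffer filled by binder_thread_read() is a
 * stream of 32-bit BR_* codes, each followed by its payload:
 *
 *	struct binder_write_read bwr = {
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	for (size_t off = 0; off < bwr.read_consumed; ) {
 *		uint32_t cmd;
 *		memcpy(&cmd, rbuf + off, 4);
 *		off += 4;
 *		switch (cmd) {
 *		case BR_NOOP:
 *			break;
 *		case BR_TRANSACTION: {
 *			struct binder_transaction_data tr;
 *			memcpy(&tr, rbuf + off, sizeof(tr));
 *			off += sizeof(tr);
 *			// handle it, then BC_FREE_BUFFER tr.data.ptr.buffer
 *			break;
 *		}
 *		// ... remaining BR_* payload sizes follow the cases above
 *		}
 *	}
 */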
5105 static void binder_release_work(struct binder_proc *proc,
5106 struct list_head *list)
5108 struct binder_work *w;
5109 enum binder_work_type wtype;
5111 while (1) {
5112 binder_inner_proc_lock(proc);
5113 w = binder_dequeue_work_head_ilocked(list);
5114 wtype = w ? w->type : 0;
5115 binder_inner_proc_unlock(proc);
5116 if (!w)
5117 return;
5119 switch (wtype) {
5120 case BINDER_WORK_TRANSACTION: {
5121 struct binder_transaction *t;
5123 t = container_of(w, struct binder_transaction, work);
5125 binder_cleanup_transaction(t, "process died.",
5128 case BINDER_WORK_RETURN_ERROR: {
5129 struct binder_error *e = container_of(
5130 w, struct binder_error, work);
5132 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5133 "undelivered TRANSACTION_ERROR: %u\n",
5136 case BINDER_WORK_TRANSACTION_PENDING:
5137 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
5138 case BINDER_WORK_TRANSACTION_COMPLETE: {
5139 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5140 "undelivered TRANSACTION_COMPLETE\n");
5142 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5144 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5145 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5146 struct binder_ref_death *death;
5148 death = container_of(w, struct binder_ref_death, work);
5149 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5150 "undelivered death notification, %016llx\n",
5151 (u64)death->cookie);
5153 binder_stats_deleted(BINDER_STAT_DEATH);
5155 case BINDER_WORK_NODE:
5157 case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION: {
5158 struct binder_ref_freeze *freeze;
5160 freeze = container_of(w, struct binder_ref_freeze, work);
5161 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5162 "undelivered freeze notification, %016llx\n",
5163 (u64)freeze->cookie);
5164 kfree(freeze);
5165 binder_stats_deleted(BINDER_STAT_FREEZE);
5166 } break;
5167 default:
5168 pr_err("unexpected work type, %d, not freed\n",
5169 wtype);
5170 break;
5171 }
5172 }
5173 }
5176 static struct binder_thread *binder_get_thread_ilocked(
5177 struct binder_proc *proc, struct binder_thread *new_thread)
5179 struct binder_thread *thread = NULL;
5180 struct rb_node *parent = NULL;
5181 struct rb_node **p = &proc->threads.rb_node;
5183 while (*p) {
5184 parent = *p;
5185 thread = rb_entry(parent, struct binder_thread, rb_node);
5187 if (current->pid < thread->pid)
5188 p = &(*p)->rb_left;
5189 else if (current->pid > thread->pid)
5190 p = &(*p)->rb_right;
5191 else
5192 return thread;
5193 }
5194 if (!new_thread)
5195 return NULL;
5196 thread = new_thread;
5197 binder_stats_created(BINDER_STAT_THREAD);
5198 thread->proc = proc;
5199 thread->pid = current->pid;
5200 atomic_set(&thread->tmp_ref, 0);
5201 init_waitqueue_head(&thread->wait);
5202 INIT_LIST_HEAD(&thread->todo);
5203 rb_link_node(&thread->rb_node, parent, p);
5204 rb_insert_color(&thread->rb_node, &proc->threads);
5205 thread->looper_need_return = true;
5206 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5207 thread->return_error.cmd = BR_OK;
5208 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5209 thread->reply_error.cmd = BR_OK;
5210 thread->ee.command = BR_OK;
5211 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5215 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5217 struct binder_thread *thread;
5218 struct binder_thread *new_thread;
5220 binder_inner_proc_lock(proc);
5221 thread = binder_get_thread_ilocked(proc, NULL);
5222 binder_inner_proc_unlock(proc);
5223 if (!thread) {
5224 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5225 if (new_thread == NULL)
5226 return NULL;
5227 binder_inner_proc_lock(proc);
5228 thread = binder_get_thread_ilocked(proc, new_thread);
5229 binder_inner_proc_unlock(proc);
5230 if (thread != new_thread)
5231 kfree(new_thread);
5232 }
5233 return thread;
5234 }
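/*
 * The lookup/allocate split above is the usual "allocate outside the
 * lock, re-check under the lock" pattern for spinlock-protected
 * structures: kzalloc() may sleep, so it cannot run under
 * proc->inner_lock, and the second lookup resolves the race where
 * another thread inserted the entry in between. Generic shape
 * (illustrative, names are placeholders):
 *
 *	item = lookup_locked(key);		// fast path
 *	if (!item) {
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);
 *		lock();
 *		item = lookup_or_insert_locked(key, new);
 *		unlock();
 *		if (item != new)		// lost the race
 *			kfree(new);
 *	}
 */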
5236 static void binder_free_proc(struct binder_proc *proc)
5238 struct binder_device *device;
5240 BUG_ON(!list_empty(&proc->todo));
5241 BUG_ON(!list_empty(&proc->delivered_death));
5242 if (proc->outstanding_txns)
5243 pr_warn("%s: Unexpected outstanding_txns %d\n",
5244 __func__, proc->outstanding_txns);
5245 device = container_of(proc->context, struct binder_device, context);
5246 if (refcount_dec_and_test(&device->ref)) {
5247 kfree(proc->context->name);
5250 binder_alloc_deferred_release(&proc->alloc);
5251 put_task_struct(proc->tsk);
5252 put_cred(proc->cred);
5253 binder_stats_deleted(BINDER_STAT_PROC);
5254 dbitmap_free(&proc->dmap);
5255 kfree(proc);
5256 }
5258 static void binder_free_thread(struct binder_thread *thread)
5260 BUG_ON(!list_empty(&thread->todo));
5261 binder_stats_deleted(BINDER_STAT_THREAD);
5262 binder_proc_dec_tmpref(thread->proc);
5266 static int binder_thread_release(struct binder_proc *proc,
5267 struct binder_thread *thread)
5269 struct binder_transaction *t;
5270 struct binder_transaction *send_reply = NULL;
5271 int active_transactions = 0;
5272 struct binder_transaction *last_t = NULL;
5274 binder_inner_proc_lock(thread->proc);
5275 /*
5276 * take a ref on the proc so it survives
5277 * after we remove this thread from proc->threads.
5278 * The corresponding dec is when we actually
5279 * free the thread in binder_free_thread()
5280 */
5281 proc->tmp_ref++;
5282 /*
5283 * take a ref on this thread to ensure it
5284 * survives while we are releasing it
5285 */
5286 atomic_inc(&thread->tmp_ref);
5287 rb_erase(&thread->rb_node, &proc->threads);
5288 t = thread->transaction_stack;
5289 if (t) {
5290 spin_lock(&t->lock);
5291 if (t->to_thread == thread)
5292 send_reply = t;
5293 } else {
5294 __acquire(&t->lock);
5295 }
5296 thread->is_dead = true;
5298 while (t) {
5299 last_t = t;
5300 active_transactions++;
5301 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5302 "release %d:%d transaction %d %s, still active\n",
5303 proc->pid, thread->pid,
5304 t->debug_id,
5305 (t->to_thread == thread) ? "in" : "out");
5307 if (t->to_thread == thread) {
5308 thread->proc->outstanding_txns--;
5309 t->to_proc = NULL;
5310 t->to_thread = NULL;
5311 if (t->buffer) {
5312 t->buffer->transaction = NULL;
5313 t->buffer = NULL;
5314 }
5315 t = t->to_parent;
5316 } else if (t->from == thread) {
5317 t->from = NULL;
5318 t = t->from_parent;
5319 } else
5320 BUG();
5321 spin_unlock(&last_t->lock);
5322 if (t)
5323 spin_lock(&t->lock);
5324 else
5325 __acquire(&t->lock);
5326 }
5327 /* annotation for sparse, lock not acquired in last iteration above */
5328 __release(&t->lock);
5331 * If this thread used poll, make sure we remove the waitqueue from any
5332 * poll data structures holding it.
5334 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5335 wake_up_pollfree(&thread->wait);
5337 binder_inner_proc_unlock(thread->proc);
5339 /*
5340 * This is needed to avoid races between wake_up_pollfree() above and
5341 * someone else removing the last entry from the queue for other reasons
5342 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5343 * descriptor being closed). Such other users hold an RCU read lock, so
5344 * we can be sure they're done after we call synchronize_rcu().
5345 */
5346 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5347 synchronize_rcu();
5349 if (send_reply)
5350 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5351 binder_release_work(proc, &thread->todo);
5352 binder_thread_dec_tmpref(thread);
5353 return active_transactions;
5354 }
5356 static __poll_t binder_poll(struct file *filp,
5357 struct poll_table_struct *wait)
5359 struct binder_proc *proc = filp->private_data;
5360 struct binder_thread *thread = NULL;
5361 bool wait_for_proc_work;
5363 thread = binder_get_thread(proc);
5364 if (!thread)
5365 return EPOLLERR;
5367 binder_inner_proc_lock(thread->proc);
5368 thread->looper |= BINDER_LOOPER_STATE_POLL;
5369 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5371 binder_inner_proc_unlock(thread->proc);
5373 poll_wait(filp, &thread->wait, wait);
5375 if (binder_has_work(thread, wait_for_proc_work))
5376 return EPOLLIN;
5378 return 0;
5379 }
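/*
 * Userspace sketch (illustrative; binder_fd is an open device fd): a
 * binder fd can be multiplexed with other fds via poll/epoll, and
 * EPOLLIN here means a BINDER_WRITE_READ read will not block:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// queued work: drain it with a BINDER_WRITE_READ read
 *	}
 */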
5381 static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
5382 struct binder_thread *thread)
5385 struct binder_proc *proc = filp->private_data;
5386 void __user *ubuf = (void __user *)arg;
5387 struct binder_write_read bwr;
5389 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5393 binder_debug(BINDER_DEBUG_READ_WRITE,
5394 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5395 proc->pid, thread->pid,
5396 (u64)bwr.write_size, (u64)bwr.write_buffer,
5397 (u64)bwr.read_size, (u64)bwr.read_buffer);
5399 if (bwr.write_size > 0) {
5400 ret = binder_thread_write(proc, thread,
5401 bwr.write_buffer,
5402 bwr.write_size,
5403 &bwr.write_consumed);
5404 trace_binder_write_done(ret);
5405 if (ret < 0) {
5406 bwr.read_consumed = 0;
5407 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5408 ret = -EFAULT;
5409 goto out;
5410 }
5411 }
5412 if (bwr.read_size > 0) {
5413 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5416 filp->f_flags & O_NONBLOCK);
5417 trace_binder_read_done(ret);
5418 binder_inner_proc_lock(proc);
5419 if (!binder_worklist_empty_ilocked(&proc->todo))
5420 binder_wakeup_proc_ilocked(proc);
5421 binder_inner_proc_unlock(proc);
5422 if (ret < 0) {
5423 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5424 ret = -EFAULT;
5425 goto out;
5426 }
5427 }
5428 binder_debug(BINDER_DEBUG_READ_WRITE,
5429 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5430 proc->pid, thread->pid,
5431 (u64)bwr.write_consumed, (u64)bwr.write_size,
5432 (u64)bwr.read_consumed, (u64)bwr.read_size);
5433 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5434 ret = -EFAULT;
5435 goto out;
5436 }
5437 out:
5438 return ret;
5439 }
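/*
 * Because the write half is always consumed before the read half, one
 * BINDER_WRITE_READ round trip can both submit commands and block for
 * the resulting returns (userspace sketch, illustrative):
 *
 *	struct binder_write_read bwr = {
 *		.write_size = wlen,
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *		.read_size = sizeof(rbuf),
 *		.read_buffer = (binder_uintptr_t)rbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */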
5441 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5442 struct flat_binder_object *fbo)
5445 struct binder_proc *proc = filp->private_data;
5446 struct binder_context *context = proc->context;
5447 struct binder_node *new_node;
5448 kuid_t curr_euid = current_euid();
5450 mutex_lock(&context->context_mgr_node_lock);
5451 if (context->binder_context_mgr_node) {
5452 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5453 ret = -EBUSY;
5454 goto out;
5455 }
5456 ret = security_binder_set_context_mgr(proc->cred);
5457 if (ret < 0)
5458 goto out;
5459 if (uid_valid(context->binder_context_mgr_uid)) {
5460 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5461 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5462 from_kuid(&init_user_ns, curr_euid),
5463 from_kuid(&init_user_ns,
5464 context->binder_context_mgr_uid));
5465 ret = -EPERM;
5466 goto out;
5467 }
5468 } else {
5469 context->binder_context_mgr_uid = curr_euid;
5470 }
5471 new_node = binder_new_node(proc, fbo);
5472 if (!new_node) {
5473 ret = -ENOMEM;
5474 goto out;
5475 }
5476 binder_node_lock(new_node);
5477 new_node->local_weak_refs++;
5478 new_node->local_strong_refs++;
5479 new_node->has_strong_ref = 1;
5480 new_node->has_weak_ref = 1;
5481 context->binder_context_mgr_node = new_node;
5482 binder_node_unlock(new_node);
5483 binder_put_node(new_node);
5484 out:
5485 mutex_unlock(&context->context_mgr_node_lock);
5486 return ret;
5487 }
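/*
 * Usage sketch: a service manager claims context-manager status (and
 * with it handle 0) right after opening its device node. The _EXT
 * variant passes a flat_binder_object so flags can be supplied; the
 * legacy ioctl passes no object (illustrative, error handling
 * omitted):
 *
 *	struct flat_binder_object fbo = { .flags = 0 };
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo);
 *	// legacy form: ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 */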
5489 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5490 struct binder_node_info_for_ref *info)
5492 struct binder_node *node;
5493 struct binder_context *context = proc->context;
5494 __u32 handle = info->handle;
5496 if (info->strong_count || info->weak_count || info->reserved1 ||
5497 info->reserved2 || info->reserved3) {
5498 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5499 proc->pid);
5500 return -EINVAL;
5501 }
5503 /* This ioctl may only be used by the context manager */
5504 mutex_lock(&context->context_mgr_node_lock);
5505 if (!context->binder_context_mgr_node ||
5506 context->binder_context_mgr_node->proc != proc) {
5507 mutex_unlock(&context->context_mgr_node_lock);
5508 return -EPERM;
5509 }
5510 mutex_unlock(&context->context_mgr_node_lock);
5512 node = binder_get_node_from_ref(proc, handle, true, NULL);
5513 if (!node)
5514 return -EINVAL;
5516 info->strong_count = node->local_strong_refs +
5517 node->internal_strong_refs;
5518 info->weak_count = node->local_weak_refs;
5520 binder_put_node(node);
5525 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5526 struct binder_node_debug_info *info)
5529 binder_uintptr_t ptr = info->ptr;
5531 memset(info, 0, sizeof(*info));
5533 binder_inner_proc_lock(proc);
5534 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5535 struct binder_node *node = rb_entry(n, struct binder_node,
5537 if (node->ptr > ptr) {
5538 info->ptr = node->ptr;
5539 info->cookie = node->cookie;
5540 info->has_strong_ref = node->has_strong_ref;
5541 info->has_weak_ref = node->has_weak_ref;
5542 break;
5543 }
5544 }
5545 binder_inner_proc_unlock(proc);
5547 return 0;
5548 }
5550 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5551 {
5552 struct rb_node *n;
5553 struct binder_thread *thread;
5555 if (proc->outstanding_txns > 0)
5556 return true;
5558 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5559 thread = rb_entry(n, struct binder_thread, rb_node);
5560 if (thread->transaction_stack)
5561 return true;
5562 }
5563 return false;
5564 }
5566 static void binder_add_freeze_work(struct binder_proc *proc, bool is_frozen)
5568 struct binder_node *prev = NULL;
5570 struct binder_ref *ref;
5572 binder_inner_proc_lock(proc);
5573 for (n = rb_first(&proc->nodes); n; n = rb_next(n)) {
5574 struct binder_node *node;
5576 node = rb_entry(n, struct binder_node, rb_node);
5577 binder_inc_node_tmpref_ilocked(node);
5578 binder_inner_proc_unlock(proc);
5580 binder_put_node(prev);
5581 binder_node_lock(node);
5582 hlist_for_each_entry(ref, &node->refs, node_entry) {
5584 * Need the node lock to synchronize
5585 * with new notification requests and the
5586 * inner lock to synchronize with queued
5587 * freeze notifications.
5589 binder_inner_proc_lock(ref->proc);
5590 if (!ref->freeze) {
5591 binder_inner_proc_unlock(ref->proc);
5592 continue;
5593 }
5594 ref->freeze->work.type = BINDER_WORK_FROZEN_BINDER;
5595 if (list_empty(&ref->freeze->work.entry)) {
5596 ref->freeze->is_frozen = is_frozen;
5597 binder_enqueue_work_ilocked(&ref->freeze->work, &ref->proc->todo);
5598 binder_wakeup_proc_ilocked(ref->proc);
5599 } else {
5600 if (ref->freeze->sent && ref->freeze->is_frozen != is_frozen)
5601 ref->freeze->resend = true;
5602 ref->freeze->is_frozen = is_frozen;
5603 }
5604 binder_inner_proc_unlock(ref->proc);
5605 }
5606 prev = node;
5607 binder_node_unlock(node);
5608 binder_inner_proc_lock(proc);
5609 if (proc->is_dead)
5610 break;
5611 }
5612 binder_inner_proc_unlock(proc);
5613 if (prev)
5614 binder_put_node(prev);
5615 }
5617 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5618 struct binder_proc *target_proc)
5622 if (!info->enable) {
5623 binder_inner_proc_lock(target_proc);
5624 target_proc->sync_recv = false;
5625 target_proc->async_recv = false;
5626 target_proc->is_frozen = false;
5627 binder_inner_proc_unlock(target_proc);
5628 binder_add_freeze_work(target_proc, false);
5629 return 0;
5630 }
5632 /*
5633 * Freezing the target. Prevent new transactions by
5634 * setting frozen state. If timeout specified, wait
5635 * for transactions to drain.
5636 */
5637 binder_inner_proc_lock(target_proc);
5638 target_proc->sync_recv = false;
5639 target_proc->async_recv = false;
5640 target_proc->is_frozen = true;
5641 binder_inner_proc_unlock(target_proc);
5643 if (info->timeout_ms > 0)
5644 ret = wait_event_interruptible_timeout(
5645 target_proc->freeze_wait,
5646 (!target_proc->outstanding_txns),
5647 msecs_to_jiffies(info->timeout_ms));
5649 /* Check pending transactions that wait for reply */
5650 if (ret >= 0) {
5651 binder_inner_proc_lock(target_proc);
5652 if (binder_txns_pending_ilocked(target_proc))
5653 ret = -EAGAIN;
5654 binder_inner_proc_unlock(target_proc);
5655 }
5657 if (ret < 0) {
5658 binder_inner_proc_lock(target_proc);
5659 target_proc->is_frozen = false;
5660 binder_inner_proc_unlock(target_proc);
5661 } else {
5662 binder_add_freeze_work(target_proc, true);
5663 }
5665 return ret;
5666 }
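/*
 * Usage sketch (e.g. a freezer daemon; illustrative, target_pid is a
 * placeholder): freeze with a drain timeout and treat -EAGAIN as
 * "transactions still pending":
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN) {
 *		info.enable = 0;
 *		ioctl(binder_fd, BINDER_FREEZE, &info);	// roll back
 *	}
 */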
5668 static int binder_ioctl_get_freezer_info(
5669 struct binder_frozen_status_info *info)
5670 {
5671 struct binder_proc *target_proc;
5672 bool found = false;
5673 __u32 txns_pending;
5675 info->sync_recv = 0;
5676 info->async_recv = 0;
5678 mutex_lock(&binder_procs_lock);
5679 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5680 if (target_proc->pid == info->pid) {
5681 found = true;
5682 binder_inner_proc_lock(target_proc);
5683 txns_pending = binder_txns_pending_ilocked(target_proc);
5684 info->sync_recv |= target_proc->sync_recv |
5685 (txns_pending << 1);
5686 info->async_recv |= target_proc->async_recv;
5687 binder_inner_proc_unlock(target_proc);
5688 }
5689 }
5690 mutex_unlock(&binder_procs_lock);
5692 if (!found)
5693 return -EINVAL;
5695 return 0;
5696 }
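/*
 * Note the bit packing above: bit 0 of sync_recv records that a sync
 * transaction was received while frozen, bit 1 that transactions were
 * still pending at query time. Userspace decode sketch (illustrative):
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *	ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info);
 *	bool sync_received = info.sync_recv & 1;
 *	bool txns_pending = info.sync_recv & 2;
 *	bool async_received = info.async_recv & 1;
 */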
5698 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5701 struct binder_extended_error ee;
5703 binder_inner_proc_lock(thread->proc);
5704 ee = thread->ee;
5705 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5706 binder_inner_proc_unlock(thread->proc);
5708 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5709 return -EFAULT;
5711 return 0;
5712 }
5714 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5717 struct binder_proc *proc = filp->private_data;
5718 struct binder_thread *thread;
5719 void __user *ubuf = (void __user *)arg;
5721 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5722 proc->pid, current->pid, cmd, arg);*/
5724 binder_selftest_alloc(&proc->alloc);
5726 trace_binder_ioctl(cmd, arg);
5728 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5729 if (ret)
5730 goto err_unlocked;
5732 thread = binder_get_thread(proc);
5733 if (thread == NULL) {
5734 ret = -ENOMEM;
5735 goto err;
5736 }
5738 switch (cmd) {
5739 case BINDER_WRITE_READ:
5740 ret = binder_ioctl_write_read(filp, arg, thread);
5744 case BINDER_SET_MAX_THREADS: {
5745 u32 max_threads;
5747 if (copy_from_user(&max_threads, ubuf,
5748 sizeof(max_threads))) {
5749 ret = -EINVAL;
5750 goto err;
5751 }
5752 binder_inner_proc_lock(proc);
5753 proc->max_threads = max_threads;
5754 binder_inner_proc_unlock(proc);
5757 case BINDER_SET_CONTEXT_MGR_EXT: {
5758 struct flat_binder_object fbo;
5760 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5761 ret = -EINVAL;
5762 goto err;
5763 }
5764 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5765 if (ret)
5766 goto err;
5767 break;
5768 }
5769 case BINDER_SET_CONTEXT_MGR:
5770 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5771 if (ret)
5772 goto err;
5773 break;
5774 case BINDER_THREAD_EXIT:
5775 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5776 proc->pid, thread->pid);
5777 binder_thread_release(proc, thread);
5778 thread = NULL;
5779 break;
5780 case BINDER_VERSION: {
5781 struct binder_version __user *ver = ubuf;
5783 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5784 &ver->protocol_version)) {
5785 ret = -EINVAL;
5786 goto err;
5787 }
5788 break;
5789 }
5790 case BINDER_GET_NODE_INFO_FOR_REF: {
5791 struct binder_node_info_for_ref info;
5793 if (copy_from_user(&info, ubuf, sizeof(info))) {
5794 ret = -EFAULT;
5795 goto err;
5796 }
5798 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5799 if (ret)
5800 goto err;
5802 if (copy_to_user(ubuf, &info, sizeof(info))) {
5803 ret = -EFAULT;
5804 goto err;
5805 }
5806 break;
5807 }
5809 case BINDER_GET_NODE_DEBUG_INFO: {
5810 struct binder_node_debug_info info;
5812 if (copy_from_user(&info, ubuf, sizeof(info))) {
5817 ret = binder_ioctl_get_node_debug_info(proc, &info);
5821 if (copy_to_user(ubuf, &info, sizeof(info))) {
5827 case BINDER_FREEZE: {
5828 struct binder_freeze_info info;
5829 struct binder_proc **target_procs = NULL, *target_proc;
5830 int target_procs_count = 0, i = 0;
5834 if (copy_from_user(&info, ubuf, sizeof(info))) {
5839 mutex_lock(&binder_procs_lock);
5840 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5841 if (target_proc->pid == info.pid)
5842 target_procs_count++;
5845 if (target_procs_count == 0) {
5846 mutex_unlock(&binder_procs_lock);
5851 target_procs = kcalloc(target_procs_count,
5852 sizeof(struct binder_proc *),
5855 if (!target_procs) {
5856 mutex_unlock(&binder_procs_lock);
5861 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5862 if (target_proc->pid != info.pid)
5865 binder_inner_proc_lock(target_proc);
5866 target_proc->tmp_ref++;
5867 binder_inner_proc_unlock(target_proc);
5869 target_procs[i++] = target_proc;
5871 mutex_unlock(&binder_procs_lock);
5873 for (i = 0; i < target_procs_count; i++) {
5874 if (ret >= 0)
5875 ret = binder_ioctl_freeze(&info,
5876 target_procs[i]);
5878 binder_proc_dec_tmpref(target_procs[i]);
5879 }
5881 kfree(target_procs);
5887 case BINDER_GET_FROZEN_INFO: {
5888 struct binder_frozen_status_info info;
5890 if (copy_from_user(&info, ubuf, sizeof(info))) {
5895 ret = binder_ioctl_get_freezer_info(&info);
5899 if (copy_to_user(ubuf, &info, sizeof(info))) {
5905 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5908 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5912 binder_inner_proc_lock(proc);
5913 proc->oneway_spam_detection_enabled = (bool)enable;
5914 binder_inner_proc_unlock(proc);
5917 case BINDER_GET_EXTENDED_ERROR:
5918 ret = binder_ioctl_get_extended_error(thread, ubuf);
5919 if (ret < 0)
5920 goto err;
5921 break;
5922 default:
5923 ret = -EINVAL;
5924 goto err;
5925 }
5926 ret = 0;
5927 err:
5928 if (thread)
5929 thread->looper_need_return = false;
5930 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5931 if (ret && ret != -EINTR)
5932 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5933 err_unlocked:
5934 trace_binder_ioctl_done(ret);
5935 return ret;
5936 }
5938 static void binder_vma_open(struct vm_area_struct *vma)
5940 struct binder_proc *proc = vma->vm_private_data;
5942 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5943 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5944 proc->pid, vma->vm_start, vma->vm_end,
5945 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5946 (unsigned long)pgprot_val(vma->vm_page_prot));
5949 static void binder_vma_close(struct vm_area_struct *vma)
5951 struct binder_proc *proc = vma->vm_private_data;
5953 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5954 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5955 proc->pid, vma->vm_start, vma->vm_end,
5956 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5957 (unsigned long)pgprot_val(vma->vm_page_prot));
5958 binder_alloc_vma_close(&proc->alloc);
5961 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5963 return VM_FAULT_SIGBUS;
5966 static const struct vm_operations_struct binder_vm_ops = {
5967 .open = binder_vma_open,
5968 .close = binder_vma_close,
5969 .fault = binder_vm_fault,
5972 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5973 {
5974 struct binder_proc *proc = filp->private_data;
5976 if (proc->tsk != current->group_leader)
5977 return -EINVAL;
5979 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5980 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5981 __func__, proc->pid, vma->vm_start, vma->vm_end,
5982 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5983 (unsigned long)pgprot_val(vma->vm_page_prot));
5985 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5986 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5987 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5988 return -EPERM;
5989 }
5990 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5992 vma->vm_ops = &binder_vm_ops;
5993 vma->vm_private_data = proc;
5995 return binder_alloc_mmap_handler(&proc->alloc, vma);
5996 }
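/*
 * Userspace sketch (illustrative; the 1 MiB size is an arbitrary
 * example): the mapping must not be writable (VM_WRITE is forbidden
 * above) and is where the driver places incoming transaction buffers:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 */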
5998 static int binder_open(struct inode *nodp, struct file *filp)
6000 struct binder_proc *proc, *itr;
6001 struct binder_device *binder_dev;
6002 struct binderfs_info *info;
6003 struct dentry *binder_binderfs_dir_entry_proc = NULL;
6004 bool existing_pid = false;
6006 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
6007 current->group_leader->pid, current->pid);
6009 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
6010 if (proc == NULL)
6011 return -ENOMEM;
6013 dbitmap_init(&proc->dmap);
6014 spin_lock_init(&proc->inner_lock);
6015 spin_lock_init(&proc->outer_lock);
6016 get_task_struct(current->group_leader);
6017 proc->tsk = current->group_leader;
6018 proc->cred = get_cred(filp->f_cred);
6019 INIT_LIST_HEAD(&proc->todo);
6020 init_waitqueue_head(&proc->freeze_wait);
6021 proc->default_priority = task_nice(current);
6022 /* binderfs stashes devices in i_private */
6023 if (is_binderfs_device(nodp)) {
6024 binder_dev = nodp->i_private;
6025 info = nodp->i_sb->s_fs_info;
6026 binder_binderfs_dir_entry_proc = info->proc_log_dir;
6027 } else {
6028 binder_dev = container_of(filp->private_data,
6029 struct binder_device, miscdev);
6030 }
6031 refcount_inc(&binder_dev->ref);
6032 proc->context = &binder_dev->context;
6033 binder_alloc_init(&proc->alloc);
6035 binder_stats_created(BINDER_STAT_PROC);
6036 proc->pid = current->group_leader->pid;
6037 INIT_LIST_HEAD(&proc->delivered_death);
6038 INIT_LIST_HEAD(&proc->delivered_freeze);
6039 INIT_LIST_HEAD(&proc->waiting_threads);
6040 filp->private_data = proc;
6042 mutex_lock(&binder_procs_lock);
6043 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6044 if (itr->pid == proc->pid) {
6045 existing_pid = true;
6046 break;
6047 }
6048 }
6049 hlist_add_head(&proc->proc_node, &binder_procs);
6050 mutex_unlock(&binder_procs_lock);
6052 if (binder_debugfs_dir_entry_proc && !existing_pid) {
6055 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6056 /*
6057 * proc debug entries are shared between contexts.
6058 * Only create for the first PID to avoid debugfs log spamming
6059 * The printing code will anyway print all contexts for a given
6060 * PID so this is not a problem.
6061 */
6062 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
6063 binder_debugfs_dir_entry_proc,
6064 (void *)(unsigned long)proc->pid,
6068 if (binder_binderfs_dir_entry_proc && !existing_pid) {
6070 struct dentry *binderfs_entry;
6072 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
6073 /*
6074 * Similar to debugfs, the process specific log file is shared
6075 * between contexts. Only create for the first PID.
6076 * This is ok since same as debugfs, the log file will contain
6077 * information on all contexts of a given PID.
6078 */
6079 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
6080 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
6081 if (!IS_ERR(binderfs_entry)) {
6082 proc->binderfs_entry = binderfs_entry;
6083 } else {
6084 int error;
6086 error = PTR_ERR(binderfs_entry);
6087 pr_warn("Unable to create file %s in binderfs (error %d)\n",
6088 strbuf, error);
6089 }
6090 }
6092 return 0;
6093 }
6095 static int binder_flush(struct file *filp, fl_owner_t id)
6097 struct binder_proc *proc = filp->private_data;
6099 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
6101 return 0;
6102 }
6104 static void binder_deferred_flush(struct binder_proc *proc)
6109 binder_inner_proc_lock(proc);
6110 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6111 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6113 thread->looper_need_return = true;
6114 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
6115 wake_up_interruptible(&thread->wait);
6119 binder_inner_proc_unlock(proc);
6121 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6122 "binder_flush: %d woke %d threads\n", proc->pid,
6126 static int binder_release(struct inode *nodp, struct file *filp)
6128 struct binder_proc *proc = filp->private_data;
6130 debugfs_remove(proc->debugfs_entry);
6132 if (proc->binderfs_entry) {
6133 binderfs_remove_file(proc->binderfs_entry);
6134 proc->binderfs_entry = NULL;
6137 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
6139 return 0;
6140 }
6142 static int binder_node_release(struct binder_node *node, int refs)
6143 {
6144 struct binder_ref *ref;
6145 int death = 0;
6146 struct binder_proc *proc = node->proc;
6148 binder_release_work(proc, &node->async_todo);
6150 binder_node_lock(node);
6151 binder_inner_proc_lock(proc);
6152 binder_dequeue_work_ilocked(&node->work);
6153 /*
6154 * The caller must have taken a temporary ref on the node,
6155 */
6156 BUG_ON(!node->tmp_refs);
6157 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
6158 binder_inner_proc_unlock(proc);
6159 binder_node_unlock(node);
6160 binder_free_node(node);
6162 return refs;
6163 }
6165 node->proc = NULL;
6166 node->local_strong_refs = 0;
6167 node->local_weak_refs = 0;
6168 binder_inner_proc_unlock(proc);
6170 spin_lock(&binder_dead_nodes_lock);
6171 hlist_add_head(&node->dead_node, &binder_dead_nodes);
6172 spin_unlock(&binder_dead_nodes_lock);
6174 hlist_for_each_entry(ref, &node->refs, node_entry) {
6175 refs++;
6176 /*
6177 * Need the node lock to synchronize
6178 * with new notification requests and the
6179 * inner lock to synchronize with queued
6180 * death notifications.
6181 */
6182 binder_inner_proc_lock(ref->proc);
6183 if (!ref->death) {
6184 binder_inner_proc_unlock(ref->proc);
6185 continue;
6186 }
6188 death++;
6190 BUG_ON(!list_empty(&ref->death->work.entry));
6191 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
6192 binder_enqueue_work_ilocked(&ref->death->work,
6194 binder_wakeup_proc_ilocked(ref->proc);
6195 binder_inner_proc_unlock(ref->proc);
6198 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6199 "node %d now dead, refs %d, death %d\n",
6200 node->debug_id, refs, death);
6201 binder_node_unlock(node);
6202 binder_put_node(node);
6204 return refs;
6205 }
6207 static void binder_deferred_release(struct binder_proc *proc)
6209 struct binder_context *context = proc->context;
6211 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
6213 mutex_lock(&binder_procs_lock);
6214 hlist_del(&proc->proc_node);
6215 mutex_unlock(&binder_procs_lock);
6217 mutex_lock(&context->context_mgr_node_lock);
6218 if (context->binder_context_mgr_node &&
6219 context->binder_context_mgr_node->proc == proc) {
6220 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6221 "%s: %d context_mgr_node gone\n",
6222 __func__, proc->pid);
6223 context->binder_context_mgr_node = NULL;
6225 mutex_unlock(&context->context_mgr_node_lock);
6226 binder_inner_proc_lock(proc);
6227 /*
6228 * Make sure proc stays alive after we
6229 * remove all the threads
6230 */
6231 proc->tmp_ref++;
6233 proc->is_dead = true;
6234 proc->is_frozen = false;
6235 proc->sync_recv = false;
6236 proc->async_recv = false;
6237 threads = 0;
6238 active_transactions = 0;
6239 while ((n = rb_first(&proc->threads))) {
6240 struct binder_thread *thread;
6242 thread = rb_entry(n, struct binder_thread, rb_node);
6243 binder_inner_proc_unlock(proc);
6244 threads++;
6245 active_transactions += binder_thread_release(proc, thread);
6246 binder_inner_proc_lock(proc);
6247 }
6249 nodes = 0;
6250 incoming_refs = 0;
6251 while ((n = rb_first(&proc->nodes))) {
6252 struct binder_node *node;
6254 node = rb_entry(n, struct binder_node, rb_node);
6255 nodes++;
6256 /*
6257 * take a temporary ref on the node before
6258 * calling binder_node_release() which will either
6259 * kfree() the node or call binder_put_node()
6260 */
6261 binder_inc_node_tmpref_ilocked(node);
6262 rb_erase(&node->rb_node, &proc->nodes);
6263 binder_inner_proc_unlock(proc);
6264 incoming_refs = binder_node_release(node, incoming_refs);
6265 binder_inner_proc_lock(proc);
6266 }
6267 binder_inner_proc_unlock(proc);
6269 outgoing_refs = 0;
6270 binder_proc_lock(proc);
6271 while ((n = rb_first(&proc->refs_by_desc))) {
6272 struct binder_ref *ref;
6274 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6276 binder_cleanup_ref_olocked(ref);
6277 binder_proc_unlock(proc);
6278 binder_free_ref(ref);
6279 binder_proc_lock(proc);
6281 binder_proc_unlock(proc);
6283 binder_release_work(proc, &proc->todo);
6284 binder_release_work(proc, &proc->delivered_death);
6285 binder_release_work(proc, &proc->delivered_freeze);
6287 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6288 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6289 __func__, proc->pid, threads, nodes, incoming_refs,
6290 outgoing_refs, active_transactions);
6292 binder_proc_dec_tmpref(proc);
6295 static void binder_deferred_func(struct work_struct *work)
6297 struct binder_proc *proc;
6302 mutex_lock(&binder_deferred_lock);
6303 if (!hlist_empty(&binder_deferred_list)) {
6304 proc = hlist_entry(binder_deferred_list.first,
6305 struct binder_proc, deferred_work_node);
6306 hlist_del_init(&proc->deferred_work_node);
6307 defer = proc->deferred_work;
6308 proc->deferred_work = 0;
6309 } else {
6310 proc = NULL;
6311 defer = 0;
6312 }
6313 mutex_unlock(&binder_deferred_lock);
6315 if (defer & BINDER_DEFERRED_FLUSH)
6316 binder_deferred_flush(proc);
6318 if (defer & BINDER_DEFERRED_RELEASE)
6319 binder_deferred_release(proc); /* frees proc */
6320 } while (proc);
6321 }
6322 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6325 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6327 mutex_lock(&binder_deferred_lock);
6328 proc->deferred_work |= defer;
6329 if (hlist_unhashed(&proc->deferred_work_node)) {
6330 hlist_add_head(&proc->deferred_work_node,
6331 &binder_deferred_list);
6332 schedule_work(&binder_deferred_work);
6334 mutex_unlock(&binder_deferred_lock);
6337 static void print_binder_transaction_ilocked(struct seq_file *m,
6338 struct binder_proc *proc,
6340 struct binder_transaction *t)
6342 struct binder_proc *to_proc;
6343 struct binder_buffer *buffer = t->buffer;
6344 ktime_t current_time = ktime_get();
6346 spin_lock(&t->lock);
6347 to_proc = t->to_proc;
6348 seq_printf(m,
6349 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6350 prefix, t->debug_id, t,
6353 to_proc ? to_proc->pid : 0,
6354 t->to_thread ? t->to_thread->pid : 0,
6355 t->code, t->flags, t->priority, t->need_reply,
6356 ktime_ms_delta(current_time, t->start_time));
6357 spin_unlock(&t->lock);
6359 if (proc != to_proc) {
6360 /*
6361 * Can only safely deref buffer if we are holding the
6362 * correct proc inner lock for this node
6363 */
6364 seq_puts(m, "\n");
6365 return;
6366 }
6368 if (buffer == NULL) {
6369 seq_puts(m, " buffer free\n");
6372 if (buffer->target_node)
6373 seq_printf(m, " node %d", buffer->target_node->debug_id);
6374 seq_printf(m, " size %zd:%zd offset %lx\n",
6375 buffer->data_size, buffer->offsets_size,
6376 proc->alloc.buffer - buffer->user_data);
6379 static void print_binder_work_ilocked(struct seq_file *m,
6380 struct binder_proc *proc,
6382 const char *transaction_prefix,
6383 struct binder_work *w)
6385 struct binder_node *node;
6386 struct binder_transaction *t;
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	case BINDER_WORK_FROZEN_BINDER:
		seq_printf(m, "%shas frozen binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_FREEZE_NOTIFICATION:
		seq_printf(m, "%shas cleared freeze notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
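/*
 * The m->count rollback above is what keeps the dump compact: if a thread
 * produced nothing beyond its own header line, the seq_file length is
 * wound back so the header disappears as well.
 */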
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = hlist_count_nodes(&node->refs);

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (!print_all && !node->has_async_transaction)
			continue;

		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	list_for_each_entry(w, &proc->delivered_freeze, entry) {
		seq_puts(m, "  has delivered freeze binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
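/*
 * Rough shape of one "proc" section in the resulting dump (all values
 * illustrative, produced by the helpers above):
 *
 *	proc 385
 *	context binder
 *	  thread 385: l 12 need_return 0 tr 0
 *	  node 8: u00000000aaaaaaaa c00000000bbbbbbbb hs 1 hw 1 ls 0 lw 0 is 1 iw 1 tr 1 proc 412
 *	  ref 9: desc 1 node 12 s 1 w 1 d 0000000000000000
 */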
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY",
	"BR_FROZEN_REPLY",
	"BR_ONEWAY_SPAM_SUSPECT",
	"BR_TRANSACTION_PENDING_FROZEN",
	"BR_FROZEN_BINDER",
	"BR_CLEAR_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
	"BC_REQUEST_FREEZE_NOTIFICATION",
	"BC_CLEAR_FREEZE_NOTIFICATION",
	"BC_FREEZE_NOTIFICATION_DONE",
};
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete",
	"freeze",
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
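/*
 * Illustrative excerpt of the resulting "stats" output (counts made up):
 *
 *	BC_TRANSACTION: 380
 *	BC_REPLY: 143
 *	BR_TRANSACTION_COMPLETE: 523
 *	proc: active 4 total 29
 */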
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
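/*
 * Backs the per-pid files under binder/proc/ in debugfs, e.g.
 *
 *	cat /sys/kernel/debug/binder/proc/1234
 *
 * which dumps every binder_proc whose pid matches the filename.
 */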
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
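/*
 * Worked example of the ring arithmetic, assuming a 32-entry log: with
 * log->full set and log_cur == 40, count becomes 32 and cur == 41 % 32
 * == 9, so the loop starts at the oldest slot (index 9, since index 8
 * holds the newest entry) and prints all 32 entries in write order.
 */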
const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(stats);
DEFINE_SHOW_ATTRIBUTE(transactions);
DEFINE_SHOW_ATTRIBUTE(transaction_log);
const struct binder_debugfs_entry binder_debugfs_entries[] = {
	{
		.name = "state",
		.mode = 0444,
		.fops = &state_fops,
		.data = NULL,
	},
	{
		.name = "stats",
		.mode = 0444,
		.fops = &stats_fops,
		.data = NULL,
	},
	{
		.name = "transactions",
		.mode = 0444,
		.fops = &transactions_fops,
		.data = NULL,
	},
	{
		.name = "transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log,
	},
	{
		.name = "failed_transaction_log",
		.mode = 0444,
		.fops = &transaction_log_fops,
		.data = &binder_transaction_log_failed,
	},
	{} /* terminator */
};
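/*
 * With debugfs mounted in the usual place, binder_init() below turns this
 * table into:
 *
 *	/sys/kernel/debug/binder/state
 *	/sys/kernel/debug/binder/stats
 *	/sys/kernel/debug/binder/transactions
 *	/sys/kernel/debug/binder/transaction_log
 *	/sys/kernel/debug/binder/failed_transaction_log
 *	/sys/kernel/debug/binder/proc/	(per-pid entries added at open time)
 */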
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	refcount_set(&binder_device->ref, 1);
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
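/*
 * Example: with CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
 * (and binderfs disabled), binder_init() below calls this once per token,
 * registering three dynamic-minor misc devices (/dev/binder, /dev/hwbinder,
 * /dev/vndbinder), each with its own binder_context.
 */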
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;
	char *device_names = NULL;
	const struct binder_debugfs_entry *db_entry;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);

	binder_for_each_debugfs_entry(db_entry)
		debugfs_create_file(db_entry->name,
				    db_entry->mode,
				    binder_debugfs_dir_entry_root,
				    db_entry->data,
				    db_entry->fops);

	binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
					    binder_debugfs_dir_entry_root);

	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
	    strcmp(binder_devices_param, "") != 0) {
		/*
		 * Copy the module_parameter string, because we don't want to
		 * tokenize it in-place.
		 */
		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
		if (!device_names) {
			ret = -ENOMEM;
			goto err_alloc_device_names_failed;
		}

		device_tmp = device_names;
		while ((device_name = strsep(&device_tmp, ","))) {
			ret = init_binder_device(device_name);
			if (ret)
				goto err_init_binder_device_failed;
		}
	}

	ret = init_binderfs();
	if (ret)
		goto err_init_binder_device_failed;

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
	binder_alloc_shrinker_exit();

	return ret;
}

device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");