1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 /*
19  * Locking overview
20  *
21  * There are 3 main spinlocks which must be acquired in the
22  * order shown:
23  *
24  * 1) proc->outer_lock : protects binder_ref
25  *    binder_proc_lock() and binder_proc_unlock() are
26  *    used to acq/rel.
27  * 2) node->lock : protects most fields of binder_node.
28  *    binder_node_lock() and binder_node_unlock() are
29  *    used to acq/rel
30  * 3) proc->inner_lock : protects the thread and node lists
31  *    (proc->threads, proc->waiting_threads, proc->nodes)
32  *    and all todo lists associated with the binder_proc
33  *    (proc->todo, thread->todo, proc->delivered_death and
34  *    node->async_todo), as well as thread->transaction_stack
35  *    binder_inner_proc_lock() and binder_inner_proc_unlock()
36  *    are used to acq/rel
37  *
38  * Any lock under procA must never be nested under any lock at the same
39  * level or below on procB.
40  *
41  * Functions that require a lock held on entry indicate which lock
42  * in the suffix of the function name:
43  *
44  * foo_olocked() : requires proc->outer_lock
45  * foo_nlocked() : requires node->lock
46  * foo_ilocked() : requires proc->inner_lock
47  * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48  * foo_nilocked(): requires node->lock and proc->inner_lock
49  * ...
50  */
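/*
 * Illustrative sketch of the ordering above (not part of the driver,
 * assuming a caller that already holds valid proc and node pointers):
 *
 *      binder_proc_lock(proc);          1) proc->outer_lock
 *      binder_node_lock(node);          2) node->lock
 *      binder_inner_proc_lock(proc);    3) proc->inner_lock
 *      ...access fields protected by the inner lock...
 *      binder_inner_proc_unlock(proc);
 *      binder_node_unlock(node);
 *      binder_proc_unlock(proc);
 *
 * Releasing in the reverse order keeps the nesting consistent with the
 * numbered list above.
 */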
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <linux/fdtable.h>
55 #include <linux/file.h>
56 #include <linux/freezer.h>
57 #include <linux/fs.h>
58 #include <linux/list.h>
59 #include <linux/miscdevice.h>
60 #include <linux/module.h>
61 #include <linux/mutex.h>
62 #include <linux/nsproxy.h>
63 #include <linux/poll.h>
64 #include <linux/debugfs.h>
65 #include <linux/rbtree.h>
66 #include <linux/sched/signal.h>
67 #include <linux/sched/mm.h>
68 #include <linux/seq_file.h>
69 #include <linux/uaccess.h>
70 #include <linux/pid_namespace.h>
71 #include <linux/security.h>
72 #include <linux/spinlock.h>
73 #include <linux/ratelimit.h>
74 #include <linux/syscalls.h>
75 #include <linux/task_work.h>
76
77 #include <uapi/linux/android/binder.h>
78
79 #include <asm/cacheflush.h>
80
81 #include "binder_alloc.h"
82 #include "binder_trace.h"
83
84 static HLIST_HEAD(binder_deferred_list);
85 static DEFINE_MUTEX(binder_deferred_lock);
86
87 static HLIST_HEAD(binder_devices);
88 static HLIST_HEAD(binder_procs);
89 static DEFINE_MUTEX(binder_procs_lock);
90
91 static HLIST_HEAD(binder_dead_nodes);
92 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
93
94 static struct dentry *binder_debugfs_dir_entry_root;
95 static struct dentry *binder_debugfs_dir_entry_proc;
96 static atomic_t binder_last_id;
97
98 static int proc_show(struct seq_file *m, void *unused);
99 DEFINE_SHOW_ATTRIBUTE(proc);
100
101 /* This is only defined in include/asm-arm/sizes.h */
102 #ifndef SZ_1K
103 #define SZ_1K                               0x400
104 #endif
105
106 #ifndef SZ_4M
107 #define SZ_4M                               0x400000
108 #endif
109
110 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
111
112 enum {
113         BINDER_DEBUG_USER_ERROR             = 1U << 0,
114         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
115         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
116         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
117         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
118         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
119         BINDER_DEBUG_READ_WRITE             = 1U << 6,
120         BINDER_DEBUG_USER_REFS              = 1U << 7,
121         BINDER_DEBUG_THREADS                = 1U << 8,
122         BINDER_DEBUG_TRANSACTION            = 1U << 9,
123         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
124         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
125         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
126         BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
127         BINDER_DEBUG_SPINLOCKS              = 1U << 14,
128 };
129 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
130         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
131 module_param_named(debug_mask, binder_debug_mask, uint, 0644);
132
133 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
134 module_param_named(devices, binder_devices_param, charp, 0444);
135
136 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
137 static int binder_stop_on_user_error;
138
139 static int binder_set_stop_on_user_error(const char *val,
140                                          const struct kernel_param *kp)
141 {
142         int ret;
143
144         ret = param_set_int(val, kp);
145         if (binder_stop_on_user_error < 2)
146                 wake_up(&binder_user_error_wait);
147         return ret;
148 }
149 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
150         param_get_int, &binder_stop_on_user_error, 0644);
151
152 #define binder_debug(mask, x...) \
153         do { \
154                 if (binder_debug_mask & mask) \
155                         pr_info_ratelimited(x); \
156         } while (0)
157
158 #define binder_user_error(x...) \
159         do { \
160                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
161                         pr_info_ratelimited(x); \
162                 if (binder_stop_on_user_error) \
163                         binder_stop_on_user_error = 2; \
164         } while (0)
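/*
 * Typical call sites for the two macros above (sketch, mirroring how
 * they are used further down in this file):
 *
 *      binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 *                   "%d: binder_open\n", current->pid);
 *
 *      binder_user_error("%d: transaction with invalid offset\n",
 *                        current->pid);
 *
 * Output is rate limited and only produced when the matching bit is set
 * in the debug_mask module parameter (writable at run time via
 * /sys/module/binder/parameters/debug_mask).
 */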
165
166 #define to_flat_binder_object(hdr) \
167         container_of(hdr, struct flat_binder_object, hdr)
168
169 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
170
171 #define to_binder_buffer_object(hdr) \
172         container_of(hdr, struct binder_buffer_object, hdr)
173
174 #define to_binder_fd_array_object(hdr) \
175         container_of(hdr, struct binder_fd_array_object, hdr)
176
177 enum binder_stat_types {
178         BINDER_STAT_PROC,
179         BINDER_STAT_THREAD,
180         BINDER_STAT_NODE,
181         BINDER_STAT_REF,
182         BINDER_STAT_DEATH,
183         BINDER_STAT_TRANSACTION,
184         BINDER_STAT_TRANSACTION_COMPLETE,
185         BINDER_STAT_COUNT
186 };
187
188 struct binder_stats {
189         atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
190         atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
191         atomic_t obj_created[BINDER_STAT_COUNT];
192         atomic_t obj_deleted[BINDER_STAT_COUNT];
193 };
194
195 static struct binder_stats binder_stats;
196
197 static inline void binder_stats_deleted(enum binder_stat_types type)
198 {
199         atomic_inc(&binder_stats.obj_deleted[type]);
200 }
201
202 static inline void binder_stats_created(enum binder_stat_types type)
203 {
204         atomic_inc(&binder_stats.obj_created[type]);
205 }
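/*
 * The br[] and bc[] counters in struct binder_stats are indexed by
 * _IOC_NR() of the BR_ and BC_ command codes. A sketch of how the
 * write path accounts a user command (the read path does the same
 * for br[]):
 *
 *      if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
 *              atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
 *              atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
 *              atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
 *      }
 */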
206
207 struct binder_transaction_log_entry {
208         int debug_id;
209         int debug_id_done;
210         int call_type;
211         int from_proc;
212         int from_thread;
213         int target_handle;
214         int to_proc;
215         int to_thread;
216         int to_node;
217         int data_size;
218         int offsets_size;
219         int return_error_line;
220         uint32_t return_error;
221         uint32_t return_error_param;
222         const char *context_name;
223 };
224 struct binder_transaction_log {
225         atomic_t cur;
226         bool full;
227         struct binder_transaction_log_entry entry[32];
228 };
229 static struct binder_transaction_log binder_transaction_log;
230 static struct binder_transaction_log binder_transaction_log_failed;
231
232 static struct binder_transaction_log_entry *binder_transaction_log_add(
233         struct binder_transaction_log *log)
234 {
235         struct binder_transaction_log_entry *e;
236         unsigned int cur = atomic_inc_return(&log->cur);
237
238         if (cur >= ARRAY_SIZE(log->entry))
239                 log->full = true;
240         e = &log->entry[cur % ARRAY_SIZE(log->entry)];
241         WRITE_ONCE(e->debug_id_done, 0);
242         /*
243          * write-barrier to synchronize access to e->debug_id_done.
244          * We make sure the initialized 0 value is seen before
245          * the other fields are zeroed by memset().
246          */
247         smp_wmb();
248         memset(e, 0, sizeof(*e));
249         return e;
250 }
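/*
 * The smp_wmb() above pairs with an smp_rmb() on the reader side (the
 * debugfs dump of the transaction log), which samples debug_id_done
 * before the rest of the entry so that half-written entries can be
 * detected. A sketch of that pairing:
 *
 *      int debug_id = READ_ONCE(e->debug_id_done);
 *
 *      smp_rmb();      read barrier: debug_id_done is read before
 *                      the other fields
 *      ...print the entry...
 *      ...mark the entry as incomplete if debug_id is 0 or no longer
 *         matches e->debug_id_done...
 */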
251
252 struct binder_context {
253         struct binder_node *binder_context_mgr_node;
254         struct mutex context_mgr_node_lock;
255
256         kuid_t binder_context_mgr_uid;
257         const char *name;
258 };
259
260 struct binder_device {
261         struct hlist_node hlist;
262         struct miscdevice miscdev;
263         struct binder_context context;
264 };
265
266 /**
267  * struct binder_work - work enqueued on a worklist
268  * @entry:             node enqueued on list
269  * @type:              type of work to be performed
270  *
271  * There are separate work lists for proc, thread, and node (async).
272  */
273 struct binder_work {
274         struct list_head entry;
275
276         enum {
277                 BINDER_WORK_TRANSACTION = 1,
278                 BINDER_WORK_TRANSACTION_COMPLETE,
279                 BINDER_WORK_RETURN_ERROR,
280                 BINDER_WORK_NODE,
281                 BINDER_WORK_DEAD_BINDER,
282                 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
283                 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
284         } type;
285 };
286
287 struct binder_error {
288         struct binder_work work;
289         uint32_t cmd;
290 };
291
292 /**
293  * struct binder_node - binder node bookkeeping
294  * @debug_id:             unique ID for debugging
295  *                        (invariant after initialized)
296  * @lock:                 lock for node fields
297  * @work:                 worklist element for node work
298  *                        (protected by @proc->inner_lock)
299  * @rb_node:              element for proc->nodes tree
300  *                        (protected by @proc->inner_lock)
301  * @dead_node:            element for binder_dead_nodes list
302  *                        (protected by binder_dead_nodes_lock)
303  * @proc:                 binder_proc that owns this node
304  *                        (invariant after initialized)
305  * @refs:                 list of references on this node
306  *                        (protected by @lock)
307  * @internal_strong_refs: used to take strong references when
308  *                        initiating a transaction
309  *                        (protected by @proc->inner_lock if @proc
310  *                        and by @lock)
311  * @local_weak_refs:      weak user refs from local process
312  *                        (protected by @proc->inner_lock if @proc
313  *                        and by @lock)
314  * @local_strong_refs:    strong user refs from local process
315  *                        (protected by @proc->inner_lock if @proc
316  *                        and by @lock)
317  * @tmp_refs:             temporary kernel refs
318  *                        (protected by @proc->inner_lock while @proc
319  *                        is valid, and by binder_dead_nodes_lock
320  *                        if @proc is NULL. During inc/dec and node release
321  *                        it is also protected by @lock to provide safety
322  *                        as the node dies and @proc becomes NULL)
323  * @ptr:                  userspace pointer for node
324  *                        (invariant, no lock needed)
325  * @cookie:               userspace cookie for node
326  *                        (invariant, no lock needed)
327  * @has_strong_ref:       userspace notified of strong ref
328  *                        (protected by @proc->inner_lock if @proc
329  *                        and by @lock)
330  * @pending_strong_ref:   userspace has acked notification of strong ref
331  *                        (protected by @proc->inner_lock if @proc
332  *                        and by @lock)
333  * @has_weak_ref:         userspace notified of weak ref
334  *                        (protected by @proc->inner_lock if @proc
335  *                        and by @lock)
336  * @pending_weak_ref:     userspace has acked notification of weak ref
337  *                        (protected by @proc->inner_lock if @proc
338  *                        and by @lock)
339  * @has_async_transaction: async transaction to node in progress
340  *                        (protected by @lock)
341  * @accept_fds:           file descriptor operations supported for node
342  *                        (invariant after initialized)
343  * @min_priority:         minimum scheduling priority
344  *                        (invariant after initialized)
345  * @async_todo:           list of async work items
346  *                        (protected by @proc->inner_lock)
347  *
348  * Bookkeeping structure for binder nodes.
349  */
350 struct binder_node {
351         int debug_id;
352         spinlock_t lock;
353         struct binder_work work;
354         union {
355                 struct rb_node rb_node;
356                 struct hlist_node dead_node;
357         };
358         struct binder_proc *proc;
359         struct hlist_head refs;
360         int internal_strong_refs;
361         int local_weak_refs;
362         int local_strong_refs;
363         int tmp_refs;
364         binder_uintptr_t ptr;
365         binder_uintptr_t cookie;
366         struct {
367                 /*
368                  * bitfield elements protected by
369                  * proc inner_lock
370                  */
371                 u8 has_strong_ref:1;
372                 u8 pending_strong_ref:1;
373                 u8 has_weak_ref:1;
374                 u8 pending_weak_ref:1;
375         };
376         struct {
377                 /*
378                  * invariant after initialization
379                  */
380                 u8 accept_fds:1;
381                 u8 min_priority;
382         };
383         bool has_async_transaction;
384         struct list_head async_todo;
385 };
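/*
 * Many of the fields above are protected by proc->inner_lock while the
 * node is attached to a proc, and by node->lock (plus
 * binder_dead_nodes_lock for tmp_refs) once it is dead. A sketch of
 * the conditional locking used elsewhere in this file when touching
 * such fields:
 *
 *      binder_node_lock(node);
 *      if (node->proc)
 *              binder_inner_proc_lock(node->proc);
 *      ...update refcount fields...
 *      if (node->proc)
 *              binder_inner_proc_unlock(node->proc);
 *      binder_node_unlock(node);
 */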
386
387 struct binder_ref_death {
388         /**
389          * @work: worklist element for death notifications
390          *        (protected by inner_lock of the proc that
391          *        this ref belongs to)
392          */
393         struct binder_work work;
394         binder_uintptr_t cookie;
395 };
396
397 /**
398  * struct binder_ref_data - binder_ref counts and id
399  * @debug_id:        unique ID for the ref
400  * @desc:            unique userspace handle for ref
401  * @strong:          strong ref count (debugging only if not locked)
402  * @weak:            weak ref count (debugging only if not locked)
403  *
404  * Structure to hold ref count and ref id information. Since
405  * the actual ref can only be accessed with a lock, this structure
406  * is used to return information about the ref to callers of
407  * ref inc/dec functions.
408  */
409 struct binder_ref_data {
410         int debug_id;
411         uint32_t desc;
412         int strong;
413         int weak;
414 };
415
416 /**
417  * struct binder_ref - struct to track references on nodes
418  * @data:        binder_ref_data containing id, handle, and current refcounts
419  * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
420  * @rb_node_node: node for lookup by @node in proc's rb_tree
421  * @node_entry:  list entry for node->refs list in target node
422  *               (protected by @node->lock)
423  * @proc:        binder_proc containing ref
424  * @node:        binder_node of target node. When cleaning up a
425  *               ref for deletion in binder_cleanup_ref, a non-NULL
426  *               @node indicates the node must be freed
427  * @death:       pointer to death notification (ref_death) if requested
428  *               (protected by @node->lock)
429  *
430  * Structure to track references from procA to target node (on procB). This
431  * structure is unsafe to access without holding @proc->outer_lock.
432  */
433 struct binder_ref {
434         /* Lookups needed: */
435         /*   node + proc => ref (transaction) */
436         /*   desc + proc => ref (transaction, inc/dec ref) */
437         /*   node => refs + procs (proc exit) */
438         struct binder_ref_data data;
439         struct rb_node rb_node_desc;
440         struct rb_node rb_node_node;
441         struct hlist_node node_entry;
442         struct binder_proc *proc;
443         struct binder_node *node;
444         struct binder_ref_death *death;
445 };
446
447 enum binder_deferred_state {
448         BINDER_DEFERRED_FLUSH        = 0x01,
449         BINDER_DEFERRED_RELEASE      = 0x02,
450 };
451
452 /**
453  * struct binder_proc - binder process bookkeeping
454  * @proc_node:            element for binder_procs list
455  * @threads:              rbtree of binder_threads in this proc
456  *                        (protected by @inner_lock)
457  * @nodes:                rbtree of binder nodes associated with
458  *                        this proc ordered by node->ptr
459  *                        (protected by @inner_lock)
460  * @refs_by_desc:         rbtree of refs ordered by ref->desc
461  *                        (protected by @outer_lock)
462  * @refs_by_node:         rbtree of refs ordered by ref->node
463  *                        (protected by @outer_lock)
464  * @waiting_threads:      threads currently waiting for proc work
465  *                        (protected by @inner_lock)
466  * @pid:                  PID of group_leader of process
467  *                        (invariant after initialized)
468  * @tsk:                  task_struct for group_leader of process
469  *                        (invariant after initialized)
470  * @deferred_work_node:   element for binder_deferred_list
471  *                        (protected by binder_deferred_lock)
472  * @deferred_work:        bitmap of deferred work to perform
473  *                        (protected by binder_deferred_lock)
474  * @is_dead:              process is dead and awaiting free
475  *                        when outstanding transactions are cleaned up
476  *                        (protected by @inner_lock)
477  * @todo:                 list of work for this process
478  *                        (protected by @inner_lock)
479  * @stats:                per-process binder statistics
480  *                        (atomics, no lock needed)
481  * @delivered_death:      list of delivered death notifications
482  *                        (protected by @inner_lock)
483  * @max_threads:          cap on number of binder threads
484  *                        (protected by @inner_lock)
485  * @requested_threads:    number of binder threads requested but not
486  *                        yet started. In current implementation, can
487  *                        only be 0 or 1.
488  *                        (protected by @inner_lock)
489  * @requested_threads_started: number of binder threads started
490  *                        (protected by @inner_lock)
491  * @tmp_ref:              temporary reference to indicate proc is in use
492  *                        (protected by @inner_lock)
493  * @default_priority:     default scheduler priority
494  *                        (invariant after initialized)
495  * @debugfs_entry:        debugfs node
496  * @alloc:                binder allocator bookkeeping
497  * @context:              binder_context for this proc
498  *                        (invariant after initialized)
499  * @inner_lock:           can nest under outer_lock and/or node lock
500  * @outer_lock:           no nesting under inner or node lock
501  *                        Lock order: 1) outer, 2) node, 3) inner
502  *
503  * Bookkeeping structure for binder processes
504  */
505 struct binder_proc {
506         struct hlist_node proc_node;
507         struct rb_root threads;
508         struct rb_root nodes;
509         struct rb_root refs_by_desc;
510         struct rb_root refs_by_node;
511         struct list_head waiting_threads;
512         int pid;
513         struct task_struct *tsk;
514         struct hlist_node deferred_work_node;
515         int deferred_work;
516         bool is_dead;
517
518         struct list_head todo;
519         struct binder_stats stats;
520         struct list_head delivered_death;
521         int max_threads;
522         int requested_threads;
523         int requested_threads_started;
524         int tmp_ref;
525         long default_priority;
526         struct dentry *debugfs_entry;
527         struct binder_alloc alloc;
528         struct binder_context *context;
529         spinlock_t inner_lock;
530         spinlock_t outer_lock;
531 };
532
533 enum {
534         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
535         BINDER_LOOPER_STATE_ENTERED     = 0x02,
536         BINDER_LOOPER_STATE_EXITED      = 0x04,
537         BINDER_LOOPER_STATE_INVALID     = 0x08,
538         BINDER_LOOPER_STATE_WAITING     = 0x10,
539         BINDER_LOOPER_STATE_POLL        = 0x20,
540 };
541
542 /**
543  * struct binder_thread - binder thread bookkeeping
544  * @proc:                 binder process for this thread
545  *                        (invariant after initialization)
546  * @rb_node:              element for proc->threads rbtree
547  *                        (protected by @proc->inner_lock)
548  * @waiting_thread_node:  element for @proc->waiting_threads list
549  *                        (protected by @proc->inner_lock)
550  * @pid:                  PID for this thread
551  *                        (invariant after initialization)
552  * @looper:               bitmap of looping state
553  *                        (only accessed by this thread)
554  * @looper_need_return:   looping thread needs to exit driver
555  *                        (no lock needed)
556  * @transaction_stack:    stack of in-progress transactions for this thread
557  *                        (protected by @proc->inner_lock)
558  * @todo:                 list of work to do for this thread
559  *                        (protected by @proc->inner_lock)
560  * @process_todo:         whether work in @todo should be processed
561  *                        (protected by @proc->inner_lock)
562  * @return_error:         transaction errors reported by this thread
563  *                        (only accessed by this thread)
564  * @reply_error:          transaction errors reported by target thread
565  *                        (protected by @proc->inner_lock)
566  * @wait:                 wait queue for thread work
567  * @stats:                per-thread statistics
568  *                        (atomics, no lock needed)
569  * @tmp_ref:              temporary reference to indicate thread is in use
570  *                        (atomic since @proc->inner_lock cannot
571  *                        always be acquired)
572  * @is_dead:              thread is dead and awaiting free
573  *                        when outstanding transactions are cleaned up
574  *                        (protected by @proc->inner_lock)
575  *
576  * Bookkeeping structure for binder threads.
577  */
578 struct binder_thread {
579         struct binder_proc *proc;
580         struct rb_node rb_node;
581         struct list_head waiting_thread_node;
582         int pid;
583         int looper;              /* only modified by this thread */
584         bool looper_need_return; /* can be written by other thread */
585         struct binder_transaction *transaction_stack;
586         struct list_head todo;
587         bool process_todo;
588         struct binder_error return_error;
589         struct binder_error reply_error;
590         wait_queue_head_t wait;
591         struct binder_stats stats;
592         atomic_t tmp_ref;
593         bool is_dead;
594 };
595
596 /**
597  * struct binder_txn_fd_fixup - transaction fd fixup list element
598  * @fixup_entry:          list entry
599  * @file:                 struct file to be associated with new fd
600  * @offset:               offset in buffer data to this fixup
601  *
602  * List element for fd fixups in a transaction. Since file
603  * descriptors need to be allocated in the context of the
604  * target process, we pass each fd to be processed in this
605  * struct.
606  */
607 struct binder_txn_fd_fixup {
608         struct list_head fixup_entry;
609         struct file *file;
610         size_t offset;
611 };
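/*
 * Sketch of the fd fixup flow (see binder_translate_fd() and
 * binder_apply_fd_fixups() later in this file): the sending side only
 * records the struct file and the buffer offset,
 *
 *      fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
 *      fixup->file = file;
 *      fixup->offset = fd_offset;
 *      list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
 *
 * and the target process later installs the descriptor in its own
 * context, roughly:
 *
 *      fd = get_unused_fd_flags(O_CLOEXEC);
 *      fd_install(fd, fixup->file);
 *
 * so fd allocation never happens on behalf of another task.
 */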
612
613 struct binder_transaction {
614         int debug_id;
615         struct binder_work work;
616         struct binder_thread *from;
617         struct binder_transaction *from_parent;
618         struct binder_proc *to_proc;
619         struct binder_thread *to_thread;
620         struct binder_transaction *to_parent;
621         unsigned need_reply:1;
622         /* unsigned is_dead:1; */       /* not used at the moment */
623
624         struct binder_buffer *buffer;
625         unsigned int    code;
626         unsigned int    flags;
627         long    priority;
628         long    saved_priority;
629         kuid_t  sender_euid;
630         struct list_head fd_fixups;
631         /**
632          * @lock:  protects @from, @to_proc, and @to_thread
633          *
634          * @from, @to_proc, and @to_thread can be set to NULL
635          * during thread teardown
636          */
637         spinlock_t lock;
638 };
639
640 /**
641  * binder_proc_lock() - Acquire outer lock for given binder_proc
642  * @proc:         struct binder_proc to acquire
643  *
644  * Acquires proc->outer_lock. Used to protect binder_ref
645  * structures associated with the given proc.
646  */
647 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
648 static void
649 _binder_proc_lock(struct binder_proc *proc, int line)
650         __acquires(&proc->outer_lock)
651 {
652         binder_debug(BINDER_DEBUG_SPINLOCKS,
653                      "%s: line=%d\n", __func__, line);
654         spin_lock(&proc->outer_lock);
655 }
656
657 /**
658  * binder_proc_unlock() - Release spinlock for given binder_proc
659  * @proc:         struct binder_proc to acquire
660  *
661  * Release lock acquired via binder_proc_lock()
662  */
663 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
664 static void
665 _binder_proc_unlock(struct binder_proc *proc, int line)
666         __releases(&proc->outer_lock)
667 {
668         binder_debug(BINDER_DEBUG_SPINLOCKS,
669                      "%s: line=%d\n", __func__, line);
670         spin_unlock(&proc->outer_lock);
671 }
672
673 /**
674  * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
675  * @proc:         struct binder_proc to acquire
676  *
677  * Acquires proc->inner_lock. Used to protect todo lists
678  */
679 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
680 static void
681 _binder_inner_proc_lock(struct binder_proc *proc, int line)
682         __acquires(&proc->inner_lock)
683 {
684         binder_debug(BINDER_DEBUG_SPINLOCKS,
685                      "%s: line=%d\n", __func__, line);
686         spin_lock(&proc->inner_lock);
687 }
688
689 /**
690  * binder_inner_proc_unlock() - Release inner lock for given binder_proc
691  * @proc:         struct binder_proc to acquire
692  *
693  * Release lock acquired via binder_inner_proc_lock()
694  */
695 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
696 static void
697 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
698         __releases(&proc->inner_lock)
699 {
700         binder_debug(BINDER_DEBUG_SPINLOCKS,
701                      "%s: line=%d\n", __func__, line);
702         spin_unlock(&proc->inner_lock);
703 }
704
705 /**
706  * binder_node_lock() - Acquire spinlock for given binder_node
707  * @node:         struct binder_node to acquire
708  *
709  * Acquires node->lock. Used to protect binder_node fields
710  */
711 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
712 static void
713 _binder_node_lock(struct binder_node *node, int line)
714         __acquires(&node->lock)
715 {
716         binder_debug(BINDER_DEBUG_SPINLOCKS,
717                      "%s: line=%d\n", __func__, line);
718         spin_lock(&node->lock);
719 }
720
721 /**
722  * binder_node_unlock() - Release spinlock for given binder_node
723  * @node:         struct binder_node to acquire
724  *
725  * Release lock acquired via binder_node_lock()
726  */
727 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
728 static void
729 _binder_node_unlock(struct binder_node *node, int line)
730         __releases(&node->lock)
731 {
732         binder_debug(BINDER_DEBUG_SPINLOCKS,
733                      "%s: line=%d\n", __func__, line);
734         spin_unlock(&node->lock);
735 }
736
737 /**
738  * binder_node_inner_lock() - Acquire node and inner locks
739  * @node:         struct binder_node to acquire
740  *
741  * Acquires node->lock. If node->proc is non-NULL, also acquires
742  * proc->inner_lock. Used to protect binder_node fields
743  */
744 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
745 static void
746 _binder_node_inner_lock(struct binder_node *node, int line)
747         __acquires(&node->lock) __acquires(&node->proc->inner_lock)
748 {
749         binder_debug(BINDER_DEBUG_SPINLOCKS,
750                      "%s: line=%d\n", __func__, line);
751         spin_lock(&node->lock);
752         if (node->proc)
753                 binder_inner_proc_lock(node->proc);
754         else
755                 /* annotation for sparse */
756                 __acquire(&node->proc->inner_lock);
757 }
758
759 /**
760  * binder_node_inner_unlock() - Release node and inner locks
761  * @node:         struct binder_node to acquire
762  *
763  * Release locks acquired via binder_node_inner_lock()
764  */
765 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
766 static void
767 _binder_node_inner_unlock(struct binder_node *node, int line)
768         __releases(&node->lock) __releases(&node->proc->inner_lock)
769 {
770         struct binder_proc *proc = node->proc;
771
772         binder_debug(BINDER_DEBUG_SPINLOCKS,
773                      "%s: line=%d\n", __func__, line);
774         if (proc)
775                 binder_inner_proc_unlock(proc);
776         else
777                 /* annotation for sparse */
778                 __release(&node->proc->inner_lock);
779         spin_unlock(&node->lock);
780 }
781
782 static bool binder_worklist_empty_ilocked(struct list_head *list)
783 {
784         return list_empty(list);
785 }
786
787 /**
788  * binder_worklist_empty() - Check if no items on the work list
789  * @proc:       binder_proc associated with list
790  * @list:       list to check
791  *
792  * Return: true if there are no items on list, else false
793  */
794 static bool binder_worklist_empty(struct binder_proc *proc,
795                                   struct list_head *list)
796 {
797         bool ret;
798
799         binder_inner_proc_lock(proc);
800         ret = binder_worklist_empty_ilocked(list);
801         binder_inner_proc_unlock(proc);
802         return ret;
803 }
804
805 /**
806  * binder_enqueue_work_ilocked() - Add an item to the work list
807  * @work:         struct binder_work to add to list
808  * @target_list:  list to add work to
809  *
810  * Adds the work to the specified list. Asserts that work
811  * is not already on a list.
812  *
813  * Requires the proc->inner_lock to be held.
814  */
815 static void
816 binder_enqueue_work_ilocked(struct binder_work *work,
817                            struct list_head *target_list)
818 {
819         BUG_ON(target_list == NULL);
820         BUG_ON(work->entry.next && !list_empty(&work->entry));
821         list_add_tail(&work->entry, target_list);
822 }
823
824 /**
825  * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
826  * @thread:       thread to queue work to
827  * @work:         struct binder_work to add to list
828  *
829  * Adds the work to the todo list of the thread. Doesn't set the process_todo
830  * flag, which means that (if it wasn't already set) the thread will go to
831  * sleep without handling this work when it calls read.
832  *
833  * Requires the proc->inner_lock to be held.
834  */
835 static void
836 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
837                                             struct binder_work *work)
838 {
839         WARN_ON(!list_empty(&thread->waiting_thread_node));
840         binder_enqueue_work_ilocked(work, &thread->todo);
841 }
842
843 /**
844  * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
845  * @thread:       thread to queue work to
846  * @work:         struct binder_work to add to list
847  *
848  * Adds the work to the todo list of the thread, and enables processing
849  * of the todo queue.
850  *
851  * Requires the proc->inner_lock to be held.
852  */
853 static void
854 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
855                                    struct binder_work *work)
856 {
857         WARN_ON(!list_empty(&thread->waiting_thread_node));
858         binder_enqueue_work_ilocked(work, &thread->todo);
859         thread->process_todo = true;
860 }
861
862 /**
863  * binder_enqueue_thread_work() - Add an item to the thread work list
864  * @thread:       thread to queue work to
865  * @work:         struct binder_work to add to list
866  *
867  * Adds the work to the todo list of the thread, and enables processing
868  * of the todo queue.
869  */
870 static void
871 binder_enqueue_thread_work(struct binder_thread *thread,
872                            struct binder_work *work)
873 {
874         binder_inner_proc_lock(thread->proc);
875         binder_enqueue_thread_work_ilocked(thread, work);
876         binder_inner_proc_unlock(thread->proc);
877 }
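/*
 * Typical usage (sketch): a sender queues its own
 * BINDER_WORK_TRANSACTION_COMPLETE entry with
 *
 *      binder_enqueue_thread_work(thread, tcomplete);
 *
 * while work aimed at another process is queued under that process'
 * inner lock and followed by an explicit wakeup, e.g.
 *
 *      binder_inner_proc_lock(proc);
 *      binder_enqueue_work_ilocked(&t->work, &proc->todo);
 *      binder_wakeup_proc_ilocked(proc);
 *      binder_inner_proc_unlock(proc);
 */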
878
879 static void
880 binder_dequeue_work_ilocked(struct binder_work *work)
881 {
882         list_del_init(&work->entry);
883 }
884
885 /**
886  * binder_dequeue_work() - Removes an item from the work list
887  * @proc:         binder_proc associated with list
888  * @work:         struct binder_work to remove from list
889  *
890  * Removes the specified work item from whatever list it is on.
891  * Can safely be called if work is not on any list.
892  */
893 static void
894 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
895 {
896         binder_inner_proc_lock(proc);
897         binder_dequeue_work_ilocked(work);
898         binder_inner_proc_unlock(proc);
899 }
900
901 static struct binder_work *binder_dequeue_work_head_ilocked(
902                                         struct list_head *list)
903 {
904         struct binder_work *w;
905
906         w = list_first_entry_or_null(list, struct binder_work, entry);
907         if (w)
908                 list_del_init(&w->entry);
909         return w;
910 }
911
912 /**
913  * binder_dequeue_work_head() - Dequeues the item at head of list
914  * @proc:         binder_proc associated with list
915  * @list:         list to dequeue head
916  *
917  * Removes the head of the list if there are items on the list
918  *
919  * Return: pointer to dequeued binder_work, NULL if list was empty
920  */
921 static struct binder_work *binder_dequeue_work_head(
922                                         struct binder_proc *proc,
923                                         struct list_head *list)
924 {
925         struct binder_work *w;
926
927         binder_inner_proc_lock(proc);
928         w = binder_dequeue_work_head_ilocked(list);
929         binder_inner_proc_unlock(proc);
930         return w;
931 }
932
933 static void
934 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
935 static void binder_free_thread(struct binder_thread *thread);
936 static void binder_free_proc(struct binder_proc *proc);
937 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
938
939 static bool binder_has_work_ilocked(struct binder_thread *thread,
940                                     bool do_proc_work)
941 {
942         return thread->process_todo ||
943                 thread->looper_need_return ||
944                 (do_proc_work &&
945                  !binder_worklist_empty_ilocked(&thread->proc->todo));
946 }
947
948 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
949 {
950         bool has_work;
951
952         binder_inner_proc_lock(thread->proc);
953         has_work = binder_has_work_ilocked(thread, do_proc_work);
954         binder_inner_proc_unlock(thread->proc);
955
956         return has_work;
957 }
958
959 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
960 {
961         return !thread->transaction_stack &&
962                 binder_worklist_empty_ilocked(&thread->todo) &&
963                 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
964                                    BINDER_LOOPER_STATE_REGISTERED));
965 }
966
967 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
968                                                bool sync)
969 {
970         struct rb_node *n;
971         struct binder_thread *thread;
972
973         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
974                 thread = rb_entry(n, struct binder_thread, rb_node);
975                 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
976                     binder_available_for_proc_work_ilocked(thread)) {
977                         if (sync)
978                                 wake_up_interruptible_sync(&thread->wait);
979                         else
980                                 wake_up_interruptible(&thread->wait);
981                 }
982         }
983 }
984
985 /**
986  * binder_select_thread_ilocked() - selects a thread for doing proc work.
987  * @proc:       process to select a thread from
988  *
989  * Note that calling this function moves the thread off the waiting_threads
990  * list, so it can only be woken up by the caller of this function, or a
991  * signal. Therefore, callers *should* always wake up the thread this function
992  * returns.
993  *
994  * Return:      If there's a thread currently waiting for process work,
995  *              returns that thread. Otherwise returns NULL.
996  */
997 static struct binder_thread *
998 binder_select_thread_ilocked(struct binder_proc *proc)
999 {
1000         struct binder_thread *thread;
1001
1002         assert_spin_locked(&proc->inner_lock);
1003         thread = list_first_entry_or_null(&proc->waiting_threads,
1004                                           struct binder_thread,
1005                                           waiting_thread_node);
1006
1007         if (thread)
1008                 list_del_init(&thread->waiting_thread_node);
1009
1010         return thread;
1011 }
1012
1013 /**
1014  * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1015  * @proc:       process to wake up a thread in
1016  * @thread:     specific thread to wake-up (may be NULL)
1017  * @sync:       whether to do a synchronous wake-up
1018  *
1019  * This function wakes up a thread in the @proc process.
1020  * The caller may provide a specific thread to wake-up in
1021  * the @thread parameter. If @thread is NULL, this function
1022  * will wake up threads that have called poll().
1023  *
1024  * Note that for this function to work as expected, callers
1025  * should first call binder_select_thread() to find a thread
1026  * to handle the work (if they don't have a thread already),
1027  * and pass the result into the @thread parameter.
1028  */
1029 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1030                                          struct binder_thread *thread,
1031                                          bool sync)
1032 {
1033         assert_spin_locked(&proc->inner_lock);
1034
1035         if (thread) {
1036                 if (sync)
1037                         wake_up_interruptible_sync(&thread->wait);
1038                 else
1039                         wake_up_interruptible(&thread->wait);
1040                 return;
1041         }
1042
1043         /* Didn't find a thread waiting for proc work; this can happen
1044          * in two scenarios:
1045          * 1. All threads are busy handling transactions
1046          *    In that case, one of those threads should call back into
1047          *    the kernel driver soon and pick up this work.
1048          * 2. Threads are using the (e)poll interface, in which case
1049          *    they may be blocked on the waitqueue without having been
1050          *    added to waiting_threads. For this case, we just iterate
1051          *    over all threads not handling transaction work, and
1052          *    wake them all up. We wake all because we don't know whether
1053          *    a thread that called into (e)poll is handling non-binder
1054          *    work currently.
1055          */
1056         binder_wakeup_poll_threads_ilocked(proc, sync);
1057 }
1058
1059 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1060 {
1061         struct binder_thread *thread = binder_select_thread_ilocked(proc);
1062
1063         binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1064 }
1065
1066 static void binder_set_nice(long nice)
1067 {
1068         long min_nice;
1069
1070         if (can_nice(current, nice)) {
1071                 set_user_nice(current, nice);
1072                 return;
1073         }
1074         min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1075         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1076                      "%d: nice value %ld not allowed use %ld instead\n",
1077                       current->pid, nice, min_nice);
1078         set_user_nice(current, min_nice);
1079         if (min_nice <= MAX_NICE)
1080                 return;
1081         binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1082 }
1083
1084 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1085                                                    binder_uintptr_t ptr)
1086 {
1087         struct rb_node *n = proc->nodes.rb_node;
1088         struct binder_node *node;
1089
1090         assert_spin_locked(&proc->inner_lock);
1091
1092         while (n) {
1093                 node = rb_entry(n, struct binder_node, rb_node);
1094
1095                 if (ptr < node->ptr)
1096                         n = n->rb_left;
1097                 else if (ptr > node->ptr)
1098                         n = n->rb_right;
1099                 else {
1100                         /*
1101                          * take an implicit weak reference
1102                          * to ensure node stays alive until
1103                          * call to binder_put_node()
1104                          */
1105                         binder_inc_node_tmpref_ilocked(node);
1106                         return node;
1107                 }
1108         }
1109         return NULL;
1110 }
1111
1112 static struct binder_node *binder_get_node(struct binder_proc *proc,
1113                                            binder_uintptr_t ptr)
1114 {
1115         struct binder_node *node;
1116
1117         binder_inner_proc_lock(proc);
1118         node = binder_get_node_ilocked(proc, ptr);
1119         binder_inner_proc_unlock(proc);
1120         return node;
1121 }
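/*
 * Lookups via binder_get_node() take a tmp_ref on the returned node
 * (see binder_get_node_ilocked() above), so callers pair them with
 * binder_put_node(), defined further below. Sketch:
 *
 *      node = binder_get_node(proc, ptr);
 *      if (node) {
 *              ...use node...
 *              binder_put_node(node);
 *      }
 */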
1122
1123 static struct binder_node *binder_init_node_ilocked(
1124                                                 struct binder_proc *proc,
1125                                                 struct binder_node *new_node,
1126                                                 struct flat_binder_object *fp)
1127 {
1128         struct rb_node **p = &proc->nodes.rb_node;
1129         struct rb_node *parent = NULL;
1130         struct binder_node *node;
1131         binder_uintptr_t ptr = fp ? fp->binder : 0;
1132         binder_uintptr_t cookie = fp ? fp->cookie : 0;
1133         __u32 flags = fp ? fp->flags : 0;
1134
1135         assert_spin_locked(&proc->inner_lock);
1136
1137         while (*p) {
1138
1139                 parent = *p;
1140                 node = rb_entry(parent, struct binder_node, rb_node);
1141
1142                 if (ptr < node->ptr)
1143                         p = &(*p)->rb_left;
1144                 else if (ptr > node->ptr)
1145                         p = &(*p)->rb_right;
1146                 else {
1147                         /*
1148                          * A matching node is already in
1149                          * the rb tree. Abandon the init
1150                          * and return it.
1151                          */
1152                         binder_inc_node_tmpref_ilocked(node);
1153                         return node;
1154                 }
1155         }
1156         node = new_node;
1157         binder_stats_created(BINDER_STAT_NODE);
1158         node->tmp_refs++;
1159         rb_link_node(&node->rb_node, parent, p);
1160         rb_insert_color(&node->rb_node, &proc->nodes);
1161         node->debug_id = atomic_inc_return(&binder_last_id);
1162         node->proc = proc;
1163         node->ptr = ptr;
1164         node->cookie = cookie;
1165         node->work.type = BINDER_WORK_NODE;
1166         node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1167         node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1168         spin_lock_init(&node->lock);
1169         INIT_LIST_HEAD(&node->work.entry);
1170         INIT_LIST_HEAD(&node->async_todo);
1171         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1172                      "%d:%d node %d u%016llx c%016llx created\n",
1173                      proc->pid, current->pid, node->debug_id,
1174                      (u64)node->ptr, (u64)node->cookie);
1175
1176         return node;
1177 }
1178
1179 static struct binder_node *binder_new_node(struct binder_proc *proc,
1180                                            struct flat_binder_object *fp)
1181 {
1182         struct binder_node *node;
1183         struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1184
1185         if (!new_node)
1186                 return NULL;
1187         binder_inner_proc_lock(proc);
1188         node = binder_init_node_ilocked(proc, new_node, fp);
1189         binder_inner_proc_unlock(proc);
1190         if (node != new_node)
1191                 /*
1192                  * The node was already added by another thread
1193                  */
1194                 kfree(new_node);
1195
1196         return node;
1197 }
1198
1199 static void binder_free_node(struct binder_node *node)
1200 {
1201         kfree(node);
1202         binder_stats_deleted(BINDER_STAT_NODE);
1203 }
1204
1205 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1206                                     int internal,
1207                                     struct list_head *target_list)
1208 {
1209         struct binder_proc *proc = node->proc;
1210
1211         assert_spin_locked(&node->lock);
1212         if (proc)
1213                 assert_spin_locked(&proc->inner_lock);
1214         if (strong) {
1215                 if (internal) {
1216                         if (target_list == NULL &&
1217                             node->internal_strong_refs == 0 &&
1218                             !(node->proc &&
1219                               node == node->proc->context->binder_context_mgr_node &&
1220                               node->has_strong_ref)) {
1221                                 pr_err("invalid inc strong node for %d\n",
1222                                         node->debug_id);
1223                                 return -EINVAL;
1224                         }
1225                         node->internal_strong_refs++;
1226                 } else
1227                         node->local_strong_refs++;
1228                 if (!node->has_strong_ref && target_list) {
1229                         struct binder_thread *thread = container_of(target_list,
1230                                                     struct binder_thread, todo);
1231                         binder_dequeue_work_ilocked(&node->work);
1232                         BUG_ON(&thread->todo != target_list);
1233                         binder_enqueue_deferred_thread_work_ilocked(thread,
1234                                                                    &node->work);
1235                 }
1236         } else {
1237                 if (!internal)
1238                         node->local_weak_refs++;
1239                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1240                         if (target_list == NULL) {
1241                                 pr_err("invalid inc weak node for %d\n",
1242                                         node->debug_id);
1243                                 return -EINVAL;
1244                         }
1245                         /*
1246                          * See comment above
1247                          */
1248                         binder_enqueue_work_ilocked(&node->work, target_list);
1249                 }
1250         }
1251         return 0;
1252 }
1253
1254 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1255                            struct list_head *target_list)
1256 {
1257         int ret;
1258
1259         binder_node_inner_lock(node);
1260         ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1261         binder_node_inner_unlock(node);
1262
1263         return ret;
1264 }
1265
1266 static bool binder_dec_node_nilocked(struct binder_node *node,
1267                                      int strong, int internal)
1268 {
1269         struct binder_proc *proc = node->proc;
1270
1271         assert_spin_locked(&node->lock);
1272         if (proc)
1273                 assert_spin_locked(&proc->inner_lock);
1274         if (strong) {
1275                 if (internal)
1276                         node->internal_strong_refs--;
1277                 else
1278                         node->local_strong_refs--;
1279                 if (node->local_strong_refs || node->internal_strong_refs)
1280                         return false;
1281         } else {
1282                 if (!internal)
1283                         node->local_weak_refs--;
1284                 if (node->local_weak_refs || node->tmp_refs ||
1285                                 !hlist_empty(&node->refs))
1286                         return false;
1287         }
1288
1289         if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1290                 if (list_empty(&node->work.entry)) {
1291                         binder_enqueue_work_ilocked(&node->work, &proc->todo);
1292                         binder_wakeup_proc_ilocked(proc);
1293                 }
1294         } else {
1295                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1296                     !node->local_weak_refs && !node->tmp_refs) {
1297                         if (proc) {
1298                                 binder_dequeue_work_ilocked(&node->work);
1299                                 rb_erase(&node->rb_node, &proc->nodes);
1300                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1301                                              "refless node %d deleted\n",
1302                                              node->debug_id);
1303                         } else {
1304                                 BUG_ON(!list_empty(&node->work.entry));
1305                                 spin_lock(&binder_dead_nodes_lock);
1306                                 /*
1307                                  * tmp_refs could have changed so
1308                                  * check it again
1309                                  */
1310                                 if (node->tmp_refs) {
1311                                         spin_unlock(&binder_dead_nodes_lock);
1312                                         return false;
1313                                 }
1314                                 hlist_del(&node->dead_node);
1315                                 spin_unlock(&binder_dead_nodes_lock);
1316                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1317                                              "dead node %d deleted\n",
1318                                              node->debug_id);
1319                         }
1320                         return true;
1321                 }
1322         }
1323         return false;
1324 }
1325
1326 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1327 {
1328         bool free_node;
1329
1330         binder_node_inner_lock(node);
1331         free_node = binder_dec_node_nilocked(node, strong, internal);
1332         binder_node_inner_unlock(node);
1333         if (free_node)
1334                 binder_free_node(node);
1335 }
1336
1337 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1338 {
1339         /*
1340          * No call to binder_inc_node() is needed since we
1341          * don't need to inform userspace of any changes to
1342          * tmp_refs
1343          */
1344         node->tmp_refs++;
1345 }
1346
1347 /**
1348  * binder_inc_node_tmpref() - take a temporary reference on node
1349  * @node:       node to reference
1350  *
1351  * Take reference on node to prevent the node from being freed
1352  * while referenced only by a local variable. The inner lock is
1353  * needed to serialize with the node work on the queue (which
1354  * isn't needed after the node is dead). If the node is dead
1355  * (node->proc is NULL), use binder_dead_nodes_lock to protect
1356  * node->tmp_refs against dead-node-only cases where the node
1357  * lock cannot be acquired (eg traversing the dead node list to
1358  * print nodes)
1359  */
1360 static void binder_inc_node_tmpref(struct binder_node *node)
1361 {
1362         binder_node_lock(node);
1363         if (node->proc)
1364                 binder_inner_proc_lock(node->proc);
1365         else
1366                 spin_lock(&binder_dead_nodes_lock);
1367         binder_inc_node_tmpref_ilocked(node);
1368         if (node->proc)
1369                 binder_inner_proc_unlock(node->proc);
1370         else
1371                 spin_unlock(&binder_dead_nodes_lock);
1372         binder_node_unlock(node);
1373 }
1374
1375 /**
1376  * binder_dec_node_tmpref() - remove a temporary reference on node
1377  * @node:       node to reference
1378  *
1379  * Release temporary reference on node taken via binder_inc_node_tmpref()
1380  */
1381 static void binder_dec_node_tmpref(struct binder_node *node)
1382 {
1383         bool free_node;
1384
1385         binder_node_inner_lock(node);
1386         if (!node->proc)
1387                 spin_lock(&binder_dead_nodes_lock);
1388         else
1389                 __acquire(&binder_dead_nodes_lock);
1390         node->tmp_refs--;
1391         BUG_ON(node->tmp_refs < 0);
1392         if (!node->proc)
1393                 spin_unlock(&binder_dead_nodes_lock);
1394         else
1395                 __release(&binder_dead_nodes_lock);
1396         /*
1397          * Call binder_dec_node() to check if all refcounts are 0
1398          * and cleanup is needed. Calling with strong=0 and internal=1
1399          * causes no actual reference to be released in binder_dec_node().
1400          * If that changes, a change is needed here too.
1401          */
1402         free_node = binder_dec_node_nilocked(node, 0, 1);
1403         binder_node_inner_unlock(node);
1404         if (free_node)
1405                 binder_free_node(node);
1406 }
1407
1408 static void binder_put_node(struct binder_node *node)
1409 {
1410         binder_dec_node_tmpref(node);
1411 }
1412
1413 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1414                                                  u32 desc, bool need_strong_ref)
1415 {
1416         struct rb_node *n = proc->refs_by_desc.rb_node;
1417         struct binder_ref *ref;
1418
1419         while (n) {
1420                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1421
1422                 if (desc < ref->data.desc) {
1423                         n = n->rb_left;
1424                 } else if (desc > ref->data.desc) {
1425                         n = n->rb_right;
1426                 } else if (need_strong_ref && !ref->data.strong) {
1427                         binder_user_error("tried to use weak ref as strong ref\n");
1428                         return NULL;
1429                 } else {
1430                         return ref;
1431                 }
1432         }
1433         return NULL;
1434 }
1435
1436 /**
1437  * binder_get_ref_for_node_olocked() - get the ref associated with given node
1438  * @proc:       binder_proc that owns the ref
1439  * @node:       binder_node of target
1440  * @new_ref:    newly allocated binder_ref to be initialized or %NULL
1441  *
1442  * Look up the ref for the given node and return it if it exists
1443  *
1444  * If it doesn't exist and the caller provides a newly allocated
1445  * ref, initialize the fields of the newly allocated ref and insert
1446  * it into the given proc rb_trees and the node's refs list.
1447  *
1448  * Return:      the ref for node. It is possible that another thread
1449  *              allocated/initialized the ref first, in which case the
1450  *              returned ref would be different from the passed-in
1451  *              new_ref. new_ref must be kfree'd by the caller in
1452  *              this case.
1453  */
1454 static struct binder_ref *binder_get_ref_for_node_olocked(
1455                                         struct binder_proc *proc,
1456                                         struct binder_node *node,
1457                                         struct binder_ref *new_ref)
1458 {
1459         struct binder_context *context = proc->context;
1460         struct rb_node **p = &proc->refs_by_node.rb_node;
1461         struct rb_node *parent = NULL;
1462         struct binder_ref *ref;
1463         struct rb_node *n;
1464
1465         while (*p) {
1466                 parent = *p;
1467                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1468
1469                 if (node < ref->node)
1470                         p = &(*p)->rb_left;
1471                 else if (node > ref->node)
1472                         p = &(*p)->rb_right;
1473                 else
1474                         return ref;
1475         }
1476         if (!new_ref)
1477                 return NULL;
1478
1479         binder_stats_created(BINDER_STAT_REF);
1480         new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1481         new_ref->proc = proc;
1482         new_ref->node = node;
1483         rb_link_node(&new_ref->rb_node_node, parent, p);
1484         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1485
1486         new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1487         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1488                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1489                 if (ref->data.desc > new_ref->data.desc)
1490                         break;
1491                 new_ref->data.desc = ref->data.desc + 1;
1492         }
1493
1494         p = &proc->refs_by_desc.rb_node;
1495         while (*p) {
1496                 parent = *p;
1497                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1498
1499                 if (new_ref->data.desc < ref->data.desc)
1500                         p = &(*p)->rb_left;
1501                 else if (new_ref->data.desc > ref->data.desc)
1502                         p = &(*p)->rb_right;
1503                 else
1504                         BUG();
1505         }
1506         rb_link_node(&new_ref->rb_node_desc, parent, p);
1507         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1508
1509         binder_node_lock(node);
1510         hlist_add_head(&new_ref->node_entry, &node->refs);
1511
1512         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1513                      "%d new ref %d desc %d for node %d\n",
1514                       proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1515                       node->debug_id);
1516         binder_node_unlock(node);
1517         return new_ref;
1518 }
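
/*
 * Editor's note, a worked example of the descriptor assignment above
 * (values are illustrative): descriptors start at 1 (0 is reserved for
 * the context manager node) and the scan over refs_by_desc picks the
 * lowest unused value. With existing descriptors {1, 2, 5}:
 *
 *	desc = 1;   ref 1: 1 > 1 is false -> desc = 2
 *	            ref 2: 2 > 2 is false -> desc = 3
 *	            ref 5: 5 > 3 is true  -> stop; new_ref->data.desc == 3
 */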
1519
1520 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1521 {
1522         bool delete_node = false;
1523
1524         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1525                      "%d delete ref %d desc %d for node %d\n",
1526                       ref->proc->pid, ref->data.debug_id, ref->data.desc,
1527                       ref->node->debug_id);
1528
1529         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1530         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1531
1532         binder_node_inner_lock(ref->node);
1533         if (ref->data.strong)
1534                 binder_dec_node_nilocked(ref->node, 1, 1);
1535
1536         hlist_del(&ref->node_entry);
1537         delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1538         binder_node_inner_unlock(ref->node);
1539         /*
1540          * Clear ref->node unless we want the caller to free the node
1541          */
1542         if (!delete_node) {
1543                 /*
1544                  * The caller uses ref->node to determine
1545                  * whether the node needs to be freed. Clear
1546                  * it since the node is still alive.
1547                  */
1548                 ref->node = NULL;
1549         }
1550
1551         if (ref->death) {
1552                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1553                              "%d delete ref %d desc %d has death notification\n",
1554                               ref->proc->pid, ref->data.debug_id,
1555                               ref->data.desc);
1556                 binder_dequeue_work(ref->proc, &ref->death->work);
1557                 binder_stats_deleted(BINDER_STAT_DEATH);
1558         }
1559         binder_stats_deleted(BINDER_STAT_REF);
1560 }
1561
1562 /**
1563  * binder_inc_ref_olocked() - increment the ref for given handle
1564  * @ref:         ref to be incremented
1565  * @strong:      if true, strong increment, else weak
1566  * @target_list: list to queue node work on
1567  *
1568  * Increment the ref. @ref->proc->outer_lock must be held on entry
1569  *
1570  * Return: 0, if successful, else errno
1571  */
1572 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1573                                   struct list_head *target_list)
1574 {
1575         int ret;
1576
1577         if (strong) {
1578                 if (ref->data.strong == 0) {
1579                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1580                         if (ret)
1581                                 return ret;
1582                 }
1583                 ref->data.strong++;
1584         } else {
1585                 if (ref->data.weak == 0) {
1586                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1587                         if (ret)
1588                                 return ret;
1589                 }
1590                 ref->data.weak++;
1591         }
1592         return 0;
1593 }
1594
1595 /**
1596  * binder_dec_ref_olocked() - dec the ref for given handle
1597  * @ref:        ref to be decremented
1598  * @strong:     if true, strong decrement, else weak
1599  *
1600  * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1601  *
1602  * Return: true if ref is cleaned up and ready to be freed
1603  */
1604 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1605 {
1606         if (strong) {
1607                 if (ref->data.strong == 0) {
1608                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1609                                           ref->proc->pid, ref->data.debug_id,
1610                                           ref->data.desc, ref->data.strong,
1611                                           ref->data.weak);
1612                         return false;
1613                 }
1614                 ref->data.strong--;
1615                 if (ref->data.strong == 0)
1616                         binder_dec_node(ref->node, strong, 1);
1617         } else {
1618                 if (ref->data.weak == 0) {
1619                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1620                                           ref->proc->pid, ref->data.debug_id,
1621                                           ref->data.desc, ref->data.strong,
1622                                           ref->data.weak);
1623                         return false;
1624                 }
1625                 ref->data.weak--;
1626         }
1627         if (ref->data.strong == 0 && ref->data.weak == 0) {
1628                 binder_cleanup_ref_olocked(ref);
1629                 return true;
1630         }
1631         return false;
1632 }
1633
1634 /**
1635  * binder_get_node_from_ref() - get the node from the given proc/desc
1636  * @proc:       proc containing the ref
1637  * @desc:       the handle associated with the ref
1638  * @need_strong_ref: if true, only return node if ref is strong
1639  * @rdata:      the id/refcount data for the ref
1640  *
1641  * Given a proc and ref handle, return the associated binder_node
1642  *
1643  * Return: a binder_node or NULL if not found or not strong when strong required
1644  */
1645 static struct binder_node *binder_get_node_from_ref(
1646                 struct binder_proc *proc,
1647                 u32 desc, bool need_strong_ref,
1648                 struct binder_ref_data *rdata)
1649 {
1650         struct binder_node *node;
1651         struct binder_ref *ref;
1652
1653         binder_proc_lock(proc);
1654         ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1655         if (!ref)
1656                 goto err_no_ref;
1657         node = ref->node;
1658         /*
1659          * Take an implicit reference on the node to ensure
1660          * it stays alive until the call to binder_put_node()
1661          */
1662         binder_inc_node_tmpref(node);
1663         if (rdata)
1664                 *rdata = ref->data;
1665         binder_proc_unlock(proc);
1666
1667         return node;
1668
1669 err_no_ref:
1670         binder_proc_unlock(proc);
1671         return NULL;
1672 }
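
/*
 * Editor's sketch of the expected caller pattern for the lookup above;
 * identifiers are placeholders (binder_translate_handle() later in this
 * file follows the same shape):
 *
 *	node = binder_get_node_from_ref(proc, handle, true, &rdata);
 *	if (!node)
 *		return -EINVAL;
 *	... use node and rdata ...
 *	binder_put_node(node);	// release the implicit tmpref
 */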
1673
1674 /**
1675  * binder_free_ref() - free the binder_ref
1676  * @ref:        ref to free
1677  *
1678  * Free the binder_ref. Free the binder_node indicated by ref->node
1679  * (if non-NULL) and the binder_ref_death indicated by ref->death.
1680  */
1681 static void binder_free_ref(struct binder_ref *ref)
1682 {
1683         if (ref->node)
1684                 binder_free_node(ref->node);
1685         kfree(ref->death);
1686         kfree(ref);
1687 }
1688
1689 /**
1690  * binder_update_ref_for_handle() - inc/dec the ref for given handle
1691  * @proc:       proc containing the ref
1692  * @desc:       the handle associated with the ref
1693  * @increment:  true=inc reference, false=dec reference
1694  * @strong:     true=strong reference, false=weak reference
1695  * @rdata:      the id/refcount data for the ref
1696  *
1697  * Given a proc and ref handle, increment or decrement the ref
1698  * according to "increment" arg.
1699  *
1700  * Return: 0 if successful, else errno
1701  */
1702 static int binder_update_ref_for_handle(struct binder_proc *proc,
1703                 uint32_t desc, bool increment, bool strong,
1704                 struct binder_ref_data *rdata)
1705 {
1706         int ret = 0;
1707         struct binder_ref *ref;
1708         bool delete_ref = false;
1709
1710         binder_proc_lock(proc);
1711         ref = binder_get_ref_olocked(proc, desc, strong);
1712         if (!ref) {
1713                 ret = -EINVAL;
1714                 goto err_no_ref;
1715         }
1716         if (increment)
1717                 ret = binder_inc_ref_olocked(ref, strong, NULL);
1718         else
1719                 delete_ref = binder_dec_ref_olocked(ref, strong);
1720
1721         if (rdata)
1722                 *rdata = ref->data;
1723         binder_proc_unlock(proc);
1724
1725         if (delete_ref)
1726                 binder_free_ref(ref);
1727         return ret;
1728
1729 err_no_ref:
1730         binder_proc_unlock(proc);
1731         return ret;
1732 }
1733
1734 /**
1735  * binder_dec_ref_for_handle() - dec the ref for given handle
1736  * @proc:       proc containing the ref
1737  * @desc:       the handle associated with the ref
1738  * @strong:     true=strong reference, false=weak reference
1739  * @rdata:      the id/refcount data for the ref
1740  *
1741  * Just calls binder_update_ref_for_handle() to decrement the ref.
1742  *
1743  * Return: 0 if successful, else errno
1744  */
1745 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1746                 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1747 {
1748         return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1749 }
1750
1751
1752 /**
1753  * binder_inc_ref_for_node() - increment the ref for given proc/node
1754  * @proc:        proc containing the ref
1755  * @node:        target node
1756  * @strong:      true=strong reference, false=weak reference
1757  * @target_list: worklist to use if node is incremented
1758  * @rdata:       the id/refcount data for the ref
1759  *
1760  * Given a proc and node, increment the ref. Create the ref if it
1761  * doesn't already exist
1762  *
1763  * Return: 0 if successful, else errno
1764  */
1765 static int binder_inc_ref_for_node(struct binder_proc *proc,
1766                         struct binder_node *node,
1767                         bool strong,
1768                         struct list_head *target_list,
1769                         struct binder_ref_data *rdata)
1770 {
1771         struct binder_ref *ref;
1772         struct binder_ref *new_ref = NULL;
1773         int ret = 0;
1774
1775         binder_proc_lock(proc);
1776         ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1777         if (!ref) {
1778                 binder_proc_unlock(proc);
1779                 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1780                 if (!new_ref)
1781                         return -ENOMEM;
1782                 binder_proc_lock(proc);
1783                 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1784         }
1785         ret = binder_inc_ref_olocked(ref, strong, target_list);
1786         *rdata = ref->data;
1787         binder_proc_unlock(proc);
1788         if (new_ref && ref != new_ref)
1789                 /*
1790                  * Another thread created the ref first so
1791                  * free the one we allocated
1792                  */
1793                 kfree(new_ref);
1794         return ret;
1795 }
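
/*
 * Editor's note on the pattern above: proc->outer_lock is a spinlock,
 * so the GFP_KERNEL allocation cannot happen while it is held; the lock
 * is dropped, the ref is allocated, and the lookup is retried. A generic
 * sketch of the idiom (identifiers are placeholders):
 *
 *	lock();
 *	obj = lookup();
 *	if (!obj) {
 *		unlock();
 *		new = kzalloc(sizeof(*new), GFP_KERNEL);	// may sleep
 *		lock();
 *		obj = lookup_or_insert(new);	// may return a racing insert
 *	}
 *	unlock();
 *	if (new && obj != new)
 *		kfree(new);			// lost the race; free ours
 */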
1796
1797 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1798                                            struct binder_transaction *t)
1799 {
1800         BUG_ON(!target_thread);
1801         assert_spin_locked(&target_thread->proc->inner_lock);
1802         BUG_ON(target_thread->transaction_stack != t);
1803         BUG_ON(target_thread->transaction_stack->from != target_thread);
1804         target_thread->transaction_stack =
1805                 target_thread->transaction_stack->from_parent;
1806         t->from = NULL;
1807 }
1808
1809 /**
1810  * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1811  * @thread:     thread to decrement
1812  *
1813  * A thread needs to be kept alive while being used to create or
1814  * handle a transaction. binder_get_txn_from() is used to safely
1815  * extract t->from from a binder_transaction and keep the thread
1816  * indicated by t->from from being freed. When done with that
1817  * binder_thread, this function is called to decrement the
1818  * tmp_ref and free if appropriate (thread has been released
1819  * and no transaction being processed by the driver)
1820  */
1821 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1822 {
1823         /*
1824          * atomic ops protect the counter only while it cannot reach
1825          * zero or thread->is_dead is false; hence the inner lock here
1826          */
1827         binder_inner_proc_lock(thread->proc);
1828         atomic_dec(&thread->tmp_ref);
1829         if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1830                 binder_inner_proc_unlock(thread->proc);
1831                 binder_free_thread(thread);
1832                 return;
1833         }
1834         binder_inner_proc_unlock(thread->proc);
1835 }
1836
1837 /**
1838  * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1839  * @proc:       proc to decrement
1840  *
1841  * A binder_proc needs to be kept alive while being used to create or
1842  * handle a transaction. proc->tmp_ref is incremented when
1843  * creating a new transaction or the binder_proc is currently in-use
1844  * by threads that are being released. When done with the binder_proc,
1845  * this function is called to decrement the counter and free the
1846  * proc if appropriate (proc has been released, all threads have
1847  * been released and not currently in-use to process a transaction).
1848  */
1849 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1850 {
1851         binder_inner_proc_lock(proc);
1852         proc->tmp_ref--;
1853         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1854                         !proc->tmp_ref) {
1855                 binder_inner_proc_unlock(proc);
1856                 binder_free_proc(proc);
1857                 return;
1858         }
1859         binder_inner_proc_unlock(proc);
1860 }
1861
1862 /**
1863  * binder_get_txn_from() - safely extract the "from" thread in transaction
1864  * @t:  binder transaction for t->from
1865  *
1866  * Atomically return the "from" thread and increment the tmp_ref
1867  * count for the thread to ensure it stays alive until
1868  * binder_thread_dec_tmpref() is called.
1869  *
1870  * Return: the value of t->from
1871  */
1872 static struct binder_thread *binder_get_txn_from(
1873                 struct binder_transaction *t)
1874 {
1875         struct binder_thread *from;
1876
1877         spin_lock(&t->lock);
1878         from = t->from;
1879         if (from)
1880                 atomic_inc(&from->tmp_ref);
1881         spin_unlock(&t->lock);
1882         return from;
1883 }
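
/*
 * Editor's sketch of the tmp_ref pairing described above; the caller
 * must balance the increment with binder_thread_dec_tmpref():
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */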
1884
1885 /**
1886  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1887  * @t:  binder transaction for t->from
1888  *
1889  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1890  * to guarantee that the thread cannot be released while operating on it.
1891  * The caller must call binder_inner_proc_unlock() to release the inner lock
1892  * as well as call binder_dec_thread_txn() to release the reference.
1893  *
1894  * Return: the value of t->from
1895  */
1896 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1897                 struct binder_transaction *t)
1898         __acquires(&t->from->proc->inner_lock)
1899 {
1900         struct binder_thread *from;
1901
1902         from = binder_get_txn_from(t);
1903         if (!from) {
1904                 __acquire(&from->proc->inner_lock);
1905                 return NULL;
1906         }
1907         binder_inner_proc_lock(from->proc);
1908         if (t->from) {
1909                 BUG_ON(from != t->from);
1910                 return from;
1911         }
1912         binder_inner_proc_unlock(from->proc);
1913         __acquire(&from->proc->inner_lock);
1914         binder_thread_dec_tmpref(from);
1915         return NULL;
1916 }
1917
1918 /**
1919  * binder_free_txn_fixups() - free unprocessed fd fixups
1920  * @t:  binder transaction whose unprocessed fd fixups should be freed
1921  *
1922  * If the transaction is being torn down prior to being
1923  * processed by the target process, free all of the
1924  * fd fixups and fput the file structs. It is safe to
1925  * call this function after the fixups have been
1926  * processed -- in that case, the list will be empty.
1927  */
1928 static void binder_free_txn_fixups(struct binder_transaction *t)
1929 {
1930         struct binder_txn_fd_fixup *fixup, *tmp;
1931
1932         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1933                 fput(fixup->file);
1934                 list_del(&fixup->fixup_entry);
1935                 kfree(fixup);
1936         }
1937 }
1938
1939 static void binder_free_transaction(struct binder_transaction *t)
1940 {
1941         if (t->buffer)
1942                 t->buffer->transaction = NULL;
1943         binder_free_txn_fixups(t);
1944         kfree(t);
1945         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1946 }
1947
1948 static void binder_send_failed_reply(struct binder_transaction *t,
1949                                      uint32_t error_code)
1950 {
1951         struct binder_thread *target_thread;
1952         struct binder_transaction *next;
1953
1954         BUG_ON(t->flags & TF_ONE_WAY);
1955         while (1) {
1956                 target_thread = binder_get_txn_from_and_acq_inner(t);
1957                 if (target_thread) {
1958                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1959                                      "send failed reply for transaction %d to %d:%d\n",
1960                                       t->debug_id,
1961                                       target_thread->proc->pid,
1962                                       target_thread->pid);
1963
1964                         binder_pop_transaction_ilocked(target_thread, t);
1965                         if (target_thread->reply_error.cmd == BR_OK) {
1966                                 target_thread->reply_error.cmd = error_code;
1967                                 binder_enqueue_thread_work_ilocked(
1968                                         target_thread,
1969                                         &target_thread->reply_error.work);
1970                                 wake_up_interruptible(&target_thread->wait);
1971                         } else {
1972                                 /*
1973                                  * Cannot get here for normal operation, but
1974                                  * we can if multiple synchronous transactions
1975                                  * are sent without blocking for responses.
1976                                  * Just ignore the 2nd error in this case.
1977                                  */
1978                                 pr_warn("Unexpected reply error: %u\n",
1979                                         target_thread->reply_error.cmd);
1980                         }
1981                         binder_inner_proc_unlock(target_thread->proc);
1982                         binder_thread_dec_tmpref(target_thread);
1983                         binder_free_transaction(t);
1984                         return;
1985                 } else {
1986                         __release(&target_thread->proc->inner_lock);
1987                 }
1988                 next = t->from_parent;
1989
1990                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1991                              "send failed reply for transaction %d, target dead\n",
1992                              t->debug_id);
1993
1994                 binder_free_transaction(t);
1995                 if (next == NULL) {
1996                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1997                                      "reply failed, no target thread at root\n");
1998                         return;
1999                 }
2000                 t = next;
2001                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2002                              "reply failed, no target thread -- retry %d\n",
2003                               t->debug_id);
2004         }
2005 }
2006
2007 /**
2008  * binder_cleanup_transaction() - cleans up undelivered transaction
2009  * @t:          transaction that needs to be cleaned up
2010  * @reason:     reason the transaction wasn't delivered
2011  * @error_code: error to return to caller (if synchronous call)
2012  */
2013 static void binder_cleanup_transaction(struct binder_transaction *t,
2014                                        const char *reason,
2015                                        uint32_t error_code)
2016 {
2017         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2018                 binder_send_failed_reply(t, error_code);
2019         } else {
2020                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2021                         "undelivered transaction %d, %s\n",
2022                         t->debug_id, reason);
2023                 binder_free_transaction(t);
2024         }
2025 }
2026
2027 /**
2028  * binder_validate_object() - checks for a valid metadata object in a buffer.
2029  * @buffer:     binder_buffer that we're parsing.
2030  * @offset:     offset in the buffer at which to validate an object.
2031  *
2032  * Return:      If there's a valid metadata object at @offset in @buffer, the
2033  *              size of that object. Otherwise, it returns zero.
2034  */
2035 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2036 {
2037         /* Check if we can read a header first */
2038         struct binder_object_header *hdr;
2039         size_t object_size = 0;
2040
2041         if (buffer->data_size < sizeof(*hdr) ||
2042             offset > buffer->data_size - sizeof(*hdr) ||
2043             !IS_ALIGNED(offset, sizeof(u32)))
2044                 return 0;
2045
2046         /* Ok, now see if we can read a complete object. */
2047         hdr = (struct binder_object_header *)(buffer->data + offset);
2048         switch (hdr->type) {
2049         case BINDER_TYPE_BINDER:
2050         case BINDER_TYPE_WEAK_BINDER:
2051         case BINDER_TYPE_HANDLE:
2052         case BINDER_TYPE_WEAK_HANDLE:
2053                 object_size = sizeof(struct flat_binder_object);
2054                 break;
2055         case BINDER_TYPE_FD:
2056                 object_size = sizeof(struct binder_fd_object);
2057                 break;
2058         case BINDER_TYPE_PTR:
2059                 object_size = sizeof(struct binder_buffer_object);
2060                 break;
2061         case BINDER_TYPE_FDA:
2062                 object_size = sizeof(struct binder_fd_array_object);
2063                 break;
2064         default:
2065                 return 0;
2066         }
2067         if (offset <= buffer->data_size - object_size &&
2068             buffer->data_size >= object_size)
2069                 return object_size;
2070         else
2071                 return 0;
2072 }
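
/*
 * Editor's note, a worked example of the final size check above: with
 * data_size = 64 and an object_size of 24, any 4-byte-aligned offset up
 * to 40 passes (offset <= 64 - 24); the separate data_size >= object_size
 * test rejects the case where that unsigned subtraction would wrap.
 */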
2073
2074 /**
2075  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2076  * @b:          binder_buffer containing the object
2077  * @index:      index in offset array at which the binder_buffer_object is
2078  *              located
2079  * @start:      points to the start of the offset array
2080  * @num_valid:  the number of valid offsets in the offset array
2081  *
2082  * Return:      If @index is within the valid range of the offset array
2083  *              described by @start and @num_valid, and if there's a valid
2084  *              binder_buffer_object at the offset found in index @index
2085  *              of the offset array, that object is returned. Otherwise,
2086  *              %NULL is returned.
2087  *              Note that the offset found in index @index itself is not
2088  *              verified; this function assumes that @num_valid elements
2089  *              from @start were previously verified to have valid offsets.
2090  */
2091 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2092                                                         binder_size_t index,
2093                                                         binder_size_t *start,
2094                                                         binder_size_t num_valid)
2095 {
2096         struct binder_buffer_object *buffer_obj;
2097         binder_size_t *offp;
2098
2099         if (index >= num_valid)
2100                 return NULL;
2101
2102         offp = start + index;
2103         buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2104         if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2105                 return NULL;
2106
2107         return buffer_obj;
2108 }
2109
2110 /**
2111  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2112  * @b:                  transaction buffer
2113  * @objects_start:      start of objects buffer
2114  * @buffer:             binder_buffer_object in which to fix up
2115  * @fixup_offset:       start offset in @buffer to fix up
2116  * @last_obj:           last binder_buffer_object that we fixed up in
2117  * @last_min_offset:    minimum fixup offset in @last_obj
2118  *
2119  * Return:              %true if a fixup in buffer @buffer at offset @fixup_offset is
2120  *                      allowed.
2121  *
2122  * For safety reasons, we only allow fixups inside a buffer to happen
2123  * at increasing offsets; additionally, we only allow fixup on the last
2124  * buffer object that was verified, or one of its parents.
2125  *
2126  * Example of what is allowed:
2127  *
2128  * A
2129  *   B (parent = A, offset = 0)
2130  *   C (parent = A, offset = 16)
2131  *     D (parent = C, offset = 0)
2132  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2133  *
2134  * Examples of what is not allowed:
2135  *
2136  * Decreasing offsets within the same parent:
2137  * A
2138  *   C (parent = A, offset = 16)
2139  *   B (parent = A, offset = 0) // decreasing offset within A
2140  *
2141  * Referring to a parent that wasn't the last object or any of its parents:
2142  * A
2143  *   B (parent = A, offset = 0)
2144  *   C (parent = A, offset = 0)
2145  *   C (parent = A, offset = 16)
2146  *     D (parent = B, offset = 0) // B is not A or any of A's parents
2147  */
2148 static bool binder_validate_fixup(struct binder_buffer *b,
2149                                   binder_size_t *objects_start,
2150                                   struct binder_buffer_object *buffer,
2151                                   binder_size_t fixup_offset,
2152                                   struct binder_buffer_object *last_obj,
2153                                   binder_size_t last_min_offset)
2154 {
2155         if (!last_obj) {
2156                 /* No previously verified object to fix up in */
2157                 return false;
2158         }
2159
2160         while (last_obj != buffer) {
2161                 /*
2162                  * Safe to retrieve the parent of last_obj, since it
2163                  * was already previously verified by the driver.
2164                  */
2165                 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2166                         return false;
2167                 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2168                 last_obj = (struct binder_buffer_object *)
2169                         (b->data + *(objects_start + last_obj->parent));
2170         }
2171         return (fixup_offset >= last_min_offset);
2172 }
2173
2174 /**
2175  * struct binder_task_work_cb - for deferred close
2176  *
2177  * @twork:                callback_head for task work
2178  * @file:                 file to fput() when the task work runs
2179  *
2180  * Structure to pass task work to be handled after
2181  * returning from binder_ioctl() via task_work_add().
2182  */
2183 struct binder_task_work_cb {
2184         struct callback_head twork;
2185         struct file *file;
2186 };
2187
2188 /**
2189  * binder_do_fd_close() - close list of file descriptors
2190  * @twork:      callback head for task work
2191  *
2192  * It is not safe to call ksys_close() during the binder_ioctl()
2193  * function if there is a chance that binder's own file descriptor
2194  * might be closed. This is to meet the requirements for using
2195  * fdget() (see comments for __fget_light()). Therefore use
2196  * task_work_add() to schedule the close operation once we have
2197  * returned from binder_ioctl(). This function is a callback
2198  * for that mechanism and does the final fput() on the file whose
2199  * descriptor was already detached in binder_deferred_fd_close().
2200  */
2201 static void binder_do_fd_close(struct callback_head *twork)
2202 {
2203         struct binder_task_work_cb *twcb = container_of(twork,
2204                         struct binder_task_work_cb, twork);
2205
2206         fput(twcb->file);
2207         kfree(twcb);
2208 }
2209
2210 /**
2211  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2212  * @fd:         file-descriptor to close
2213  *
2214  * See comments in binder_do_fd_close(). This function is used to schedule
2215  * a file-descriptor to be closed after returning from binder_ioctl().
2216  */
2217 static void binder_deferred_fd_close(int fd)
2218 {
2219         struct binder_task_work_cb *twcb;
2220
2221         twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2222         if (!twcb)
2223                 return;
2224         init_task_work(&twcb->twork, binder_do_fd_close);
2225         __close_fd_get_file(fd, &twcb->file);
2226         if (twcb->file)
2227                 task_work_add(current, &twcb->twork, true);
2228         else
2229                 kfree(twcb);
2230 }
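
/*
 * Editor's sketch of the deferral used above (this is the core of the
 * ksys_close()/fdget() use-after-free fix): the descriptor is detached
 * from the fd table immediately, but the final fput() is postponed via
 * task work until the current syscall returns to userspace:
 *
 *	binder_deferred_fd_close(fd);
 *		__close_fd_get_file(fd, &twcb->file);	// fd gone from table
 *		task_work_add(current, &twcb->twork, true);
 *	... binder_ioctl() returns ...
 *	binder_do_fd_close(&twcb->twork);
 *		fput(twcb->file);			// drop the reference
 */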
2231
2232 static void binder_transaction_buffer_release(struct binder_proc *proc,
2233                                               struct binder_buffer *buffer,
2234                                               binder_size_t *failed_at)
2235 {
2236         binder_size_t *offp, *off_start, *off_end;
2237         int debug_id = buffer->debug_id;
2238
2239         binder_debug(BINDER_DEBUG_TRANSACTION,
2240                      "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2241                      proc->pid, buffer->debug_id,
2242                      buffer->data_size, buffer->offsets_size, failed_at);
2243
2244         if (buffer->target_node)
2245                 binder_dec_node(buffer->target_node, 1, 0);
2246
2247         off_start = (binder_size_t *)(buffer->data +
2248                                       ALIGN(buffer->data_size, sizeof(void *)));
2249         if (failed_at)
2250                 off_end = failed_at;
2251         else
2252                 off_end = (void *)off_start + buffer->offsets_size;
2253         for (offp = off_start; offp < off_end; offp++) {
2254                 struct binder_object_header *hdr;
2255                 size_t object_size = binder_validate_object(buffer, *offp);
2256
2257                 if (object_size == 0) {
2258                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2259                                debug_id, (u64)*offp, buffer->data_size);
2260                         continue;
2261                 }
2262                 hdr = (struct binder_object_header *)(buffer->data + *offp);
2263                 switch (hdr->type) {
2264                 case BINDER_TYPE_BINDER:
2265                 case BINDER_TYPE_WEAK_BINDER: {
2266                         struct flat_binder_object *fp;
2267                         struct binder_node *node;
2268
2269                         fp = to_flat_binder_object(hdr);
2270                         node = binder_get_node(proc, fp->binder);
2271                         if (node == NULL) {
2272                                 pr_err("transaction release %d bad node %016llx\n",
2273                                        debug_id, (u64)fp->binder);
2274                                 break;
2275                         }
2276                         binder_debug(BINDER_DEBUG_TRANSACTION,
2277                                      "        node %d u%016llx\n",
2278                                      node->debug_id, (u64)node->ptr);
2279                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2280                                         0);
2281                         binder_put_node(node);
2282                 } break;
2283                 case BINDER_TYPE_HANDLE:
2284                 case BINDER_TYPE_WEAK_HANDLE: {
2285                         struct flat_binder_object *fp;
2286                         struct binder_ref_data rdata;
2287                         int ret;
2288
2289                         fp = to_flat_binder_object(hdr);
2290                         ret = binder_dec_ref_for_handle(proc, fp->handle,
2291                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2292
2293                         if (ret) {
2294                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
2295                                  debug_id, fp->handle, ret);
2296                                 break;
2297                         }
2298                         binder_debug(BINDER_DEBUG_TRANSACTION,
2299                                      "        ref %d desc %d\n",
2300                                      rdata.debug_id, rdata.desc);
2301                 } break;
2302
2303                 case BINDER_TYPE_FD: {
2304                         /*
2305                          * No need to close the file here since user-space
2306                          * closes it for successfully delivered
2307                          * transactions. For transactions that weren't
2308                          * delivered, the new fd was never allocated so
2309                          * there is no need to close and the fput on the
2310                          * file is done when the transaction is torn
2311                          * down.
2312                          */
2313                         WARN_ON(failed_at &&
2314                                 proc->tsk == current->group_leader);
2315                 } break;
2316                 case BINDER_TYPE_PTR:
2317                         /*
2318                          * Nothing to do here, this will get cleaned up when the
2319                          * transaction buffer gets freed
2320                          */
2321                         break;
2322                 case BINDER_TYPE_FDA: {
2323                         struct binder_fd_array_object *fda;
2324                         struct binder_buffer_object *parent;
2325                         uintptr_t parent_buffer;
2326                         u32 *fd_array;
2327                         size_t fd_index;
2328                         binder_size_t fd_buf_size;
2329
2330                         if (proc->tsk != current->group_leader) {
2331                                 /*
2332                                  * Nothing to do if running in sender context
2333                                  * The fd fixups have not been applied so no
2334                                  * fds need to be closed.
2335                                  */
2336                                 continue;
2337                         }
2338
2339                         fda = to_binder_fd_array_object(hdr);
2340                         parent = binder_validate_ptr(buffer, fda->parent,
2341                                                      off_start,
2342                                                      offp - off_start);
2343                         if (!parent) {
2344                                 pr_err("transaction release %d bad parent offset\n",
2345                                        debug_id);
2346                                 continue;
2347                         }
2348                         /*
2349                          * Since the parent was already fixed up, convert it
2350                          * back to kernel address space to access it
2351                          */
2352                         parent_buffer = parent->buffer -
2353                                 binder_alloc_get_user_buffer_offset(
2354                                                 &proc->alloc);
2355
2356                         fd_buf_size = sizeof(u32) * fda->num_fds;
2357                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2358                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2359                                        debug_id, (u64)fda->num_fds);
2360                                 continue;
2361                         }
2362                         if (fd_buf_size > parent->length ||
2363                             fda->parent_offset > parent->length - fd_buf_size) {
2364                                 /* No space for all file descriptors here. */
2365                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2366                                        debug_id, (u64)fda->num_fds);
2367                                 continue;
2368                         }
2369                         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2370                         for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2371                                 binder_deferred_fd_close(fd_array[fd_index]);
2372                 } break;
2373                 default:
2374                         pr_err("transaction release %d bad object type %x\n",
2375                                 debug_id, hdr->type);
2376                         break;
2377                 }
2378         }
2379 }
2380
2381 static int binder_translate_binder(struct flat_binder_object *fp,
2382                                    struct binder_transaction *t,
2383                                    struct binder_thread *thread)
2384 {
2385         struct binder_node *node;
2386         struct binder_proc *proc = thread->proc;
2387         struct binder_proc *target_proc = t->to_proc;
2388         struct binder_ref_data rdata;
2389         int ret = 0;
2390
2391         node = binder_get_node(proc, fp->binder);
2392         if (!node) {
2393                 node = binder_new_node(proc, fp);
2394                 if (!node)
2395                         return -ENOMEM;
2396         }
2397         if (fp->cookie != node->cookie) {
2398                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2399                                   proc->pid, thread->pid, (u64)fp->binder,
2400                                   node->debug_id, (u64)fp->cookie,
2401                                   (u64)node->cookie);
2402                 ret = -EINVAL;
2403                 goto done;
2404         }
2405         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2406                 ret = -EPERM;
2407                 goto done;
2408         }
2409
2410         ret = binder_inc_ref_for_node(target_proc, node,
2411                         fp->hdr.type == BINDER_TYPE_BINDER,
2412                         &thread->todo, &rdata);
2413         if (ret)
2414                 goto done;
2415
2416         if (fp->hdr.type == BINDER_TYPE_BINDER)
2417                 fp->hdr.type = BINDER_TYPE_HANDLE;
2418         else
2419                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2420         fp->binder = 0;
2421         fp->handle = rdata.desc;
2422         fp->cookie = 0;
2423
2424         trace_binder_transaction_node_to_ref(t, node, &rdata);
2425         binder_debug(BINDER_DEBUG_TRANSACTION,
2426                      "        node %d u%016llx -> ref %d desc %d\n",
2427                      node->debug_id, (u64)node->ptr,
2428                      rdata.debug_id, rdata.desc);
2429 done:
2430         binder_put_node(node);
2431         return ret;
2432 }
2433
2434 static int binder_translate_handle(struct flat_binder_object *fp,
2435                                    struct binder_transaction *t,
2436                                    struct binder_thread *thread)
2437 {
2438         struct binder_proc *proc = thread->proc;
2439         struct binder_proc *target_proc = t->to_proc;
2440         struct binder_node *node;
2441         struct binder_ref_data src_rdata;
2442         int ret = 0;
2443
2444         node = binder_get_node_from_ref(proc, fp->handle,
2445                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2446         if (!node) {
2447                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2448                                   proc->pid, thread->pid, fp->handle);
2449                 return -EINVAL;
2450         }
2451         if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2452                 ret = -EPERM;
2453                 goto done;
2454         }
2455
2456         binder_node_lock(node);
2457         if (node->proc == target_proc) {
2458                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2459                         fp->hdr.type = BINDER_TYPE_BINDER;
2460                 else
2461                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2462                 fp->binder = node->ptr;
2463                 fp->cookie = node->cookie;
2464                 if (node->proc)
2465                         binder_inner_proc_lock(node->proc);
2466                 else
2467                         __acquire(&node->proc->inner_lock);
2468                 binder_inc_node_nilocked(node,
2469                                          fp->hdr.type == BINDER_TYPE_BINDER,
2470                                          0, NULL);
2471                 if (node->proc)
2472                         binder_inner_proc_unlock(node->proc);
2473                 else
2474                         __release(&node->proc->inner_lock);
2475                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2476                 binder_debug(BINDER_DEBUG_TRANSACTION,
2477                              "        ref %d desc %d -> node %d u%016llx\n",
2478                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2479                              (u64)node->ptr);
2480                 binder_node_unlock(node);
2481         } else {
2482                 struct binder_ref_data dest_rdata;
2483
2484                 binder_node_unlock(node);
2485                 ret = binder_inc_ref_for_node(target_proc, node,
2486                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2487                                 NULL, &dest_rdata);
2488                 if (ret)
2489                         goto done;
2490
2491                 fp->binder = 0;
2492                 fp->handle = dest_rdata.desc;
2493                 fp->cookie = 0;
2494                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2495                                                     &dest_rdata);
2496                 binder_debug(BINDER_DEBUG_TRANSACTION,
2497                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2498                              src_rdata.debug_id, src_rdata.desc,
2499                              dest_rdata.debug_id, dest_rdata.desc,
2500                              node->debug_id);
2501         }
2502 done:
2503         binder_put_node(node);
2504         return ret;
2505 }
2506
2507 static int binder_translate_fd(u32 *fdp,
2508                                struct binder_transaction *t,
2509                                struct binder_thread *thread,
2510                                struct binder_transaction *in_reply_to)
2511 {
2512         struct binder_proc *proc = thread->proc;
2513         struct binder_proc *target_proc = t->to_proc;
2514         struct binder_txn_fd_fixup *fixup;
2515         struct file *file;
2516         int ret = 0;
2517         bool target_allows_fd;
2518         int fd = *fdp;
2519
2520         if (in_reply_to)
2521                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2522         else
2523                 target_allows_fd = t->buffer->target_node->accept_fds;
2524         if (!target_allows_fd) {
2525                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2526                                   proc->pid, thread->pid,
2527                                   in_reply_to ? "reply" : "transaction",
2528                                   fd);
2529                 ret = -EPERM;
2530                 goto err_fd_not_accepted;
2531         }
2532
2533         file = fget(fd);
2534         if (!file) {
2535                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2536                                   proc->pid, thread->pid, fd);
2537                 ret = -EBADF;
2538                 goto err_fget;
2539         }
2540         ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2541         if (ret < 0) {
2542                 ret = -EPERM;
2543                 goto err_security;
2544         }
2545
2546         /*
2547          * Add fixup record for this transaction. The allocation
2548          * of the fd in the target needs to be done from a
2549          * target thread.
2550          */
2551         fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2552         if (!fixup) {
2553                 ret = -ENOMEM;
2554                 goto err_alloc;
2555         }
2556         fixup->file = file;
2557         fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
2558         trace_binder_transaction_fd_send(t, fd, fixup->offset);
2559         list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2560
2561         return ret;
2562
2563 err_alloc:
2564 err_security:
2565         fput(file);
2566 err_fget:
2567 err_fd_not_accepted:
2568         return ret;
2569 }
2570
2571 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2572                                      struct binder_buffer_object *parent,
2573                                      struct binder_transaction *t,
2574                                      struct binder_thread *thread,
2575                                      struct binder_transaction *in_reply_to)
2576 {
2577         binder_size_t fdi, fd_buf_size;
2578         uintptr_t parent_buffer;
2579         u32 *fd_array;
2580         struct binder_proc *proc = thread->proc;
2581         struct binder_proc *target_proc = t->to_proc;
2582
2583         fd_buf_size = sizeof(u32) * fda->num_fds;
2584         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2585                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2586                                   proc->pid, thread->pid, (u64)fda->num_fds);
2587                 return -EINVAL;
2588         }
2589         if (fd_buf_size > parent->length ||
2590             fda->parent_offset > parent->length - fd_buf_size) {
2591                 /* No space for all file descriptors here. */
2592                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2593                                   proc->pid, thread->pid, (u64)fda->num_fds);
2594                 return -EINVAL;
2595         }
2596         /*
2597          * Since the parent was already fixed up, convert it
2598          * back to the kernel address space to access it
2599          */
2600         parent_buffer = parent->buffer -
2601                 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2602         fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2603         if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2604                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2605                                   proc->pid, thread->pid);
2606                 return -EINVAL;
2607         }
2608         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2609                 int ret = binder_translate_fd(&fd_array[fdi], t, thread,
2610                                                 in_reply_to);
2611                 if (ret < 0)
2612                         return ret;
2613         }
2614         return 0;
2615 }
2616
2617 static int binder_fixup_parent(struct binder_transaction *t,
2618                                struct binder_thread *thread,
2619                                struct binder_buffer_object *bp,
2620                                binder_size_t *off_start,
2621                                binder_size_t num_valid,
2622                                struct binder_buffer_object *last_fixup_obj,
2623                                binder_size_t last_fixup_min_off)
2624 {
2625         struct binder_buffer_object *parent;
2626         u8 *parent_buffer;
2627         struct binder_buffer *b = t->buffer;
2628         struct binder_proc *proc = thread->proc;
2629         struct binder_proc *target_proc = t->to_proc;
2630
2631         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2632                 return 0;
2633
2634         parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2635         if (!parent) {
2636                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2637                                   proc->pid, thread->pid);
2638                 return -EINVAL;
2639         }
2640
2641         if (!binder_validate_fixup(b, off_start,
2642                                    parent, bp->parent_offset,
2643                                    last_fixup_obj,
2644                                    last_fixup_min_off)) {
2645                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2646                                   proc->pid, thread->pid);
2647                 return -EINVAL;
2648         }
2649
2650         if (parent->length < sizeof(binder_uintptr_t) ||
2651             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2652                 /* No space for a pointer here! */
2653                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2654                                   proc->pid, thread->pid);
2655                 return -EINVAL;
2656         }
2657         parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2658                         binder_alloc_get_user_buffer_offset(
2659                                 &target_proc->alloc));
2660         *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2661
2662         return 0;
2663 }
2664
2665 /**
2666  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2667  * @t:          transaction to send
2668  * @proc:       process to send the transaction to
2669  * @thread:     thread in @proc to send the transaction to (may be NULL)
2670  *
2671  * This function queues a transaction to the specified process. It will try
2672  * to find a thread in the target process to handle the transaction and
2673  * wake it up. If no thread is found, the work is queued to the proc
2674  * waitqueue.
2675  *
2676  * If the @thread parameter is not NULL, the transaction is always queued
2677  * to the waitlist of that specific thread.
2678  *
2679  * Return:      true if the transaction was successfully queued
2680  *              false if the target process or thread is dead
2681  */
2682 static bool binder_proc_transaction(struct binder_transaction *t,
2683                                     struct binder_proc *proc,
2684                                     struct binder_thread *thread)
2685 {
2686         struct binder_node *node = t->buffer->target_node;
2687         bool oneway = !!(t->flags & TF_ONE_WAY);
2688         bool pending_async = false;
2689
2690         BUG_ON(!node);
2691         binder_node_lock(node);
2692         if (oneway) {
2693                 BUG_ON(thread);
2694                 if (node->has_async_transaction) {
2695                         pending_async = true;
2696                 } else {
2697                         node->has_async_transaction = true;
2698                 }
2699         }
2700
2701         binder_inner_proc_lock(proc);
2702
2703         if (proc->is_dead || (thread && thread->is_dead)) {
2704                 binder_inner_proc_unlock(proc);
2705                 binder_node_unlock(node);
2706                 return false;
2707         }
2708
2709         if (!thread && !pending_async)
2710                 thread = binder_select_thread_ilocked(proc);
2711
2712         if (thread)
2713                 binder_enqueue_thread_work_ilocked(thread, &t->work);
2714         else if (!pending_async)
2715                 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2716         else
2717                 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2718
2719         if (!pending_async)
2720                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2721
2722         binder_inner_proc_unlock(proc);
2723         binder_node_unlock(node);
2724
2725         return true;
2726 }
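/*
 * Queueing summary for binder_proc_transaction(): a synchronous transaction,
 * or an asynchronous one with no async work already pending on the node, is
 * put on a specific thread's todo list when a thread is available (the
 * caller-supplied @thread or one picked by binder_select_thread_ilocked()),
 * otherwise on proc->todo, and a wakeup is issued. An asynchronous
 * transaction that finds node->has_async_transaction already set is parked
 * on node->async_todo with no wakeup; it is moved to proc->todo only when
 * the previous async buffer is freed (see binder_free_buf()).
 */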
2727
2728 /**
2729  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2730  * @node:         struct binder_node for which to get refs
2731  * @procp:        returns @node->proc if valid
2732  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2733  *
2734  * User-space normally keeps the node alive when creating a transaction
2735  * since it has a reference to the target. The local strong ref keeps it
2736  * alive if the sending process dies before the target process processes
2737  * the transaction. If the source process is malicious or has a reference
2738  * counting bug, relying on the local strong ref can fail.
2739  *
2740  * Since user-space can cause the local strong ref to go away, we also take
2741  * a tmpref on the node to ensure it survives while we are constructing
2742  * the transaction. We also need a tmpref on the proc while we are
2743  * constructing the transaction, so we take that here as well.
2744  *
2745  * Return: The target_node with refs taken or NULL if @node->proc is NULL.
2746  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2747  * target proc has died, @error is set to BR_DEAD_REPLY.
2748  */
2749 static struct binder_node *binder_get_node_refs_for_txn(
2750                 struct binder_node *node,
2751                 struct binder_proc **procp,
2752                 uint32_t *error)
2753 {
2754         struct binder_node *target_node = NULL;
2755
2756         binder_node_inner_lock(node);
2757         if (node->proc) {
2758                 target_node = node;
2759                 binder_inc_node_nilocked(node, 1, 0, NULL);
2760                 binder_inc_node_tmpref_ilocked(node);
2761                 node->proc->tmp_ref++;
2762                 *procp = node->proc;
2763         } else
2764                 *error = BR_DEAD_REPLY;
2765         binder_node_inner_unlock(node);
2766
2767         return target_node;
2768 }
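/*
 * The node tmpref and proc->tmp_ref taken here only need to live while
 * binder_transaction() constructs and queues the transaction; they are
 * dropped there with binder_dec_node_tmpref() and binder_proc_dec_tmpref()
 * on both the success and error paths.
 */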
2769
2770 static void binder_transaction(struct binder_proc *proc,
2771                                struct binder_thread *thread,
2772                                struct binder_transaction_data *tr, int reply,
2773                                binder_size_t extra_buffers_size)
2774 {
2775         int ret;
2776         struct binder_transaction *t;
2777         struct binder_work *w;
2778         struct binder_work *tcomplete;
2779         binder_size_t *offp, *off_end, *off_start;
2780         binder_size_t off_min;
2781         u8 *sg_bufp, *sg_buf_end;
2782         struct binder_proc *target_proc = NULL;
2783         struct binder_thread *target_thread = NULL;
2784         struct binder_node *target_node = NULL;
2785         struct binder_transaction *in_reply_to = NULL;
2786         struct binder_transaction_log_entry *e;
2787         uint32_t return_error = 0;
2788         uint32_t return_error_param = 0;
2789         uint32_t return_error_line = 0;
2790         struct binder_buffer_object *last_fixup_obj = NULL;
2791         binder_size_t last_fixup_min_off = 0;
2792         struct binder_context *context = proc->context;
2793         int t_debug_id = atomic_inc_return(&binder_last_id);
2794
2795         e = binder_transaction_log_add(&binder_transaction_log);
2796         e->debug_id = t_debug_id;
2797         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2798         e->from_proc = proc->pid;
2799         e->from_thread = thread->pid;
2800         e->target_handle = tr->target.handle;
2801         e->data_size = tr->data_size;
2802         e->offsets_size = tr->offsets_size;
2803         e->context_name = proc->context->name;
2804
2805         if (reply) {
2806                 binder_inner_proc_lock(proc);
2807                 in_reply_to = thread->transaction_stack;
2808                 if (in_reply_to == NULL) {
2809                         binder_inner_proc_unlock(proc);
2810                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2811                                           proc->pid, thread->pid);
2812                         return_error = BR_FAILED_REPLY;
2813                         return_error_param = -EPROTO;
2814                         return_error_line = __LINE__;
2815                         goto err_empty_call_stack;
2816                 }
2817                 if (in_reply_to->to_thread != thread) {
2818                         spin_lock(&in_reply_to->lock);
2819                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2820                                 proc->pid, thread->pid, in_reply_to->debug_id,
2821                                 in_reply_to->to_proc ?
2822                                 in_reply_to->to_proc->pid : 0,
2823                                 in_reply_to->to_thread ?
2824                                 in_reply_to->to_thread->pid : 0);
2825                         spin_unlock(&in_reply_to->lock);
2826                         binder_inner_proc_unlock(proc);
2827                         return_error = BR_FAILED_REPLY;
2828                         return_error_param = -EPROTO;
2829                         return_error_line = __LINE__;
2830                         in_reply_to = NULL;
2831                         goto err_bad_call_stack;
2832                 }
2833                 thread->transaction_stack = in_reply_to->to_parent;
2834                 binder_inner_proc_unlock(proc);
2835                 binder_set_nice(in_reply_to->saved_priority);
2836                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2837                 if (target_thread == NULL) {
2838                         /* annotation for sparse */
2839                         __release(&target_thread->proc->inner_lock);
2840                         return_error = BR_DEAD_REPLY;
2841                         return_error_line = __LINE__;
2842                         goto err_dead_binder;
2843                 }
2844                 if (target_thread->transaction_stack != in_reply_to) {
2845                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2846                                 proc->pid, thread->pid,
2847                                 target_thread->transaction_stack ?
2848                                 target_thread->transaction_stack->debug_id : 0,
2849                                 in_reply_to->debug_id);
2850                         binder_inner_proc_unlock(target_thread->proc);
2851                         return_error = BR_FAILED_REPLY;
2852                         return_error_param = -EPROTO;
2853                         return_error_line = __LINE__;
2854                         in_reply_to = NULL;
2855                         target_thread = NULL;
2856                         goto err_dead_binder;
2857                 }
2858                 target_proc = target_thread->proc;
2859                 target_proc->tmp_ref++;
2860                 binder_inner_proc_unlock(target_thread->proc);
2861         } else {
2862                 if (tr->target.handle) {
2863                         struct binder_ref *ref;
2864
2865                         /*
2866                          * There must already be a strong ref
2867                          * on this node. If so, do a strong
2868                          * increment on the node to ensure it
2869                          * stays alive until the transaction is
2870                          * done.
2871                          */
2872                         binder_proc_lock(proc);
2873                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2874                                                      true);
2875                         if (ref) {
2876                                 target_node = binder_get_node_refs_for_txn(
2877                                                 ref->node, &target_proc,
2878                                                 &return_error);
2879                         } else {
2880                                 binder_user_error("%d:%d got transaction to invalid handle\n",
2881                                                   proc->pid, thread->pid);
2882                                 return_error = BR_FAILED_REPLY;
2883                         }
2884                         binder_proc_unlock(proc);
2885                 } else {
2886                         mutex_lock(&context->context_mgr_node_lock);
2887                         target_node = context->binder_context_mgr_node;
2888                         if (target_node)
2889                                 target_node = binder_get_node_refs_for_txn(
2890                                                 target_node, &target_proc,
2891                                                 &return_error);
2892                         else
2893                                 return_error = BR_DEAD_REPLY;
2894                         mutex_unlock(&context->context_mgr_node_lock);
2895                         if (target_node && target_proc == proc) {
2896                                 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2897                                                   proc->pid, thread->pid);
2898                                 return_error = BR_FAILED_REPLY;
2899                                 return_error_param = -EINVAL;
2900                                 return_error_line = __LINE__;
2901                                 goto err_invalid_target_handle;
2902                         }
2903                 }
2904                 if (!target_node) {
2905                         /*
2906                          * return_error is set above
2907                          */
2908                         return_error_param = -EINVAL;
2909                         return_error_line = __LINE__;
2910                         goto err_dead_binder;
2911                 }
2912                 e->to_node = target_node->debug_id;
2913                 if (security_binder_transaction(proc->tsk,
2914                                                 target_proc->tsk) < 0) {
2915                         return_error = BR_FAILED_REPLY;
2916                         return_error_param = -EPERM;
2917                         return_error_line = __LINE__;
2918                         goto err_invalid_target_handle;
2919                 }
2920                 binder_inner_proc_lock(proc);
2921
2922                 w = list_first_entry_or_null(&thread->todo,
2923                                              struct binder_work, entry);
2924                 if (!(tr->flags & TF_ONE_WAY) && w &&
2925                     w->type == BINDER_WORK_TRANSACTION) {
2926                         /*
2927                          * Do not allow new outgoing transaction from a
2928                          * thread that has a transaction at the head of
2929                          * its todo list. Only need to check the head
2930                          * because binder_select_thread_ilocked picks a
2931                          * thread from proc->waiting_threads to enqueue
2932                          * the transaction, and nothing is queued to the
2933                          * todo list while the thread is on waiting_threads.
2934                          */
2935                         binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2936                                           proc->pid, thread->pid);
2937                         binder_inner_proc_unlock(proc);
2938                         return_error = BR_FAILED_REPLY;
2939                         return_error_param = -EPROTO;
2940                         return_error_line = __LINE__;
2941                         goto err_bad_todo_list;
2942                 }
2943
2944                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2945                         struct binder_transaction *tmp;
2946
2947                         tmp = thread->transaction_stack;
2948                         if (tmp->to_thread != thread) {
2949                                 spin_lock(&tmp->lock);
2950                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2951                                         proc->pid, thread->pid, tmp->debug_id,
2952                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2953                                         tmp->to_thread ?
2954                                         tmp->to_thread->pid : 0);
2955                                 spin_unlock(&tmp->lock);
2956                                 binder_inner_proc_unlock(proc);
2957                                 return_error = BR_FAILED_REPLY;
2958                                 return_error_param = -EPROTO;
2959                                 return_error_line = __LINE__;
2960                                 goto err_bad_call_stack;
2961                         }
2962                         while (tmp) {
2963                                 struct binder_thread *from;
2964
2965                                 spin_lock(&tmp->lock);
2966                                 from = tmp->from;
2967                                 if (from && from->proc == target_proc) {
2968                                         atomic_inc(&from->tmp_ref);
2969                                         target_thread = from;
2970                                         spin_unlock(&tmp->lock);
2971                                         break;
2972                                 }
2973                                 spin_unlock(&tmp->lock);
2974                                 tmp = tmp->from_parent;
2975                         }
2976                 }
2977                 binder_inner_proc_unlock(proc);
2978         }
2979         if (target_thread)
2980                 e->to_thread = target_thread->pid;
2981         e->to_proc = target_proc->pid;
2982
2983         /* TODO: reuse incoming transaction for reply */
2984         t = kzalloc(sizeof(*t), GFP_KERNEL);
2985         if (t == NULL) {
2986                 return_error = BR_FAILED_REPLY;
2987                 return_error_param = -ENOMEM;
2988                 return_error_line = __LINE__;
2989                 goto err_alloc_t_failed;
2990         }
2991         INIT_LIST_HEAD(&t->fd_fixups);
2992         binder_stats_created(BINDER_STAT_TRANSACTION);
2993         spin_lock_init(&t->lock);
2994
2995         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2996         if (tcomplete == NULL) {
2997                 return_error = BR_FAILED_REPLY;
2998                 return_error_param = -ENOMEM;
2999                 return_error_line = __LINE__;
3000                 goto err_alloc_tcomplete_failed;
3001         }
3002         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3003
3004         t->debug_id = t_debug_id;
3005
3006         if (reply)
3007                 binder_debug(BINDER_DEBUG_TRANSACTION,
3008                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3009                              proc->pid, thread->pid, t->debug_id,
3010                              target_proc->pid, target_thread->pid,
3011                              (u64)tr->data.ptr.buffer,
3012                              (u64)tr->data.ptr.offsets,
3013                              (u64)tr->data_size, (u64)tr->offsets_size,
3014                              (u64)extra_buffers_size);
3015         else
3016                 binder_debug(BINDER_DEBUG_TRANSACTION,
3017                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3018                              proc->pid, thread->pid, t->debug_id,
3019                              target_proc->pid, target_node->debug_id,
3020                              (u64)tr->data.ptr.buffer,
3021                              (u64)tr->data.ptr.offsets,
3022                              (u64)tr->data_size, (u64)tr->offsets_size,
3023                              (u64)extra_buffers_size);
3024
3025         if (!reply && !(tr->flags & TF_ONE_WAY))
3026                 t->from = thread;
3027         else
3028                 t->from = NULL;
3029         t->sender_euid = task_euid(proc->tsk);
3030         t->to_proc = target_proc;
3031         t->to_thread = target_thread;
3032         t->code = tr->code;
3033         t->flags = tr->flags;
3034         t->priority = task_nice(current);
3035
3036         trace_binder_transaction(reply, t, target_node);
3037
3038         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3039                 tr->offsets_size, extra_buffers_size,
3040                 !reply && (t->flags & TF_ONE_WAY));
3041         if (IS_ERR(t->buffer)) {
3042                 /*
3043                  * -ESRCH indicates VMA cleared. The target is dying.
3044                  */
3045                 return_error_param = PTR_ERR(t->buffer);
3046                 return_error = return_error_param == -ESRCH ?
3047                         BR_DEAD_REPLY : BR_FAILED_REPLY;
3048                 return_error_line = __LINE__;
3049                 t->buffer = NULL;
3050                 goto err_binder_alloc_buf_failed;
3051         }
3052         t->buffer->debug_id = t->debug_id;
3053         t->buffer->transaction = t;
3054         t->buffer->target_node = target_node;
3055         trace_binder_transaction_alloc_buf(t->buffer);
3056         off_start = (binder_size_t *)(t->buffer->data +
3057                                       ALIGN(tr->data_size, sizeof(void *)));
3058         offp = off_start;
3059
3060         if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3061                            tr->data.ptr.buffer, tr->data_size)) {
3062                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3063                                 proc->pid, thread->pid);
3064                 return_error = BR_FAILED_REPLY;
3065                 return_error_param = -EFAULT;
3066                 return_error_line = __LINE__;
3067                 goto err_copy_data_failed;
3068         }
3069         if (copy_from_user(offp, (const void __user *)(uintptr_t)
3070                            tr->data.ptr.offsets, tr->offsets_size)) {
3071                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3072                                 proc->pid, thread->pid);
3073                 return_error = BR_FAILED_REPLY;
3074                 return_error_param = -EFAULT;
3075                 return_error_line = __LINE__;
3076                 goto err_copy_data_failed;
3077         }
3078         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3079                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3080                                 proc->pid, thread->pid, (u64)tr->offsets_size);
3081                 return_error = BR_FAILED_REPLY;
3082                 return_error_param = -EINVAL;
3083                 return_error_line = __LINE__;
3084                 goto err_bad_offset;
3085         }
3086         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3087                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3088                                   proc->pid, thread->pid,
3089                                   (u64)extra_buffers_size);
3090                 return_error = BR_FAILED_REPLY;
3091                 return_error_param = -EINVAL;
3092                 return_error_line = __LINE__;
3093                 goto err_bad_offset;
3094         }
3095         off_end = (void *)off_start + tr->offsets_size;
3096         sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3097         sg_buf_end = sg_bufp + extra_buffers_size;
3098         off_min = 0;
3099         for (; offp < off_end; offp++) {
3100                 struct binder_object_header *hdr;
3101                 size_t object_size = binder_validate_object(t->buffer, *offp);
3102
3103                 if (object_size == 0 || *offp < off_min) {
3104                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3105                                           proc->pid, thread->pid, (u64)*offp,
3106                                           (u64)off_min,
3107                                           (u64)t->buffer->data_size);
3108                         return_error = BR_FAILED_REPLY;
3109                         return_error_param = -EINVAL;
3110                         return_error_line = __LINE__;
3111                         goto err_bad_offset;
3112                 }
3113
3114                 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3115                 off_min = *offp + object_size;
3116                 switch (hdr->type) {
3117                 case BINDER_TYPE_BINDER:
3118                 case BINDER_TYPE_WEAK_BINDER: {
3119                         struct flat_binder_object *fp;
3120
3121                         fp = to_flat_binder_object(hdr);
3122                         ret = binder_translate_binder(fp, t, thread);
3123                         if (ret < 0) {
3124                                 return_error = BR_FAILED_REPLY;
3125                                 return_error_param = ret;
3126                                 return_error_line = __LINE__;
3127                                 goto err_translate_failed;
3128                         }
3129                 } break;
3130                 case BINDER_TYPE_HANDLE:
3131                 case BINDER_TYPE_WEAK_HANDLE: {
3132                         struct flat_binder_object *fp;
3133
3134                         fp = to_flat_binder_object(hdr);
3135                         ret = binder_translate_handle(fp, t, thread);
3136                         if (ret < 0) {
3137                                 return_error = BR_FAILED_REPLY;
3138                                 return_error_param = ret;
3139                                 return_error_line = __LINE__;
3140                                 goto err_translate_failed;
3141                         }
3142                 } break;
3143
3144                 case BINDER_TYPE_FD: {
3145                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
3146                         int ret = binder_translate_fd(&fp->fd, t, thread,
3147                                                       in_reply_to);
3148
3149                         if (ret < 0) {
3150                                 return_error = BR_FAILED_REPLY;
3151                                 return_error_param = ret;
3152                                 return_error_line = __LINE__;
3153                                 goto err_translate_failed;
3154                         }
3155                         fp->pad_binder = 0;
3156                 } break;
3157                 case BINDER_TYPE_FDA: {
3158                         struct binder_fd_array_object *fda =
3159                                 to_binder_fd_array_object(hdr);
3160                         struct binder_buffer_object *parent =
3161                                 binder_validate_ptr(t->buffer, fda->parent,
3162                                                     off_start,
3163                                                     offp - off_start);
3164                         if (!parent) {
3165                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3166                                                   proc->pid, thread->pid);
3167                                 return_error = BR_FAILED_REPLY;
3168                                 return_error_param = -EINVAL;
3169                                 return_error_line = __LINE__;
3170                                 goto err_bad_parent;
3171                         }
3172                         if (!binder_validate_fixup(t->buffer, off_start,
3173                                                    parent, fda->parent_offset,
3174                                                    last_fixup_obj,
3175                                                    last_fixup_min_off)) {
3176                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3177                                                   proc->pid, thread->pid);
3178                                 return_error = BR_FAILED_REPLY;
3179                                 return_error_param = -EINVAL;
3180                                 return_error_line = __LINE__;
3181                                 goto err_bad_parent;
3182                         }
3183                         ret = binder_translate_fd_array(fda, parent, t, thread,
3184                                                         in_reply_to);
3185                         if (ret < 0) {
3186                                 return_error = BR_FAILED_REPLY;
3187                                 return_error_param = ret;
3188                                 return_error_line = __LINE__;
3189                                 goto err_translate_failed;
3190                         }
3191                         last_fixup_obj = parent;
3192                         last_fixup_min_off =
3193                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3194                 } break;
3195                 case BINDER_TYPE_PTR: {
3196                         struct binder_buffer_object *bp =
3197                                 to_binder_buffer_object(hdr);
3198                         size_t buf_left = sg_buf_end - sg_bufp;
3199
3200                         if (bp->length > buf_left) {
3201                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3202                                                   proc->pid, thread->pid);
3203                                 return_error = BR_FAILED_REPLY;
3204                                 return_error_param = -EINVAL;
3205                                 return_error_line = __LINE__;
3206                                 goto err_bad_offset;
3207                         }
3208                         if (copy_from_user(sg_bufp,
3209                                            (const void __user *)(uintptr_t)
3210                                            bp->buffer, bp->length)) {
3211                                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3212                                                   proc->pid, thread->pid);
3213                                 return_error_param = -EFAULT;
3214                                 return_error = BR_FAILED_REPLY;
3215                                 return_error_line = __LINE__;
3216                                 goto err_copy_data_failed;
3217                         }
3218                         /* Fixup buffer pointer to target proc address space */
3219                         bp->buffer = (uintptr_t)sg_bufp +
3220                                 binder_alloc_get_user_buffer_offset(
3221                                                 &target_proc->alloc);
3222                         sg_bufp += ALIGN(bp->length, sizeof(u64));
3223
3224                         ret = binder_fixup_parent(t, thread, bp, off_start,
3225                                                   offp - off_start,
3226                                                   last_fixup_obj,
3227                                                   last_fixup_min_off);
3228                         if (ret < 0) {
3229                                 return_error = BR_FAILED_REPLY;
3230                                 return_error_param = ret;
3231                                 return_error_line = __LINE__;
3232                                 goto err_translate_failed;
3233                         }
3234                         last_fixup_obj = bp;
3235                         last_fixup_min_off = 0;
3236                 } break;
3237                 default:
3238                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3239                                 proc->pid, thread->pid, hdr->type);
3240                         return_error = BR_FAILED_REPLY;
3241                         return_error_param = -EINVAL;
3242                         return_error_line = __LINE__;
3243                         goto err_bad_object_type;
3244                 }
3245         }
3246         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3247         t->work.type = BINDER_WORK_TRANSACTION;
3248
3249         if (reply) {
3250                 binder_enqueue_thread_work(thread, tcomplete);
3251                 binder_inner_proc_lock(target_proc);
3252                 if (target_thread->is_dead) {
3253                         binder_inner_proc_unlock(target_proc);
3254                         goto err_dead_proc_or_thread;
3255                 }
3256                 BUG_ON(t->buffer->async_transaction != 0);
3257                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3258                 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3259                 binder_inner_proc_unlock(target_proc);
3260                 wake_up_interruptible_sync(&target_thread->wait);
3261                 binder_free_transaction(in_reply_to);
3262         } else if (!(t->flags & TF_ONE_WAY)) {
3263                 BUG_ON(t->buffer->async_transaction != 0);
3264                 binder_inner_proc_lock(proc);
3265                 /*
3266                  * Defer the TRANSACTION_COMPLETE, so we don't return to
3267                  * userspace immediately; this allows the target process to
3268                  * immediately start processing this transaction, reducing
3269                  * latency. We will then return the TRANSACTION_COMPLETE when
3270                  * the target replies (or there is an error).
3271                  */
3272                 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3273                 t->need_reply = 1;
3274                 t->from_parent = thread->transaction_stack;
3275                 thread->transaction_stack = t;
3276                 binder_inner_proc_unlock(proc);
3277                 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3278                         binder_inner_proc_lock(proc);
3279                         binder_pop_transaction_ilocked(thread, t);
3280                         binder_inner_proc_unlock(proc);
3281                         goto err_dead_proc_or_thread;
3282                 }
3283         } else {
3284                 BUG_ON(target_node == NULL);
3285                 BUG_ON(t->buffer->async_transaction != 1);
3286                 binder_enqueue_thread_work(thread, tcomplete);
3287                 if (!binder_proc_transaction(t, target_proc, NULL))
3288                         goto err_dead_proc_or_thread;
3289         }
3290         if (target_thread)
3291                 binder_thread_dec_tmpref(target_thread);
3292         binder_proc_dec_tmpref(target_proc);
3293         if (target_node)
3294                 binder_dec_node_tmpref(target_node);
3295         /*
3296          * write barrier to synchronize with initialization
3297          * of log entry
3298          */
3299         smp_wmb();
3300         WRITE_ONCE(e->debug_id_done, t_debug_id);
3301         return;
3302
3303 err_dead_proc_or_thread:
3304         return_error = BR_DEAD_REPLY;
3305         return_error_line = __LINE__;
3306         binder_dequeue_work(proc, tcomplete);
3307 err_translate_failed:
3308 err_bad_object_type:
3309 err_bad_offset:
3310 err_bad_parent:
3311 err_copy_data_failed:
3312         binder_free_txn_fixups(t);
3313         trace_binder_transaction_failed_buffer_release(t->buffer);
3314         binder_transaction_buffer_release(target_proc, t->buffer, offp);
3315         if (target_node)
3316                 binder_dec_node_tmpref(target_node);
3317         target_node = NULL;
3318         t->buffer->transaction = NULL;
3319         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3320 err_binder_alloc_buf_failed:
3321         kfree(tcomplete);
3322         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3323 err_alloc_tcomplete_failed:
3324         kfree(t);
3325         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3326 err_alloc_t_failed:
3327 err_bad_todo_list:
3328 err_bad_call_stack:
3329 err_empty_call_stack:
3330 err_dead_binder:
3331 err_invalid_target_handle:
3332         if (target_thread)
3333                 binder_thread_dec_tmpref(target_thread);
3334         if (target_proc)
3335                 binder_proc_dec_tmpref(target_proc);
3336         if (target_node) {
3337                 binder_dec_node(target_node, 1, 0);
3338                 binder_dec_node_tmpref(target_node);
3339         }
3340
3341         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3342                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3343                      proc->pid, thread->pid, return_error, return_error_param,
3344                      (u64)tr->data_size, (u64)tr->offsets_size,
3345                      return_error_line);
3346
3347         {
3348                 struct binder_transaction_log_entry *fe;
3349
3350                 e->return_error = return_error;
3351                 e->return_error_param = return_error_param;
3352                 e->return_error_line = return_error_line;
3353                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3354                 *fe = *e;
3355                 /*
3356                  * write barrier to synchronize with initialization
3357                  * of log entry
3358                  */
3359                 smp_wmb();
3360                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3361                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3362         }
3363
3364         BUG_ON(thread->return_error.cmd != BR_OK);
3365         if (in_reply_to) {
3366                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3367                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3368                 binder_send_failed_reply(in_reply_to, return_error);
3369         } else {
3370                 thread->return_error.cmd = return_error;
3371                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3372         }
3373 }
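/*
 * The err_* labels above are ordered so that each failure point falls
 * through the cleanup of everything set up before it: translation and copy
 * errors release the already-translated objects and the target buffer,
 * allocation failures free whichever of t and tcomplete exists, and every
 * path finally drops the temporary references on the target thread, proc
 * and node, logs the failure, and reports the error back to the sender.
 */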
3374
3375 /**
3376  * binder_free_buf() - free the specified buffer
3377  * @proc:       binder proc that owns buffer
3378  * @buffer:     buffer to be freed
3379  *
3380  * If the buffer is for an async transaction, enqueue the next async
3381  * transaction from the node.
3382  *
3383  * Clean up the buffer and free it.
3384  */
3385 static void
3386 binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
3387 {
3388         if (buffer->transaction) {
3389                 buffer->transaction->buffer = NULL;
3390                 buffer->transaction = NULL;
3391         }
3392         if (buffer->async_transaction && buffer->target_node) {
3393                 struct binder_node *buf_node;
3394                 struct binder_work *w;
3395
3396                 buf_node = buffer->target_node;
3397                 binder_node_inner_lock(buf_node);
3398                 BUG_ON(!buf_node->has_async_transaction);
3399                 BUG_ON(buf_node->proc != proc);
3400                 w = binder_dequeue_work_head_ilocked(
3401                                 &buf_node->async_todo);
3402                 if (!w) {
3403                         buf_node->has_async_transaction = false;
3404                 } else {
3405                         binder_enqueue_work_ilocked(
3406                                         w, &proc->todo);
3407                         binder_wakeup_proc_ilocked(proc);
3408                 }
3409                 binder_node_inner_unlock(buf_node);
3410         }
3411         trace_binder_transaction_buffer_release(buffer);
3412         binder_transaction_buffer_release(proc, buffer, NULL);
3413         binder_alloc_free_buf(&proc->alloc, buffer);
3414 }
3415
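/*
 * Illustrative sketch (user-space side, not part of this driver): the BC_*
 * command stream parsed by binder_thread_write() below is supplied through
 * the BINDER_WRITE_READ ioctl on an open binder descriptor, roughly:
 *
 *     int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *     struct binder_write_read bwr = { 0 };
 *     uint32_t cmd = BC_ENTER_LOOPER;
 *
 *     bwr.write_buffer = (binder_uintptr_t)&cmd;
 *     bwr.write_size = sizeof(cmd);
 *     ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * Each command is a 32-bit BC_* code optionally followed by a
 * command-specific payload (e.g. a struct binder_transaction_data for
 * BC_TRANSACTION); *consumed reports how far the parser advanced.
 */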
3416 static int binder_thread_write(struct binder_proc *proc,
3417                         struct binder_thread *thread,
3418                         binder_uintptr_t binder_buffer, size_t size,
3419                         binder_size_t *consumed)
3420 {
3421         uint32_t cmd;
3422         struct binder_context *context = proc->context;
3423         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3424         void __user *ptr = buffer + *consumed;
3425         void __user *end = buffer + size;
3426
3427         while (ptr < end && thread->return_error.cmd == BR_OK) {
3428                 int ret;
3429
3430                 if (get_user(cmd, (uint32_t __user *)ptr))
3431                         return -EFAULT;
3432                 ptr += sizeof(uint32_t);
3433                 trace_binder_command(cmd);
3434                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3435                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3436                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3437                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3438                 }
3439                 switch (cmd) {
3440                 case BC_INCREFS:
3441                 case BC_ACQUIRE:
3442                 case BC_RELEASE:
3443                 case BC_DECREFS: {
3444                         uint32_t target;
3445                         const char *debug_string;
3446                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3447                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3448                         struct binder_ref_data rdata;
3449
3450                         if (get_user(target, (uint32_t __user *)ptr))
3451                                 return -EFAULT;
3452
3453                         ptr += sizeof(uint32_t);
3454                         ret = -1;
3455                         if (increment && !target) {
3456                                 struct binder_node *ctx_mgr_node;
3457                                 mutex_lock(&context->context_mgr_node_lock);
3458                                 ctx_mgr_node = context->binder_context_mgr_node;
3459                                 if (ctx_mgr_node)
3460                                         ret = binder_inc_ref_for_node(
3461                                                         proc, ctx_mgr_node,
3462                                                         strong, NULL, &rdata);
3463                                 mutex_unlock(&context->context_mgr_node_lock);
3464                         }
3465                         if (ret)
3466                                 ret = binder_update_ref_for_handle(
3467                                                 proc, target, increment, strong,
3468                                                 &rdata);
3469                         if (!ret && rdata.desc != target) {
3470                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3471                                         proc->pid, thread->pid,
3472                                         target, rdata.desc);
3473                         }
3474                         switch (cmd) {
3475                         case BC_INCREFS:
3476                                 debug_string = "IncRefs";
3477                                 break;
3478                         case BC_ACQUIRE:
3479                                 debug_string = "Acquire";
3480                                 break;
3481                         case BC_RELEASE:
3482                                 debug_string = "Release";
3483                                 break;
3484                         case BC_DECREFS:
3485                         default:
3486                                 debug_string = "DecRefs";
3487                                 break;
3488                         }
3489                         if (ret) {
3490                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3491                                         proc->pid, thread->pid, debug_string,
3492                                         strong, target, ret);
3493                                 break;
3494                         }
3495                         binder_debug(BINDER_DEBUG_USER_REFS,
3496                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3497                                      proc->pid, thread->pid, debug_string,
3498                                      rdata.debug_id, rdata.desc, rdata.strong,
3499                                      rdata.weak);
3500                         break;
3501                 }
3502                 case BC_INCREFS_DONE:
3503                 case BC_ACQUIRE_DONE: {
3504                         binder_uintptr_t node_ptr;
3505                         binder_uintptr_t cookie;
3506                         struct binder_node *node;
3507                         bool free_node;
3508
3509                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3510                                 return -EFAULT;
3511                         ptr += sizeof(binder_uintptr_t);
3512                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3513                                 return -EFAULT;
3514                         ptr += sizeof(binder_uintptr_t);
3515                         node = binder_get_node(proc, node_ptr);
3516                         if (node == NULL) {
3517                                 binder_user_error("%d:%d %s u%016llx no match\n",
3518                                         proc->pid, thread->pid,
3519                                         cmd == BC_INCREFS_DONE ?
3520                                         "BC_INCREFS_DONE" :
3521                                         "BC_ACQUIRE_DONE",
3522                                         (u64)node_ptr);
3523                                 break;
3524                         }
3525                         if (cookie != node->cookie) {
3526                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3527                                         proc->pid, thread->pid,
3528                                         cmd == BC_INCREFS_DONE ?
3529                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3530                                         (u64)node_ptr, node->debug_id,
3531                                         (u64)cookie, (u64)node->cookie);
3532                                 binder_put_node(node);
3533                                 break;
3534                         }
3535                         binder_node_inner_lock(node);
3536                         if (cmd == BC_ACQUIRE_DONE) {
3537                                 if (node->pending_strong_ref == 0) {
3538                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3539                                                 proc->pid, thread->pid,
3540                                                 node->debug_id);
3541                                         binder_node_inner_unlock(node);
3542                                         binder_put_node(node);
3543                                         break;
3544                                 }
3545                                 node->pending_strong_ref = 0;
3546                         } else {
3547                                 if (node->pending_weak_ref == 0) {
3548                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3549                                                 proc->pid, thread->pid,
3550                                                 node->debug_id);
3551                                         binder_node_inner_unlock(node);
3552                                         binder_put_node(node);
3553                                         break;
3554                                 }
3555                                 node->pending_weak_ref = 0;
3556                         }
3557                         free_node = binder_dec_node_nilocked(node,
3558                                         cmd == BC_ACQUIRE_DONE, 0);
3559                         WARN_ON(free_node);
3560                         binder_debug(BINDER_DEBUG_USER_REFS,
3561                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3562                                      proc->pid, thread->pid,
3563                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3564                                      node->debug_id, node->local_strong_refs,
3565                                      node->local_weak_refs, node->tmp_refs);
3566                         binder_node_inner_unlock(node);
3567                         binder_put_node(node);
3568                         break;
3569                 }
3570                 case BC_ATTEMPT_ACQUIRE:
3571                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3572                         return -EINVAL;
3573                 case BC_ACQUIRE_RESULT:
3574                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3575                         return -EINVAL;
3576
3577                 case BC_FREE_BUFFER: {
3578                         binder_uintptr_t data_ptr;
3579                         struct binder_buffer *buffer;
3580
3581                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3582                                 return -EFAULT;
3583                         ptr += sizeof(binder_uintptr_t);
3584
3585                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3586                                                               data_ptr);
3587                         if (IS_ERR_OR_NULL(buffer)) {
3588                                 if (PTR_ERR(buffer) == -EPERM) {
3589                                         binder_user_error(
3590                                                 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3591                                                 proc->pid, thread->pid,
3592                                                 (u64)data_ptr);
3593                                 } else {
3594                                         binder_user_error(
3595                                                 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3596                                                 proc->pid, thread->pid,
3597                                                 (u64)data_ptr);
3598                                 }
3599                                 break;
3600                         }
3601                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3602                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3603                                      proc->pid, thread->pid, (u64)data_ptr,
3604                                      buffer->debug_id,
3605                                      buffer->transaction ? "active" : "finished");
3606                         binder_free_buf(proc, buffer);
3607                         break;
3608                 }
3609
3610                 case BC_TRANSACTION_SG:
3611                 case BC_REPLY_SG: {
3612                         struct binder_transaction_data_sg tr;
3613
3614                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3615                                 return -EFAULT;
3616                         ptr += sizeof(tr);
3617                         binder_transaction(proc, thread, &tr.transaction_data,
3618                                            cmd == BC_REPLY_SG, tr.buffers_size);
3619                         break;
3620                 }
3621                 case BC_TRANSACTION:
3622                 case BC_REPLY: {
3623                         struct binder_transaction_data tr;
3624
3625                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3626                                 return -EFAULT;
3627                         ptr += sizeof(tr);
3628                         binder_transaction(proc, thread, &tr,
3629                                            cmd == BC_REPLY, 0);
3630                         break;
3631                 }
3632
3633                 case BC_REGISTER_LOOPER:
3634                         binder_debug(BINDER_DEBUG_THREADS,
3635                                      "%d:%d BC_REGISTER_LOOPER\n",
3636                                      proc->pid, thread->pid);
3637                         binder_inner_proc_lock(proc);
3638                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3639                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3640                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3641                                         proc->pid, thread->pid);
3642                         } else if (proc->requested_threads == 0) {
3643                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3644                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3645                                         proc->pid, thread->pid);
3646                         } else {
3647                                 proc->requested_threads--;
3648                                 proc->requested_threads_started++;
3649                         }
3650                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3651                         binder_inner_proc_unlock(proc);
3652                         break;
3653                 case BC_ENTER_LOOPER:
3654                         binder_debug(BINDER_DEBUG_THREADS,
3655                                      "%d:%d BC_ENTER_LOOPER\n",
3656                                      proc->pid, thread->pid);
3657                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3658                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3659                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3660                                         proc->pid, thread->pid);
3661                         }
3662                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3663                         break;
3664                 case BC_EXIT_LOOPER:
3665                         binder_debug(BINDER_DEBUG_THREADS,
3666                                      "%d:%d BC_EXIT_LOOPER\n",
3667                                      proc->pid, thread->pid);
3668                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3669                         break;
3670
3671                 case BC_REQUEST_DEATH_NOTIFICATION:
3672                 case BC_CLEAR_DEATH_NOTIFICATION: {
3673                         uint32_t target;
3674                         binder_uintptr_t cookie;
3675                         struct binder_ref *ref;
3676                         struct binder_ref_death *death = NULL;
3677
3678                         if (get_user(target, (uint32_t __user *)ptr))
3679                                 return -EFAULT;
3680                         ptr += sizeof(uint32_t);
3681                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3682                                 return -EFAULT;
3683                         ptr += sizeof(binder_uintptr_t);
3684                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3685                                 /*
3686                                  * Allocate memory for death notification
3687                                  * before taking lock
3688                                  */
3689                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3690                                 if (death == NULL) {
3691                                         WARN_ON(thread->return_error.cmd !=
3692                                                 BR_OK);
3693                                         thread->return_error.cmd = BR_ERROR;
3694                                         binder_enqueue_thread_work(
3695                                                 thread,
3696                                                 &thread->return_error.work);
3697                                         binder_debug(
3698                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3699                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3700                                                 proc->pid, thread->pid);
3701                                         break;
3702                                 }
3703                         }
3704                         binder_proc_lock(proc);
3705                         ref = binder_get_ref_olocked(proc, target, false);
3706                         if (ref == NULL) {
3707                                 binder_user_error("%d:%d %s invalid ref %d\n",
3708                                         proc->pid, thread->pid,
3709                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3710                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3711                                         "BC_CLEAR_DEATH_NOTIFICATION",
3712                                         target);
3713                                 binder_proc_unlock(proc);
3714                                 kfree(death);
3715                                 break;
3716                         }
3717
3718                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3719                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3720                                      proc->pid, thread->pid,
3721                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3722                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3723                                      "BC_CLEAR_DEATH_NOTIFICATION",
3724                                      (u64)cookie, ref->data.debug_id,
3725                                      ref->data.desc, ref->data.strong,
3726                                      ref->data.weak, ref->node->debug_id);
3727
3728                         binder_node_lock(ref->node);
3729                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3730                                 if (ref->death) {
3731                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3732                                                 proc->pid, thread->pid);
3733                                         binder_node_unlock(ref->node);
3734                                         binder_proc_unlock(proc);
3735                                         kfree(death);
3736                                         break;
3737                                 }
3738                                 binder_stats_created(BINDER_STAT_DEATH);
3739                                 INIT_LIST_HEAD(&death->work.entry);
3740                                 death->cookie = cookie;
3741                                 ref->death = death;
3742                                 if (ref->node->proc == NULL) {
3743                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3744
3745                                         binder_inner_proc_lock(proc);
3746                                         binder_enqueue_work_ilocked(
3747                                                 &ref->death->work, &proc->todo);
3748                                         binder_wakeup_proc_ilocked(proc);
3749                                         binder_inner_proc_unlock(proc);
3750                                 }
3751                         } else {
3752                                 if (ref->death == NULL) {
3753                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3754                                                 proc->pid, thread->pid);
3755                                         binder_node_unlock(ref->node);
3756                                         binder_proc_unlock(proc);
3757                                         break;
3758                                 }
3759                                 death = ref->death;
3760                                 if (death->cookie != cookie) {
3761                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3762                                                 proc->pid, thread->pid,
3763                                                 (u64)death->cookie,
3764                                                 (u64)cookie);
3765                                         binder_node_unlock(ref->node);
3766                                         binder_proc_unlock(proc);
3767                                         break;
3768                                 }
3769                                 ref->death = NULL;
3770                                 binder_inner_proc_lock(proc);
3771                                 if (list_empty(&death->work.entry)) {
3772                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3773                                         if (thread->looper &
3774                                             (BINDER_LOOPER_STATE_REGISTERED |
3775                                              BINDER_LOOPER_STATE_ENTERED))
3776                                                 binder_enqueue_thread_work_ilocked(
3777                                                                 thread,
3778                                                                 &death->work);
3779                                         else {
3780                                                 binder_enqueue_work_ilocked(
3781                                                                 &death->work,
3782                                                                 &proc->todo);
3783                                                 binder_wakeup_proc_ilocked(
3784                                                                 proc);
3785                                         }
3786                                 } else {
3787                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3788                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3789                                 }
3790                                 binder_inner_proc_unlock(proc);
3791                         }
3792                         binder_node_unlock(ref->node);
3793                         binder_proc_unlock(proc);
3794                 } break;
3795                 case BC_DEAD_BINDER_DONE: {
3796                         struct binder_work *w;
3797                         binder_uintptr_t cookie;
3798                         struct binder_ref_death *death = NULL;
3799
3800                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3801                                 return -EFAULT;
3802
3803                         ptr += sizeof(cookie);
3804                         binder_inner_proc_lock(proc);
3805                         list_for_each_entry(w, &proc->delivered_death,
3806                                             entry) {
3807                                 struct binder_ref_death *tmp_death =
3808                                         container_of(w,
3809                                                      struct binder_ref_death,
3810                                                      work);
3811
3812                                 if (tmp_death->cookie == cookie) {
3813                                         death = tmp_death;
3814                                         break;
3815                                 }
3816                         }
3817                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3818                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3819                                      proc->pid, thread->pid, (u64)cookie,
3820                                      death);
3821                         if (death == NULL) {
3822                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3823                                         proc->pid, thread->pid, (u64)cookie);
3824                                 binder_inner_proc_unlock(proc);
3825                                 break;
3826                         }
3827                         binder_dequeue_work_ilocked(&death->work);
3828                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3829                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3830                                 if (thread->looper &
3831                                         (BINDER_LOOPER_STATE_REGISTERED |
3832                                          BINDER_LOOPER_STATE_ENTERED))
3833                                         binder_enqueue_thread_work_ilocked(
3834                                                 thread, &death->work);
3835                                 else {
3836                                         binder_enqueue_work_ilocked(
3837                                                         &death->work,
3838                                                         &proc->todo);
3839                                         binder_wakeup_proc_ilocked(proc);
3840                                 }
3841                         }
3842                         binder_inner_proc_unlock(proc);
3843                 } break;
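                /*
                 * Illustrative user-space view of the death-notification
                 * handshake handled by the cases above (a hedged sketch, not
                 * part of this driver; it assumes the binder_handle_cookie
                 * type from uapi/linux/android/binder.h):
                 *
                 *   struct { uint32_t cmd; struct binder_handle_cookie hc; }
                 *           __attribute__((packed)) req = {
                 *           .cmd = BC_REQUEST_DEATH_NOTIFICATION,
                 *           .hc  = { .handle = handle, .cookie = my_cookie },
                 *   };
                 *   queue req via the BINDER_WRITE_READ write buffer;
                 *
                 *   later, when BR_DEAD_BINDER arrives carrying my_cookie,
                 *   acknowledge it so delivered_death can be cleaned up:
                 *
                 *   struct { uint32_t cmd; binder_uintptr_t cookie; }
                 *           __attribute__((packed)) ack = {
                 *           .cmd = BC_DEAD_BINDER_DONE, .cookie = my_cookie,
                 *   };
                 */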
3844
3845                 default:
3846                         pr_err("%d:%d unknown command %d\n",
3847                                proc->pid, thread->pid, cmd);
3848                         return -EINVAL;
3849                 }
3850                 *consumed = ptr - buffer;
3851         }
3852         return 0;
3853 }
3854
3855 static void binder_stat_br(struct binder_proc *proc,
3856                            struct binder_thread *thread, uint32_t cmd)
3857 {
3858         trace_binder_return(cmd);
3859         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3860                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3861                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3862                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3863         }
3864 }
3865
3866 static int binder_put_node_cmd(struct binder_proc *proc,
3867                                struct binder_thread *thread,
3868                                void __user **ptrp,
3869                                binder_uintptr_t node_ptr,
3870                                binder_uintptr_t node_cookie,
3871                                int node_debug_id,
3872                                uint32_t cmd, const char *cmd_name)
3873 {
3874         void __user *ptr = *ptrp;
3875
3876         if (put_user(cmd, (uint32_t __user *)ptr))
3877                 return -EFAULT;
3878         ptr += sizeof(uint32_t);
3879
3880         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3881                 return -EFAULT;
3882         ptr += sizeof(binder_uintptr_t);
3883
3884         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3885                 return -EFAULT;
3886         ptr += sizeof(binder_uintptr_t);
3887
3888         binder_stat_br(proc, thread, cmd);
3889         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3890                      proc->pid, thread->pid, cmd_name, node_debug_id,
3891                      (u64)node_ptr, (u64)node_cookie);
3892
3893         *ptrp = ptr;
3894         return 0;
3895 }
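/*
 * Each record emitted by binder_put_node_cmd() is a 32-bit BR_* code
 * followed by the node's ptr and cookie, i.e. it matches
 * struct binder_ptr_cookie from the UAPI header. A hedged sketch of how a
 * user-space reader could consume one record (names such as
 * adjust_local_refs() are illustrative, not part of this driver):
 *
 *   uint32_t cmd;
 *   struct binder_ptr_cookie pc;
 *
 *   memcpy(&cmd, rptr, sizeof(cmd));   rptr += sizeof(cmd);
 *   memcpy(&pc, rptr, sizeof(pc));     rptr += sizeof(pc);
 *   if (cmd == BR_INCREFS || cmd == BR_ACQUIRE ||
 *       cmd == BR_RELEASE || cmd == BR_DECREFS)
 *           adjust_local_refs(pc.ptr, pc.cookie, cmd);
 */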
3896
3897 static int binder_wait_for_work(struct binder_thread *thread,
3898                                 bool do_proc_work)
3899 {
3900         DEFINE_WAIT(wait);
3901         struct binder_proc *proc = thread->proc;
3902         int ret = 0;
3903
3904         freezer_do_not_count();
3905         binder_inner_proc_lock(proc);
3906         for (;;) {
3907                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3908                 if (binder_has_work_ilocked(thread, do_proc_work))
3909                         break;
3910                 if (do_proc_work)
3911                         list_add(&thread->waiting_thread_node,
3912                                  &proc->waiting_threads);
3913                 binder_inner_proc_unlock(proc);
3914                 schedule();
3915                 binder_inner_proc_lock(proc);
3916                 list_del_init(&thread->waiting_thread_node);
3917                 if (signal_pending(current)) {
3918                         ret = -ERESTARTSYS;
3919                         break;
3920                 }
3921         }
3922         finish_wait(&thread->wait, &wait);
3923         binder_inner_proc_unlock(proc);
3924         freezer_count();
3925
3926         return ret;
3927 }
3928
3929 /**
3930  * binder_apply_fd_fixups() - finish fd translation
3931  * @t:  binder transaction with list of fd fixups
3932  *
3933  * Now that we are in the context of the transaction target
3934  * process, we can allocate and install fds. Process the
3935  * list of fds to translate and fixup the buffer with the
3936  * new fds.
3937  *
3938  * If we fail to allocate an fd, then free the resources by
3939  * fput'ing files that have not been processed and closing (via
3940  * binder_deferred_fd_close()) any fds that have already been allocated.
3941  */
3942 static int binder_apply_fd_fixups(struct binder_transaction *t)
3943 {
3944         struct binder_txn_fd_fixup *fixup, *tmp;
3945         int ret = 0;
3946
3947         list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3948                 int fd = get_unused_fd_flags(O_CLOEXEC);
3949                 u32 *fdp;
3950
3951                 if (fd < 0) {
3952                         binder_debug(BINDER_DEBUG_TRANSACTION,
3953                                      "failed fd fixup txn %d fd %d\n",
3954                                      t->debug_id, fd);
3955                         ret = -ENOMEM;
3956                         break;
3957                 }
3958                 binder_debug(BINDER_DEBUG_TRANSACTION,
3959                              "fd fixup txn %d fd %d\n",
3960                              t->debug_id, fd);
3961                 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3962                 fd_install(fd, fixup->file);
3963                 fixup->file = NULL;
3964                 fdp = (u32 *)(t->buffer->data + fixup->offset);
3965                 /*
3966                  * This store can cause problems for CPUs with a
3967                  * VIVT cache (eg ARMv5) since the cache cannot
3968                  * detect virtual aliases to the same physical cacheline.
3969                  * To support VIVT, this address and the user-space VA
3970                  * would both need to be flushed. Since this kernel
3971                  * VA is not constructed via page_to_virt(), we can't
3972                  * use flush_dcache_page() on it, so we'd have to use
3973                  * an internal function. If devices with VIVT ever
3974                  * need to run Android, we'll either need to go back
3975                  * to patching the translated fd from the sender side
3976                  * (using the non-standard kernel functions), or rework
3977                  * how the kernel uses the buffer to use page_to_virt()
3978                  * addresses instead of allocating in our own vm area.
3979                  *
3980                  * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
3981                  */
3982                 *fdp = fd;
3983         }
3984         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3985                 if (fixup->file) {
3986                         fput(fixup->file);
3987                 } else if (ret) {
3988                         u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
3989
3990                         binder_deferred_fd_close(*fdp);
3991                 }
3992                 list_del(&fixup->fixup_entry);
3993                 kfree(fixup);
3994         }
3995
3996         return ret;
3997 }
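/*
 * After binder_apply_fd_fixups() succeeds, every BINDER_TYPE_FD object in
 * the delivered buffer holds a descriptor that is valid in the receiving
 * process. A hedged user-space sketch of picking one up (it assumes the
 * UAPI struct binder_fd_object and an object offset taken from
 * tr.data.ptr.offsets; not part of this driver):
 *
 *   struct binder_fd_object *fdo =
 *           (struct binder_fd_object *)(tr_buf + obj_off);
 *   if (fdo->hdr.type == BINDER_TYPE_FD)
 *           received_fd = fdo->fd;   received_fd is owned by the receiver
 */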
3998
3999 static int binder_thread_read(struct binder_proc *proc,
4000                               struct binder_thread *thread,
4001                               binder_uintptr_t binder_buffer, size_t size,
4002                               binder_size_t *consumed, int non_block)
4003 {
4004         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4005         void __user *ptr = buffer + *consumed;
4006         void __user *end = buffer + size;
4007
4008         int ret = 0;
4009         int wait_for_proc_work;
4010
4011         if (*consumed == 0) {
4012                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4013                         return -EFAULT;
4014                 ptr += sizeof(uint32_t);
4015         }
4016
4017 retry:
4018         binder_inner_proc_lock(proc);
4019         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4020         binder_inner_proc_unlock(proc);
4021
4022         thread->looper |= BINDER_LOOPER_STATE_WAITING;
4023
4024         trace_binder_wait_for_work(wait_for_proc_work,
4025                                    !!thread->transaction_stack,
4026                                    !binder_worklist_empty(proc, &thread->todo));
4027         if (wait_for_proc_work) {
4028                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4029                                         BINDER_LOOPER_STATE_ENTERED))) {
4030                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4031                                 proc->pid, thread->pid, thread->looper);
4032                         wait_event_interruptible(binder_user_error_wait,
4033                                                  binder_stop_on_user_error < 2);
4034                 }
4035                 binder_set_nice(proc->default_priority);
4036         }
4037
4038         if (non_block) {
4039                 if (!binder_has_work(thread, wait_for_proc_work))
4040                         ret = -EAGAIN;
4041         } else {
4042                 ret = binder_wait_for_work(thread, wait_for_proc_work);
4043         }
4044
4045         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4046
4047         if (ret)
4048                 return ret;
4049
4050         while (1) {
4051                 uint32_t cmd;
4052                 struct binder_transaction_data tr;
4053                 struct binder_work *w = NULL;
4054                 struct list_head *list = NULL;
4055                 struct binder_transaction *t = NULL;
4056                 struct binder_thread *t_from;
4057
4058                 binder_inner_proc_lock(proc);
4059                 if (!binder_worklist_empty_ilocked(&thread->todo))
4060                         list = &thread->todo;
4061                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4062                            wait_for_proc_work)
4063                         list = &proc->todo;
4064                 else {
4065                         binder_inner_proc_unlock(proc);
4066
4067                         /* no data added */
4068                         if (ptr - buffer == 4 && !thread->looper_need_return)
4069                                 goto retry;
4070                         break;
4071                 }
4072
4073                 if (end - ptr < sizeof(tr) + 4) {
4074                         binder_inner_proc_unlock(proc);
4075                         break;
4076                 }
4077                 w = binder_dequeue_work_head_ilocked(list);
4078                 if (binder_worklist_empty_ilocked(&thread->todo))
4079                         thread->process_todo = false;
4080
4081                 switch (w->type) {
4082                 case BINDER_WORK_TRANSACTION: {
4083                         binder_inner_proc_unlock(proc);
4084                         t = container_of(w, struct binder_transaction, work);
4085                 } break;
4086                 case BINDER_WORK_RETURN_ERROR: {
4087                         struct binder_error *e = container_of(
4088                                         w, struct binder_error, work);
4089
4090                         WARN_ON(e->cmd == BR_OK);
4091                         binder_inner_proc_unlock(proc);
4092                         if (put_user(e->cmd, (uint32_t __user *)ptr))
4093                                 return -EFAULT;
4094                         cmd = e->cmd;
4095                         e->cmd = BR_OK;
4096                         ptr += sizeof(uint32_t);
4097
4098                         binder_stat_br(proc, thread, cmd);
4099                 } break;
4100                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4101                         binder_inner_proc_unlock(proc);
4102                         cmd = BR_TRANSACTION_COMPLETE;
4103                         if (put_user(cmd, (uint32_t __user *)ptr))
4104                                 return -EFAULT;
4105                         ptr += sizeof(uint32_t);
4106
4107                         binder_stat_br(proc, thread, cmd);
4108                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4109                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
4110                                      proc->pid, thread->pid);
4111                         kfree(w);
4112                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4113                 } break;
4114                 case BINDER_WORK_NODE: {
4115                         struct binder_node *node = container_of(w, struct binder_node, work);
4116                         int strong, weak;
4117                         binder_uintptr_t node_ptr = node->ptr;
4118                         binder_uintptr_t node_cookie = node->cookie;
4119                         int node_debug_id = node->debug_id;
4120                         int has_weak_ref;
4121                         int has_strong_ref;
4122                         void __user *orig_ptr = ptr;
4123
4124                         BUG_ON(proc != node->proc);
4125                         strong = node->internal_strong_refs ||
4126                                         node->local_strong_refs;
4127                         weak = !hlist_empty(&node->refs) ||
4128                                         node->local_weak_refs ||
4129                                         node->tmp_refs || strong;
4130                         has_strong_ref = node->has_strong_ref;
4131                         has_weak_ref = node->has_weak_ref;
4132
4133                         if (weak && !has_weak_ref) {
4134                                 node->has_weak_ref = 1;
4135                                 node->pending_weak_ref = 1;
4136                                 node->local_weak_refs++;
4137                         }
4138                         if (strong && !has_strong_ref) {
4139                                 node->has_strong_ref = 1;
4140                                 node->pending_strong_ref = 1;
4141                                 node->local_strong_refs++;
4142                         }
4143                         if (!strong && has_strong_ref)
4144                                 node->has_strong_ref = 0;
4145                         if (!weak && has_weak_ref)
4146                                 node->has_weak_ref = 0;
4147                         if (!weak && !strong) {
4148                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4149                                              "%d:%d node %d u%016llx c%016llx deleted\n",
4150                                              proc->pid, thread->pid,
4151                                              node_debug_id,
4152                                              (u64)node_ptr,
4153                                              (u64)node_cookie);
4154                                 rb_erase(&node->rb_node, &proc->nodes);
4155                                 binder_inner_proc_unlock(proc);
4156                                 binder_node_lock(node);
4157                                 /*
4158                                  * Acquire the node lock before freeing the
4159                                  * node to serialize with other threads that
4160                                  * may have been holding the node lock while
4161                                  * decrementing this node (avoids race where
4162                                  * this thread frees while the other thread
4163                                  * is unlocking the node after the final
4164                                  * decrement)
4165                                  */
4166                                 binder_node_unlock(node);
4167                                 binder_free_node(node);
4168                         } else
4169                                 binder_inner_proc_unlock(proc);
4170
4171                         if (weak && !has_weak_ref)
4172                                 ret = binder_put_node_cmd(
4173                                                 proc, thread, &ptr, node_ptr,
4174                                                 node_cookie, node_debug_id,
4175                                                 BR_INCREFS, "BR_INCREFS");
4176                         if (!ret && strong && !has_strong_ref)
4177                                 ret = binder_put_node_cmd(
4178                                                 proc, thread, &ptr, node_ptr,
4179                                                 node_cookie, node_debug_id,
4180                                                 BR_ACQUIRE, "BR_ACQUIRE");
4181                         if (!ret && !strong && has_strong_ref)
4182                                 ret = binder_put_node_cmd(
4183                                                 proc, thread, &ptr, node_ptr,
4184                                                 node_cookie, node_debug_id,
4185                                                 BR_RELEASE, "BR_RELEASE");
4186                         if (!ret && !weak && has_weak_ref)
4187                                 ret = binder_put_node_cmd(
4188                                                 proc, thread, &ptr, node_ptr,
4189                                                 node_cookie, node_debug_id,
4190                                                 BR_DECREFS, "BR_DECREFS");
4191                         if (orig_ptr == ptr)
4192                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4193                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
4194                                              proc->pid, thread->pid,
4195                                              node_debug_id,
4196                                              (u64)node_ptr,
4197                                              (u64)node_cookie);
4198                         if (ret)
4199                                 return ret;
4200                 } break;
4201                 case BINDER_WORK_DEAD_BINDER:
4202                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4203                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4204                         struct binder_ref_death *death;
4205                         uint32_t cmd;
4206                         binder_uintptr_t cookie;
4207
4208                         death = container_of(w, struct binder_ref_death, work);
4209                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4210                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4211                         else
4212                                 cmd = BR_DEAD_BINDER;
4213                         cookie = death->cookie;
4214
4215                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4216                                      "%d:%d %s %016llx\n",
4217                                       proc->pid, thread->pid,
4218                                       cmd == BR_DEAD_BINDER ?
4219                                       "BR_DEAD_BINDER" :
4220                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4221                                       (u64)cookie);
4222                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4223                                 binder_inner_proc_unlock(proc);
4224                                 kfree(death);
4225                                 binder_stats_deleted(BINDER_STAT_DEATH);
4226                         } else {
4227                                 binder_enqueue_work_ilocked(
4228                                                 w, &proc->delivered_death);
4229                                 binder_inner_proc_unlock(proc);
4230                         }
4231                         if (put_user(cmd, (uint32_t __user *)ptr))
4232                                 return -EFAULT;
4233                         ptr += sizeof(uint32_t);
4234                         if (put_user(cookie,
4235                                      (binder_uintptr_t __user *)ptr))
4236                                 return -EFAULT;
4237                         ptr += sizeof(binder_uintptr_t);
4238                         binder_stat_br(proc, thread, cmd);
4239                         if (cmd == BR_DEAD_BINDER)
4240                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4241                 } break;
4242                 default:
4243                         binder_inner_proc_unlock(proc);
4244                         pr_err("%d:%d: bad work type %d\n",
4245                                proc->pid, thread->pid, w->type);
4246                         break;
4247                 }
4248
4249                 if (!t)
4250                         continue;
4251
4252                 BUG_ON(t->buffer == NULL);
4253                 if (t->buffer->target_node) {
4254                         struct binder_node *target_node = t->buffer->target_node;
4255
4256                         tr.target.ptr = target_node->ptr;
4257                         tr.cookie =  target_node->cookie;
4258                         t->saved_priority = task_nice(current);
4259                         if (t->priority < target_node->min_priority &&
4260                             !(t->flags & TF_ONE_WAY))
4261                                 binder_set_nice(t->priority);
4262                         else if (!(t->flags & TF_ONE_WAY) ||
4263                                  t->saved_priority > target_node->min_priority)
4264                                 binder_set_nice(target_node->min_priority);
4265                         cmd = BR_TRANSACTION;
4266                 } else {
4267                         tr.target.ptr = 0;
4268                         tr.cookie = 0;
4269                         cmd = BR_REPLY;
4270                 }
4271                 tr.code = t->code;
4272                 tr.flags = t->flags;
4273                 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4274
4275                 t_from = binder_get_txn_from(t);
4276                 if (t_from) {
4277                         struct task_struct *sender = t_from->proc->tsk;
4278
4279                         tr.sender_pid = task_tgid_nr_ns(sender,
4280                                                         task_active_pid_ns(current));
4281                 } else {
4282                         tr.sender_pid = 0;
4283                 }
4284
4285                 ret = binder_apply_fd_fixups(t);
4286                 if (ret) {
4287                         struct binder_buffer *buffer = t->buffer;
4288                         bool oneway = !!(t->flags & TF_ONE_WAY);
4289                         int tid = t->debug_id;
4290
4291                         if (t_from)
4292                                 binder_thread_dec_tmpref(t_from);
4293                         buffer->transaction = NULL;
4294                         binder_cleanup_transaction(t, "fd fixups failed",
4295                                                    BR_FAILED_REPLY);
4296                         binder_free_buf(proc, buffer);
4297                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4298                                      "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4299                                      proc->pid, thread->pid,
4300                                      oneway ? "async " :
4301                                         (cmd == BR_REPLY ? "reply " : ""),
4302                                      tid, BR_FAILED_REPLY, ret, __LINE__);
4303                         if (cmd == BR_REPLY) {
4304                                 cmd = BR_FAILED_REPLY;
4305                                 if (put_user(cmd, (uint32_t __user *)ptr))
4306                                         return -EFAULT;
4307                                 ptr += sizeof(uint32_t);
4308                                 binder_stat_br(proc, thread, cmd);
4309                                 break;
4310                         }
4311                         continue;
4312                 }
4313                 tr.data_size = t->buffer->data_size;
4314                 tr.offsets_size = t->buffer->offsets_size;
4315                 tr.data.ptr.buffer = (binder_uintptr_t)
4316                         ((uintptr_t)t->buffer->data +
4317                         binder_alloc_get_user_buffer_offset(&proc->alloc));
4318                 tr.data.ptr.offsets = tr.data.ptr.buffer +
4319                                         ALIGN(t->buffer->data_size,
4320                                             sizeof(void *));
4321
4322                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4323                         if (t_from)
4324                                 binder_thread_dec_tmpref(t_from);
4325
4326                         binder_cleanup_transaction(t, "put_user failed",
4327                                                    BR_FAILED_REPLY);
4328
4329                         return -EFAULT;
4330                 }
4331                 ptr += sizeof(uint32_t);
4332                 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4333                         if (t_from)
4334                                 binder_thread_dec_tmpref(t_from);
4335
4336                         binder_cleanup_transaction(t, "copy_to_user failed",
4337                                                    BR_FAILED_REPLY);
4338
4339                         return -EFAULT;
4340                 }
4341                 ptr += sizeof(tr);
4342
4343                 trace_binder_transaction_received(t);
4344                 binder_stat_br(proc, thread, cmd);
4345                 binder_debug(BINDER_DEBUG_TRANSACTION,
4346                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4347                              proc->pid, thread->pid,
4348                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4349                              "BR_REPLY",
4350                              t->debug_id, t_from ? t_from->proc->pid : 0,
4351                              t_from ? t_from->pid : 0, cmd,
4352                              t->buffer->data_size, t->buffer->offsets_size,
4353                              (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4354
4355                 if (t_from)
4356                         binder_thread_dec_tmpref(t_from);
4357                 t->buffer->allow_user_free = 1;
4358                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4359                         binder_inner_proc_lock(thread->proc);
4360                         t->to_parent = thread->transaction_stack;
4361                         t->to_thread = thread;
4362                         thread->transaction_stack = t;
4363                         binder_inner_proc_unlock(thread->proc);
4364                 } else {
4365                         binder_free_transaction(t);
4366                 }
4367                 break;
4368         }
4369
4370 done:
4371
4372         *consumed = ptr - buffer;
4373         binder_inner_proc_lock(proc);
4374         if (proc->requested_threads == 0 &&
4375             list_empty(&thread->proc->waiting_threads) &&
4376             proc->requested_threads_started < proc->max_threads &&
4377             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4378              BINDER_LOOPER_STATE_ENTERED))
4379              /* the user-space code fails to spawn a new thread if we leave this out */) {
4380                 proc->requested_threads++;
4381                 binder_inner_proc_unlock(proc);
4382                 binder_debug(BINDER_DEBUG_THREADS,
4383                              "%d:%d BR_SPAWN_LOOPER\n",
4384                              proc->pid, thread->pid);
4385                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4386                         return -EFAULT;
4387                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4388         } else
4389                 binder_inner_proc_unlock(proc);
4390         return 0;
4391 }
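/*
 * Read-side framing as seen by user space (a hedged sketch, not part of
 * this driver): when read_consumed starts at 0 the stream begins with
 * BR_NOOP, and every return code is a 32-bit BR_* command followed by a
 * payload whose size is encoded in the command itself (_IOC_SIZE(cmd));
 * BR_TRANSACTION and BR_REPLY carry a struct binder_transaction_data.
 * handle_txn() below is an illustrative helper.
 *
 *   char *p   = read_buf;
 *   char *end = read_buf + bwr.read_consumed;
 *
 *   while (p < end) {
 *           uint32_t cmd;
 *
 *           memcpy(&cmd, p, sizeof(cmd));
 *           p += sizeof(cmd);
 *           if (cmd == BR_TRANSACTION || cmd == BR_REPLY) {
 *                   struct binder_transaction_data td;
 *
 *                   memcpy(&td, p, sizeof(td));
 *                   handle_txn(cmd, &td);
 *           }
 *           p += _IOC_SIZE(cmd);    skip this command's payload
 *   }
 */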
4392
4393 static void binder_release_work(struct binder_proc *proc,
4394                                 struct list_head *list)
4395 {
4396         struct binder_work *w;
4397
4398         while (1) {
4399                 w = binder_dequeue_work_head(proc, list);
4400                 if (!w)
4401                         return;
4402
4403                 switch (w->type) {
4404                 case BINDER_WORK_TRANSACTION: {
4405                         struct binder_transaction *t;
4406
4407                         t = container_of(w, struct binder_transaction, work);
4408
4409                         binder_cleanup_transaction(t, "process died.",
4410                                                    BR_DEAD_REPLY);
4411                 } break;
4412                 case BINDER_WORK_RETURN_ERROR: {
4413                         struct binder_error *e = container_of(
4414                                         w, struct binder_error, work);
4415
4416                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4417                                 "undelivered TRANSACTION_ERROR: %u\n",
4418                                 e->cmd);
4419                 } break;
4420                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4421                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4422                                 "undelivered TRANSACTION_COMPLETE\n");
4423                         kfree(w);
4424                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4425                 } break;
4426                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4427                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4428                         struct binder_ref_death *death;
4429
4430                         death = container_of(w, struct binder_ref_death, work);
4431                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4432                                 "undelivered death notification, %016llx\n",
4433                                 (u64)death->cookie);
4434                         kfree(death);
4435                         binder_stats_deleted(BINDER_STAT_DEATH);
4436                 } break;
4437                 default:
4438                         pr_err("unexpected work type, %d, not freed\n",
4439                                w->type);
4440                         break;
4441                 }
4442         }
4443
4444 }
4445
4446 static struct binder_thread *binder_get_thread_ilocked(
4447                 struct binder_proc *proc, struct binder_thread *new_thread)
4448 {
4449         struct binder_thread *thread = NULL;
4450         struct rb_node *parent = NULL;
4451         struct rb_node **p = &proc->threads.rb_node;
4452
4453         while (*p) {
4454                 parent = *p;
4455                 thread = rb_entry(parent, struct binder_thread, rb_node);
4456
4457                 if (current->pid < thread->pid)
4458                         p = &(*p)->rb_left;
4459                 else if (current->pid > thread->pid)
4460                         p = &(*p)->rb_right;
4461                 else
4462                         return thread;
4463         }
4464         if (!new_thread)
4465                 return NULL;
4466         thread = new_thread;
4467         binder_stats_created(BINDER_STAT_THREAD);
4468         thread->proc = proc;
4469         thread->pid = current->pid;
4470         atomic_set(&thread->tmp_ref, 0);
4471         init_waitqueue_head(&thread->wait);
4472         INIT_LIST_HEAD(&thread->todo);
4473         rb_link_node(&thread->rb_node, parent, p);
4474         rb_insert_color(&thread->rb_node, &proc->threads);
4475         thread->looper_need_return = true;
4476         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4477         thread->return_error.cmd = BR_OK;
4478         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4479         thread->reply_error.cmd = BR_OK;
4480         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4481         return thread;
4482 }
4483
4484 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4485 {
4486         struct binder_thread *thread;
4487         struct binder_thread *new_thread;
4488
4489         binder_inner_proc_lock(proc);
4490         thread = binder_get_thread_ilocked(proc, NULL);
4491         binder_inner_proc_unlock(proc);
4492         if (!thread) {
4493                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4494                 if (new_thread == NULL)
4495                         return NULL;
4496                 binder_inner_proc_lock(proc);
4497                 thread = binder_get_thread_ilocked(proc, new_thread);
4498                 binder_inner_proc_unlock(proc);
4499                 if (thread != new_thread)
4500                         kfree(new_thread);
4501         }
4502         return thread;
4503 }
4504
4505 static void binder_free_proc(struct binder_proc *proc)
4506 {
4507         BUG_ON(!list_empty(&proc->todo));
4508         BUG_ON(!list_empty(&proc->delivered_death));
4509         binder_alloc_deferred_release(&proc->alloc);
4510         put_task_struct(proc->tsk);
4511         binder_stats_deleted(BINDER_STAT_PROC);
4512         kfree(proc);
4513 }
4514
4515 static void binder_free_thread(struct binder_thread *thread)
4516 {
4517         BUG_ON(!list_empty(&thread->todo));
4518         binder_stats_deleted(BINDER_STAT_THREAD);
4519         binder_proc_dec_tmpref(thread->proc);
4520         kfree(thread);
4521 }
4522
4523 static int binder_thread_release(struct binder_proc *proc,
4524                                  struct binder_thread *thread)
4525 {
4526         struct binder_transaction *t;
4527         struct binder_transaction *send_reply = NULL;
4528         int active_transactions = 0;
4529         struct binder_transaction *last_t = NULL;
4530
4531         binder_inner_proc_lock(thread->proc);
4532         /*
4533          * take a ref on the proc so it survives
4534          * after we remove this thread from proc->threads.
4535          * The corresponding dec is when we actually
4536          * free the thread in binder_free_thread()
4537          */
4538         proc->tmp_ref++;
4539         /*
4540          * take a ref on this thread to ensure it
4541          * survives while we are releasing it
4542          */
4543         atomic_inc(&thread->tmp_ref);
4544         rb_erase(&thread->rb_node, &proc->threads);
4545         t = thread->transaction_stack;
4546         if (t) {
4547                 spin_lock(&t->lock);
4548                 if (t->to_thread == thread)
4549                         send_reply = t;
4550         } else {
4551                 __acquire(&t->lock);
4552         }
4553         thread->is_dead = true;
4554
4555         while (t) {
4556                 last_t = t;
4557                 active_transactions++;
4558                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4559                              "release %d:%d transaction %d %s, still active\n",
4560                               proc->pid, thread->pid,
4561                              t->debug_id,
4562                              (t->to_thread == thread) ? "in" : "out");
4563
4564                 if (t->to_thread == thread) {
4565                         t->to_proc = NULL;
4566                         t->to_thread = NULL;
4567                         if (t->buffer) {
4568                                 t->buffer->transaction = NULL;
4569                                 t->buffer = NULL;
4570                         }
4571                         t = t->to_parent;
4572                 } else if (t->from == thread) {
4573                         t->from = NULL;
4574                         t = t->from_parent;
4575                 } else
4576                         BUG();
4577                 spin_unlock(&last_t->lock);
4578                 if (t)
4579                         spin_lock(&t->lock);
4580                 else
4581                         __acquire(&t->lock);
4582         }
4583         /* annotation for sparse, lock not acquired in last iteration above */
4584         __release(&t->lock);
4585
4586         /*
4587          * If this thread used poll, make sure we remove the waitqueue
4588          * from any epoll data structures holding it with POLLFREE.
4589          * waitqueue_active() is safe to use here because we're holding
4590          * the inner lock.
4591          */
4592         if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4593             waitqueue_active(&thread->wait)) {
4594                 wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
4595         }
4596
4597         binder_inner_proc_unlock(thread->proc);
4598
4599         /*
4600          * This is needed to avoid races between wake_up_poll() above and
4601          * ep_remove_waitqueue() called for other reasons (e.g. the epoll file
4602          * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4603          * lock, so we can be sure it's done after calling synchronize_rcu().
4604          */
4605         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4606                 synchronize_rcu();
4607
4608         if (send_reply)
4609                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4610         binder_release_work(proc, &thread->todo);
4611         binder_thread_dec_tmpref(thread);
4612         return active_transactions;
4613 }
4614
4615 static __poll_t binder_poll(struct file *filp,
4616                                 struct poll_table_struct *wait)
4617 {
4618         struct binder_proc *proc = filp->private_data;
4619         struct binder_thread *thread = NULL;
4620         bool wait_for_proc_work;
4621
4622         thread = binder_get_thread(proc);
4623         if (!thread)
4624                 return POLLERR;
4625
4626         binder_inner_proc_lock(thread->proc);
4627         thread->looper |= BINDER_LOOPER_STATE_POLL;
4628         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4629
4630         binder_inner_proc_unlock(thread->proc);
4631
4632         poll_wait(filp, &thread->wait, wait);
4633
4634         if (binder_has_work(thread, wait_for_proc_work))
4635                 return EPOLLIN;
4636
4637         return 0;
4638 }
4639
4640 static int binder_ioctl_write_read(struct file *filp,
4641                                 unsigned int cmd, unsigned long arg,
4642                                 struct binder_thread *thread)
4643 {
4644         int ret = 0;
4645         struct binder_proc *proc = filp->private_data;
4646         unsigned int size = _IOC_SIZE(cmd);
4647         void __user *ubuf = (void __user *)arg;
4648         struct binder_write_read bwr;
4649
4650         if (size != sizeof(struct binder_write_read)) {
4651                 ret = -EINVAL;
4652                 goto out;
4653         }
4654         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4655                 ret = -EFAULT;
4656                 goto out;
4657         }
4658         binder_debug(BINDER_DEBUG_READ_WRITE,
4659                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4660                      proc->pid, thread->pid,
4661                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4662                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4663
4664         if (bwr.write_size > 0) {
4665                 ret = binder_thread_write(proc, thread,
4666                                           bwr.write_buffer,
4667                                           bwr.write_size,
4668                                           &bwr.write_consumed);
4669                 trace_binder_write_done(ret);
4670                 if (ret < 0) {
4671                         bwr.read_consumed = 0;
4672                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4673                                 ret = -EFAULT;
4674                         goto out;
4675                 }
4676         }
4677         if (bwr.read_size > 0) {
4678                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4679                                          bwr.read_size,
4680                                          &bwr.read_consumed,
4681                                          filp->f_flags & O_NONBLOCK);
4682                 trace_binder_read_done(ret);
4683                 binder_inner_proc_lock(proc);
4684                 if (!binder_worklist_empty_ilocked(&proc->todo))
4685                         binder_wakeup_proc_ilocked(proc);
4686                 binder_inner_proc_unlock(proc);
4687                 if (ret < 0) {
4688                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4689                                 ret = -EFAULT;
4690                         goto out;
4691                 }
4692         }
4693         binder_debug(BINDER_DEBUG_READ_WRITE,
4694                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4695                      proc->pid, thread->pid,
4696                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4697                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4698         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4699                 ret = -EFAULT;
4700                 goto out;
4701         }
4702 out:
4703         return ret;
4704 }
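/*
 * A hedged user-space sketch of driving binder_ioctl_write_read() (not
 * part of this driver; 'fd' is an open /dev/binder descriptor, and the
 * buffers hold BC_* commands / receive BR_* returns as described above):
 *
 *   struct binder_write_read bwr = {
 *           .write_buffer   = (binder_uintptr_t)(uintptr_t)write_buf,
 *           .write_size     = write_len,
 *           .write_consumed = 0,
 *           .read_buffer    = (binder_uintptr_t)(uintptr_t)read_buf,
 *           .read_size      = sizeof(read_buf),
 *           .read_consumed  = 0,
 *   };
 *
 *   if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *           perror("BINDER_WRITE_READ");
 *   bwr.write_consumed and bwr.read_consumed report how much was used.
 */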
4705
4706 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4707 {
4708         int ret = 0;
4709         struct binder_proc *proc = filp->private_data;
4710         struct binder_context *context = proc->context;
4711         struct binder_node *new_node;
4712         kuid_t curr_euid = current_euid();
4713
4714         mutex_lock(&context->context_mgr_node_lock);
4715         if (context->binder_context_mgr_node) {
4716                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4717                 ret = -EBUSY;
4718                 goto out;
4719         }
4720         ret = security_binder_set_context_mgr(proc->tsk);
4721         if (ret < 0)
4722                 goto out;
4723         if (uid_valid(context->binder_context_mgr_uid)) {
4724                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4725                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4726                                from_kuid(&init_user_ns, curr_euid),
4727                                from_kuid(&init_user_ns,
4728                                          context->binder_context_mgr_uid));
4729                         ret = -EPERM;
4730                         goto out;
4731                 }
4732         } else {
4733                 context->binder_context_mgr_uid = curr_euid;
4734         }
4735         new_node = binder_new_node(proc, NULL);
4736         if (!new_node) {
4737                 ret = -ENOMEM;
4738                 goto out;
4739         }
4740         binder_node_lock(new_node);
4741         new_node->local_weak_refs++;
4742         new_node->local_strong_refs++;
4743         new_node->has_strong_ref = 1;
4744         new_node->has_weak_ref = 1;
4745         context->binder_context_mgr_node = new_node;
4746         binder_node_unlock(new_node);
4747         binder_put_node(new_node);
4748 out:
4749         mutex_unlock(&context->context_mgr_node_lock);
4750         return ret;
4751 }
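/*
 * Context-manager registration is normally done once, early, by the
 * service manager process; a hedged sketch (not part of this driver):
 *
 *   if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *           perror("BINDER_SET_CONTEXT_MGR");
 *   other processes then reach this node via handle 0.
 */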
4752
4753 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4754                 struct binder_node_info_for_ref *info)
4755 {
4756         struct binder_node *node;
4757         struct binder_context *context = proc->context;
4758         __u32 handle = info->handle;
4759
4760         if (info->strong_count || info->weak_count || info->reserved1 ||
4761             info->reserved2 || info->reserved3) {
4762                 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.\n",
4763                                   proc->pid);
4764                 return -EINVAL;
4765         }
4766
4767         /* This ioctl may only be used by the context manager */
4768         mutex_lock(&context->context_mgr_node_lock);
4769         if (!context->binder_context_mgr_node ||
4770                 context->binder_context_mgr_node->proc != proc) {
4771                 mutex_unlock(&context->context_mgr_node_lock);
4772                 return -EPERM;
4773         }
4774         mutex_unlock(&context->context_mgr_node_lock);
4775
4776         node = binder_get_node_from_ref(proc, handle, true, NULL);
4777         if (!node)
4778                 return -EINVAL;
4779
4780         info->strong_count = node->local_strong_refs +
4781                 node->internal_strong_refs;
4782         info->weak_count = node->local_weak_refs;
4783
4784         binder_put_node(node);
4785
4786         return 0;
4787 }
4788
4789 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4790                                 struct binder_node_debug_info *info)
4791 {
4792         struct rb_node *n;
4793         binder_uintptr_t ptr = info->ptr;
4794
4795         memset(info, 0, sizeof(*info));
4796
4797         binder_inner_proc_lock(proc);
4798         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4799                 struct binder_node *node = rb_entry(n, struct binder_node,
4800                                                     rb_node);
4801                 if (node->ptr > ptr) {
4802                         info->ptr = node->ptr;
4803                         info->cookie = node->cookie;
4804                         info->has_strong_ref = node->has_strong_ref;
4805                         info->has_weak_ref = node->has_weak_ref;
4806                         break;
4807                 }
4808         }
4809         binder_inner_proc_unlock(proc);
4810
4811         return 0;
4812 }
4813
4814 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4815 {
4816         int ret;
4817         struct binder_proc *proc = filp->private_data;
4818         struct binder_thread *thread;
4819         unsigned int size = _IOC_SIZE(cmd);
4820         void __user *ubuf = (void __user *)arg;
4821
4822         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4823                         proc->pid, current->pid, cmd, arg);*/
4824
4825         binder_selftest_alloc(&proc->alloc);
4826
4827         trace_binder_ioctl(cmd, arg);
4828
4829         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4830         if (ret)
4831                 goto err_unlocked;
4832
4833         thread = binder_get_thread(proc);
4834         if (thread == NULL) {
4835                 ret = -ENOMEM;
4836                 goto err;
4837         }
4838
4839         switch (cmd) {
4840         case BINDER_WRITE_READ:
4841                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4842                 if (ret)
4843                         goto err;
4844                 break;
4845         case BINDER_SET_MAX_THREADS: {
4846                 int max_threads;
4847
4848                 if (copy_from_user(&max_threads, ubuf,
4849                                    sizeof(max_threads))) {
4850                         ret = -EINVAL;
4851                         goto err;
4852                 }
4853                 binder_inner_proc_lock(proc);
4854                 proc->max_threads = max_threads;
4855                 binder_inner_proc_unlock(proc);
4856                 break;
4857         }
4858         case BINDER_SET_CONTEXT_MGR:
4859                 ret = binder_ioctl_set_ctx_mgr(filp);
4860                 if (ret)
4861                         goto err;
4862                 break;
4863         case BINDER_THREAD_EXIT:
4864                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4865                              proc->pid, thread->pid);
4866                 binder_thread_release(proc, thread);
4867                 thread = NULL;
4868                 break;
4869         case BINDER_VERSION: {
4870                 struct binder_version __user *ver = ubuf;
4871
4872                 if (size != sizeof(struct binder_version)) {
4873                         ret = -EINVAL;
4874                         goto err;
4875                 }
4876                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4877                              &ver->protocol_version)) {
4878                         ret = -EINVAL;
4879                         goto err;
4880                 }
4881                 break;
4882         }
4883         case BINDER_GET_NODE_INFO_FOR_REF: {
4884                 struct binder_node_info_for_ref info;
4885
4886                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4887                         ret = -EFAULT;
4888                         goto err;
4889                 }
4890
4891                 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4892                 if (ret < 0)
4893                         goto err;
4894
4895                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4896                         ret = -EFAULT;
4897                         goto err;
4898                 }
4899
4900                 break;
4901         }
4902         case BINDER_GET_NODE_DEBUG_INFO: {
4903                 struct binder_node_debug_info info;
4904
4905                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4906                         ret = -EFAULT;
4907                         goto err;
4908                 }
4909
4910                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4911                 if (ret < 0)
4912                         goto err;
4913
4914                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4915                         ret = -EFAULT;
4916                         goto err;
4917                 }
4918                 break;
4919         }
4920         default:
4921                 ret = -EINVAL;
4922                 goto err;
4923         }
4924         ret = 0;
4925 err:
4926         if (thread)
4927                 thread->looper_need_return = false;
4928         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4929         if (ret && ret != -ERESTARTSYS)
4930                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4931 err_unlocked:
4932         trace_binder_ioctl_done(ret);
4933         return ret;
4934 }
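/*
 * Typical ioctl sequence a client performs before exchanging transactions
 * (a hedged sketch, not part of this driver; the thread count is an
 * illustrative value):
 *
 *   struct binder_version vers;
 *   uint32_t max_threads = 15;
 *
 *   if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *       vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *           return -1;   driver/library protocol mismatch
 *   ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 */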
4935
4936 static void binder_vma_open(struct vm_area_struct *vma)
4937 {
4938         struct binder_proc *proc = vma->vm_private_data;
4939
4940         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4941                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4942                      proc->pid, vma->vm_start, vma->vm_end,
4943                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4944                      (unsigned long)pgprot_val(vma->vm_page_prot));
4945 }
4946
4947 static void binder_vma_close(struct vm_area_struct *vma)
4948 {
4949         struct binder_proc *proc = vma->vm_private_data;
4950
4951         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4952                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4953                      proc->pid, vma->vm_start, vma->vm_end,
4954                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4955                      (unsigned long)pgprot_val(vma->vm_page_prot));
4956         binder_alloc_vma_close(&proc->alloc);
4957 }
4958
4959 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
4960 {
4961         return VM_FAULT_SIGBUS;
4962 }
4963
4964 static const struct vm_operations_struct binder_vm_ops = {
4965         .open = binder_vma_open,
4966         .close = binder_vma_close,
4967         .fault = binder_vm_fault,
4968 };
4969
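/**
 * binder_mmap() - map the binder buffer area for a process
 * @filp:	binder file the caller opened
 * @vma:	userspace range supplied by the mm layer
 *
 * Only the thread group that opened the device may map it, the mapping is
 * capped at 4MB, forbidden vm_flags are rejected and VM_MAYWRITE is
 * cleared so the area can never be made writable from userspace.  The
 * buffer bookkeeping itself is set up by binder_alloc_mmap_handler().
 * Userspace typically maps the fd with PROT_READ and MAP_PRIVATE.
 */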
4970 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4971 {
4972         int ret;
4973         struct binder_proc *proc = filp->private_data;
4974         const char *failure_string;
4975
4976         if (proc->tsk != current->group_leader)
4977                 return -EINVAL;
4978
4979         if ((vma->vm_end - vma->vm_start) > SZ_4M)
4980                 vma->vm_end = vma->vm_start + SZ_4M;
4981
4982         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4983                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4984                      __func__, proc->pid, vma->vm_start, vma->vm_end,
4985                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4986                      (unsigned long)pgprot_val(vma->vm_page_prot));
4987
4988         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4989                 ret = -EPERM;
4990                 failure_string = "bad vm_flags";
4991                 goto err_bad_arg;
4992         }
4993         vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4994         vma->vm_flags &= ~VM_MAYWRITE;
4995
4996         vma->vm_ops = &binder_vm_ops;
4997         vma->vm_private_data = proc;
4998
4999         ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5000         if (ret)
5001                 return ret;
5002         return 0;
5003
5004 err_bad_arg:
5005         pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5006                proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
5007         return ret;
5008 }
5009
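/**
 * binder_open() - set up a binder_proc for the opening process
 * @nodp:	inode of the binder device (unused)
 * @filp:	file being opened
 *
 * Allocates a binder_proc tied to the caller's thread group leader,
 * points it at the context of the binder_device being opened, stashes it
 * in filp->private_data, links it into the global binder_procs list and
 * creates the per-pid debugfs entry.
 */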
5010 static int binder_open(struct inode *nodp, struct file *filp)
5011 {
5012         struct binder_proc *proc;
5013         struct binder_device *binder_dev;
5014
5015         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5016                      current->group_leader->pid, current->pid);
5017
5018         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5019         if (proc == NULL)
5020                 return -ENOMEM;
5021         spin_lock_init(&proc->inner_lock);
5022         spin_lock_init(&proc->outer_lock);
5023         get_task_struct(current->group_leader);
5024         proc->tsk = current->group_leader;
5025         INIT_LIST_HEAD(&proc->todo);
5026         proc->default_priority = task_nice(current);
5027         binder_dev = container_of(filp->private_data, struct binder_device,
5028                                   miscdev);
5029         proc->context = &binder_dev->context;
5030         binder_alloc_init(&proc->alloc);
5031
5032         binder_stats_created(BINDER_STAT_PROC);
5033         proc->pid = current->group_leader->pid;
5034         INIT_LIST_HEAD(&proc->delivered_death);
5035         INIT_LIST_HEAD(&proc->waiting_threads);
5036         filp->private_data = proc;
5037
5038         mutex_lock(&binder_procs_lock);
5039         hlist_add_head(&proc->proc_node, &binder_procs);
5040         mutex_unlock(&binder_procs_lock);
5041
5042         if (binder_debugfs_dir_entry_proc) {
5043                 char strbuf[11];
5044
5045                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5046                 /*
5047                  * proc debug entries are shared between contexts, so
5048                  * this will fail if the process tries to open the driver
5049                  * again with a different context. The printing code will
5050                  * print all contexts that a given PID has anyway, so this
5051                  * is not a problem.
5052                  */
5053                 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5054                         binder_debugfs_dir_entry_proc,
5055                         (void *)(unsigned long)proc->pid,
5056                         &proc_fops);
5057         }
5058
5059         return 0;
5060 }
5061
5062 static int binder_flush(struct file *filp, fl_owner_t id)
5063 {
5064         struct binder_proc *proc = filp->private_data;
5065
5066         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5067
5068         return 0;
5069 }
5070
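/*
 * Deferred counterpart of binder_flush(): mark every thread of @proc as
 * needing to return to userspace and wake any thread that is currently
 * waiting in the driver.
 */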
5071 static void binder_deferred_flush(struct binder_proc *proc)
5072 {
5073         struct rb_node *n;
5074         int wake_count = 0;
5075
5076         binder_inner_proc_lock(proc);
5077         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5078                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5079
5080                 thread->looper_need_return = true;
5081                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5082                         wake_up_interruptible(&thread->wait);
5083                         wake_count++;
5084                 }
5085         }
5086         binder_inner_proc_unlock(proc);
5087
5088         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5089                      "binder_flush: %d woke %d threads\n", proc->pid,
5090                      wake_count);
5091 }
5092
5093 static int binder_release(struct inode *nodp, struct file *filp)
5094 {
5095         struct binder_proc *proc = filp->private_data;
5096
5097         debugfs_remove(proc->debugfs_entry);
5098         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5099
5100         return 0;
5101 }
5102
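/**
 * binder_node_release() - clean up a node whose owning proc is going away
 * @node:	node being released
 * @refs:	running count of refs handled so far by the caller
 *
 * Flushes the node's pending async work, frees the node outright if no
 * other references remain, and otherwise moves it to the global
 * dead-nodes list and queues a BINDER_WORK_DEAD_BINDER item for every
 * ref that registered a death notification.
 *
 * Return: @refs plus the number of refs found on the node.
 */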
5103 static int binder_node_release(struct binder_node *node, int refs)
5104 {
5105         struct binder_ref *ref;
5106         int death = 0;
5107         struct binder_proc *proc = node->proc;
5108
5109         binder_release_work(proc, &node->async_todo);
5110
5111         binder_node_lock(node);
5112         binder_inner_proc_lock(proc);
5113         binder_dequeue_work_ilocked(&node->work);
5114         /*
5115          * The caller must have taken a temporary ref on the node.
5116          */
5117         BUG_ON(!node->tmp_refs);
5118         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5119                 binder_inner_proc_unlock(proc);
5120                 binder_node_unlock(node);
5121                 binder_free_node(node);
5122
5123                 return refs;
5124         }
5125
5126         node->proc = NULL;
5127         node->local_strong_refs = 0;
5128         node->local_weak_refs = 0;
5129         binder_inner_proc_unlock(proc);
5130
5131         spin_lock(&binder_dead_nodes_lock);
5132         hlist_add_head(&node->dead_node, &binder_dead_nodes);
5133         spin_unlock(&binder_dead_nodes_lock);
5134
5135         hlist_for_each_entry(ref, &node->refs, node_entry) {
5136                 refs++;
5137                 /*
5138                  * Need the node lock to synchronize
5139                  * with new notification requests and the
5140                  * inner lock to synchronize with queued
5141                  * death notifications.
5142                  */
5143                 binder_inner_proc_lock(ref->proc);
5144                 if (!ref->death) {
5145                         binder_inner_proc_unlock(ref->proc);
5146                         continue;
5147                 }
5148
5149                 death++;
5150
5151                 BUG_ON(!list_empty(&ref->death->work.entry));
5152                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5153                 binder_enqueue_work_ilocked(&ref->death->work,
5154                                             &ref->proc->todo);
5155                 binder_wakeup_proc_ilocked(ref->proc);
5156                 binder_inner_proc_unlock(ref->proc);
5157         }
5158
5159         binder_debug(BINDER_DEBUG_DEAD_BINDER,
5160                      "node %d now dead, refs %d, death %d\n",
5161                      node->debug_id, refs, death);
5162         binder_node_unlock(node);
5163         binder_put_node(node);
5164
5165         return refs;
5166 }
5167
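/*
 * Deferred counterpart of binder_release(): tear down a binder_proc after
 * its file has gone away. Clears the context manager node if this proc
 * owned it, releases every thread, node and ref, flushes the remaining
 * work lists and finally drops the temporary proc ref taken here so the
 * proc is freed once its last user is done with it.
 */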
5168 static void binder_deferred_release(struct binder_proc *proc)
5169 {
5170         struct binder_context *context = proc->context;
5171         struct rb_node *n;
5172         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5173
5174         mutex_lock(&binder_procs_lock);
5175         hlist_del(&proc->proc_node);
5176         mutex_unlock(&binder_procs_lock);
5177
5178         mutex_lock(&context->context_mgr_node_lock);
5179         if (context->binder_context_mgr_node &&
5180             context->binder_context_mgr_node->proc == proc) {
5181                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5182                              "%s: %d context_mgr_node gone\n",
5183                              __func__, proc->pid);
5184                 context->binder_context_mgr_node = NULL;
5185         }
5186         mutex_unlock(&context->context_mgr_node_lock);
5187         binder_inner_proc_lock(proc);
5188         /*
5189          * Make sure proc stays alive after we
5190          * remove all the threads
5191          */
5192         proc->tmp_ref++;
5193
5194         proc->is_dead = true;
5195         threads = 0;
5196         active_transactions = 0;
5197         while ((n = rb_first(&proc->threads))) {
5198                 struct binder_thread *thread;
5199
5200                 thread = rb_entry(n, struct binder_thread, rb_node);
5201                 binder_inner_proc_unlock(proc);
5202                 threads++;
5203                 active_transactions += binder_thread_release(proc, thread);
5204                 binder_inner_proc_lock(proc);
5205         }
5206
5207         nodes = 0;
5208         incoming_refs = 0;
5209         while ((n = rb_first(&proc->nodes))) {
5210                 struct binder_node *node;
5211
5212                 node = rb_entry(n, struct binder_node, rb_node);
5213                 nodes++;
5214                 /*
5215                  * take a temporary ref on the node before
5216                  * calling binder_node_release() which will either
5217                  * kfree() the node or call binder_put_node()
5218                  */
5219                 binder_inc_node_tmpref_ilocked(node);
5220                 rb_erase(&node->rb_node, &proc->nodes);
5221                 binder_inner_proc_unlock(proc);
5222                 incoming_refs = binder_node_release(node, incoming_refs);
5223                 binder_inner_proc_lock(proc);
5224         }
5225         binder_inner_proc_unlock(proc);
5226
5227         outgoing_refs = 0;
5228         binder_proc_lock(proc);
5229         while ((n = rb_first(&proc->refs_by_desc))) {
5230                 struct binder_ref *ref;
5231
5232                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5233                 outgoing_refs++;
5234                 binder_cleanup_ref_olocked(ref);
5235                 binder_proc_unlock(proc);
5236                 binder_free_ref(ref);
5237                 binder_proc_lock(proc);
5238         }
5239         binder_proc_unlock(proc);
5240
5241         binder_release_work(proc, &proc->todo);
5242         binder_release_work(proc, &proc->delivered_death);
5243
5244         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5245                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5246                      __func__, proc->pid, threads, nodes, incoming_refs,
5247                      outgoing_refs, active_transactions);
5248
5249         binder_proc_dec_tmpref(proc);
5250 }
5251
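/*
 * Workqueue handler that drains binder_deferred_list: each queued proc is
 * removed from the list and its pending deferred flush and/or release
 * work is carried out.
 */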
5252 static void binder_deferred_func(struct work_struct *work)
5253 {
5254         struct binder_proc *proc;
5255
5256         int defer;
5257
5258         do {
5259                 mutex_lock(&binder_deferred_lock);
5260                 if (!hlist_empty(&binder_deferred_list)) {
5261                         proc = hlist_entry(binder_deferred_list.first,
5262                                         struct binder_proc, deferred_work_node);
5263                         hlist_del_init(&proc->deferred_work_node);
5264                         defer = proc->deferred_work;
5265                         proc->deferred_work = 0;
5266                 } else {
5267                         proc = NULL;
5268                         defer = 0;
5269                 }
5270                 mutex_unlock(&binder_deferred_lock);
5271
5272                 if (defer & BINDER_DEFERRED_FLUSH)
5273                         binder_deferred_flush(proc);
5274
5275                 if (defer & BINDER_DEFERRED_RELEASE)
5276                         binder_deferred_release(proc); /* frees proc */
5277         } while (proc);
5278 }
5279 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5280
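/*
 * Record deferred work bits for @proc and, if the proc is not already on
 * binder_deferred_list, queue it and kick the deferred worker.
 */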
5281 static void
5282 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5283 {
5284         mutex_lock(&binder_deferred_lock);
5285         proc->deferred_work |= defer;
5286         if (hlist_unhashed(&proc->deferred_work_node)) {
5287                 hlist_add_head(&proc->deferred_work_node,
5288                                 &binder_deferred_list);
5289                 schedule_work(&binder_deferred_work);
5290         }
5291         mutex_unlock(&binder_deferred_lock);
5292 }
5293
5294 static void print_binder_transaction_ilocked(struct seq_file *m,
5295                                              struct binder_proc *proc,
5296                                              const char *prefix,
5297                                              struct binder_transaction *t)
5298 {
5299         struct binder_proc *to_proc;
5300         struct binder_buffer *buffer = t->buffer;
5301
5302         spin_lock(&t->lock);
5303         to_proc = t->to_proc;
5304         seq_printf(m,
5305                    "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5306                    prefix, t->debug_id, t,
5307                    t->from ? t->from->proc->pid : 0,
5308                    t->from ? t->from->pid : 0,
5309                    to_proc ? to_proc->pid : 0,
5310                    t->to_thread ? t->to_thread->pid : 0,
5311                    t->code, t->flags, t->priority, t->need_reply);
5312         spin_unlock(&t->lock);
5313
5314         if (proc != to_proc) {
5315                 /*
5316                  * Can only safely deref buffer if we are holding the
5317                  * correct proc inner lock for this node
5318                  */
5319                 seq_puts(m, "\n");
5320                 return;
5321         }
5322
5323         if (buffer == NULL) {
5324                 seq_puts(m, " buffer free\n");
5325                 return;
5326         }
5327         if (buffer->target_node)
5328                 seq_printf(m, " node %d", buffer->target_node->debug_id);
5329         seq_printf(m, " size %zd:%zd data %pK\n",
5330                    buffer->data_size, buffer->offsets_size,
5331                    buffer->data);
5332 }
5333
5334 static void print_binder_work_ilocked(struct seq_file *m,
5335                                      struct binder_proc *proc,
5336                                      const char *prefix,
5337                                      const char *transaction_prefix,
5338                                      struct binder_work *w)
5339 {
5340         struct binder_node *node;
5341         struct binder_transaction *t;
5342
5343         switch (w->type) {
5344         case BINDER_WORK_TRANSACTION:
5345                 t = container_of(w, struct binder_transaction, work);
5346                 print_binder_transaction_ilocked(
5347                                 m, proc, transaction_prefix, t);
5348                 break;
5349         case BINDER_WORK_RETURN_ERROR: {
5350                 struct binder_error *e = container_of(
5351                                 w, struct binder_error, work);
5352
5353                 seq_printf(m, "%stransaction error: %u\n",
5354                            prefix, e->cmd);
5355         } break;
5356         case BINDER_WORK_TRANSACTION_COMPLETE:
5357                 seq_printf(m, "%stransaction complete\n", prefix);
5358                 break;
5359         case BINDER_WORK_NODE:
5360                 node = container_of(w, struct binder_node, work);
5361                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5362                            prefix, node->debug_id,
5363                            (u64)node->ptr, (u64)node->cookie);
5364                 break;
5365         case BINDER_WORK_DEAD_BINDER:
5366                 seq_printf(m, "%shas dead binder\n", prefix);
5367                 break;
5368         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5369                 seq_printf(m, "%shas cleared dead binder\n", prefix);
5370                 break;
5371         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5372                 seq_printf(m, "%shas cleared death notification\n", prefix);
5373                 break;
5374         default:
5375                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5376                 break;
5377         }
5378 }
5379
5380 static void print_binder_thread_ilocked(struct seq_file *m,
5381                                         struct binder_thread *thread,
5382                                         int print_always)
5383 {
5384         struct binder_transaction *t;
5385         struct binder_work *w;
5386         size_t start_pos = m->count;
5387         size_t header_pos;
5388
5389         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5390                         thread->pid, thread->looper,
5391                         thread->looper_need_return,
5392                         atomic_read(&thread->tmp_ref));
5393         header_pos = m->count;
5394         t = thread->transaction_stack;
5395         while (t) {
5396                 if (t->from == thread) {
5397                         print_binder_transaction_ilocked(m, thread->proc,
5398                                         "    outgoing transaction", t);
5399                         t = t->from_parent;
5400                 } else if (t->to_thread == thread) {
5401                         print_binder_transaction_ilocked(m, thread->proc,
5402                                                  "    incoming transaction", t);
5403                         t = t->to_parent;
5404                 } else {
5405                         print_binder_transaction_ilocked(m, thread->proc,
5406                                         "    bad transaction", t);
5407                         t = NULL;
5408                 }
5409         }
5410         list_for_each_entry(w, &thread->todo, entry) {
5411                 print_binder_work_ilocked(m, thread->proc, "    ",
5412                                           "    pending transaction", w);
5413         }
5414         if (!print_always && m->count == header_pos)
5415                 m->count = start_pos;
5416 }
5417
5418 static void print_binder_node_nilocked(struct seq_file *m,
5419                                        struct binder_node *node)
5420 {
5421         struct binder_ref *ref;
5422         struct binder_work *w;
5423         int count;
5424
5425         count = 0;
5426         hlist_for_each_entry(ref, &node->refs, node_entry)
5427                 count++;
5428
5429         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5430                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
5431                    node->has_strong_ref, node->has_weak_ref,
5432                    node->local_strong_refs, node->local_weak_refs,
5433                    node->internal_strong_refs, count, node->tmp_refs);
5434         if (count) {
5435                 seq_puts(m, " proc");
5436                 hlist_for_each_entry(ref, &node->refs, node_entry)
5437                         seq_printf(m, " %d", ref->proc->pid);
5438         }
5439         seq_puts(m, "\n");
5440         if (node->proc) {
5441                 list_for_each_entry(w, &node->async_todo, entry)
5442                         print_binder_work_ilocked(m, node->proc, "    ",
5443                                           "    pending async transaction", w);
5444         }
5445 }
5446
5447 static void print_binder_ref_olocked(struct seq_file *m,
5448                                      struct binder_ref *ref)
5449 {
5450         binder_node_lock(ref->node);
5451         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5452                    ref->data.debug_id, ref->data.desc,
5453                    ref->node->proc ? "" : "dead ",
5454                    ref->node->debug_id, ref->data.strong,
5455                    ref->data.weak, ref->death);
5456         binder_node_unlock(ref->node);
5457 }
5458
5459 static void print_binder_proc(struct seq_file *m,
5460                               struct binder_proc *proc, int print_all)
5461 {
5462         struct binder_work *w;
5463         struct rb_node *n;
5464         size_t start_pos = m->count;
5465         size_t header_pos;
5466         struct binder_node *last_node = NULL;
5467
5468         seq_printf(m, "proc %d\n", proc->pid);
5469         seq_printf(m, "context %s\n", proc->context->name);
5470         header_pos = m->count;
5471
5472         binder_inner_proc_lock(proc);
5473         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5474                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5475                                                 rb_node), print_all);
5476
5477         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5478                 struct binder_node *node = rb_entry(n, struct binder_node,
5479                                                     rb_node);
5480                 if (!print_all && !node->has_async_transaction)
5481                         continue;
5482
5483                 /*
5484                  * take a temporary reference on the node so it
5485                  * survives and isn't removed from the tree
5486                  * while we print it.
5487                  */
5488                 binder_inc_node_tmpref_ilocked(node);
5489                 /* Need to drop inner lock to take node lock */
5490                 binder_inner_proc_unlock(proc);
5491                 if (last_node)
5492                         binder_put_node(last_node);
5493                 binder_node_inner_lock(node);
5494                 print_binder_node_nilocked(m, node);
5495                 binder_node_inner_unlock(node);
5496                 last_node = node;
5497                 binder_inner_proc_lock(proc);
5498         }
5499         binder_inner_proc_unlock(proc);
5500         if (last_node)
5501                 binder_put_node(last_node);
5502
5503         if (print_all) {
5504                 binder_proc_lock(proc);
5505                 for (n = rb_first(&proc->refs_by_desc);
5506                      n != NULL;
5507                      n = rb_next(n))
5508                         print_binder_ref_olocked(m, rb_entry(n,
5509                                                             struct binder_ref,
5510                                                             rb_node_desc));
5511                 binder_proc_unlock(proc);
5512         }
5513         binder_alloc_print_allocated(m, &proc->alloc);
5514         binder_inner_proc_lock(proc);
5515         list_for_each_entry(w, &proc->todo, entry)
5516                 print_binder_work_ilocked(m, proc, "  ",
5517                                           "  pending transaction", w);
5518         list_for_each_entry(w, &proc->delivered_death, entry) {
5519                 seq_puts(m, "  has delivered dead binder\n");
5520                 break;
5521         }
5522         binder_inner_proc_unlock(proc);
5523         if (!print_all && m->count == header_pos)
5524                 m->count = start_pos;
5525 }
5526
5527 static const char * const binder_return_strings[] = {
5528         "BR_ERROR",
5529         "BR_OK",
5530         "BR_TRANSACTION",
5531         "BR_REPLY",
5532         "BR_ACQUIRE_RESULT",
5533         "BR_DEAD_REPLY",
5534         "BR_TRANSACTION_COMPLETE",
5535         "BR_INCREFS",
5536         "BR_ACQUIRE",
5537         "BR_RELEASE",
5538         "BR_DECREFS",
5539         "BR_ATTEMPT_ACQUIRE",
5540         "BR_NOOP",
5541         "BR_SPAWN_LOOPER",
5542         "BR_FINISHED",
5543         "BR_DEAD_BINDER",
5544         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5545         "BR_FAILED_REPLY"
5546 };
5547
5548 static const char * const binder_command_strings[] = {
5549         "BC_TRANSACTION",
5550         "BC_REPLY",
5551         "BC_ACQUIRE_RESULT",
5552         "BC_FREE_BUFFER",
5553         "BC_INCREFS",
5554         "BC_ACQUIRE",
5555         "BC_RELEASE",
5556         "BC_DECREFS",
5557         "BC_INCREFS_DONE",
5558         "BC_ACQUIRE_DONE",
5559         "BC_ATTEMPT_ACQUIRE",
5560         "BC_REGISTER_LOOPER",
5561         "BC_ENTER_LOOPER",
5562         "BC_EXIT_LOOPER",
5563         "BC_REQUEST_DEATH_NOTIFICATION",
5564         "BC_CLEAR_DEATH_NOTIFICATION",
5565         "BC_DEAD_BINDER_DONE",
5566         "BC_TRANSACTION_SG",
5567         "BC_REPLY_SG",
5568 };
5569
5570 static const char * const binder_objstat_strings[] = {
5571         "proc",
5572         "thread",
5573         "node",
5574         "ref",
5575         "death",
5576         "transaction",
5577         "transaction_complete"
5578 };
5579
5580 static void print_binder_stats(struct seq_file *m, const char *prefix,
5581                                struct binder_stats *stats)
5582 {
5583         int i;
5584
5585         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5586                      ARRAY_SIZE(binder_command_strings));
5587         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5588                 int temp = atomic_read(&stats->bc[i]);
5589
5590                 if (temp)
5591                         seq_printf(m, "%s%s: %d\n", prefix,
5592                                    binder_command_strings[i], temp);
5593         }
5594
5595         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5596                      ARRAY_SIZE(binder_return_strings));
5597         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5598                 int temp = atomic_read(&stats->br[i]);
5599
5600                 if (temp)
5601                         seq_printf(m, "%s%s: %d\n", prefix,
5602                                    binder_return_strings[i], temp);
5603         }
5604
5605         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5606                      ARRAY_SIZE(binder_objstat_strings));
5607         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5608                      ARRAY_SIZE(stats->obj_deleted));
5609         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5610                 int created = atomic_read(&stats->obj_created[i]);
5611                 int deleted = atomic_read(&stats->obj_deleted[i]);
5612
5613                 if (created || deleted)
5614                         seq_printf(m, "%s%s: active %d total %d\n",
5615                                 prefix,
5616                                 binder_objstat_strings[i],
5617                                 created - deleted,
5618                                 created);
5619         }
5620 }
5621
5622 static void print_binder_proc_stats(struct seq_file *m,
5623                                     struct binder_proc *proc)
5624 {
5625         struct binder_work *w;
5626         struct binder_thread *thread;
5627         struct rb_node *n;
5628         int count, strong, weak, ready_threads;
5629         size_t free_async_space =
5630                 binder_alloc_get_free_async_space(&proc->alloc);
5631
5632         seq_printf(m, "proc %d\n", proc->pid);
5633         seq_printf(m, "context %s\n", proc->context->name);
5634         count = 0;
5635         ready_threads = 0;
5636         binder_inner_proc_lock(proc);
5637         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5638                 count++;
5639
5640         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5641                 ready_threads++;
5642
5643         seq_printf(m, "  threads: %d\n", count);
5644         seq_printf(m, "  requested threads: %d+%d/%d\n"
5645                         "  ready threads %d\n"
5646                         "  free async space %zd\n", proc->requested_threads,
5647                         proc->requested_threads_started, proc->max_threads,
5648                         ready_threads,
5649                         free_async_space);
5650         count = 0;
5651         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5652                 count++;
5653         binder_inner_proc_unlock(proc);
5654         seq_printf(m, "  nodes: %d\n", count);
5655         count = 0;
5656         strong = 0;
5657         weak = 0;
5658         binder_proc_lock(proc);
5659         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5660                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5661                                                   rb_node_desc);
5662                 count++;
5663                 strong += ref->data.strong;
5664                 weak += ref->data.weak;
5665         }
5666         binder_proc_unlock(proc);
5667         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5668
5669         count = binder_alloc_get_allocated_count(&proc->alloc);
5670         seq_printf(m, "  buffers: %d\n", count);
5671
5672         binder_alloc_print_pages(m, &proc->alloc);
5673
5674         count = 0;
5675         binder_inner_proc_lock(proc);
5676         list_for_each_entry(w, &proc->todo, entry) {
5677                 if (w->type == BINDER_WORK_TRANSACTION)
5678                         count++;
5679         }
5680         binder_inner_proc_unlock(proc);
5681         seq_printf(m, "  pending transactions: %d\n", count);
5682
5683         print_binder_stats(m, "  ", &proc->stats);
5684 }
5685
5686
5687 static int state_show(struct seq_file *m, void *unused)
5688 {
5689         struct binder_proc *proc;
5690         struct binder_node *node;
5691         struct binder_node *last_node = NULL;
5692
5693         seq_puts(m, "binder state:\n");
5694
5695         spin_lock(&binder_dead_nodes_lock);
5696         if (!hlist_empty(&binder_dead_nodes))
5697                 seq_puts(m, "dead nodes:\n");
5698         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5699                 /*
5700                  * take a temporary reference on the node so it
5701                  * survives and isn't removed from the list
5702                  * while we print it.
5703                  */
5704                 node->tmp_refs++;
5705                 spin_unlock(&binder_dead_nodes_lock);
5706                 if (last_node)
5707                         binder_put_node(last_node);
5708                 binder_node_lock(node);
5709                 print_binder_node_nilocked(m, node);
5710                 binder_node_unlock(node);
5711                 last_node = node;
5712                 spin_lock(&binder_dead_nodes_lock);
5713         }
5714         spin_unlock(&binder_dead_nodes_lock);
5715         if (last_node)
5716                 binder_put_node(last_node);
5717
5718         mutex_lock(&binder_procs_lock);
5719         hlist_for_each_entry(proc, &binder_procs, proc_node)
5720                 print_binder_proc(m, proc, 1);
5721         mutex_unlock(&binder_procs_lock);
5722
5723         return 0;
5724 }
5725
5726 static int stats_show(struct seq_file *m, void *unused)
5727 {
5728         struct binder_proc *proc;
5729
5730         seq_puts(m, "binder stats:\n");
5731
5732         print_binder_stats(m, "", &binder_stats);
5733
5734         mutex_lock(&binder_procs_lock);
5735         hlist_for_each_entry(proc, &binder_procs, proc_node)
5736                 print_binder_proc_stats(m, proc);
5737         mutex_unlock(&binder_procs_lock);
5738
5739         return 0;
5740 }
5741
5742 static int transactions_show(struct seq_file *m, void *unused)
5743 {
5744         struct binder_proc *proc;
5745
5746         seq_puts(m, "binder transactions:\n");
5747         mutex_lock(&binder_procs_lock);
5748         hlist_for_each_entry(proc, &binder_procs, proc_node)
5749                 print_binder_proc(m, proc, 0);
5750         mutex_unlock(&binder_procs_lock);
5751
5752         return 0;
5753 }
5754
5755 static int proc_show(struct seq_file *m, void *unused)
5756 {
5757         struct binder_proc *itr;
5758         int pid = (unsigned long)m->private;
5759
5760         mutex_lock(&binder_procs_lock);
5761         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5762                 if (itr->pid == pid) {
5763                         seq_puts(m, "binder proc state:\n");
5764                         print_binder_proc(m, itr, 1);
5765                 }
5766         }
5767         mutex_unlock(&binder_procs_lock);
5768
5769         return 0;
5770 }
5771
5772 static void print_binder_transaction_log_entry(struct seq_file *m,
5773                                         struct binder_transaction_log_entry *e)
5774 {
5775         int debug_id = READ_ONCE(e->debug_id_done);
5776         /*
5777          * read barrier to guarantee debug_id_done read before
5778          * we print the log values
5779          */
5780         smp_rmb();
5781         seq_printf(m,
5782                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5783                    e->debug_id, (e->call_type == 2) ? "reply" :
5784                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5785                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
5786                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
5787                    e->return_error, e->return_error_param,
5788                    e->return_error_line);
5789         /*
5790          * read-barrier to guarantee read of debug_id_done after
5791          * done printing the fields of the entry
5792          */
5793         smp_rmb();
5794         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5795                         "\n" : " (incomplete)\n");
5796 }
5797
5798 static int transaction_log_show(struct seq_file *m, void *unused)
5799 {
5800         struct binder_transaction_log *log = m->private;
5801         unsigned int log_cur = atomic_read(&log->cur);
5802         unsigned int count;
5803         unsigned int cur;
5804         int i;
5805
5806         count = log_cur + 1;
5807         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5808                 0 : count % ARRAY_SIZE(log->entry);
5809         if (count > ARRAY_SIZE(log->entry) || log->full)
5810                 count = ARRAY_SIZE(log->entry);
5811         for (i = 0; i < count; i++) {
5812                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5813
5814                 print_binder_transaction_log_entry(m, &log->entry[index]);
5815         }
5816         return 0;
5817 }
5818
5819 static const struct file_operations binder_fops = {
5820         .owner = THIS_MODULE,
5821         .poll = binder_poll,
5822         .unlocked_ioctl = binder_ioctl,
5823         .compat_ioctl = binder_ioctl,
5824         .mmap = binder_mmap,
5825         .open = binder_open,
5826         .flush = binder_flush,
5827         .release = binder_release,
5828 };
5829
5830 DEFINE_SHOW_ATTRIBUTE(state);
5831 DEFINE_SHOW_ATTRIBUTE(stats);
5832 DEFINE_SHOW_ATTRIBUTE(transactions);
5833 DEFINE_SHOW_ATTRIBUTE(transaction_log);
5834
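/*
 * Allocate and register a single binder misc device (e.g. /dev/binder)
 * and add it to the global binder_devices list.
 */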
5835 static int __init init_binder_device(const char *name)
5836 {
5837         int ret;
5838         struct binder_device *binder_device;
5839
5840         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5841         if (!binder_device)
5842                 return -ENOMEM;
5843
5844         binder_device->miscdev.fops = &binder_fops;
5845         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5846         binder_device->miscdev.name = name;
5847
5848         binder_device->context.binder_context_mgr_uid = INVALID_UID;
5849         binder_device->context.name = name;
5850         mutex_init(&binder_device->context.context_mgr_node_lock);
5851
5852         ret = misc_register(&binder_device->miscdev);
5853         if (ret < 0) {
5854                 kfree(binder_device);
5855                 return ret;
5856         }
5857
5858         hlist_add_head(&binder_device->hlist, &binder_devices);
5859
5860         return ret;
5861 }
5862
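/*
 * Driver init: set up the binder allocator shrinker, create the debugfs
 * hierarchy and register one binder device for each comma-separated name
 * in the binder_devices_param string.
 */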
5863 static int __init binder_init(void)
5864 {
5865         int ret;
5866         char *device_name, *device_names, *device_tmp;
5867         struct binder_device *device;
5868         struct hlist_node *tmp;
5869
5870         ret = binder_alloc_shrinker_init();
5871         if (ret)
5872                 return ret;
5873
5874         atomic_set(&binder_transaction_log.cur, ~0U);
5875         atomic_set(&binder_transaction_log_failed.cur, ~0U);
5876
5877         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5878         if (binder_debugfs_dir_entry_root)
5879                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5880                                                  binder_debugfs_dir_entry_root);
5881
5882         if (binder_debugfs_dir_entry_root) {
5883                 debugfs_create_file("state",
5884                                     0444,
5885                                     binder_debugfs_dir_entry_root,
5886                                     NULL,
5887                                     &state_fops);
5888                 debugfs_create_file("stats",
5889                                     0444,
5890                                     binder_debugfs_dir_entry_root,
5891                                     NULL,
5892                                     &stats_fops);
5893                 debugfs_create_file("transactions",
5894                                     0444,
5895                                     binder_debugfs_dir_entry_root,
5896                                     NULL,
5897                                     &transactions_fops);
5898                 debugfs_create_file("transaction_log",
5899                                     0444,
5900                                     binder_debugfs_dir_entry_root,
5901                                     &binder_transaction_log,
5902                                     &transaction_log_fops);
5903                 debugfs_create_file("failed_transaction_log",
5904                                     0444,
5905                                     binder_debugfs_dir_entry_root,
5906                                     &binder_transaction_log_failed,
5907                                     &transaction_log_fops);
5908         }
5909
5910         /*
5911          * Copy the module parameter string, because we don't want to
5912          * tokenize it in-place.
5913          */
5914         device_names = kstrdup(binder_devices_param, GFP_KERNEL);
5915         if (!device_names) {
5916                 ret = -ENOMEM;
5917                 goto err_alloc_device_names_failed;
5918         }
5919
5920         device_tmp = device_names;
5921         while ((device_name = strsep(&device_tmp, ","))) {
5922                 ret = init_binder_device(device_name);
5923                 if (ret)
5924                         goto err_init_binder_device_failed;
5925         }
5926
5927         return ret;
5928
5929 err_init_binder_device_failed:
5930         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5931                 misc_deregister(&device->miscdev);
5932                 hlist_del(&device->hlist);
5933                 kfree(device);
5934         }
5935
5936         kfree(device_names);
5937
5938 err_alloc_device_names_failed:
5939         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5940
5941         return ret;
5942 }
5943
5944 device_initcall(binder_init);
5945
5946 #define CREATE_TRACE_POINTS
5947 #include "binder_trace.h"
5948
5949 MODULE_LICENSE("GPL v2");