pandora-kernel.git: drivers/staging/android/binder.c (merge of branch 'stable-3.2' into pandora-3.2)
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <asm/cacheflush.h>
19 #include <linux/fdtable.h>
20 #include <linux/file.h>
21 #include <linux/fs.h>
22 #include <linux/list.h>
23 #include <linux/miscdevice.h>
24 #include <linux/mm.h>
25 #include <linux/module.h>
26 #include <linux/mutex.h>
27 #include <linux/nsproxy.h>
28 #include <linux/poll.h>
29 #include <linux/debugfs.h>
30 #include <linux/rbtree.h>
31 #include <linux/sched.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/slab.h>
36
37 #include "binder.h"
38
39 static DEFINE_MUTEX(binder_lock);
40 static DEFINE_MUTEX(binder_deferred_lock);
41 static DEFINE_MUTEX(binder_mmap_lock);
42
43 static HLIST_HEAD(binder_procs);
44 static HLIST_HEAD(binder_deferred_list);
45 static HLIST_HEAD(binder_dead_nodes);
46
47 static struct dentry *binder_debugfs_dir_entry_root;
48 static struct dentry *binder_debugfs_dir_entry_proc;
49 static struct dentry *binder_debugfs_state;
50 static struct dentry *binder_debugfs_stats;
51 static struct dentry *binder_debugfs_transactions;
52 static struct dentry *binder_debugfs_transaction_log;
53 static struct dentry *binder_debugfs_failed_transaction_log;
54 static struct binder_node *binder_context_mgr_node;
55 static uid_t binder_context_mgr_uid = -1;
56 static int binder_last_id;
57 static struct workqueue_struct *binder_deferred_workqueue;
58
59 #define BINDER_DEBUG_ENTRY(name) \
60 static int binder_##name##_open(struct inode *inode, struct file *file) \
61 { \
62         return single_open(file, binder_##name##_show, inode->i_private); \
63 } \
64 \
65 static const struct file_operations binder_##name##_fops = { \
66         .owner = THIS_MODULE, \
67         .open = binder_##name##_open, \
68         .read = seq_read, \
69         .llseek = seq_lseek, \
70         .release = single_release, \
71 }
72
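/*
 * BINDER_DEBUG_ENTRY(proc) below expands to a binder_proc_open() helper
 * that feeds binder_proc_show() into single_open(), plus a matching
 * binder_proc_fops structure suitable for debugfs_create_file().  The
 * state/stats/transactions debugfs entries declared above are presumably
 * created from the same pattern elsewhere in this file.
 */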
73 static int binder_proc_show(struct seq_file *m, void *unused);
74 BINDER_DEBUG_ENTRY(proc);
75
76 /* SZ_1K and SZ_4M are only defined in include/asm-arm/sizes.h */
77 #ifndef SZ_1K
78 #define SZ_1K                               0x400
79 #endif
80
81 #ifndef SZ_4M
82 #define SZ_4M                               0x400000
83 #endif
84
85 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
86
87 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
88
89 enum {
90         BINDER_DEBUG_USER_ERROR             = 1U << 0,
91         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
92         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
93         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
94         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
95         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
96         BINDER_DEBUG_READ_WRITE             = 1U << 6,
97         BINDER_DEBUG_USER_REFS              = 1U << 7,
98         BINDER_DEBUG_THREADS                = 1U << 8,
99         BINDER_DEBUG_TRANSACTION            = 1U << 9,
100         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
101         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
102         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
103         BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
104         BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
105         BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
106 };
107 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
108         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
109 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
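/*
 * Illustrative usage: debug_mask is a writable module parameter, so a
 * single message class can typically be enabled at run time, e.g.
 *
 *   echo 0x200 > /sys/module/binder/parameters/debug_mask
 *
 * which sets only the BINDER_DEBUG_TRANSACTION bit (1U << 9) from the
 * enum above.
 */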
110
111 static bool binder_debug_no_lock;
112 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
113
114 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
115 static int binder_stop_on_user_error;
116
117 static int binder_set_stop_on_user_error(const char *val,
118                                          struct kernel_param *kp)
119 {
120         int ret;
121         ret = param_set_int(val, kp);
122         if (binder_stop_on_user_error < 2)
123                 wake_up(&binder_user_error_wait);
124         return ret;
125 }
126 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
127         param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
128
129 #define binder_debug(mask, x...) \
130         do { \
131                 if (binder_debug_mask & mask) \
132                         printk(KERN_INFO x); \
133         } while (0)
134
135 #define binder_user_error(x...) \
136         do { \
137                 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
138                         printk(KERN_INFO x); \
139                 if (binder_stop_on_user_error) \
140                         binder_stop_on_user_error = 2; \
141         } while (0)
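/*
 * binder_user_error() both logs the message (when BINDER_DEBUG_USER_ERROR
 * is enabled) and, if stop_on_user_error is non-zero, latches the parameter
 * to 2, presumably so the rest of the driver can stall on
 * binder_user_error_wait until the error has been inspected.  Writing a
 * value below 2 back into the parameter runs
 * binder_set_stop_on_user_error() above and wakes those waiters.
 */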
142
143 enum binder_stat_types {
144         BINDER_STAT_PROC,
145         BINDER_STAT_THREAD,
146         BINDER_STAT_NODE,
147         BINDER_STAT_REF,
148         BINDER_STAT_DEATH,
149         BINDER_STAT_TRANSACTION,
150         BINDER_STAT_TRANSACTION_COMPLETE,
151         BINDER_STAT_COUNT
152 };
153
154 struct binder_stats {
155         int br[_IOC_NR(BR_FAILED_REPLY) + 1];
156         int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
157         int obj_created[BINDER_STAT_COUNT];
158         int obj_deleted[BINDER_STAT_COUNT];
159 };
160
161 static struct binder_stats binder_stats;
162
163 static inline void binder_stats_deleted(enum binder_stat_types type)
164 {
165         binder_stats.obj_deleted[type]++;
166 }
167
168 static inline void binder_stats_created(enum binder_stat_types type)
169 {
170         binder_stats.obj_created[type]++;
171 }
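/*
 * The bc[] and br[] arrays are indexed by the ioctl command number of each
 * BC_* / BR_* code, so a (hypothetical) command counter update looks like
 *
 *   binder_stats.bc[_IOC_NR(BC_TRANSACTION)]++;
 *
 * while object creation/destruction goes through binder_stats_created()
 * and binder_stats_deleted() with one of the BINDER_STAT_* types.
 */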
172
173 struct binder_transaction_log_entry {
174         int debug_id;
175         int call_type;
176         int from_proc;
177         int from_thread;
178         int target_handle;
179         int to_proc;
180         int to_thread;
181         int to_node;
182         int data_size;
183         int offsets_size;
184 };
185 struct binder_transaction_log {
186         int next;
187         int full;
188         struct binder_transaction_log_entry entry[32];
189 };
190 static struct binder_transaction_log binder_transaction_log;
191 static struct binder_transaction_log binder_transaction_log_failed;
192
193 static struct binder_transaction_log_entry *binder_transaction_log_add(
194         struct binder_transaction_log *log)
195 {
196         struct binder_transaction_log_entry *e;
197         e = &log->entry[log->next];
198         memset(e, 0, sizeof(*e));
199         log->next++;
200         if (log->next == ARRAY_SIZE(log->entry)) {
201                 log->next = 0;
202                 log->full = 1;
203         }
204         return e;
205 }
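/*
 * The transaction log is a fixed ring of 32 entries: next wraps to zero and
 * full is set once the ring has wrapped, so readers know older entries have
 * been overwritten.  binder_transaction() below grabs an entry and records
 * the calling/target pids and the data/offsets sizes of each transaction.
 */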
206
207 struct binder_work {
208         struct list_head entry;
209         enum {
210                 BINDER_WORK_TRANSACTION = 1,
211                 BINDER_WORK_TRANSACTION_COMPLETE,
212                 BINDER_WORK_NODE,
213                 BINDER_WORK_DEAD_BINDER,
214                 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
215                 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
216         } type;
217 };
218
219 struct binder_node {
220         int debug_id;
221         struct binder_work work;
222         union {
223                 struct rb_node rb_node;
224                 struct hlist_node dead_node;
225         };
226         struct binder_proc *proc;
227         struct hlist_head refs;
228         int internal_strong_refs;
229         int local_weak_refs;
230         int local_strong_refs;
231         void __user *ptr;
232         void __user *cookie;
233         unsigned has_strong_ref:1;
234         unsigned pending_strong_ref:1;
235         unsigned has_weak_ref:1;
236         unsigned pending_weak_ref:1;
237         unsigned has_async_transaction:1;
238         unsigned accept_fds:1;
239         unsigned min_priority:8;
240         struct list_head async_todo;
241 };
242
243 struct binder_ref_death {
244         struct binder_work work;
245         void __user *cookie;
246 };
247
248 struct binder_ref {
249         /* Lookups needed: */
250         /*   node + proc => ref (transaction) */
251         /*   desc + proc => ref (transaction, inc/dec ref) */
252         /*   node => refs + procs (proc exit) */
253         int debug_id;
254         struct rb_node rb_node_desc;
255         struct rb_node rb_node_node;
256         struct hlist_node node_entry;
257         struct binder_proc *proc;
258         struct binder_node *node;
259         uint32_t desc;
260         int strong;
261         int weak;
262         struct binder_ref_death *death;
263 };
264
265 struct binder_buffer {
266         struct list_head entry; /* free and allocated entries by address */
267         struct rb_node rb_node; /* free entry by size or allocated entry */
268                                 /* by address */
269         unsigned free:1;
270         unsigned allow_user_free:1;
271         unsigned async_transaction:1;
272         unsigned debug_id:29;
273
274         struct binder_transaction *transaction;
275
276         struct binder_node *target_node;
277         size_t data_size;
278         size_t offsets_size;
279         uint8_t data[0];
280 };
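/*
 * Layout note: each binder_buffer is a small header placed directly in the
 * mmap'ed transaction area, immediately followed by its payload in data[]
 * (data_size bytes of transaction data, then offsets_size bytes of offsets).
 * The usable payload size is not stored anywhere; binder_buffer_size()
 * below derives it from the address of the next buffer on proc->buffers.
 */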
281
282 enum binder_deferred_state {
283         BINDER_DEFERRED_PUT_FILES    = 0x01,
284         BINDER_DEFERRED_FLUSH        = 0x02,
285         BINDER_DEFERRED_RELEASE      = 0x04,
286 };
287
288 struct binder_proc {
289         struct hlist_node proc_node;
290         struct rb_root threads;
291         struct rb_root nodes;
292         struct rb_root refs_by_desc;
293         struct rb_root refs_by_node;
294         int pid;
295         struct vm_area_struct *vma;
296         struct mm_struct *vma_vm_mm;
297         struct task_struct *tsk;
298         struct files_struct *files;
299         struct hlist_node deferred_work_node;
300         int deferred_work;
301         void *buffer;
302         ptrdiff_t user_buffer_offset;
303
304         struct list_head buffers;
305         struct rb_root free_buffers;
306         struct rb_root allocated_buffers;
307         size_t free_async_space;
308
309         struct page **pages;
310         size_t buffer_size;
311         uint32_t buffer_free;
312         struct list_head todo;
313         wait_queue_head_t wait;
314         struct binder_stats stats;
315         struct list_head delivered_death;
316         int max_threads;
317         int requested_threads;
318         int requested_threads_started;
319         int ready_threads;
320         long default_priority;
321         struct dentry *debugfs_entry;
322 };
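/*
 * Address translation note: the transaction area is mapped twice, once into
 * kernel space at proc->buffer and once into the process's mmap region, and
 * user_buffer_offset records the constant difference between the two views:
 *
 *   user_addr = kern_addr + proc->user_buffer_offset;
 *   kern_addr = user_addr - proc->user_buffer_offset;
 *
 * Both directions are used below (vm_insert_page() and
 * binder_buffer_lookup()).
 */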
323
324 enum {
325         BINDER_LOOPER_STATE_REGISTERED  = 0x01,
326         BINDER_LOOPER_STATE_ENTERED     = 0x02,
327         BINDER_LOOPER_STATE_EXITED      = 0x04,
328         BINDER_LOOPER_STATE_INVALID     = 0x08,
329         BINDER_LOOPER_STATE_WAITING     = 0x10,
330         BINDER_LOOPER_STATE_NEED_RETURN = 0x20
331 };
332
333 struct binder_thread {
334         struct binder_proc *proc;
335         struct rb_node rb_node;
336         int pid;
337         int looper;
338         struct binder_transaction *transaction_stack;
339         struct list_head todo;
340         uint32_t return_error; /* Write failed, return error code in read buf */
341         uint32_t return_error2; /* Write failed, return error code in read */
342                 /* buffer. Used when sending a reply to a dead process that */
343                 /* we are also waiting on */
344         wait_queue_head_t wait;
345         struct binder_stats stats;
346 };
347
348 struct binder_transaction {
349         int debug_id;
350         struct binder_work work;
351         struct binder_thread *from;
352         struct binder_transaction *from_parent;
353         struct binder_proc *to_proc;
354         struct binder_thread *to_thread;
355         struct binder_transaction *to_parent;
356         unsigned need_reply:1;
357         /* unsigned is_dead:1; */       /* not used at the moment */
358
359         struct binder_buffer *buffer;
360         unsigned int    code;
361         unsigned int    flags;
362         long    priority;
363         long    saved_priority;
364         uid_t   sender_euid;
365 };
366
367 static void
368 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
369
370 /*
371  * copied from get_unused_fd_flags
372  */
373 int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
374 {
375         struct files_struct *files = proc->files;
376         int fd, error;
377         struct fdtable *fdt;
378         unsigned long rlim_cur;
379         unsigned long irqs;
380
381         if (files == NULL)
382                 return -ESRCH;
383
384         error = -EMFILE;
385         spin_lock(&files->file_lock);
386
387 repeat:
388         fdt = files_fdtable(files);
389         fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);
390
391         /*
392          * N.B. For clone tasks sharing a files structure, this test
393          * will limit the total number of files that can be opened.
394          */
395         rlim_cur = 0;
396         if (lock_task_sighand(proc->tsk, &irqs)) {
397                 rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
398                 unlock_task_sighand(proc->tsk, &irqs);
399         }
400         if (fd >= rlim_cur)
401                 goto out;
402
403         /* Do we need to expand the fd array or fd set?  */
404         error = expand_files(files, fd);
405         if (error < 0)
406                 goto out;
407
408         if (error) {
409                 /*
410                  * If we needed to expand the fd array we
411                  * might have blocked - try again.
412                  */
413                 error = -EMFILE;
414                 goto repeat;
415         }
416
417         __set_open_fd(fd, fdt);
418         if (flags & O_CLOEXEC)
419                 __set_close_on_exec(fd, fdt);
420         else
421                 __clear_close_on_exec(fd, fdt);
422         files->next_fd = fd + 1;
423 #if 1
424         /* Sanity check */
425         if (fdt->fd[fd] != NULL) {
426                 printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
427                 fdt->fd[fd] = NULL;
428         }
429 #endif
430         error = fd;
431
432 out:
433         spin_unlock(&files->file_lock);
434         return error;
435 }
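/*
 * Unlike get_unused_fd_flags(), which always operates on current->files,
 * this copy (and task_fd_install()/task_close_fd() below) takes the
 * files_struct of the given binder_proc, so the driver can reserve, install
 * or close descriptors in the receiving process while running in the
 * sender's context; that is presumably why the core helpers could not be
 * used directly here.
 */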
436
437 /*
438  * copied from fd_install
439  */
440 static void task_fd_install(
441         struct binder_proc *proc, unsigned int fd, struct file *file)
442 {
443         struct files_struct *files = proc->files;
444         struct fdtable *fdt;
445
446         if (files == NULL)
447                 return;
448
449         spin_lock(&files->file_lock);
450         fdt = files_fdtable(files);
451         BUG_ON(fdt->fd[fd] != NULL);
452         rcu_assign_pointer(fdt->fd[fd], file);
453         spin_unlock(&files->file_lock);
454 }
455
456 /*
457  * copied from __put_unused_fd in open.c
458  */
459 static void __put_unused_fd(struct files_struct *files, unsigned int fd)
460 {
461         struct fdtable *fdt = files_fdtable(files);
462         __clear_open_fd(fd, fdt);
463         if (fd < files->next_fd)
464                 files->next_fd = fd;
465 }
466
467 /*
468  * copied from sys_close
469  */
470 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
471 {
472         struct file *filp;
473         struct files_struct *files = proc->files;
474         struct fdtable *fdt;
475         int retval;
476
477         if (files == NULL)
478                 return -ESRCH;
479
480         spin_lock(&files->file_lock);
481         fdt = files_fdtable(files);
482         if (fd >= fdt->max_fds)
483                 goto out_unlock;
484         filp = fdt->fd[fd];
485         if (!filp)
486                 goto out_unlock;
487         rcu_assign_pointer(fdt->fd[fd], NULL);
488         __clear_close_on_exec(fd, fdt);
489         __put_unused_fd(files, fd);
490         spin_unlock(&files->file_lock);
491         retval = filp_close(filp, files);
492
493         /* can't restart close syscall because file table entry was cleared */
494         if (unlikely(retval == -ERESTARTSYS ||
495                      retval == -ERESTARTNOINTR ||
496                      retval == -ERESTARTNOHAND ||
497                      retval == -ERESTART_RESTARTBLOCK))
498                 retval = -EINTR;
499
500         return retval;
501
502 out_unlock:
503         spin_unlock(&files->file_lock);
504         return -EBADF;
505 }
506
507 static void binder_set_nice(long nice)
508 {
509         long min_nice;
510         if (can_nice(current, nice)) {
511                 set_user_nice(current, nice);
512                 return;
513         }
514         min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
515         binder_debug(BINDER_DEBUG_PRIORITY_CAP,
516                      "binder: %d: nice value %ld not allowed, using "
517                      "%ld instead\n", current->pid, nice, min_nice);
518         set_user_nice(current, min_nice);
519         if (min_nice < 20)
520                 return;
521         binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
522 }
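/*
 * Worked example: min_nice = 20 - RLIMIT_NICE soft limit, so a limit of 1
 * caps the caller at nice 19, a limit of 40 allows the full range down to
 * nice -20, and a limit of 0 (the usual default) gives min_nice = 20, which
 * is outside the valid -20..19 range and triggers the RLIMIT_NICE warning.
 */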
523
524 static size_t binder_buffer_size(struct binder_proc *proc,
525                                  struct binder_buffer *buffer)
526 {
527         if (list_is_last(&buffer->entry, &proc->buffers))
528                 return proc->buffer + proc->buffer_size - (void *)buffer->data;
529         else
530                 return (size_t)list_entry(buffer->entry.next,
531                         struct binder_buffer, entry) - (size_t)buffer->data;
532 }
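/*
 * A buffer's size is the gap between the start of its data[] payload and
 * the start of the next binder_buffer header (or the end of the whole
 * mapping for the last buffer).  For example, adjacent headers at kernel
 * addresses A and B give the first buffer
 * B - (A + offsetof(struct binder_buffer, data)) usable bytes.
 */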
533
534 static void binder_insert_free_buffer(struct binder_proc *proc,
535                                       struct binder_buffer *new_buffer)
536 {
537         struct rb_node **p = &proc->free_buffers.rb_node;
538         struct rb_node *parent = NULL;
539         struct binder_buffer *buffer;
540         size_t buffer_size;
541         size_t new_buffer_size;
542
543         BUG_ON(!new_buffer->free);
544
545         new_buffer_size = binder_buffer_size(proc, new_buffer);
546
547         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
548                      "binder: %d: add free buffer, size %zd, "
549                      "at %p\n", proc->pid, new_buffer_size, new_buffer);
550
551         while (*p) {
552                 parent = *p;
553                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
554                 BUG_ON(!buffer->free);
555
556                 buffer_size = binder_buffer_size(proc, buffer);
557
558                 if (new_buffer_size < buffer_size)
559                         p = &parent->rb_left;
560                 else
561                         p = &parent->rb_right;
562         }
563         rb_link_node(&new_buffer->rb_node, parent, p);
564         rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
565 }
566
567 static void binder_insert_allocated_buffer(struct binder_proc *proc,
568                                            struct binder_buffer *new_buffer)
569 {
570         struct rb_node **p = &proc->allocated_buffers.rb_node;
571         struct rb_node *parent = NULL;
572         struct binder_buffer *buffer;
573
574         BUG_ON(new_buffer->free);
575
576         while (*p) {
577                 parent = *p;
578                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
579                 BUG_ON(buffer->free);
580
581                 if (new_buffer < buffer)
582                         p = &parent->rb_left;
583                 else if (new_buffer > buffer)
584                         p = &parent->rb_right;
585                 else
586                         BUG();
587         }
588         rb_link_node(&new_buffer->rb_node, parent, p);
589         rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
590 }
591
592 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
593                                                   void __user *user_ptr)
594 {
595         struct rb_node *n = proc->allocated_buffers.rb_node;
596         struct binder_buffer *buffer;
597         struct binder_buffer *kern_ptr;
598
599         kern_ptr = user_ptr - proc->user_buffer_offset
600                 - offsetof(struct binder_buffer, data);
601
602         while (n) {
603                 buffer = rb_entry(n, struct binder_buffer, rb_node);
604                 BUG_ON(buffer->free);
605
606                 if (kern_ptr < buffer)
607                         n = n->rb_left;
608                 else if (kern_ptr > buffer)
609                         n = n->rb_right;
610                 else
611                         return buffer;
612         }
613         return NULL;
614 }
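/*
 * The pointer handed to userspace for a buffer is the user-space address of
 * its data[] payload, so the reverse mapping performed above is
 *
 *   kern_ptr = user_ptr - proc->user_buffer_offset
 *                       - offsetof(struct binder_buffer, data);
 *
 * and the result is matched against header addresses in the
 * allocated_buffers rbtree.
 */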
615
616 static int binder_update_page_range(struct binder_proc *proc, int allocate,
617                                     void *start, void *end,
618                                     struct vm_area_struct *vma)
619 {
620         void *page_addr;
621         unsigned long user_page_addr;
622         struct vm_struct tmp_area;
623         struct page **page;
624         struct mm_struct *mm;
625
626         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
627                      "binder: %d: %s pages %p-%p\n", proc->pid,
628                      allocate ? "allocate" : "free", start, end);
629
630         if (end <= start)
631                 return 0;
632
633         if (vma)
634                 mm = NULL;
635         else
636                 mm = get_task_mm(proc->tsk);
637
638         if (mm) {
639                 down_write(&mm->mmap_sem);
640                 vma = proc->vma;
641                 if (vma && mm != proc->vma_vm_mm) {
642                         pr_err("binder: %d: vma mm and task mm mismatch\n",
643                                 proc->pid);
644                         vma = NULL;
645                 }
646         }
647
648         if (allocate == 0)
649                 goto free_range;
650
651         if (vma == NULL) {
652                 printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
653                        "map pages in userspace, no vma\n", proc->pid);
654                 goto err_no_vma;
655         }
656
657         for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
658                 int ret;
659                 struct page **page_array_ptr;
660                 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
661
662                 BUG_ON(*page);
663                 *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
664                 if (*page == NULL) {
665                         printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
666                                "for page at %p\n", proc->pid, page_addr);
667                         goto err_alloc_page_failed;
668                 }
669                 tmp_area.addr = page_addr;
670                 tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
671                 page_array_ptr = page;
672                 ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
673                 if (ret) {
674                         printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
675                                "to map page at %p in kernel\n",
676                                proc->pid, page_addr);
677                         goto err_map_kernel_failed;
678                 }
679                 user_page_addr =
680                         (uintptr_t)page_addr + proc->user_buffer_offset;
681                 ret = vm_insert_page(vma, user_page_addr, page[0]);
682                 if (ret) {
683                         printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
684                                "to map page at %lx in userspace\n",
685                                proc->pid, user_page_addr);
686                         goto err_vm_insert_page_failed;
687                 }
688                 /* vm_insert_page does not seem to increment the refcount */
689         }
690         if (mm) {
691                 up_write(&mm->mmap_sem);
692                 mmput(mm);
693         }
694         return 0;
695
696 free_range:
697         for (page_addr = end - PAGE_SIZE; page_addr >= start;
698              page_addr -= PAGE_SIZE) {
699                 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
700                 if (vma)
701                         zap_page_range(vma, (uintptr_t)page_addr +
702                                 proc->user_buffer_offset, PAGE_SIZE, NULL);
703 err_vm_insert_page_failed:
704                 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
705 err_map_kernel_failed:
706                 __free_page(*page);
707                 *page = NULL;
708 err_alloc_page_failed:
709                 ;
710         }
711 err_no_vma:
712         if (mm) {
713                 up_write(&mm->mmap_sem);
714                 mmput(mm);
715         }
716         return -ENOMEM;
717 }
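/*
 * Each page in the requested range is mapped twice: into kernel space with
 * map_vm_area() and into the target process with vm_insert_page().  The
 * error labels fall through into the free_range loop, which doubles as the
 * normal free path (allocate == 0) and undoes the user mapping, the kernel
 * mapping and the page allocation in reverse order.
 */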
718
719 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
720                                               size_t data_size,
721                                               size_t offsets_size, int is_async)
722 {
723         struct rb_node *n = proc->free_buffers.rb_node;
724         struct binder_buffer *buffer;
725         size_t buffer_size;
726         struct rb_node *best_fit = NULL;
727         void *has_page_addr;
728         void *end_page_addr;
729         size_t size;
730
731         if (proc->vma == NULL) {
732                 printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
733                        proc->pid);
734                 return NULL;
735         }
736
737         size = ALIGN(data_size, sizeof(void *)) +
738                 ALIGN(offsets_size, sizeof(void *));
739
740         if (size < data_size || size < offsets_size) {
741                 binder_user_error("binder: %d: got transaction with invalid "
742                         "size %zd-%zd\n", proc->pid, data_size, offsets_size);
743                 return NULL;
744         }
745
746         if (is_async &&
747             proc->free_async_space < size + sizeof(struct binder_buffer)) {
748                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
749                              "binder: %d: binder_alloc_buf size %zd "
750                              "failed, no async space left\n", proc->pid, size);
751                 return NULL;
752         }
753
754         while (n) {
755                 buffer = rb_entry(n, struct binder_buffer, rb_node);
756                 BUG_ON(!buffer->free);
757                 buffer_size = binder_buffer_size(proc, buffer);
758
759                 if (size < buffer_size) {
760                         best_fit = n;
761                         n = n->rb_left;
762                 } else if (size > buffer_size)
763                         n = n->rb_right;
764                 else {
765                         best_fit = n;
766                         break;
767                 }
768         }
769         if (best_fit == NULL) {
770                 printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
771                        "no address space\n", proc->pid, size);
772                 return NULL;
773         }
774         if (n == NULL) {
775                 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
776                 buffer_size = binder_buffer_size(proc, buffer);
777         }
778
779         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
780                      "binder: %d: binder_alloc_buf size %zd got buff"
781                      "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
782
783         has_page_addr =
784                 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
785         if (n == NULL) {
786                 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
787                         buffer_size = size; /* no room for other buffers */
788                 else
789                         buffer_size = size + sizeof(struct binder_buffer);
790         }
791         end_page_addr =
792                 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
793         if (end_page_addr > has_page_addr)
794                 end_page_addr = has_page_addr;
795         if (binder_update_page_range(proc, 1,
796             (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
797                 return NULL;
798
799         rb_erase(best_fit, &proc->free_buffers);
800         buffer->free = 0;
801         binder_insert_allocated_buffer(proc, buffer);
802         if (buffer_size != size) {
803                 struct binder_buffer *new_buffer = (void *)buffer->data + size;
804                 list_add(&new_buffer->entry, &buffer->entry);
805                 new_buffer->free = 1;
806                 binder_insert_free_buffer(proc, new_buffer);
807         }
808         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
809                      "binder: %d: binder_alloc_buf size %zd got "
810                      "%p\n", proc->pid, size, buffer);
811         buffer->data_size = data_size;
812         buffer->offsets_size = offsets_size;
813         buffer->async_transaction = is_async;
814         if (is_async) {
815                 proc->free_async_space -= size + sizeof(struct binder_buffer);
816                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
817                              "binder: %d: binder_alloc_buf size %zd "
818                              "async free %zd\n", proc->pid, size,
819                              proc->free_async_space);
820         }
821
822         return buffer;
823 }
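/*
 * Allocation sketch (illustrative numbers): a request with data_size = 24
 * and offsets_size = 8 on a 64-bit build needs size = 32 bytes.  The
 * best-fit free buffer is taken from the free_buffers rbtree; if it is
 * larger than size + sizeof(struct binder_buffer) + 4, its tail is split
 * off as a new free buffer, otherwise the whole chunk is handed out.  Only
 * the whole pages spanned by the new payload are populated through
 * binder_update_page_range().
 */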
824
825 static void *buffer_start_page(struct binder_buffer *buffer)
826 {
827         return (void *)((uintptr_t)buffer & PAGE_MASK);
828 }
829
830 static void *buffer_end_page(struct binder_buffer *buffer)
831 {
832         return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
833 }
834
835 static void binder_delete_free_buffer(struct binder_proc *proc,
836                                       struct binder_buffer *buffer)
837 {
838         struct binder_buffer *prev, *next = NULL;
839         int free_page_end = 1;
840         int free_page_start = 1;
841
842         BUG_ON(proc->buffers.next == &buffer->entry);
843         prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
844         BUG_ON(!prev->free);
845         if (buffer_end_page(prev) == buffer_start_page(buffer)) {
846                 free_page_start = 0;
847                 if (buffer_end_page(prev) == buffer_end_page(buffer))
848                         free_page_end = 0;
849                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
850                              "binder: %d: merge free, buffer %p "
851                              "share page with %p\n", proc->pid, buffer, prev);
852         }
853
854         if (!list_is_last(&buffer->entry, &proc->buffers)) {
855                 next = list_entry(buffer->entry.next,
856                                   struct binder_buffer, entry);
857                 if (buffer_start_page(next) == buffer_end_page(buffer)) {
858                         free_page_end = 0;
859                         if (buffer_start_page(next) ==
860                             buffer_start_page(buffer))
861                                 free_page_start = 0;
862                         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
863                                      "binder: %d: merge free, buffer"
864                                      " %p share page with %p\n", proc->pid,
865                                      buffer, next);
866                 }
867         }
868         list_del(&buffer->entry);
869         if (free_page_start || free_page_end) {
870                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
871                              "binder: %d: merge free, buffer %p do "
872                              "not share page%s%s with %p or %p\n",
873                              proc->pid, buffer, free_page_start ? "" : " end",
874                              free_page_end ? "" : " start", prev, next);
875                 binder_update_page_range(proc, 0, free_page_start ?
876                         buffer_start_page(buffer) : buffer_end_page(buffer),
877                         (free_page_end ? buffer_end_page(buffer) :
878                         buffer_start_page(buffer)) + PAGE_SIZE, NULL);
879         }
880 }
881
882 static void binder_free_buf(struct binder_proc *proc,
883                             struct binder_buffer *buffer)
884 {
885         size_t size, buffer_size;
886
887         buffer_size = binder_buffer_size(proc, buffer);
888
889         size = ALIGN(buffer->data_size, sizeof(void *)) +
890                 ALIGN(buffer->offsets_size, sizeof(void *));
891
892         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
893                      "binder: %d: binder_free_buf %p size %zd buffer"
894                      "_size %zd\n", proc->pid, buffer, size, buffer_size);
895
896         BUG_ON(buffer->free);
897         BUG_ON(size > buffer_size);
898         BUG_ON(buffer->transaction != NULL);
899         BUG_ON((void *)buffer < proc->buffer);
900         BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
901
902         if (buffer->async_transaction) {
903                 proc->free_async_space += size + sizeof(struct binder_buffer);
904
905                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
906                              "binder: %d: binder_free_buf size %zd "
907                              "async free %zd\n", proc->pid, size,
908                              proc->free_async_space);
909         }
910
911         binder_update_page_range(proc, 0,
912                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
913                 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
914                 NULL);
915         rb_erase(&buffer->rb_node, &proc->allocated_buffers);
916         buffer->free = 1;
917         if (!list_is_last(&buffer->entry, &proc->buffers)) {
918                 struct binder_buffer *next = list_entry(buffer->entry.next,
919                                                 struct binder_buffer, entry);
920                 if (next->free) {
921                         rb_erase(&next->rb_node, &proc->free_buffers);
922                         binder_delete_free_buffer(proc, next);
923                 }
924         }
925         if (proc->buffers.next != &buffer->entry) {
926                 struct binder_buffer *prev = list_entry(buffer->entry.prev,
927                                                 struct binder_buffer, entry);
928                 if (prev->free) {
929                         binder_delete_free_buffer(proc, buffer);
930                         rb_erase(&prev->rb_node, &proc->free_buffers);
931                         buffer = prev;
932                 }
933         }
934         binder_insert_free_buffer(proc, buffer);
935 }
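/*
 * Freeing is the inverse of binder_alloc_buf(): the pages fully covered by
 * the payload are released, and the buffer is merged with any free
 * neighbour (next first, then prev) via binder_delete_free_buffer() before
 * being reinserted into the free_buffers tree, so two adjacent free entries
 * never coexist.
 */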
936
937 static struct binder_node *binder_get_node(struct binder_proc *proc,
938                                            void __user *ptr)
939 {
940         struct rb_node *n = proc->nodes.rb_node;
941         struct binder_node *node;
942
943         while (n) {
944                 node = rb_entry(n, struct binder_node, rb_node);
945
946                 if (ptr < node->ptr)
947                         n = n->rb_left;
948                 else if (ptr > node->ptr)
949                         n = n->rb_right;
950                 else
951                         return node;
952         }
953         return NULL;
954 }
955
956 static struct binder_node *binder_new_node(struct binder_proc *proc,
957                                            void __user *ptr,
958                                            void __user *cookie)
959 {
960         struct rb_node **p = &proc->nodes.rb_node;
961         struct rb_node *parent = NULL;
962         struct binder_node *node;
963
964         while (*p) {
965                 parent = *p;
966                 node = rb_entry(parent, struct binder_node, rb_node);
967
968                 if (ptr < node->ptr)
969                         p = &(*p)->rb_left;
970                 else if (ptr > node->ptr)
971                         p = &(*p)->rb_right;
972                 else
973                         return NULL;
974         }
975
976         node = kzalloc(sizeof(*node), GFP_KERNEL);
977         if (node == NULL)
978                 return NULL;
979         binder_stats_created(BINDER_STAT_NODE);
980         rb_link_node(&node->rb_node, parent, p);
981         rb_insert_color(&node->rb_node, &proc->nodes);
982         node->debug_id = ++binder_last_id;
983         node->proc = proc;
984         node->ptr = ptr;
985         node->cookie = cookie;
986         node->work.type = BINDER_WORK_NODE;
987         INIT_LIST_HEAD(&node->work.entry);
988         INIT_LIST_HEAD(&node->async_todo);
989         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
990                      "binder: %d:%d node %d u%p c%p created\n",
991                      proc->pid, current->pid, node->debug_id,
992                      node->ptr, node->cookie);
993         return node;
994 }
995
996 static int binder_inc_node(struct binder_node *node, int strong, int internal,
997                            struct list_head *target_list)
998 {
999         if (strong) {
1000                 if (internal) {
1001                         if (target_list == NULL &&
1002                             node->internal_strong_refs == 0 &&
1003                             !(node == binder_context_mgr_node &&
1004                             node->has_strong_ref)) {
1005                                 printk(KERN_ERR "binder: invalid inc strong "
1006                                         "node for %d\n", node->debug_id);
1007                                 return -EINVAL;
1008                         }
1009                         node->internal_strong_refs++;
1010                 } else
1011                         node->local_strong_refs++;
1012                 if (!node->has_strong_ref && target_list) {
1013                         list_del_init(&node->work.entry);
1014                         list_add_tail(&node->work.entry, target_list);
1015                 }
1016         } else {
1017                 if (!internal)
1018                         node->local_weak_refs++;
1019                 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1020                         if (target_list == NULL) {
1021                                 printk(KERN_ERR "binder: invalid inc weak node "
1022                                         "for %d\n", node->debug_id);
1023                                 return -EINVAL;
1024                         }
1025                         list_add_tail(&node->work.entry, target_list);
1026                 }
1027         }
1028         return 0;
1029 }
1030
1031 static int binder_dec_node(struct binder_node *node, int strong, int internal)
1032 {
1033         if (strong) {
1034                 if (internal)
1035                         node->internal_strong_refs--;
1036                 else
1037                         node->local_strong_refs--;
1038                 if (node->local_strong_refs || node->internal_strong_refs)
1039                         return 0;
1040         } else {
1041                 if (!internal)
1042                         node->local_weak_refs--;
1043                 if (node->local_weak_refs || !hlist_empty(&node->refs))
1044                         return 0;
1045         }
1046         if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
1047                 if (list_empty(&node->work.entry)) {
1048                         list_add_tail(&node->work.entry, &node->proc->todo);
1049                         wake_up_interruptible(&node->proc->wait);
1050                 }
1051         } else {
1052                 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1053                     !node->local_weak_refs) {
1054                         list_del_init(&node->work.entry);
1055                         if (node->proc) {
1056                                 rb_erase(&node->rb_node, &node->proc->nodes);
1057                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1058                                              "binder: refless node %d deleted\n",
1059                                              node->debug_id);
1060                         } else {
1061                                 hlist_del(&node->dead_node);
1062                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1063                                              "binder: dead node %d deleted\n",
1064                                              node->debug_id);
1065                         }
1066                         kfree(node);
1067                         binder_stats_deleted(BINDER_STAT_NODE);
1068                 }
1069         }
1070
1071         return 0;
1072 }
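/*
 * Node lifetime summary: a node stays around while it has local strong or
 * weak references, remote references on node->refs, or the owning process
 * still holds a userspace reference (has_strong_ref/has_weak_ref).  When
 * only the userspace reference remains, the node's work item is queued on
 * the proc's todo list so the reference state can be pushed back to
 * userspace; once everything is gone the node is unlinked from proc->nodes
 * (or from binder_dead_nodes if the owner already exited) and freed.
 */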
1073
1074
1075 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1076                                          uint32_t desc)
1077 {
1078         struct rb_node *n = proc->refs_by_desc.rb_node;
1079         struct binder_ref *ref;
1080
1081         while (n) {
1082                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1083
1084                 if (desc < ref->desc)
1085                         n = n->rb_left;
1086                 else if (desc > ref->desc)
1087                         n = n->rb_right;
1088                 else
1089                         return ref;
1090         }
1091         return NULL;
1092 }
1093
1094 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1095                                                   struct binder_node *node)
1096 {
1097         struct rb_node *n;
1098         struct rb_node **p = &proc->refs_by_node.rb_node;
1099         struct rb_node *parent = NULL;
1100         struct binder_ref *ref, *new_ref;
1101
1102         while (*p) {
1103                 parent = *p;
1104                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1105
1106                 if (node < ref->node)
1107                         p = &(*p)->rb_left;
1108                 else if (node > ref->node)
1109                         p = &(*p)->rb_right;
1110                 else
1111                         return ref;
1112         }
1113         new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1114         if (new_ref == NULL)
1115                 return NULL;
1116         binder_stats_created(BINDER_STAT_REF);
1117         new_ref->debug_id = ++binder_last_id;
1118         new_ref->proc = proc;
1119         new_ref->node = node;
1120         rb_link_node(&new_ref->rb_node_node, parent, p);
1121         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1122
1123         new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
1124         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1125                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1126                 if (ref->desc > new_ref->desc)
1127                         break;
1128                 new_ref->desc = ref->desc + 1;
1129         }
1130
1131         p = &proc->refs_by_desc.rb_node;
1132         while (*p) {
1133                 parent = *p;
1134                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1135
1136                 if (new_ref->desc < ref->desc)
1137                         p = &(*p)->rb_left;
1138                 else if (new_ref->desc > ref->desc)
1139                         p = &(*p)->rb_right;
1140                 else
1141                         BUG();
1142         }
1143         rb_link_node(&new_ref->rb_node_desc, parent, p);
1144         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1145         if (node) {
1146                 hlist_add_head(&new_ref->node_entry, &node->refs);
1147
1148                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1149                              "binder: %d new ref %d desc %d for "
1150                              "node %d\n", proc->pid, new_ref->debug_id,
1151                              new_ref->desc, node->debug_id);
1152         } else {
1153                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1154                              "binder: %d new ref %d desc %d for "
1155                              "dead node\n", proc->pid, new_ref->debug_id,
1156                               new_ref->desc);
1157         }
1158         return new_ref;
1159 }
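/*
 * Descriptor (handle) assignment: descriptor 0 is reserved for references
 * to the context manager node; every other new ref receives the smallest
 * unused value >= 1, found by scanning refs_by_desc in ascending order
 * before the ref is inserted into that tree as well.
 */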
1160
1161 static void binder_delete_ref(struct binder_ref *ref)
1162 {
1163         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1164                      "binder: %d delete ref %d desc %d for "
1165                      "node %d\n", ref->proc->pid, ref->debug_id,
1166                      ref->desc, ref->node->debug_id);
1167
1168         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1169         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1170         if (ref->strong)
1171                 binder_dec_node(ref->node, 1, 1);
1172         hlist_del(&ref->node_entry);
1173         binder_dec_node(ref->node, 0, 1);
1174         if (ref->death) {
1175                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1176                              "binder: %d delete ref %d desc %d "
1177                              "has death notification\n", ref->proc->pid,
1178                              ref->debug_id, ref->desc);
1179                 list_del(&ref->death->work.entry);
1180                 kfree(ref->death);
1181                 binder_stats_deleted(BINDER_STAT_DEATH);
1182         }
1183         kfree(ref);
1184         binder_stats_deleted(BINDER_STAT_REF);
1185 }
1186
1187 static int binder_inc_ref(struct binder_ref *ref, int strong,
1188                           struct list_head *target_list)
1189 {
1190         int ret;
1191         if (strong) {
1192                 if (ref->strong == 0) {
1193                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1194                         if (ret)
1195                                 return ret;
1196                 }
1197                 ref->strong++;
1198         } else {
1199                 if (ref->weak == 0) {
1200                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1201                         if (ret)
1202                                 return ret;
1203                 }
1204                 ref->weak++;
1205         }
1206         return 0;
1207 }
1208
1209
1210 static int binder_dec_ref(struct binder_ref *ref, int strong)
1211 {
1212         if (strong) {
1213                 if (ref->strong == 0) {
1214                         binder_user_error("binder: %d invalid dec strong, "
1215                                           "ref %d desc %d s %d w %d\n",
1216                                           ref->proc->pid, ref->debug_id,
1217                                           ref->desc, ref->strong, ref->weak);
1218                         return -EINVAL;
1219                 }
1220                 ref->strong--;
1221                 if (ref->strong == 0) {
1222                         int ret;
1223                         ret = binder_dec_node(ref->node, strong, 1);
1224                         if (ret)
1225                                 return ret;
1226                 }
1227         } else {
1228                 if (ref->weak == 0) {
1229                         binder_user_error("binder: %d invalid dec weak, "
1230                                           "ref %d desc %d s %d w %d\n",
1231                                           ref->proc->pid, ref->debug_id,
1232                                           ref->desc, ref->strong, ref->weak);
1233                         return -EINVAL;
1234                 }
1235                 ref->weak--;
1236         }
1237         if (ref->strong == 0 && ref->weak == 0)
1238                 binder_delete_ref(ref);
1239         return 0;
1240 }
1241
1242 static void binder_pop_transaction(struct binder_thread *target_thread,
1243                                    struct binder_transaction *t)
1244 {
1245         if (target_thread) {
1246                 BUG_ON(target_thread->transaction_stack != t);
1247                 BUG_ON(target_thread->transaction_stack->from != target_thread);
1248                 target_thread->transaction_stack =
1249                         target_thread->transaction_stack->from_parent;
1250                 t->from = NULL;
1251         }
1252         t->need_reply = 0;
1253         if (t->buffer)
1254                 t->buffer->transaction = NULL;
1255         kfree(t);
1256         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1257 }
1258
1259 static void binder_send_failed_reply(struct binder_transaction *t,
1260                                      uint32_t error_code)
1261 {
1262         struct binder_thread *target_thread;
1263         BUG_ON(t->flags & TF_ONE_WAY);
1264         while (1) {
1265                 target_thread = t->from;
1266                 if (target_thread) {
1267                         if (target_thread->return_error != BR_OK &&
1268                            target_thread->return_error2 == BR_OK) {
1269                                 target_thread->return_error2 =
1270                                         target_thread->return_error;
1271                                 target_thread->return_error = BR_OK;
1272                         }
1273                         if (target_thread->return_error == BR_OK) {
1274                                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1275                                              "binder: send failed reply for "
1276                                              "transaction %d to %d:%d\n",
1277                                               t->debug_id, target_thread->proc->pid,
1278                                               target_thread->pid);
1279
1280                                 binder_pop_transaction(target_thread, t);
1281                                 target_thread->return_error = error_code;
1282                                 wake_up_interruptible(&target_thread->wait);
1283                         } else {
1284                                 printk(KERN_ERR "binder: reply failed, target "
1285                                         "thread, %d:%d, has error code %d "
1286                                         "already\n", target_thread->proc->pid,
1287                                         target_thread->pid,
1288                                         target_thread->return_error);
1289                         }
1290                         return;
1291                 } else {
1292                         struct binder_transaction *next = t->from_parent;
1293
1294                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1295                                      "binder: send failed reply "
1296                                      "for transaction %d, target dead\n",
1297                                      t->debug_id);
1298
1299                         binder_pop_transaction(target_thread, t);
1300                         if (next == NULL) {
1301                                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1302                                              "binder: reply failed,"
1303                                              " no target thread at root\n");
1304                                 return;
1305                         }
1306                         t = next;
1307                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1308                                      "binder: reply failed, no target "
1309                                      "thread -- retry %d\n", t->debug_id);
1310                 }
1311         }
1312 }
1313
1314 static void binder_transaction_buffer_release(struct binder_proc *proc,
1315                                               struct binder_buffer *buffer,
1316                                               size_t *failed_at)
1317 {
1318         size_t *offp, *off_end;
1319         int debug_id = buffer->debug_id;
1320
1321         binder_debug(BINDER_DEBUG_TRANSACTION,
1322                      "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
1323                      proc->pid, buffer->debug_id,
1324                      buffer->data_size, buffer->offsets_size, failed_at);
1325
1326         if (buffer->target_node)
1327                 binder_dec_node(buffer->target_node, 1, 0);
1328
1329         offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
1330         if (failed_at)
1331                 off_end = failed_at;
1332         else
1333                 off_end = (void *)offp + buffer->offsets_size;
1334         for (; offp < off_end; offp++) {
1335                 struct flat_binder_object *fp;
1336                 if (*offp > buffer->data_size - sizeof(*fp) ||
1337                     buffer->data_size < sizeof(*fp) ||
1338                     !IS_ALIGNED(*offp, sizeof(void *))) {
1339                         printk(KERN_ERR "binder: transaction release %d bad "
1340                                         "offset %zd, size %zd\n", debug_id,
1341                                         *offp, buffer->data_size);
1342                         continue;
1343                 }
1344                 fp = (struct flat_binder_object *)(buffer->data + *offp);
1345                 switch (fp->type) {
1346                 case BINDER_TYPE_BINDER:
1347                 case BINDER_TYPE_WEAK_BINDER: {
1348                         struct binder_node *node = binder_get_node(proc, fp->binder);
1349                         if (node == NULL) {
1350                                 printk(KERN_ERR "binder: transaction release %d"
1351                                        " bad node %p\n", debug_id, fp->binder);
1352                                 break;
1353                         }
1354                         binder_debug(BINDER_DEBUG_TRANSACTION,
1355                                      "        node %d u%p\n",
1356                                      node->debug_id, node->ptr);
1357                         binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
1358                 } break;
1359                 case BINDER_TYPE_HANDLE:
1360                 case BINDER_TYPE_WEAK_HANDLE: {
1361                         struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1362                         if (ref == NULL) {
1363                                 printk(KERN_ERR "binder: transaction release %d"
1364                                        " bad handle %ld\n", debug_id,
1365                                        fp->handle);
1366                                 break;
1367                         }
1368                         binder_debug(BINDER_DEBUG_TRANSACTION,
1369                                      "        ref %d desc %d (node %d)\n",
1370                                      ref->debug_id, ref->desc, ref->node->debug_id);
1371                         binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
1372                 } break;
1373
1374                 case BINDER_TYPE_FD:
1375                         binder_debug(BINDER_DEBUG_TRANSACTION,
1376                                      "        fd %ld\n", fp->handle);
1377                         if (failed_at)
1378                                 task_close_fd(proc, fp->handle);
1379                         break;
1380
1381                 default:
1382                         printk(KERN_ERR "binder: transaction release %d bad "
1383                                "object type %lx\n", debug_id, fp->type);
1384                         break;
1385                 }
1386         }
1387 }
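/*
 * Buffer release walks the offsets array stored after the pointer-aligned
 * data section; each offset names a flat_binder_object embedded in the
 * data, and the node, ref or (when called with failed_at on the error
 * path) file descriptor pinned while the transaction was built is dropped
 * here.
 */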
1388
1389 static void binder_transaction(struct binder_proc *proc,
1390                                struct binder_thread *thread,
1391                                struct binder_transaction_data *tr, int reply)
1392 {
1393         struct binder_transaction *t;
1394         struct binder_work *tcomplete;
1395         size_t *offp, *off_end;
1396         struct binder_proc *target_proc;
1397         struct binder_thread *target_thread = NULL;
1398         struct binder_node *target_node = NULL;
1399         struct list_head *target_list;
1400         wait_queue_head_t *target_wait;
1401         struct binder_transaction *in_reply_to = NULL;
1402         struct binder_transaction_log_entry *e;
1403         uint32_t return_error;
1404
1405         e = binder_transaction_log_add(&binder_transaction_log);
1406         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1407         e->from_proc = proc->pid;
1408         e->from_thread = thread->pid;
1409         e->target_handle = tr->target.handle;
1410         e->data_size = tr->data_size;
1411         e->offsets_size = tr->offsets_size;
1412
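             /*
              * A reply is routed back to the thread that issued the
              * transaction now on top of our stack; a new transaction is
              * routed to the node behind tr->target.handle (handle 0 selects
              * the context manager) and, when possible, to a thread in the
              * target process that is already waiting on this call chain.
              */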
1413         if (reply) {
1414                 in_reply_to = thread->transaction_stack;
1415                 if (in_reply_to == NULL) {
1416                         binder_user_error("binder: %d:%d got reply transaction "
1417                                           "with no transaction stack\n",
1418                                           proc->pid, thread->pid);
1419                         return_error = BR_FAILED_REPLY;
1420                         goto err_empty_call_stack;
1421                 }
1422                 binder_set_nice(in_reply_to->saved_priority);
1423                 if (in_reply_to->to_thread != thread) {
1424                         binder_user_error("binder: %d:%d got reply transaction "
1425                                 "with bad transaction stack,"
1426                                 " transaction %d has target %d:%d\n",
1427                                 proc->pid, thread->pid, in_reply_to->debug_id,
1428                                 in_reply_to->to_proc ?
1429                                 in_reply_to->to_proc->pid : 0,
1430                                 in_reply_to->to_thread ?
1431                                 in_reply_to->to_thread->pid : 0);
1432                         return_error = BR_FAILED_REPLY;
1433                         in_reply_to = NULL;
1434                         goto err_bad_call_stack;
1435                 }
1436                 thread->transaction_stack = in_reply_to->to_parent;
1437                 target_thread = in_reply_to->from;
1438                 if (target_thread == NULL) {
1439                         return_error = BR_DEAD_REPLY;
1440                         goto err_dead_binder;
1441                 }
1442                 if (target_thread->transaction_stack != in_reply_to) {
1443                         binder_user_error("binder: %d:%d got reply transaction "
1444                                 "with bad target transaction stack %d, "
1445                                 "expected %d\n",
1446                                 proc->pid, thread->pid,
1447                                 target_thread->transaction_stack ?
1448                                 target_thread->transaction_stack->debug_id : 0,
1449                                 in_reply_to->debug_id);
1450                         return_error = BR_FAILED_REPLY;
1451                         in_reply_to = NULL;
1452                         target_thread = NULL;
1453                         goto err_dead_binder;
1454                 }
1455                 target_proc = target_thread->proc;
1456         } else {
1457                 if (tr->target.handle) {
1458                         struct binder_ref *ref;
1459                         ref = binder_get_ref(proc, tr->target.handle);
1460                         if (ref == NULL) {
1461                                 binder_user_error("binder: %d:%d got "
1462                                         "transaction to invalid handle\n",
1463                                         proc->pid, thread->pid);
1464                                 return_error = BR_FAILED_REPLY;
1465                                 goto err_invalid_target_handle;
1466                         }
1467                         target_node = ref->node;
1468                 } else {
1469                         target_node = binder_context_mgr_node;
1470                         if (target_node == NULL) {
1471                                 return_error = BR_DEAD_REPLY;
1472                                 goto err_no_context_mgr_node;
1473                         }
1474                 }
1475                 e->to_node = target_node->debug_id;
1476                 target_proc = target_node->proc;
1477                 if (target_proc == NULL) {
1478                         return_error = BR_DEAD_REPLY;
1479                         goto err_dead_binder;
1480                 }
1481                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1482                         struct binder_transaction *tmp;
1483                         tmp = thread->transaction_stack;
1484                         if (tmp->to_thread != thread) {
1485                                 binder_user_error("binder: %d:%d got new "
1486                                         "transaction with bad transaction stack"
1487                                         ", transaction %d has target %d:%d\n",
1488                                         proc->pid, thread->pid, tmp->debug_id,
1489                                         tmp->to_proc ? tmp->to_proc->pid : 0,
1490                                         tmp->to_thread ?
1491                                         tmp->to_thread->pid : 0);
1492                                 return_error = BR_FAILED_REPLY;
1493                                 goto err_bad_call_stack;
1494                         }
1495                         while (tmp) {
1496                                 if (tmp->from && tmp->from->proc == target_proc)
1497                                         target_thread = tmp->from;
1498                                 tmp = tmp->from_parent;
1499                         }
1500                 }
1501         }
1502         if (target_thread) {
1503                 e->to_thread = target_thread->pid;
1504                 target_list = &target_thread->todo;
1505                 target_wait = &target_thread->wait;
1506         } else {
1507                 target_list = &target_proc->todo;
1508                 target_wait = &target_proc->wait;
1509         }
1510         e->to_proc = target_proc->pid;
1511
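             /*
              * Two pieces of bookkeeping are allocated: the transaction
              * itself, queued to the target, and a
              * BINDER_WORK_TRANSACTION_COMPLETE item, queued back to the
              * sending thread.
              */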
1512         /* TODO: reuse incoming transaction for reply */
1513         t = kzalloc(sizeof(*t), GFP_KERNEL);
1514         if (t == NULL) {
1515                 return_error = BR_FAILED_REPLY;
1516                 goto err_alloc_t_failed;
1517         }
1518         binder_stats_created(BINDER_STAT_TRANSACTION);
1519
1520         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1521         if (tcomplete == NULL) {
1522                 return_error = BR_FAILED_REPLY;
1523                 goto err_alloc_tcomplete_failed;
1524         }
1525         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1526
1527         t->debug_id = ++binder_last_id;
1528         e->debug_id = t->debug_id;
1529
1530         if (reply)
1531                 binder_debug(BINDER_DEBUG_TRANSACTION,
1532                              "binder: %d:%d BC_REPLY %d -> %d:%d, "
1533                              "data %p-%p size %zd-%zd\n",
1534                              proc->pid, thread->pid, t->debug_id,
1535                              target_proc->pid, target_thread->pid,
1536                              tr->data.ptr.buffer, tr->data.ptr.offsets,
1537                              tr->data_size, tr->offsets_size);
1538         else
1539                 binder_debug(BINDER_DEBUG_TRANSACTION,
1540                              "binder: %d:%d BC_TRANSACTION %d -> "
1541                              "%d - node %d, data %p-%p size %zd-%zd\n",
1542                              proc->pid, thread->pid, t->debug_id,
1543                              target_proc->pid, target_node->debug_id,
1544                              tr->data.ptr.buffer, tr->data.ptr.offsets,
1545                              tr->data_size, tr->offsets_size);
1546
1547         if (!reply && !(tr->flags & TF_ONE_WAY))
1548                 t->from = thread;
1549         else
1550                 t->from = NULL;
1551         t->sender_euid = proc->tsk->cred->euid;
1552         t->to_proc = target_proc;
1553         t->to_thread = target_thread;
1554         t->code = tr->code;
1555         t->flags = tr->flags;
1556         t->priority = task_nice(current);
1557         t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1558                 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1559         if (t->buffer == NULL) {
1560                 return_error = BR_FAILED_REPLY;
1561                 goto err_binder_alloc_buf_failed;
1562         }
1563         t->buffer->allow_user_free = 0;
1564         t->buffer->debug_id = t->debug_id;
1565         t->buffer->transaction = t;
1566         t->buffer->target_node = target_node;
1567         if (target_node)
1568                 binder_inc_node(target_node, 1, 0, NULL);
1569
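             /*
              * The transaction buffer holds the raw payload followed by the
              * offsets array, with the offsets padded up to a pointer
              * boundary.  Illustrative layout only (sizes depend on the
              * caller):
              *
              *   t->buffer->data: [ data_size bytes ][ pad ][ offsets ]
              *                                              ^ offp
              */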
1570         offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
1571
1572         if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
1573                 binder_user_error("binder: %d:%d got transaction with invalid "
1574                         "data ptr\n", proc->pid, thread->pid);
1575                 return_error = BR_FAILED_REPLY;
1576                 goto err_copy_data_failed;
1577         }
1578         if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
1579                 binder_user_error("binder: %d:%d got transaction with invalid "
1580                         "offsets ptr\n", proc->pid, thread->pid);
1581                 return_error = BR_FAILED_REPLY;
1582                 goto err_copy_data_failed;
1583         }
1584         if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
1585                 binder_user_error("binder: %d:%d got transaction with "
1586                         "invalid offsets size, %zd\n",
1587                         proc->pid, thread->pid, tr->offsets_size);
1588                 return_error = BR_FAILED_REPLY;
1589                 goto err_bad_offset;
1590         }
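             /*
              * Walk the offsets array and translate each flat_binder_object
              * in place: local binders become handles in the target, handles
              * become either the target's own binder or a new handle there,
              * and file descriptors are duplicated into the target's fd
              * table.
              */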
1591         off_end = (void *)offp + tr->offsets_size;
1592         for (; offp < off_end; offp++) {
1593                 struct flat_binder_object *fp;
1594                 if (*offp > t->buffer->data_size - sizeof(*fp) ||
1595                     t->buffer->data_size < sizeof(*fp) ||
1596                     !IS_ALIGNED(*offp, sizeof(void *))) {
1597                         binder_user_error("binder: %d:%d got transaction with "
1598                                 "invalid offset, %zd\n",
1599                                 proc->pid, thread->pid, *offp);
1600                         return_error = BR_FAILED_REPLY;
1601                         goto err_bad_offset;
1602                 }
1603                 fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1604                 switch (fp->type) {
1605                 case BINDER_TYPE_BINDER:
1606                 case BINDER_TYPE_WEAK_BINDER: {
1607                         struct binder_ref *ref;
1608                         struct binder_node *node = binder_get_node(proc, fp->binder);
1609                         if (node == NULL) {
1610                                 node = binder_new_node(proc, fp->binder, fp->cookie);
1611                                 if (node == NULL) {
1612                                         return_error = BR_FAILED_REPLY;
1613                                         goto err_binder_new_node_failed;
1614                                 }
1615                                 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1616                                 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1617                         }
1618                         if (fp->cookie != node->cookie) {
1619                                 binder_user_error("binder: %d:%d sending u%p "
1620                                         "node %d, cookie mismatch %p != %p\n",
1621                                         proc->pid, thread->pid, fp->binder,
1622                                         node->debug_id, fp->cookie, node->cookie);
1623                                 return_error = BR_FAILED_REPLY;
1624                                 goto err_binder_get_ref_for_node_failed;
1625                         }
1626                         ref = binder_get_ref_for_node(target_proc, node);
1627                         if (ref == NULL) {
1628                                 return_error = BR_FAILED_REPLY;
1629                                 goto err_binder_get_ref_for_node_failed;
1630                         }
1631                         if (fp->type == BINDER_TYPE_BINDER)
1632                                 fp->type = BINDER_TYPE_HANDLE;
1633                         else
1634                                 fp->type = BINDER_TYPE_WEAK_HANDLE;
1635                         fp->handle = ref->desc;
1636                         binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1637                                        &thread->todo);
1638
1639                         binder_debug(BINDER_DEBUG_TRANSACTION,
1640                                      "        node %d u%p -> ref %d desc %d\n",
1641                                      node->debug_id, node->ptr, ref->debug_id,
1642                                      ref->desc);
1643                 } break;
1644                 case BINDER_TYPE_HANDLE:
1645                 case BINDER_TYPE_WEAK_HANDLE: {
1646                         struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1647                         if (ref == NULL) {
1648                                 binder_user_error("binder: %d:%d got "
1649                                         "transaction with invalid "
1650                                         "handle, %ld\n", proc->pid,
1651                                         thread->pid, fp->handle);
1652                                 return_error = BR_FAILED_REPLY;
1653                                 goto err_binder_get_ref_failed;
1654                         }
1655                         if (ref->node->proc == target_proc) {
1656                                 if (fp->type == BINDER_TYPE_HANDLE)
1657                                         fp->type = BINDER_TYPE_BINDER;
1658                                 else
1659                                         fp->type = BINDER_TYPE_WEAK_BINDER;
1660                                 fp->binder = ref->node->ptr;
1661                                 fp->cookie = ref->node->cookie;
1662                                 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1663                                 binder_debug(BINDER_DEBUG_TRANSACTION,
1664                                              "        ref %d desc %d -> node %d u%p\n",
1665                                              ref->debug_id, ref->desc, ref->node->debug_id,
1666                                              ref->node->ptr);
1667                         } else {
1668                                 struct binder_ref *new_ref;
1669                                 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1670                                 if (new_ref == NULL) {
1671                                         return_error = BR_FAILED_REPLY;
1672                                         goto err_binder_get_ref_for_node_failed;
1673                                 }
1674                                 fp->handle = new_ref->desc;
1675                                 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1676                                 binder_debug(BINDER_DEBUG_TRANSACTION,
1677                                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1678                                              ref->debug_id, ref->desc, new_ref->debug_id,
1679                                              new_ref->desc, ref->node->debug_id);
1680                         }
1681                 } break;
1682
1683                 case BINDER_TYPE_FD: {
1684                         int target_fd;
1685                         struct file *file;
1686
1687                         if (reply) {
1688                                 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1689                                         binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
1690                                                 proc->pid, thread->pid, fp->handle);
1691                                         return_error = BR_FAILED_REPLY;
1692                                         goto err_fd_not_allowed;
1693                                 }
1694                         } else if (!target_node->accept_fds) {
1695                                 binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
1696                                         proc->pid, thread->pid, fp->handle);
1697                                 return_error = BR_FAILED_REPLY;
1698                                 goto err_fd_not_allowed;
1699                         }
1700
1701                         file = fget(fp->handle);
1702                         if (file == NULL) {
1703                                 binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
1704                                         proc->pid, thread->pid, fp->handle);
1705                                 return_error = BR_FAILED_REPLY;
1706                                 goto err_fget_failed;
1707                         }
1708                         target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1709                         if (target_fd < 0) {
1710                                 fput(file);
1711                                 return_error = BR_FAILED_REPLY;
1712                                 goto err_get_unused_fd_failed;
1713                         }
1714                         task_fd_install(target_proc, target_fd, file);
1715                         binder_debug(BINDER_DEBUG_TRANSACTION,
1716                                      "        fd %ld -> %d\n", fp->handle, target_fd);
1717                         /* TODO: fput? */
1718                         fp->handle = target_fd;
1719                 } break;
1720
1721                 default:
1722                         binder_user_error("binder: %d:%d got transaction "
1723                                 "with invalid object type, %lx\n",
1724                                 proc->pid, thread->pid, fp->type);
1725                         return_error = BR_FAILED_REPLY;
1726                         goto err_bad_object_type;
1727                 }
1728         }
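             /*
              * Queue the finished transaction.  A reply pops the original
              * transaction off the target thread's stack; a synchronous call
              * is pushed onto the sender's stack so the reply can find its
              * way back; a one-way call is deferred to the node's async_todo
              * list (with no wake-up) if another async transaction is
              * already in flight for that node.
              */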
1729         if (reply) {
1730                 BUG_ON(t->buffer->async_transaction != 0);
1731                 binder_pop_transaction(target_thread, in_reply_to);
1732         } else if (!(t->flags & TF_ONE_WAY)) {
1733                 BUG_ON(t->buffer->async_transaction != 0);
1734                 t->need_reply = 1;
1735                 t->from_parent = thread->transaction_stack;
1736                 thread->transaction_stack = t;
1737         } else {
1738                 BUG_ON(target_node == NULL);
1739                 BUG_ON(t->buffer->async_transaction != 1);
1740                 if (target_node->has_async_transaction) {
1741                         target_list = &target_node->async_todo;
1742                         target_wait = NULL;
1743                 } else
1744                         target_node->has_async_transaction = 1;
1745         }
1746         t->work.type = BINDER_WORK_TRANSACTION;
1747         list_add_tail(&t->work.entry, target_list);
1748         tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1749         list_add_tail(&tcomplete->entry, &thread->todo);
1750         if (target_wait)
1751                 wake_up_interruptible(target_wait);
1752         return;
1753
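     /*
      * Error unwind: the labels are ordered so that a failure at any point
      * releases exactly what had been set up by then -- first the objects
      * already translated into t->buffer, then the buffer itself, then the
      * tcomplete and transaction allocations -- before the failure is
      * reported back to the sender.
      */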
1754 err_get_unused_fd_failed:
1755 err_fget_failed:
1756 err_fd_not_allowed:
1757 err_binder_get_ref_for_node_failed:
1758 err_binder_get_ref_failed:
1759 err_binder_new_node_failed:
1760 err_bad_object_type:
1761 err_bad_offset:
1762 err_copy_data_failed:
1763         binder_transaction_buffer_release(target_proc, t->buffer, offp);
1764         t->buffer->transaction = NULL;
1765         binder_free_buf(target_proc, t->buffer);
1766 err_binder_alloc_buf_failed:
1767         kfree(tcomplete);
1768         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1769 err_alloc_tcomplete_failed:
1770         kfree(t);
1771         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1772 err_alloc_t_failed:
1773 err_bad_call_stack:
1774 err_empty_call_stack:
1775 err_dead_binder:
1776 err_invalid_target_handle:
1777 err_no_context_mgr_node:
1778         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1779                      "binder: %d:%d transaction failed %d, size %zd-%zd\n",
1780                      proc->pid, thread->pid, return_error,
1781                      tr->data_size, tr->offsets_size);
1782
1783         {
1784                 struct binder_transaction_log_entry *fe;
1785                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1786                 *fe = *e;
1787         }
1788
1789         BUG_ON(thread->return_error != BR_OK);
1790         if (in_reply_to) {
1791                 thread->return_error = BR_TRANSACTION_COMPLETE;
1792                 binder_send_failed_reply(in_reply_to, return_error);
1793         } else
1794                 thread->return_error = return_error;
1795 }
1796
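     /*
      * binder_thread_write() - consume BC_* commands from a userspace buffer.
      *
      * The buffer is a packed stream of 32-bit command codes, each followed
      * by its command-specific payload; *consumed is advanced as commands
      * are processed.  For illustration only, a caller might lay out a
      * single transaction like this:
      *
      *   uint32_t cmd = BC_TRANSACTION;
      *   struct binder_transaction_data tr = { ... };
      *   (write cmd immediately followed by tr in one buffer)
      */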
1797 int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
1798                         void __user *buffer, int size, signed long *consumed)
1799 {
1800         uint32_t cmd;
1801         void __user *ptr = buffer + *consumed;
1802         void __user *end = buffer + size;
1803
1804         while (ptr < end && thread->return_error == BR_OK) {
1805                 if (get_user(cmd, (uint32_t __user *)ptr))
1806                         return -EFAULT;
1807                 ptr += sizeof(uint32_t);
1808                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1809                         binder_stats.bc[_IOC_NR(cmd)]++;
1810                         proc->stats.bc[_IOC_NR(cmd)]++;
1811                         thread->stats.bc[_IOC_NR(cmd)]++;
1812                 }
1813                 switch (cmd) {
1814                 case BC_INCREFS:
1815                 case BC_ACQUIRE:
1816                 case BC_RELEASE:
1817                 case BC_DECREFS: {
1818                         uint32_t target;
1819                         struct binder_ref *ref;
1820                         const char *debug_string;
1821
1822                         if (get_user(target, (uint32_t __user *)ptr))
1823                                 return -EFAULT;
1824                         ptr += sizeof(uint32_t);
1825                         if (target == 0 && binder_context_mgr_node &&
1826                             (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1827                                 ref = binder_get_ref_for_node(proc,
1828                                                binder_context_mgr_node);
1829                                 if (ref && ref->desc != target) {
1830                                         binder_user_error("binder: %d:"
1831                                                 "%d tried to acquire "
1832                                                 "reference to desc 0, "
1833                                                 "got %d instead\n",
1834                                                 proc->pid, thread->pid,
1835                                                 ref->desc);
1836                                 }
1837                         } else
1838                                 ref = binder_get_ref(proc, target);
1839                         if (ref == NULL) {
1840                                 binder_user_error("binder: %d:%d refcount "
1841                                         "change on invalid ref %d\n",
1842                                         proc->pid, thread->pid, target);
1843                                 break;
1844                         }
1845                         switch (cmd) {
1846                         case BC_INCREFS:
1847                                 debug_string = "IncRefs";
1848                                 binder_inc_ref(ref, 0, NULL);
1849                                 break;
1850                         case BC_ACQUIRE:
1851                                 debug_string = "Acquire";
1852                                 binder_inc_ref(ref, 1, NULL);
1853                                 break;
1854                         case BC_RELEASE:
1855                                 debug_string = "Release";
1856                                 binder_dec_ref(ref, 1);
1857                                 break;
1858                         case BC_DECREFS:
1859                         default:
1860                                 debug_string = "DecRefs";
1861                                 binder_dec_ref(ref, 0);
1862                                 break;
1863                         }
1864                         binder_debug(BINDER_DEBUG_USER_REFS,
1865                                      "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
1866                                      proc->pid, thread->pid, debug_string, ref->debug_id,
1867                                      ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1868                         break;
1869                 }
1870                 case BC_INCREFS_DONE:
1871                 case BC_ACQUIRE_DONE: {
1872                         void __user *node_ptr;
1873                         void *cookie;
1874                         struct binder_node *node;
1875
1876                         if (get_user(node_ptr, (void * __user *)ptr))
1877                                 return -EFAULT;
1878                         ptr += sizeof(void *);
1879                         if (get_user(cookie, (void * __user *)ptr))
1880                                 return -EFAULT;
1881                         ptr += sizeof(void *);
1882                         node = binder_get_node(proc, node_ptr);
1883                         if (node == NULL) {
1884                                 binder_user_error("binder: %d:%d "
1885                                         "%s u%p no match\n",
1886                                         proc->pid, thread->pid,
1887                                         cmd == BC_INCREFS_DONE ?
1888                                         "BC_INCREFS_DONE" :
1889                                         "BC_ACQUIRE_DONE",
1890                                         node_ptr);
1891                                 break;
1892                         }
1893                         if (cookie != node->cookie) {
1894                                 binder_user_error("binder: %d:%d %s u%p node %d"
1895                                         " cookie mismatch %p != %p\n",
1896                                         proc->pid, thread->pid,
1897                                         cmd == BC_INCREFS_DONE ?
1898                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1899                                         node_ptr, node->debug_id,
1900                                         cookie, node->cookie);
1901                                 break;
1902                         }
1903                         if (cmd == BC_ACQUIRE_DONE) {
1904                                 if (node->pending_strong_ref == 0) {
1905                                         binder_user_error("binder: %d:%d "
1906                                                 "BC_ACQUIRE_DONE node %d has "
1907                                                 "no pending acquire request\n",
1908                                                 proc->pid, thread->pid,
1909                                                 node->debug_id);
1910                                         break;
1911                                 }
1912                                 node->pending_strong_ref = 0;
1913                         } else {
1914                                 if (node->pending_weak_ref == 0) {
1915                                         binder_user_error("binder: %d:%d "
1916                                                 "BC_INCREFS_DONE node %d has "
1917                                                 "no pending increfs request\n",
1918                                                 proc->pid, thread->pid,
1919                                                 node->debug_id);
1920                                         break;
1921                                 }
1922                                 node->pending_weak_ref = 0;
1923                         }
1924                         binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1925                         binder_debug(BINDER_DEBUG_USER_REFS,
1926                                      "binder: %d:%d %s node %d ls %d lw %d\n",
1927                                      proc->pid, thread->pid,
1928                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1929                                      node->debug_id, node->local_strong_refs, node->local_weak_refs);
1930                         break;
1931                 }
1932                 case BC_ATTEMPT_ACQUIRE:
1933                         printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
1934                         return -EINVAL;
1935                 case BC_ACQUIRE_RESULT:
1936                         printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
1937                         return -EINVAL;
1938
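                     /*
                      * BC_FREE_BUFFER: userspace is done with a buffer it was
                      * handed by a BR_TRANSACTION/BR_REPLY.  Detach it from
                      * any still-active transaction, pull the next queued
                      * async transaction for the same node if there is one,
                      * release the embedded objects and free the buffer.
                      */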
1939                 case BC_FREE_BUFFER: {
1940                         void __user *data_ptr;
1941                         struct binder_buffer *buffer;
1942
1943                         if (get_user(data_ptr, (void * __user *)ptr))
1944                                 return -EFAULT;
1945                         ptr += sizeof(void *);
1946
1947                         buffer = binder_buffer_lookup(proc, data_ptr);
1948                         if (buffer == NULL) {
1949                                 binder_user_error("binder: %d:%d "
1950                                         "BC_FREE_BUFFER u%p no match\n",
1951                                         proc->pid, thread->pid, data_ptr);
1952                                 break;
1953                         }
1954                         if (!buffer->allow_user_free) {
1955                                 binder_user_error("binder: %d:%d "
1956                                         "BC_FREE_BUFFER u%p matched "
1957                                         "unreturned buffer\n",
1958                                         proc->pid, thread->pid, data_ptr);
1959                                 break;
1960                         }
1961                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
1962                                      "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
1963                                      proc->pid, thread->pid, data_ptr, buffer->debug_id,
1964                                      buffer->transaction ? "active" : "finished");
1965
1966                         if (buffer->transaction) {
1967                                 buffer->transaction->buffer = NULL;
1968                                 buffer->transaction = NULL;
1969                         }
1970                         if (buffer->async_transaction && buffer->target_node) {
1971                                 BUG_ON(!buffer->target_node->has_async_transaction);
1972                                 if (list_empty(&buffer->target_node->async_todo))
1973                                         buffer->target_node->has_async_transaction = 0;
1974                                 else
1975                                         list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1976                         }
1977                         binder_transaction_buffer_release(proc, buffer, NULL);
1978                         binder_free_buf(proc, buffer);
1979                         break;
1980                 }
1981
1982                 case BC_TRANSACTION:
1983                 case BC_REPLY: {
1984                         struct binder_transaction_data tr;
1985
1986                         if (copy_from_user(&tr, ptr, sizeof(tr)))
1987                                 return -EFAULT;
1988                         ptr += sizeof(tr);
1989                         binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1990                         break;
1991                 }
1992
1993                 case BC_REGISTER_LOOPER:
1994                         binder_debug(BINDER_DEBUG_THREADS,
1995                                      "binder: %d:%d BC_REGISTER_LOOPER\n",
1996                                      proc->pid, thread->pid);
1997                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1998                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1999                                 binder_user_error("binder: %d:%d ERROR:"
2000                                         " BC_REGISTER_LOOPER called "
2001                                         "after BC_ENTER_LOOPER\n",
2002                                         proc->pid, thread->pid);
2003                         } else if (proc->requested_threads == 0) {
2004                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2005                                 binder_user_error("binder: %d:%d ERROR:"
2006                                         " BC_REGISTER_LOOPER called "
2007                                         "without request\n",
2008                                         proc->pid, thread->pid);
2009                         } else {
2010                                 proc->requested_threads--;
2011                                 proc->requested_threads_started++;
2012                         }
2013                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2014                         break;
2015                 case BC_ENTER_LOOPER:
2016                         binder_debug(BINDER_DEBUG_THREADS,
2017                                      "binder: %d:%d BC_ENTER_LOOPER\n",
2018                                      proc->pid, thread->pid);
2019                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2020                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2021                                 binder_user_error("binder: %d:%d ERROR:"
2022                                         " BC_ENTER_LOOPER called after "
2023                                         "BC_REGISTER_LOOPER\n",
2024                                         proc->pid, thread->pid);
2025                         }
2026                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2027                         break;
2028                 case BC_EXIT_LOOPER:
2029                         binder_debug(BINDER_DEBUG_THREADS,
2030                                      "binder: %d:%d BC_EXIT_LOOPER\n",
2031                                      proc->pid, thread->pid);
2032                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
2033                         break;
2034
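                     /*
                      * Death notifications: BC_REQUEST_DEATH_NOTIFICATION
                      * attaches a binder_ref_death (keyed by the userspace
                      * cookie) to a reference; if the node's owner is already
                      * gone, BINDER_WORK_DEAD_BINDER work is queued at once.
                      * BC_CLEAR_DEATH_NOTIFICATION undoes this, either
                      * queueing a clear-notification confirmation or tagging
                      * an already-pending dead-binder work item.
                      */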
2035                 case BC_REQUEST_DEATH_NOTIFICATION:
2036                 case BC_CLEAR_DEATH_NOTIFICATION: {
2037                         uint32_t target;
2038                         void __user *cookie;
2039                         struct binder_ref *ref;
2040                         struct binder_ref_death *death;
2041
2042                         if (get_user(target, (uint32_t __user *)ptr))
2043                                 return -EFAULT;
2044                         ptr += sizeof(uint32_t);
2045                         if (get_user(cookie, (void __user * __user *)ptr))
2046                                 return -EFAULT;
2047                         ptr += sizeof(void *);
2048                         ref = binder_get_ref(proc, target);
2049                         if (ref == NULL) {
2050                                 binder_user_error("binder: %d:%d %s "
2051                                         "invalid ref %d\n",
2052                                         proc->pid, thread->pid,
2053                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2054                                         "BC_REQUEST_DEATH_NOTIFICATION" :
2055                                         "BC_CLEAR_DEATH_NOTIFICATION",
2056                                         target);
2057                                 break;
2058                         }
2059
2060                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2061                                      "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
2062                                      proc->pid, thread->pid,
2063                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2064                                      "BC_REQUEST_DEATH_NOTIFICATION" :
2065                                      "BC_CLEAR_DEATH_NOTIFICATION",
2066                                      cookie, ref->debug_id, ref->desc,
2067                                      ref->strong, ref->weak, ref->node->debug_id);
2068
2069                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2070                                 if (ref->death) {
2071                                         binder_user_error("binder: %d:%d "
2072                                                 "BC_REQUEST_DEATH_NOTIFICATION "
2073                                                 "death notification "
2074                                                 "already set\n",
2075                                                 proc->pid, thread->pid);
2076                                         break;
2077                                 }
2078                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
2079                                 if (death == NULL) {
2080                                         thread->return_error = BR_ERROR;
2081                                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2082                                                      "binder: %d:%d "
2083                                                      "BC_REQUEST_DEATH_NOTIFICATION failed\n",
2084                                                      proc->pid, thread->pid);
2085                                         break;
2086                                 }
2087                                 binder_stats_created(BINDER_STAT_DEATH);
2088                                 INIT_LIST_HEAD(&death->work.entry);
2089                                 death->cookie = cookie;
2090                                 ref->death = death;
2091                                 if (ref->node->proc == NULL) {
2092                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2093                                         if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2094                                                 list_add_tail(&ref->death->work.entry, &thread->todo);
2095                                         } else {
2096                                                 list_add_tail(&ref->death->work.entry, &proc->todo);
2097                                                 wake_up_interruptible(&proc->wait);
2098                                         }
2099                                 }
2100                         } else {
2101                                 if (ref->death == NULL) {
2102                                         binder_user_error("binder: %d:%d "
2103                                                 "BC_CLEAR_DEATH_NOTIFICATION "
2104                                                 "death notification "
2105                                                 "not active\n",
2106                                                 proc->pid, thread->pid);
2107                                         break;
2108                                 }
2109                                 death = ref->death;
2110                                 if (death->cookie != cookie) {
2111                                         binder_user_error("binder: %d:%d "
2112                                                 "BC_CLEAR_DEATH_NOTIFICATION "
2113                                                 "death notification "
2114                                                 "cookie mismatch "
2115                                                 "%p != %p\n",
2116                                                 proc->pid, thread->pid,
2117                                                 death->cookie, cookie);
2118                                         break;
2119                                 }
2120                                 ref->death = NULL;
2121                                 if (list_empty(&death->work.entry)) {
2122                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2123                                         if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2124                                                 list_add_tail(&death->work.entry, &thread->todo);
2125                                         } else {
2126                                                 list_add_tail(&death->work.entry, &proc->todo);
2127                                                 wake_up_interruptible(&proc->wait);
2128                                         }
2129                                 } else {
2130                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2131                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2132                                 }
2133                         }
2134                 } break;
2135                 case BC_DEAD_BINDER_DONE: {
2136                         struct binder_work *w;
2137                         void __user *cookie;
2138                         struct binder_ref_death *death = NULL;
2139                         if (get_user(cookie, (void __user * __user *)ptr))
2140                                 return -EFAULT;
2141
2142                         ptr += sizeof(void *);
2143                         list_for_each_entry(w, &proc->delivered_death, entry) {
2144                                 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2145                                 if (tmp_death->cookie == cookie) {
2146                                         death = tmp_death;
2147                                         break;
2148                                 }
2149                         }
2150                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
2151                                      "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
2152                                      proc->pid, thread->pid, cookie, death);
2153                         if (death == NULL) {
2154                                 binder_user_error("binder: %d:%d "
2155                                         "BC_DEAD_BINDER_DONE %p not found\n",
2156                                         proc->pid, thread->pid, cookie);
2157                                 break;
2158                         }
2159
2160                         list_del_init(&death->work.entry);
2161                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2162                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2163                                 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2164                                         list_add_tail(&death->work.entry, &thread->todo);
2165                                 } else {
2166                                         list_add_tail(&death->work.entry, &proc->todo);
2167                                         wake_up_interruptible(&proc->wait);
2168                                 }
2169                         }
2170                 } break;
2171
2172                 default:
2173                         printk(KERN_ERR "binder: %d:%d unknown command %d\n",
2174                                proc->pid, thread->pid, cmd);
2175                         return -EINVAL;
2176                 }
2177                 *consumed = ptr - buffer;
2178         }
2179         return 0;
2180 }
2181
2182 void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
2183                     uint32_t cmd)
2184 {
2185         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2186                 binder_stats.br[_IOC_NR(cmd)]++;
2187                 proc->stats.br[_IOC_NR(cmd)]++;
2188                 thread->stats.br[_IOC_NR(cmd)]++;
2189         }
2190 }
2191
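     /*
      * Helpers for the wait conditions below: a thread has work if its own
      * todo list is non-empty or an error return is pending, and a thread
      * waiting on behalf of its process also wakes for proc->todo entries;
      * the NEED_RETURN looper flag forces a wake-up in either case.
      */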
2192 static int binder_has_proc_work(struct binder_proc *proc,
2193                                 struct binder_thread *thread)
2194 {
2195         return !list_empty(&proc->todo) ||
2196                 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2197 }
2198
2199 static int binder_has_thread_work(struct binder_thread *thread)
2200 {
2201         return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2202                 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2203 }
2204
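     /*
      * binder_thread_read() - fill a userspace buffer with BR_* work.
      *
      * Sleeps (unless non_block) until the thread or its process has work,
      * then emits return commands and transactions.  The stream always
      * starts with BR_NOOP when read from offset zero, so an illustrative
      * read of a single incoming call would decode as:
      *
      *   BR_NOOP
      *   BR_TRANSACTION + struct binder_transaction_data
      */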
2205 static int binder_thread_read(struct binder_proc *proc,
2206                               struct binder_thread *thread,
2207                               void  __user *buffer, int size,
2208                               signed long *consumed, int non_block)
2209 {
2210         void __user *ptr = buffer + *consumed;
2211         void __user *end = buffer + size;
2212
2213         int ret = 0;
2214         int wait_for_proc_work;
2215
2216         if (*consumed == 0) {
2217                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2218                         return -EFAULT;
2219                 ptr += sizeof(uint32_t);
2220         }
2221
2222 retry:
2223         wait_for_proc_work = thread->transaction_stack == NULL &&
2224                                 list_empty(&thread->todo);
2225
2226         if (thread->return_error != BR_OK && ptr < end) {
2227                 if (thread->return_error2 != BR_OK) {
2228                         if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2229                                 return -EFAULT;
2230                         ptr += sizeof(uint32_t);
2231                         if (ptr == end)
2232                                 goto done;
2233                         thread->return_error2 = BR_OK;
2234                 }
2235                 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2236                         return -EFAULT;
2237                 ptr += sizeof(uint32_t);
2238                 thread->return_error = BR_OK;
2239                 goto done;
2240         }
2241
2242
2243         thread->looper |= BINDER_LOOPER_STATE_WAITING;
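             /*
              * The global binder_lock is dropped for the duration of the
              * wait.  Threads waiting for process-wide work sleep exclusively
              * on proc->wait so a wake-up releases only one of them; threads
              * waiting for their own work sleep on thread->wait.
              */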
2244         if (wait_for_proc_work)
2245                 proc->ready_threads++;
2246         mutex_unlock(&binder_lock);
2247         if (wait_for_proc_work) {
2248                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2249                                         BINDER_LOOPER_STATE_ENTERED))) {
2250                         binder_user_error("binder: %d:%d ERROR: Thread waiting "
2251                                 "for process work before calling BC_REGISTER_"
2252                                 "LOOPER or BC_ENTER_LOOPER (state %x)\n",
2253                                 proc->pid, thread->pid, thread->looper);
2254                         wait_event_interruptible(binder_user_error_wait,
2255                                                  binder_stop_on_user_error < 2);
2256                 }
2257                 binder_set_nice(proc->default_priority);
2258                 if (non_block) {
2259                         if (!binder_has_proc_work(proc, thread))
2260                                 ret = -EAGAIN;
2261                 } else
2262                         ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2263         } else {
2264                 if (non_block) {
2265                         if (!binder_has_thread_work(thread))
2266                                 ret = -EAGAIN;
2267                 } else
2268                         ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
2269         }
2270         mutex_lock(&binder_lock);
2271         if (wait_for_proc_work)
2272                 proc->ready_threads--;
2273         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2274
2275         if (ret)
2276                 return ret;
2277
2278         while (1) {
2279                 uint32_t cmd;
2280                 struct binder_transaction_data tr;
2281                 struct binder_work *w;
2282                 struct binder_transaction *t = NULL;
2283
2284                 if (!list_empty(&thread->todo))
2285                         w = list_first_entry(&thread->todo, struct binder_work, entry);
2286                 else if (!list_empty(&proc->todo) && wait_for_proc_work)
2287                         w = list_first_entry(&proc->todo, struct binder_work, entry);
2288                 else {
2289                         if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
2290                                 goto retry;
2291                         break;
2292                 }
2293
2294                 if (end - ptr < sizeof(tr) + 4)
2295                         break;
2296
2297                 switch (w->type) {
2298                 case BINDER_WORK_TRANSACTION: {
2299                         t = container_of(w, struct binder_transaction, work);
2300                 } break;
2301                 case BINDER_WORK_TRANSACTION_COMPLETE: {
2302                         cmd = BR_TRANSACTION_COMPLETE;
2303                         if (put_user(cmd, (uint32_t __user *)ptr))
2304                                 return -EFAULT;
2305                         ptr += sizeof(uint32_t);
2306
2307                         binder_stat_br(proc, thread, cmd);
2308                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2309                                      "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
2310                                      proc->pid, thread->pid);
2311
2312                         list_del(&w->entry);
2313                         kfree(w);
2314                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2315                 } break;
2316                 case BINDER_WORK_NODE: {
2317                         struct binder_node *node = container_of(w, struct binder_node, work);
2318                         uint32_t cmd = BR_NOOP;
2319                         const char *cmd_name;
2320                         int strong = node->internal_strong_refs || node->local_strong_refs;
2321                         int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2322                         if (weak && !node->has_weak_ref) {
2323                                 cmd = BR_INCREFS;
2324                                 cmd_name = "BR_INCREFS";
2325                                 node->has_weak_ref = 1;
2326                                 node->pending_weak_ref = 1;
2327                                 node->local_weak_refs++;
2328                         } else if (strong && !node->has_strong_ref) {
2329                                 cmd = BR_ACQUIRE;
2330                                 cmd_name = "BR_ACQUIRE";
2331                                 node->has_strong_ref = 1;
2332                                 node->pending_strong_ref = 1;
2333                                 node->local_strong_refs++;
2334                         } else if (!strong && node->has_strong_ref) {
2335                                 cmd = BR_RELEASE;
2336                                 cmd_name = "BR_RELEASE";
2337                                 node->has_strong_ref = 0;
2338                         } else if (!weak && node->has_weak_ref) {
2339                                 cmd = BR_DECREFS;
2340                                 cmd_name = "BR_DECREFS";
2341                                 node->has_weak_ref = 0;
2342                         }
2343                         if (cmd != BR_NOOP) {
2344                                 if (put_user(cmd, (uint32_t __user *)ptr))
2345                                         return -EFAULT;
2346                                 ptr += sizeof(uint32_t);
2347                                 if (put_user(node->ptr, (void * __user *)ptr))
2348                                         return -EFAULT;
2349                                 ptr += sizeof(void *);
2350                                 if (put_user(node->cookie, (void * __user *)ptr))
2351                                         return -EFAULT;
2352                                 ptr += sizeof(void *);
2353
2354                                 binder_stat_br(proc, thread, cmd);
2355                                 binder_debug(BINDER_DEBUG_USER_REFS,
2356                                              "binder: %d:%d %s %d u%p c%p\n",
2357                                              proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
2358                         } else {
2359                                 list_del_init(&w->entry);
2360                                 if (!weak && !strong) {
2361                                         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2362                                                      "binder: %d:%d node %d u%p c%p deleted\n",
2363                                                      proc->pid, thread->pid, node->debug_id,
2364                                                      node->ptr, node->cookie);
2365                                         rb_erase(&node->rb_node, &proc->nodes);
2366                                         kfree(node);
2367                                         binder_stats_deleted(BINDER_STAT_NODE);
2368                                 } else {
2369                                         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2370                                                      "binder: %d:%d node %d u%p c%p state unchanged\n",
2371                                                      proc->pid, thread->pid, node->debug_id, node->ptr,
2372                                                      node->cookie);
2373                                 }
2374                         }
2375                 } break;
2376                 case BINDER_WORK_DEAD_BINDER:
2377                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2378                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2379                         struct binder_ref_death *death;
2380                         uint32_t cmd;
2381
2382                         death = container_of(w, struct binder_ref_death, work);
2383                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2384                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2385                         else
2386                                 cmd = BR_DEAD_BINDER;
2387                         if (put_user(cmd, (uint32_t __user *)ptr))
2388                                 return -EFAULT;
2389                         ptr += sizeof(uint32_t);
2390                         if (put_user(death->cookie, (void * __user *)ptr))
2391                                 return -EFAULT;
2392                         ptr += sizeof(void *);
2393                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2394                                      "binder: %d:%d %s %p\n",
2395                                       proc->pid, thread->pid,
2396                                       cmd == BR_DEAD_BINDER ?
2397                                       "BR_DEAD_BINDER" :
2398                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2399                                       death->cookie);
2400
2401                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2402                                 list_del(&w->entry);
2403                                 kfree(death);
2404                                 binder_stats_deleted(BINDER_STAT_DEATH);
2405                         } else
2406                                 list_move(&w->entry, &proc->delivered_death);
2407                         if (cmd == BR_DEAD_BINDER)
2408                                 goto done; /* DEAD_BINDER notifications can cause transactions */
2409                 } break;
2410                 }
2411
2412                 if (!t)
2413                         continue;
2414
2415                 BUG_ON(t->buffer == NULL);
2416                 if (t->buffer->target_node) {
2417                         struct binder_node *target_node = t->buffer->target_node;
2418                         tr.target.ptr = target_node->ptr;
2419                         tr.cookie = target_node->cookie;
2420                         t->saved_priority = task_nice(current);
2421                         if (t->priority < target_node->min_priority &&
2422                             !(t->flags & TF_ONE_WAY))
2423                                 binder_set_nice(t->priority);
2424                         else if (!(t->flags & TF_ONE_WAY) ||
2425                                  t->saved_priority > target_node->min_priority)
2426                                 binder_set_nice(target_node->min_priority);
2427                         cmd = BR_TRANSACTION;
2428                 } else {
2429                         tr.target.ptr = NULL;
2430                         tr.cookie = NULL;
2431                         cmd = BR_REPLY;
2432                 }
2433                 tr.code = t->code;
2434                 tr.flags = t->flags;
2435                 tr.sender_euid = t->sender_euid;
2436
2437                 if (t->from) {
2438                         struct task_struct *sender = t->from->proc->tsk;
2439                         tr.sender_pid = task_tgid_nr_ns(sender,
2440                                                         current->nsproxy->pid_ns);
2441                 } else {
2442                         tr.sender_pid = 0;
2443                 }
2444
2445                 tr.data_size = t->buffer->data_size;
2446                 tr.offsets_size = t->buffer->offsets_size;
2447                 tr.data.ptr.buffer = (void *)t->buffer->data +
2448                                         proc->user_buffer_offset;
2449                 tr.data.ptr.offsets = tr.data.ptr.buffer +
2450                                         ALIGN(t->buffer->data_size,
2451                                             sizeof(void *));
2452
2453                 if (put_user(cmd, (uint32_t __user *)ptr))
2454                         return -EFAULT;
2455                 ptr += sizeof(uint32_t);
2456                 if (copy_to_user(ptr, &tr, sizeof(tr)))
2457                         return -EFAULT;
2458                 ptr += sizeof(tr);
2459
2460                 binder_stat_br(proc, thread, cmd);
2461                 binder_debug(BINDER_DEBUG_TRANSACTION,
2462                              "binder: %d:%d %s %d %d:%d, cmd %d "
2463                              "size %zd-%zd ptr %p-%p\n",
2464                              proc->pid, thread->pid,
2465                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2466                              "BR_REPLY",
2467                              t->debug_id, t->from ? t->from->proc->pid : 0,
2468                              t->from ? t->from->pid : 0, cmd,
2469                              t->buffer->data_size, t->buffer->offsets_size,
2470                              tr.data.ptr.buffer, tr.data.ptr.offsets);
2471
2472                 list_del(&t->work.entry);
2473                 t->buffer->allow_user_free = 1;
2474                 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2475                         t->to_parent = thread->transaction_stack;
2476                         t->to_thread = thread;
2477                         thread->transaction_stack = t;
2478                 } else {
2479                         t->buffer->transaction = NULL;
2480                         kfree(t);
2481                         binder_stats_deleted(BINDER_STAT_TRANSACTION);
2482                 }
2483                 break;
2484         }
2485
2486 done:
2487
2488         *consumed = ptr - buffer;
2489         if (proc->requested_threads + proc->ready_threads == 0 &&
2490             proc->requested_threads_started < proc->max_threads &&
2491             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2492              BINDER_LOOPER_STATE_ENTERED))
2493              /* the user-space code fails to spawn a new thread if we leave this out */) {
2494                 proc->requested_threads++;
2495                 binder_debug(BINDER_DEBUG_THREADS,
2496                              "binder: %d:%d BR_SPAWN_LOOPER\n",
2497                              proc->pid, thread->pid);
2498                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2499                         return -EFAULT;
2500         }
2501         return 0;
2502 }
2503
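/*
 * Drain a pending work list (a thread's or a proc's todo list) when its
 * owner goes away.  Synchronous transactions that still have a target are
 * failed with BR_DEAD_REPLY, BINDER_WORK_TRANSACTION_COMPLETE entries are
 * freed, and any other work item is simply unlinked.
 */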
2504 static void binder_release_work(struct list_head *list)
2505 {
2506         struct binder_work *w;
2507         while (!list_empty(list)) {
2508                 w = list_first_entry(list, struct binder_work, entry);
2509                 list_del_init(&w->entry);
2510                 switch (w->type) {
2511                 case BINDER_WORK_TRANSACTION: {
2512                         struct binder_transaction *t;
2513
2514                         t = container_of(w, struct binder_transaction, work);
2515                         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
2516                                 binder_send_failed_reply(t, BR_DEAD_REPLY);
2517                 } break;
2518                 case BINDER_WORK_TRANSACTION_COMPLETE: {
2519                         kfree(w);
2520                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2521                 } break;
2522                 default:
2523                         break;
2524                 }
2525         }
2526
2527 }
2528
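/*
 * Look up the binder_thread for the calling task in proc->threads (an
 * rbtree keyed by pid).  A thread seen for the first time gets a fresh
 * entry, inserted and marked BINDER_LOOPER_STATE_NEED_RETURN.  Returns
 * NULL only if the allocation fails.
 */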
2529 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2530 {
2531         struct binder_thread *thread = NULL;
2532         struct rb_node *parent = NULL;
2533         struct rb_node **p = &proc->threads.rb_node;
2534
2535         while (*p) {
2536                 parent = *p;
2537                 thread = rb_entry(parent, struct binder_thread, rb_node);
2538
2539                 if (current->pid < thread->pid)
2540                         p = &(*p)->rb_left;
2541                 else if (current->pid > thread->pid)
2542                         p = &(*p)->rb_right;
2543                 else
2544                         break;
2545         }
2546         if (*p == NULL) {
2547                 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2548                 if (thread == NULL)
2549                         return NULL;
2550                 binder_stats_created(BINDER_STAT_THREAD);
2551                 thread->proc = proc;
2552                 thread->pid = current->pid;
2553                 init_waitqueue_head(&thread->wait);
2554                 INIT_LIST_HEAD(&thread->todo);
2555                 rb_link_node(&thread->rb_node, parent, p);
2556                 rb_insert_color(&thread->rb_node, &proc->threads);
2557                 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2558                 thread->return_error = BR_OK;
2559                 thread->return_error2 = BR_OK;
2560         }
2561         return thread;
2562 }
2563
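/*
 * Tear down a binder_thread: unlink it from proc->threads, walk its
 * transaction stack detaching the thread from each transaction, send a
 * BR_DEAD_REPLY for any reply the thread still owed, and release the work
 * left on its todo list.  Returns the number of transactions that were
 * still in flight.
 */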
2564 static int binder_free_thread(struct binder_proc *proc,
2565                               struct binder_thread *thread)
2566 {
2567         struct binder_transaction *t;
2568         struct binder_transaction *send_reply = NULL;
2569         int active_transactions = 0;
2570
2571         rb_erase(&thread->rb_node, &proc->threads);
2572         t = thread->transaction_stack;
2573         if (t && t->to_thread == thread)
2574                 send_reply = t;
2575         while (t) {
2576                 active_transactions++;
2577                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2578                              "binder: release %d:%d transaction %d "
2579                              "%s, still active\n", proc->pid, thread->pid,
2580                              t->debug_id,
2581                              (t->to_thread == thread) ? "in" : "out");
2582
2583                 if (t->to_thread == thread) {
2584                         t->to_proc = NULL;
2585                         t->to_thread = NULL;
2586                         if (t->buffer) {
2587                                 t->buffer->transaction = NULL;
2588                                 t->buffer = NULL;
2589                         }
2590                         t = t->to_parent;
2591                 } else if (t->from == thread) {
2592                         t->from = NULL;
2593                         t = t->from_parent;
2594                 } else
2595                         BUG();
2596         }
2597         if (send_reply)
2598                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2599         binder_release_work(&thread->todo);
2600         kfree(thread);
2601         binder_stats_deleted(BINDER_STAT_THREAD);
2602         return active_transactions;
2603 }
2604
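/*
 * poll() support: a thread with nothing on its own transaction stack or
 * todo list waits on the per-process queue, otherwise on its own wait
 * queue.  POLLIN is reported as soon as proc or thread work is available.
 */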
2605 static unsigned int binder_poll(struct file *filp,
2606                                 struct poll_table_struct *wait)
2607 {
2608         struct binder_proc *proc = filp->private_data;
2609         struct binder_thread *thread = NULL;
2610         int wait_for_proc_work;
2611
2612         mutex_lock(&binder_lock);
2613         thread = binder_get_thread(proc);
             if (thread == NULL) {
                     mutex_unlock(&binder_lock);
                     return POLLERR;
             }
2614
2615         wait_for_proc_work = thread->transaction_stack == NULL &&
2616                 list_empty(&thread->todo) && thread->return_error == BR_OK;
2617         mutex_unlock(&binder_lock);
2618
2619         if (wait_for_proc_work) {
2620                 if (binder_has_proc_work(proc, thread))
2621                         return POLLIN;
2622                 poll_wait(filp, &proc->wait, wait);
2623                 if (binder_has_proc_work(proc, thread))
2624                         return POLLIN;
2625         } else {
2626                 if (binder_has_thread_work(thread))
2627                         return POLLIN;
2628                 poll_wait(filp, &thread->wait, wait);
2629                 if (binder_has_thread_work(thread))
2630                         return POLLIN;
2631         }
2632         return 0;
2633 }
2634
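/*
 * Main ioctl entry point.  BINDER_WRITE_READ copies a struct
 * binder_write_read from userspace and runs the write buffer through
 * binder_thread_write() and the read buffer through binder_thread_read();
 * the remaining commands configure the thread pool, register the context
 * manager, retire the calling thread or report the protocol version.
 */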
2635 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2636 {
2637         int ret;
2638         struct binder_proc *proc = filp->private_data;
2639         struct binder_thread *thread;
2640         unsigned int size = _IOC_SIZE(cmd);
2641         void __user *ubuf = (void __user *)arg;
2642
2643         /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
2644
2645         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2646         if (ret)
2647                 return ret;
2648
2649         mutex_lock(&binder_lock);
2650         thread = binder_get_thread(proc);
2651         if (thread == NULL) {
2652                 ret = -ENOMEM;
2653                 goto err;
2654         }
2655
2656         switch (cmd) {
2657         case BINDER_WRITE_READ: {
2658                 struct binder_write_read bwr;
2659                 if (size != sizeof(struct binder_write_read)) {
2660                         ret = -EINVAL;
2661                         goto err;
2662                 }
2663                 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2664                         ret = -EFAULT;
2665                         goto err;
2666                 }
2667                 binder_debug(BINDER_DEBUG_READ_WRITE,
2668                              "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
2669                              proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
2670                              bwr.read_size, bwr.read_buffer);
2671
2672                 if (bwr.write_size > 0) {
2673                         ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
2674                         if (ret < 0) {
2675                                 bwr.read_consumed = 0;
2676                                 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2677                                         ret = -EFAULT;
2678                                 goto err;
2679                         }
2680                 }
2681                 if (bwr.read_size > 0) {
2682                         ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
2683                         if (!list_empty(&proc->todo))
2684                                 wake_up_interruptible(&proc->wait);
2685                         if (ret < 0) {
2686                                 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2687                                         ret = -EFAULT;
2688                                 goto err;
2689                         }
2690                 }
2691                 binder_debug(BINDER_DEBUG_READ_WRITE,
2692                              "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
2693                              proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
2694                              bwr.read_consumed, bwr.read_size);
2695                 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2696                         ret = -EFAULT;
2697                         goto err;
2698                 }
2699                 break;
2700         }
2701         case BINDER_SET_MAX_THREADS:
2702                 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2703                         ret = -EINVAL;
2704                         goto err;
2705                 }
2706                 break;
2707         case BINDER_SET_CONTEXT_MGR:
2708                 if (binder_context_mgr_node != NULL) {
2709                         printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
2710                         ret = -EBUSY;
2711                         goto err;
2712                 }
2713                 if (binder_context_mgr_uid != -1) {
2714                         if (binder_context_mgr_uid != current->cred->euid) {
2715                                 printk(KERN_ERR "binder: BINDER_SET_"
2716                                        "CONTEXT_MGR bad uid %d != %d\n",
2717                                        current->cred->euid,
2718                                        binder_context_mgr_uid);
2719                                 ret = -EPERM;
2720                                 goto err;
2721                         }
2722                 } else
2723                         binder_context_mgr_uid = current->cred->euid;
2724                 binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
2725                 if (binder_context_mgr_node == NULL) {
2726                         ret = -ENOMEM;
2727                         goto err;
2728                 }
2729                 binder_context_mgr_node->local_weak_refs++;
2730                 binder_context_mgr_node->local_strong_refs++;
2731                 binder_context_mgr_node->has_strong_ref = 1;
2732                 binder_context_mgr_node->has_weak_ref = 1;
2733                 break;
2734         case BINDER_THREAD_EXIT:
2735                 binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
2736                              proc->pid, thread->pid);
2737                 binder_free_thread(proc, thread);
2738                 thread = NULL;
2739                 break;
2740         case BINDER_VERSION:
2741                 if (size != sizeof(struct binder_version)) {
2742                         ret = -EINVAL;
2743                         goto err;
2744                 }
2745                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
2746                         ret = -EINVAL;
2747                         goto err;
2748                 }
2749                 break;
2750         default:
2751                 ret = -EINVAL;
2752                 goto err;
2753         }
2754         ret = 0;
2755 err:
2756         if (thread)
2757                 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2758         mutex_unlock(&binder_lock);
2759         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2760         if (ret && ret != -ERESTARTSYS)
2761                 printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2762         return ret;
2763 }
2764
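/*
 * vm_operations for the binder mapping.  binder_vma_close() runs when the
 * userspace mapping is torn down: it clears proc->vma and defers dropping
 * the cached files_struct to the deferred workqueue.
 */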
2765 static void binder_vma_open(struct vm_area_struct *vma)
2766 {
2767         struct binder_proc *proc = vma->vm_private_data;
2768         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2769                      "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2770                      proc->pid, vma->vm_start, vma->vm_end,
2771                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2772                      (unsigned long)pgprot_val(vma->vm_page_prot));
2773 }
2774
2775 static void binder_vma_close(struct vm_area_struct *vma)
2776 {
2777         struct binder_proc *proc = vma->vm_private_data;
2778         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2779                      "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2780                      proc->pid, vma->vm_start, vma->vm_end,
2781                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2782                      (unsigned long)pgprot_val(vma->vm_page_prot));
2783         proc->vma = NULL;
2784         proc->vma_vm_mm = NULL;
2785         binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2786 }
2787
2788 static struct vm_operations_struct binder_vm_ops = {
2789         .open = binder_vma_open,
2790         .close = binder_vma_close,
2791 };
2792
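/*
 * Set up the per-process buffer area.  The mapping is read-only for
 * userspace, capped at 4MB, and mirrored by a kernel vm area so that
 * proc->user_buffer_offset converts between the two views.  Only the
 * first page is backed immediately; the rest is populated on demand as
 * transaction buffers are allocated.
 */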
2793 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2794 {
2795         int ret;
2796         struct vm_struct *area;
2797         struct binder_proc *proc = filp->private_data;
2798         const char *failure_string;
2799         struct binder_buffer *buffer;
2800
2801         if ((vma->vm_end - vma->vm_start) > SZ_4M)
2802                 vma->vm_end = vma->vm_start + SZ_4M;
2803
2804         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2805                      "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2806                      proc->pid, vma->vm_start, vma->vm_end,
2807                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2808                      (unsigned long)pgprot_val(vma->vm_page_prot));
2809
2810         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2811                 ret = -EPERM;
2812                 failure_string = "bad vm_flags";
2813                 goto err_bad_arg;
2814         }
2815         vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2816
2817         mutex_lock(&binder_mmap_lock);
2818         if (proc->buffer) {
2819                 ret = -EBUSY;
2820                 failure_string = "already mapped";
2821                 goto err_already_mapped;
2822         }
2823
2824         area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2825         if (area == NULL) {
2826                 ret = -ENOMEM;
2827                 failure_string = "get_vm_area";
2828                 goto err_get_vm_area_failed;
2829         }
2830         proc->buffer = area->addr;
2831         proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2832         mutex_unlock(&binder_mmap_lock);
2833
2834 #ifdef CONFIG_CPU_CACHE_VIPT
2835         if (cache_is_vipt_aliasing()) {
2836                 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2837                         printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2838                         vma->vm_start += PAGE_SIZE;
2839                 }
2840         }
2841 #endif
2842         proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2843         if (proc->pages == NULL) {
2844                 ret = -ENOMEM;
2845                 failure_string = "alloc page array";
2846                 goto err_alloc_pages_failed;
2847         }
2848         proc->buffer_size = vma->vm_end - vma->vm_start;
2849
2850         vma->vm_ops = &binder_vm_ops;
2851         vma->vm_private_data = proc;
2852
2853         if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2854                 ret = -ENOMEM;
2855                 failure_string = "alloc small buf";
2856                 goto err_alloc_small_buf_failed;
2857         }
2858         buffer = proc->buffer;
2859         INIT_LIST_HEAD(&proc->buffers);
2860         list_add(&buffer->entry, &proc->buffers);
2861         buffer->free = 1;
2862         binder_insert_free_buffer(proc, buffer);
2863         proc->free_async_space = proc->buffer_size / 2;
2864         barrier();
2865         proc->files = get_files_struct(proc->tsk);
2866         proc->vma = vma;
2867         proc->vma_vm_mm = vma->vm_mm;
2868
2869         /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
2870                  proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2871         return 0;
2872
2873 err_alloc_small_buf_failed:
2874         kfree(proc->pages);
2875         proc->pages = NULL;
2876 err_alloc_pages_failed:
2877         mutex_lock(&binder_mmap_lock);
2878         vfree(proc->buffer);
2879         proc->buffer = NULL;
2880 err_get_vm_area_failed:
2881 err_already_mapped:
2882         mutex_unlock(&binder_mmap_lock);
2883 err_bad_arg:
2884         printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
2885                proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2886         return ret;
2887 }
2888
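/*
 * open() on /dev/binder: allocate a binder_proc for the opening process,
 * pin its task_struct, add it to the global binder_procs list and expose
 * a per-process debugfs file named after its pid.
 */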
2889 static int binder_open(struct inode *nodp, struct file *filp)
2890 {
2891         struct binder_proc *proc;
2892
2893         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2894                      current->group_leader->pid, current->pid);
2895
2896         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2897         if (proc == NULL)
2898                 return -ENOMEM;
2899         get_task_struct(current);
2900         proc->tsk = current;
2901         INIT_LIST_HEAD(&proc->todo);
2902         init_waitqueue_head(&proc->wait);
2903         proc->default_priority = task_nice(current);
2904         mutex_lock(&binder_lock);
2905         binder_stats_created(BINDER_STAT_PROC);
2906         hlist_add_head(&proc->proc_node, &binder_procs);
2907         proc->pid = current->group_leader->pid;
2908         INIT_LIST_HEAD(&proc->delivered_death);
2909         filp->private_data = proc;
2910         mutex_unlock(&binder_lock);
2911
2912         if (binder_debugfs_dir_entry_proc) {
2913                 char strbuf[11];
2914                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2915                 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
2916                         binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
2917         }
2918
2919         return 0;
2920 }
2921
2922 static int binder_flush(struct file *filp, fl_owner_t id)
2923 {
2924         struct binder_proc *proc = filp->private_data;
2925
2926         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2927
2928         return 0;
2929 }
2930
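/*
 * Deferred flush: ask every looper thread of the process to return to
 * userspace (BINDER_LOOPER_STATE_NEED_RETURN) and wake up anything
 * waiting on the thread or process wait queues.
 */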
2931 static void binder_deferred_flush(struct binder_proc *proc)
2932 {
2933         struct rb_node *n;
2934         int wake_count = 0;
2935         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2936                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2937                 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2938                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
2939                         wake_up_interruptible(&thread->wait);
2940                         wake_count++;
2941                 }
2942         }
2943         wake_up_interruptible_all(&proc->wait);
2944
2945         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2946                      "binder_flush: %d woke %d threads\n", proc->pid,
2947                      wake_count);
2948 }
2949
2950 static int binder_release(struct inode *nodp, struct file *filp)
2951 {
2952         struct binder_proc *proc = filp->private_data;
2953         debugfs_remove(proc->debugfs_entry);
2954         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
2955
2956         return 0;
2957 }
2958
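/*
 * Deferred release: final teardown of a binder_proc after its file has
 * been closed.  Frees all threads, nodes, refs, queued work and buffers,
 * moves nodes still referenced by other processes onto binder_dead_nodes
 * and queues death notifications for them, then frees the remaining
 * pages and the proc itself.
 */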
2959 static void binder_deferred_release(struct binder_proc *proc)
2960 {
2961         struct hlist_node *pos;
2962         struct binder_transaction *t;
2963         struct rb_node *n;
2964         int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
2965
2966         BUG_ON(proc->vma);
2967         BUG_ON(proc->files);
2968
2969         hlist_del(&proc->proc_node);
2970         if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
2971                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2972                              "binder_release: %d context_mgr_node gone\n",
2973                              proc->pid);
2974                 binder_context_mgr_node = NULL;
2975         }
2976
2977         threads = 0;
2978         active_transactions = 0;
2979         while ((n = rb_first(&proc->threads))) {
2980                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2981                 threads++;
2982                 active_transactions += binder_free_thread(proc, thread);
2983         }
2984         nodes = 0;
2985         incoming_refs = 0;
2986         while ((n = rb_first(&proc->nodes))) {
2987                 struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
2988
2989                 nodes++;
2990                 rb_erase(&node->rb_node, &proc->nodes);
2991                 list_del_init(&node->work.entry);
2992                 if (hlist_empty(&node->refs)) {
2993                         kfree(node);
2994                         binder_stats_deleted(BINDER_STAT_NODE);
2995                 } else {
2996                         struct binder_ref *ref;
2997                         int death = 0;
2998
2999                         node->proc = NULL;
3000                         node->local_strong_refs = 0;
3001                         node->local_weak_refs = 0;
3002                         hlist_add_head(&node->dead_node, &binder_dead_nodes);
3003
3004                         hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
3005                                 incoming_refs++;
3006                                 if (ref->death) {
3007                                         death++;
3008                                         if (list_empty(&ref->death->work.entry)) {
3009                                                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3010                                                 list_add_tail(&ref->death->work.entry, &ref->proc->todo);
3011                                                 wake_up_interruptible(&ref->proc->wait);
3012                                         } else
3013                                                 BUG();
3014                                 }
3015                         }
3016                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3017                                      "binder: node %d now dead, "
3018                                      "refs %d, death %d\n", node->debug_id,
3019                                      incoming_refs, death);
3020                 }
3021         }
3022         outgoing_refs = 0;
3023         while ((n = rb_first(&proc->refs_by_desc))) {
3024                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3025                                                   rb_node_desc);
3026                 outgoing_refs++;
3027                 binder_delete_ref(ref);
3028         }
3029         binder_release_work(&proc->todo);
3030         buffers = 0;
3031
3032         while ((n = rb_first(&proc->allocated_buffers))) {
3033                 struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
3034                                                         rb_node);
3035                 t = buffer->transaction;
3036                 if (t) {
3037                         t->buffer = NULL;
3038                         buffer->transaction = NULL;
3039                         printk(KERN_ERR "binder: release proc %d, "
3040                                "transaction %d, not freed\n",
3041                                proc->pid, t->debug_id);
3042                         /*BUG();*/
3043                 }
3044                 binder_free_buf(proc, buffer);
3045                 buffers++;
3046         }
3047
3048         binder_stats_deleted(BINDER_STAT_PROC);
3049
3050         page_count = 0;
3051         if (proc->pages) {
3052                 int i;
3053                 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3054                         if (proc->pages[i]) {
3055                                 void *page_addr = proc->buffer + i * PAGE_SIZE;
3056                                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3057                                              "binder_release: %d: "
3058                                              "page %d at %p not freed\n",
3059                                              proc->pid, i,
3060                                              page_addr);
3061                                 unmap_kernel_range((unsigned long)page_addr,
3062                                         PAGE_SIZE);
3063                                 __free_page(proc->pages[i]);
3064                                 page_count++;
3065                         }
3066                 }
3067                 kfree(proc->pages);
3068                 vfree(proc->buffer);
3069         }
3070
3071         put_task_struct(proc->tsk);
3072
3073         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3074                      "binder_release: %d threads %d, nodes %d (ref %d), "
3075                      "refs %d, active transactions %d, buffers %d, "
3076                      "pages %d\n",
3077                      proc->pid, threads, nodes, incoming_refs, outgoing_refs,
3078                      active_transactions, buffers, page_count);
3079
3080         kfree(proc);
3081 }
3082
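/*
 * Workqueue handler: pull binder_procs off binder_deferred_list one at a
 * time and run whichever of PUT_FILES, FLUSH and RELEASE were requested
 * for each, dropping the files_struct only after binder_lock is released.
 */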
3083 static void binder_deferred_func(struct work_struct *work)
3084 {
3085         struct binder_proc *proc;
3086         struct files_struct *files;
3087
3088         int defer;
3089         do {
3090                 mutex_lock(&binder_lock);
3091                 mutex_lock(&binder_deferred_lock);
3092                 if (!hlist_empty(&binder_deferred_list)) {
3093                         proc = hlist_entry(binder_deferred_list.first,
3094                                         struct binder_proc, deferred_work_node);
3095                         hlist_del_init(&proc->deferred_work_node);
3096                         defer = proc->deferred_work;
3097                         proc->deferred_work = 0;
3098                 } else {
3099                         proc = NULL;
3100                         defer = 0;
3101                 }
3102                 mutex_unlock(&binder_deferred_lock);
3103
3104                 files = NULL;
3105                 if (defer & BINDER_DEFERRED_PUT_FILES) {
3106                         files = proc->files;
3107                         if (files)
3108                                 proc->files = NULL;
3109                 }
3110
3111                 if (defer & BINDER_DEFERRED_FLUSH)
3112                         binder_deferred_flush(proc);
3113
3114                 if (defer & BINDER_DEFERRED_RELEASE)
3115                         binder_deferred_release(proc); /* frees proc */
3116
3117                 mutex_unlock(&binder_lock);
3118                 if (files)
3119                         put_files_struct(files);
3120         } while (proc);
3121 }
3122 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3123
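/*
 * Record deferred work for a proc and queue the shared work item if the
 * proc is not already on binder_deferred_list.
 */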
3124 static void
3125 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3126 {
3127         mutex_lock(&binder_deferred_lock);
3128         proc->deferred_work |= defer;
3129         if (hlist_unhashed(&proc->deferred_work_node)) {
3130                 hlist_add_head(&proc->deferred_work_node,
3131                                 &binder_deferred_list);
3132                 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3133         }
3134         mutex_unlock(&binder_deferred_lock);
3135 }
3136
3137 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3138                                      struct binder_transaction *t)
3139 {
3140         seq_printf(m,
3141                    "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3142                    prefix, t->debug_id, t,
3143                    t->from ? t->from->proc->pid : 0,
3144                    t->from ? t->from->pid : 0,
3145                    t->to_proc ? t->to_proc->pid : 0,
3146                    t->to_thread ? t->to_thread->pid : 0,
3147                    t->code, t->flags, t->priority, t->need_reply);
3148         if (t->buffer == NULL) {
3149                 seq_puts(m, " buffer free\n");
3150                 return;
3151         }
3152         if (t->buffer->target_node)
3153                 seq_printf(m, " node %d",
3154                            t->buffer->target_node->debug_id);
3155         seq_printf(m, " size %zd:%zd data %p\n",
3156                    t->buffer->data_size, t->buffer->offsets_size,
3157                    t->buffer->data);
3158 }
3159
3160 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3161                                 struct binder_buffer *buffer)
3162 {
3163         seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3164                    prefix, buffer->debug_id, buffer->data,
3165                    buffer->data_size, buffer->offsets_size,
3166                    buffer->transaction ? "active" : "delivered");
3167 }
3168
3169 static void print_binder_work(struct seq_file *m, const char *prefix,
3170                               const char *transaction_prefix,
3171                               struct binder_work *w)
3172 {
3173         struct binder_node *node;
3174         struct binder_transaction *t;
3175
3176         switch (w->type) {
3177         case BINDER_WORK_TRANSACTION:
3178                 t = container_of(w, struct binder_transaction, work);
3179                 print_binder_transaction(m, transaction_prefix, t);
3180                 break;
3181         case BINDER_WORK_TRANSACTION_COMPLETE:
3182                 seq_printf(m, "%stransaction complete\n", prefix);
3183                 break;
3184         case BINDER_WORK_NODE:
3185                 node = container_of(w, struct binder_node, work);
3186                 seq_printf(m, "%snode work %d: u%p c%p\n",
3187                            prefix, node->debug_id, node->ptr, node->cookie);
3188                 break;
3189         case BINDER_WORK_DEAD_BINDER:
3190                 seq_printf(m, "%shas dead binder\n", prefix);
3191                 break;
3192         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3193                 seq_printf(m, "%shas cleared dead binder\n", prefix);
3194                 break;
3195         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3196                 seq_printf(m, "%shas cleared death notification\n", prefix);
3197                 break;
3198         default:
3199                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3200                 break;
3201         }
3202 }
3203
3204 static void print_binder_thread(struct seq_file *m,
3205                                 struct binder_thread *thread,
3206                                 int print_always)
3207 {
3208         struct binder_transaction *t;
3209         struct binder_work *w;
3210         size_t start_pos = m->count;
3211         size_t header_pos;
3212
3213         seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
3214         header_pos = m->count;
3215         t = thread->transaction_stack;
3216         while (t) {
3217                 if (t->from == thread) {
3218                         print_binder_transaction(m,
3219                                                  "    outgoing transaction", t);
3220                         t = t->from_parent;
3221                 } else if (t->to_thread == thread) {
3222                         print_binder_transaction(m,
3223                                                  "    incoming transaction", t);
3224                         t = t->to_parent;
3225                 } else {
3226                         print_binder_transaction(m, "    bad transaction", t);
3227                         t = NULL;
3228                 }
3229         }
3230         list_for_each_entry(w, &thread->todo, entry) {
3231                 print_binder_work(m, "    ", "    pending transaction", w);
3232         }
3233         if (!print_always && m->count == header_pos)
3234                 m->count = start_pos;
3235 }
3236
3237 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3238 {
3239         struct binder_ref *ref;
3240         struct hlist_node *pos;
3241         struct binder_work *w;
3242         int count;
3243
3244         count = 0;
3245         hlist_for_each_entry(ref, pos, &node->refs, node_entry)
3246                 count++;
3247
3248         seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
3249                    node->debug_id, node->ptr, node->cookie,
3250                    node->has_strong_ref, node->has_weak_ref,
3251                    node->local_strong_refs, node->local_weak_refs,
3252                    node->internal_strong_refs, count);
3253         if (count) {
3254                 seq_puts(m, " proc");
3255                 hlist_for_each_entry(ref, pos, &node->refs, node_entry)
3256                         seq_printf(m, " %d", ref->proc->pid);
3257         }
3258         seq_puts(m, "\n");
3259         list_for_each_entry(w, &node->async_todo, entry)
3260                 print_binder_work(m, "    ",
3261                                   "    pending async transaction", w);
3262 }
3263
3264 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3265 {
3266         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
3267                    ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3268                    ref->node->debug_id, ref->strong, ref->weak, ref->death);
3269 }
3270
3271 static void print_binder_proc(struct seq_file *m,
3272                               struct binder_proc *proc, int print_all)
3273 {
3274         struct binder_work *w;
3275         struct rb_node *n;
3276         size_t start_pos = m->count;
3277         size_t header_pos;
3278
3279         seq_printf(m, "proc %d\n", proc->pid);
3280         header_pos = m->count;
3281
3282         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3283                 print_binder_thread(m, rb_entry(n, struct binder_thread,
3284                                                 rb_node), print_all);
3285         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3286                 struct binder_node *node = rb_entry(n, struct binder_node,
3287                                                     rb_node);
3288                 if (print_all || node->has_async_transaction)
3289                         print_binder_node(m, node);
3290         }
3291         if (print_all) {
3292                 for (n = rb_first(&proc->refs_by_desc);
3293                      n != NULL;
3294                      n = rb_next(n))
3295                         print_binder_ref(m, rb_entry(n, struct binder_ref,
3296                                                      rb_node_desc));
3297         }
3298         for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3299                 print_binder_buffer(m, "  buffer",
3300                                     rb_entry(n, struct binder_buffer, rb_node));
3301         list_for_each_entry(w, &proc->todo, entry)
3302                 print_binder_work(m, "  ", "  pending transaction", w);
3303         list_for_each_entry(w, &proc->delivered_death, entry) {
3304                 seq_puts(m, "  has delivered dead binder\n");
3305                 break;
3306         }
3307         if (!print_all && m->count == header_pos)
3308                 m->count = start_pos;
3309 }
3310
3311 static const char *binder_return_strings[] = {
3312         "BR_ERROR",
3313         "BR_OK",
3314         "BR_TRANSACTION",
3315         "BR_REPLY",
3316         "BR_ACQUIRE_RESULT",
3317         "BR_DEAD_REPLY",
3318         "BR_TRANSACTION_COMPLETE",
3319         "BR_INCREFS",
3320         "BR_ACQUIRE",
3321         "BR_RELEASE",
3322         "BR_DECREFS",
3323         "BR_ATTEMPT_ACQUIRE",
3324         "BR_NOOP",
3325         "BR_SPAWN_LOOPER",
3326         "BR_FINISHED",
3327         "BR_DEAD_BINDER",
3328         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3329         "BR_FAILED_REPLY"
3330 };
3331
3332 static const char *binder_command_strings[] = {
3333         "BC_TRANSACTION",
3334         "BC_REPLY",
3335         "BC_ACQUIRE_RESULT",
3336         "BC_FREE_BUFFER",
3337         "BC_INCREFS",
3338         "BC_ACQUIRE",
3339         "BC_RELEASE",
3340         "BC_DECREFS",
3341         "BC_INCREFS_DONE",
3342         "BC_ACQUIRE_DONE",
3343         "BC_ATTEMPT_ACQUIRE",
3344         "BC_REGISTER_LOOPER",
3345         "BC_ENTER_LOOPER",
3346         "BC_EXIT_LOOPER",
3347         "BC_REQUEST_DEATH_NOTIFICATION",
3348         "BC_CLEAR_DEATH_NOTIFICATION",
3349         "BC_DEAD_BINDER_DONE"
3350 };
3351
3352 static const char *binder_objstat_strings[] = {
3353         "proc",
3354         "thread",
3355         "node",
3356         "ref",
3357         "death",
3358         "transaction",
3359         "transaction_complete"
3360 };
3361
3362 static void print_binder_stats(struct seq_file *m, const char *prefix,
3363                                struct binder_stats *stats)
3364 {
3365         int i;
3366
3367         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3368                      ARRAY_SIZE(binder_command_strings));
3369         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3370                 if (stats->bc[i])
3371                         seq_printf(m, "%s%s: %d\n", prefix,
3372                                    binder_command_strings[i], stats->bc[i]);
3373         }
3374
3375         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3376                      ARRAY_SIZE(binder_return_strings));
3377         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3378                 if (stats->br[i])
3379                         seq_printf(m, "%s%s: %d\n", prefix,
3380                                    binder_return_strings[i], stats->br[i]);
3381         }
3382
3383         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3384                      ARRAY_SIZE(binder_objstat_strings));
3385         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3386                      ARRAY_SIZE(stats->obj_deleted));
3387         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3388                 if (stats->obj_created[i] || stats->obj_deleted[i])
3389                         seq_printf(m, "%s%s: active %d total %d\n", prefix,
3390                                 binder_objstat_strings[i],
3391                                 stats->obj_created[i] - stats->obj_deleted[i],
3392                                 stats->obj_created[i]);
3393         }
3394 }
3395
3396 static void print_binder_proc_stats(struct seq_file *m,
3397                                     struct binder_proc *proc)
3398 {
3399         struct binder_work *w;
3400         struct rb_node *n;
3401         int count, strong, weak;
3402
3403         seq_printf(m, "proc %d\n", proc->pid);
3404         count = 0;
3405         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3406                 count++;
3407         seq_printf(m, "  threads: %d\n", count);
3408         seq_printf(m, "  requested threads: %d+%d/%d\n"
3409                         "  ready threads %d\n"
3410                         "  free async space %zd\n", proc->requested_threads,
3411                         proc->requested_threads_started, proc->max_threads,
3412                         proc->ready_threads, proc->free_async_space);
3413         count = 0;
3414         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3415                 count++;
3416         seq_printf(m, "  nodes: %d\n", count);
3417         count = 0;
3418         strong = 0;
3419         weak = 0;
3420         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3421                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3422                                                   rb_node_desc);
3423                 count++;
3424                 strong += ref->strong;
3425                 weak += ref->weak;
3426         }
3427         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
3428
3429         count = 0;
3430         for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3431                 count++;
3432         seq_printf(m, "  buffers: %d\n", count);
3433
3434         count = 0;
3435         list_for_each_entry(w, &proc->todo, entry) {
3436                 switch (w->type) {
3437                 case BINDER_WORK_TRANSACTION:
3438                         count++;
3439                         break;
3440                 default:
3441                         break;
3442                 }
3443         }
3444         seq_printf(m, "  pending transactions: %d\n", count);
3445
3446         print_binder_stats(m, "  ", &proc->stats);
3447 }
3448
3449
3450 static int binder_state_show(struct seq_file *m, void *unused)
3451 {
3452         struct binder_proc *proc;
3453         struct hlist_node *pos;
3454         struct binder_node *node;
3455         int do_lock = !binder_debug_no_lock;
3456
3457         if (do_lock)
3458                 mutex_lock(&binder_lock);
3459
3460         seq_puts(m, "binder state:\n");
3461
3462         if (!hlist_empty(&binder_dead_nodes))
3463                 seq_puts(m, "dead nodes:\n");
3464         hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
3465                 print_binder_node(m, node);
3466
3467         hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
3468                 print_binder_proc(m, proc, 1);
3469         if (do_lock)
3470                 mutex_unlock(&binder_lock);
3471         return 0;
3472 }
3473
3474 static int binder_stats_show(struct seq_file *m, void *unused)
3475 {
3476         struct binder_proc *proc;
3477         struct hlist_node *pos;
3478         int do_lock = !binder_debug_no_lock;
3479
3480         if (do_lock)
3481                 mutex_lock(&binder_lock);
3482
3483         seq_puts(m, "binder stats:\n");
3484
3485         print_binder_stats(m, "", &binder_stats);
3486
3487         hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
3488                 print_binder_proc_stats(m, proc);
3489         if (do_lock)
3490                 mutex_unlock(&binder_lock);
3491         return 0;
3492 }
3493
3494 static int binder_transactions_show(struct seq_file *m, void *unused)
3495 {
3496         struct binder_proc *proc;
3497         struct hlist_node *pos;
3498         int do_lock = !binder_debug_no_lock;
3499
3500         if (do_lock)
3501                 mutex_lock(&binder_lock);
3502
3503         seq_puts(m, "binder transactions:\n");
3504         hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
3505                 print_binder_proc(m, proc, 0);
3506         if (do_lock)
3507                 mutex_unlock(&binder_lock);
3508         return 0;
3509 }
3510
3511 static int binder_proc_show(struct seq_file *m, void *unused)
3512 {
3513         struct binder_proc *proc = m->private;
3514         int do_lock = !binder_debug_no_lock;
3515
3516         if (do_lock)
3517                 mutex_lock(&binder_lock);
3518         seq_puts(m, "binder proc state:\n");
3519         print_binder_proc(m, proc, 1);
3520         if (do_lock)
3521                 mutex_unlock(&binder_lock);
3522         return 0;
3523 }
3524
3525 static void print_binder_transaction_log_entry(struct seq_file *m,
3526                                         struct binder_transaction_log_entry *e)
3527 {
3528         seq_printf(m,
3529                    "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3530                    e->debug_id, (e->call_type == 2) ? "reply" :
3531                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3532                    e->from_thread, e->to_proc, e->to_thread, e->to_node,
3533                    e->target_handle, e->data_size, e->offsets_size);
3534 }
3535
3536 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3537 {
3538         struct binder_transaction_log *log = m->private;
3539         int i;
3540
3541         if (log->full) {
3542                 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3543                         print_binder_transaction_log_entry(m, &log->entry[i]);
3544         }
3545         for (i = 0; i < log->next; i++)
3546                 print_binder_transaction_log_entry(m, &log->entry[i]);
3547         return 0;
3548 }
3549
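/*
 * Minimal userspace usage sketch (illustrative only, not part of the
 * driver).  It assumes the ioctl numbers and struct binder_write_read
 * layout declared in "binder.h" above:
 *
 *	int fd = open("/dev/binder", O_RDWR);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *	uint32_t enter = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (unsigned long)&enter,
 *		.write_size = sizeof(enter),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * After entering the loop, the same ioctl with a non-zero read_size
 * returns BR_* commands into read_buffer.
 */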
3550 static const struct file_operations binder_fops = {
3551         .owner = THIS_MODULE,
3552         .poll = binder_poll,
3553         .unlocked_ioctl = binder_ioctl,
3554         .mmap = binder_mmap,
3555         .open = binder_open,
3556         .flush = binder_flush,
3557         .release = binder_release,
3558 };
3559
3560 static struct miscdevice binder_miscdev = {
3561         .minor = MISC_DYNAMIC_MINOR,
3562         .name = "binder",
3563         .fops = &binder_fops
3564 };
3565
3566 BINDER_DEBUG_ENTRY(state);
3567 BINDER_DEBUG_ENTRY(stats);
3568 BINDER_DEBUG_ENTRY(transactions);
3569 BINDER_DEBUG_ENTRY(transaction_log);
3570
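/*
 * Module init: create the single-threaded deferred workqueue, register
 * the /dev/binder misc device and, when debugfs is available, the
 * state/stats/transactions/transaction_log entries plus the per-process
 * "proc" directory.
 */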
3571 static int __init binder_init(void)
3572 {
3573         int ret;
3574
3575         binder_deferred_workqueue = create_singlethread_workqueue("binder");
3576         if (!binder_deferred_workqueue)
3577                 return -ENOMEM;
3578
3579         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3580         if (binder_debugfs_dir_entry_root)
3581                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3582                                                  binder_debugfs_dir_entry_root);
3583         ret = misc_register(&binder_miscdev);
3584         if (binder_debugfs_dir_entry_root) {
3585                 binder_debugfs_state = debugfs_create_file(
3586                                     "state",
3587                                     S_IRUGO,
3588                                     binder_debugfs_dir_entry_root,
3589                                     NULL,
3590                                     &binder_state_fops);
3591                 binder_debugfs_stats = debugfs_create_file(
3592                                     "stats",
3593                                     S_IRUGO,
3594                                     binder_debugfs_dir_entry_root,
3595                                     NULL,
3596                                     &binder_stats_fops);
3597                 binder_debugfs_transactions = debugfs_create_file(
3598                                     "transactions",
3599                                     S_IRUGO,
3600                                     binder_debugfs_dir_entry_root,
3601                                     NULL,
3602                                     &binder_transactions_fops);
3603                 binder_debugfs_transaction_log = debugfs_create_file(
3604                                     "transaction_log",
3605                                     S_IRUGO,
3606                                     binder_debugfs_dir_entry_root,
3607                                     &binder_transaction_log,
3608                                     &binder_transaction_log_fops);
3609                 binder_debugfs_failed_transaction_log = debugfs_create_file(
3610                                     "failed_transaction_log",
3611                                     S_IRUGO,
3612                                     binder_debugfs_dir_entry_root,
3613                                     &binder_transaction_log_failed,
3614                                     &binder_transaction_log_fops);
3615         }
3616         return ret;
3617 }
3618
3619 static void __exit binder_exit(void)
3620 {
3621         if (binder_debugfs_dir_entry_root) {
3622                 debugfs_remove(binder_debugfs_dir_entry_proc);
3623                 debugfs_remove(binder_debugfs_state);
3624                 debugfs_remove(binder_debugfs_stats);
3625                 debugfs_remove(binder_debugfs_transactions);
3626                 debugfs_remove(binder_debugfs_transaction_log);
3627                 debugfs_remove(binder_debugfs_failed_transaction_log);
3628                 debugfs_remove(binder_debugfs_dir_entry_root);
3629         }
3630
3631         misc_deregister(&binder_miscdev);
3632         destroy_workqueue(binder_deferred_workqueue);
3633
3634         printk(KERN_INFO "binder: unloaded\n");
3635 }
3636
3637 module_init(binder_init);
3638 module_exit(binder_exit);
3639
3640 MODULE_LICENSE("GPL v2");