author    Greg Kroah-Hartman <gregkh@suse.de>    2009-10-05 19:29:29 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>    2009-12-11 15:23:21 -0500
commit    b0a0ccfad85b3657fe999805df65f5cfe634ab8a (patch)
tree      9d65ace48cbde0ad3432f9dac24d155bf651433b /drivers/staging
parent    99fd99f618daecae638550275cb132ab1ffe464c (diff)
Staging: android: delete android drivers

These drivers are no longer being developed and the original authors seem to
have abandoned them and hence do not want them in the mainline kernel tree.
So sad :(

Cc: Brian Swetland <swetland@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/Kconfig                    |    2
-rw-r--r--  drivers/staging/Makefile                   |    1
-rw-r--r--  drivers/staging/android/Kconfig            |   96
-rw-r--r--  drivers/staging/android/Makefile           |    6
-rw-r--r--  drivers/staging/android/TODO               |   10
-rw-r--r--  drivers/staging/android/binder.c           | 3767
-rw-r--r--  drivers/staging/android/binder.h           |  330
-rw-r--r--  drivers/staging/android/logger.c           |  607
-rw-r--r--  drivers/staging/android/logger.h           |   48
-rw-r--r--  drivers/staging/android/lowmemorykiller.c  |  173
-rw-r--r--  drivers/staging/android/ram_console.c      |  410
-rw-r--r--  drivers/staging/android/timed_gpio.c       |  166
-rw-r--r--  drivers/staging/android/timed_gpio.h       |   33
-rw-r--r--  drivers/staging/android/timed_output.c     |  121
-rw-r--r--  drivers/staging/android/timed_output.h     |   37
15 files changed, 0 insertions, 5807 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 56b275073cee..5dccf6bfb0f9 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -83,8 +83,6 @@ source "drivers/staging/mimio/Kconfig"
 
 source "drivers/staging/frontier/Kconfig"
 
-source "drivers/staging/android/Kconfig"
-
 source "drivers/staging/dream/Kconfig"
 
 source "drivers/staging/dst/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index edae7d681e6e..2f228f99be93 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_RTL8192SU) += rtl8192su/
 obj-$(CONFIG_RTL8192E)		+= rtl8192e/
 obj-$(CONFIG_INPUT_MIMIO)	+= mimio/
 obj-$(CONFIG_TRANZPORT)	+= frontier/
-obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_DREAM)		+= dream/
 obj-$(CONFIG_DST)		+= dst/
 obj-$(CONFIG_POHMELFS)		+= pohmelfs/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
deleted file mode 100644
index eb675635ae60..000000000000
--- a/drivers/staging/android/Kconfig
+++ /dev/null
@@ -1,96 +0,0 @@
1menu "Android"
2
3config ANDROID
4 bool "Android Drivers"
5 depends on BROKEN
6 default n
7 ---help---
8 Enable support for various drivers needed on the Android platform
9
10if ANDROID
11
12config ANDROID_BINDER_IPC
13 bool "Android Binder IPC Driver"
14 default n
15
16config ANDROID_LOGGER
17 tristate "Android log driver"
18 default n
19
20config ANDROID_RAM_CONSOLE
21 bool "Android RAM buffer console"
22 default n
23
24config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
25 bool "Enable verbose console messages on Android RAM console"
26 default y
27 depends on ANDROID_RAM_CONSOLE
28
29menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
30 bool "Android RAM Console Enable error correction"
31 default n
32 depends on ANDROID_RAM_CONSOLE
33 depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
34 select REED_SOLOMON
35 select REED_SOLOMON_ENC8
36 select REED_SOLOMON_DEC8
37
38if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
39
40config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
41 int "Android RAM Console data size"
42 default 128
43 help
44 Must be a power of 2.
45
46config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
47 int "Android RAM Console ECC size"
48 default 16
49
50config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
51 int "Android RAM Console Symbol size"
52 default 8
53
54config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
55 hex "Android RAM Console Polynomial"
56 default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
57 default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
58 default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
59 default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
60 default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
61
62endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
63
64config ANDROID_RAM_CONSOLE_EARLY_INIT
65 bool "Start Android RAM console early"
66 default n
67 depends on ANDROID_RAM_CONSOLE
68
69config ANDROID_RAM_CONSOLE_EARLY_ADDR
70 hex "Android RAM console virtual address"
71 default 0
72 depends on ANDROID_RAM_CONSOLE_EARLY_INIT
73
74config ANDROID_RAM_CONSOLE_EARLY_SIZE
75 hex "Android RAM console buffer size"
76 default 0
77 depends on ANDROID_RAM_CONSOLE_EARLY_INIT
78
79config ANDROID_TIMED_OUTPUT
80 bool "Timed output class driver"
81 default y
82
83config ANDROID_TIMED_GPIO
84 tristate "Android timed gpio driver"
85 depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
86 default n
87
88config ANDROID_LOW_MEMORY_KILLER
89 bool "Android Low Memory Killer"
90 default n
91 ---help---
92 Register processes to be killed when memory is low
93
94endif # if ANDROID
95
96endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
deleted file mode 100644
index 8e057e626d11..000000000000
--- a/drivers/staging/android/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
2obj-$(CONFIG_ANDROID_LOGGER) += logger.o
3obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
4obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
5obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
6obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
deleted file mode 100644
index e59c5be4be2b..000000000000
--- a/drivers/staging/android/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
1TODO:
2 - checkpatch.pl cleanups
3 - sparse fixes
4 - rename files to be not so "generic"
5 - make sure things build as modules properly
6 - add proper arch dependencies as needed
7 - audit userspace interfaces to make sure they are sane
8
9Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
10Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
deleted file mode 100644
index 99010d4b3044..000000000000
--- a/drivers/staging/android/binder.c
+++ /dev/null
@@ -1,3767 +0,0 @@
1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <asm/cacheflush.h>
19#include <linux/fdtable.h>
20#include <linux/file.h>
21#include <linux/fs.h>
22#include <linux/list.h>
23#include <linux/miscdevice.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/nsproxy.h>
28#include <linux/poll.h>
29#include <linux/proc_fs.h>
30#include <linux/rbtree.h>
31#include <linux/sched.h>
32#include <linux/uaccess.h>
33#include <linux/vmalloc.h>
34
35#include "binder.h"
36
37static DEFINE_MUTEX(binder_lock);
38static DEFINE_MUTEX(binder_deferred_lock);
39
40static HLIST_HEAD(binder_procs);
41static HLIST_HEAD(binder_deferred_list);
42static HLIST_HEAD(binder_dead_nodes);
43
44static struct proc_dir_entry *binder_proc_dir_entry_root;
45static struct proc_dir_entry *binder_proc_dir_entry_proc;
46static struct binder_node *binder_context_mgr_node;
47static uid_t binder_context_mgr_uid = -1;
48static int binder_last_id;
49
50static int binder_read_proc_proc(char *page, char **start, off_t off,
51 int count, int *eof, void *data);
52
53/* This is only defined in include/asm-arm/sizes.h */
54#ifndef SZ_1K
55#define SZ_1K 0x400
56#endif
57
58#ifndef SZ_4M
59#define SZ_4M 0x400000
60#endif
61
62#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
63
64#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
65
66enum {
67 BINDER_DEBUG_USER_ERROR = 1U << 0,
68 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
69 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
70 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
71 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
72 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
73 BINDER_DEBUG_READ_WRITE = 1U << 6,
74 BINDER_DEBUG_USER_REFS = 1U << 7,
75 BINDER_DEBUG_THREADS = 1U << 8,
76 BINDER_DEBUG_TRANSACTION = 1U << 9,
77 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
78 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
79 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
80 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
81 BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
82 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
83};
84static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
85 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
86module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
87
88static int binder_debug_no_lock;
89module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
90
91static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
92static int binder_stop_on_user_error;
93
94static int binder_set_stop_on_user_error(const char *val,
95 struct kernel_param *kp)
96{
97 int ret;
98 ret = param_set_int(val, kp);
99 if (binder_stop_on_user_error < 2)
100 wake_up(&binder_user_error_wait);
101 return ret;
102}
103module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
104 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
105
106#define binder_debug(mask, x...) \
107 do { \
108 if (binder_debug_mask & mask) \
109 printk(KERN_INFO x); \
110 } while (0)
111
112#define binder_user_error(x...) \
113 do { \
114 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
115 printk(KERN_INFO x); \
116 if (binder_stop_on_user_error) \
117 binder_stop_on_user_error = 2; \
118 } while (0)
119
120enum binder_stat_types {
121 BINDER_STAT_PROC,
122 BINDER_STAT_THREAD,
123 BINDER_STAT_NODE,
124 BINDER_STAT_REF,
125 BINDER_STAT_DEATH,
126 BINDER_STAT_TRANSACTION,
127 BINDER_STAT_TRANSACTION_COMPLETE,
128 BINDER_STAT_COUNT
129};
130
131struct binder_stats {
132 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
133 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
134 int obj_created[BINDER_STAT_COUNT];
135 int obj_deleted[BINDER_STAT_COUNT];
136};
137
138static struct binder_stats binder_stats;
139
140static inline void binder_stats_deleted(enum binder_stat_types type)
141{
142 binder_stats.obj_deleted[type]++;
143}
144
145static inline void binder_stats_created(enum binder_stat_types type)
146{
147 binder_stats.obj_created[type]++;
148}
149
150struct binder_transaction_log_entry {
151 int debug_id;
152 int call_type;
153 int from_proc;
154 int from_thread;
155 int target_handle;
156 int to_proc;
157 int to_thread;
158 int to_node;
159 int data_size;
160 int offsets_size;
161};
162struct binder_transaction_log {
163 int next;
164 int full;
165 struct binder_transaction_log_entry entry[32];
166};
167static struct binder_transaction_log binder_transaction_log;
168static struct binder_transaction_log binder_transaction_log_failed;
169
170static struct binder_transaction_log_entry *binder_transaction_log_add(
171 struct binder_transaction_log *log)
172{
173 struct binder_transaction_log_entry *e;
174 e = &log->entry[log->next];
175 memset(e, 0, sizeof(*e));
176 log->next++;
177 if (log->next == ARRAY_SIZE(log->entry)) {
178 log->next = 0;
179 log->full = 1;
180 }
181 return e;
182}
183
184struct binder_work {
185 struct list_head entry;
186 enum {
187 BINDER_WORK_TRANSACTION = 1,
188 BINDER_WORK_TRANSACTION_COMPLETE,
189 BINDER_WORK_NODE,
190 BINDER_WORK_DEAD_BINDER,
191 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
192 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
193 } type;
194};
195
196struct binder_node {
197 int debug_id;
198 struct binder_work work;
199 union {
200 struct rb_node rb_node;
201 struct hlist_node dead_node;
202 };
203 struct binder_proc *proc;
204 struct hlist_head refs;
205 int internal_strong_refs;
206 int local_weak_refs;
207 int local_strong_refs;
208 void __user *ptr;
209 void __user *cookie;
210 unsigned has_strong_ref:1;
211 unsigned pending_strong_ref:1;
212 unsigned has_weak_ref:1;
213 unsigned pending_weak_ref:1;
214 unsigned has_async_transaction:1;
215 unsigned accept_fds:1;
216 unsigned min_priority:8;
217 struct list_head async_todo;
218};
219
220struct binder_ref_death {
221 struct binder_work work;
222 void __user *cookie;
223};
224
225struct binder_ref {
226 /* Lookups needed: */
227 /* node + proc => ref (transaction) */
228 /* desc + proc => ref (transaction, inc/dec ref) */
229 /* node => refs + procs (proc exit) */
230 int debug_id;
231 struct rb_node rb_node_desc;
232 struct rb_node rb_node_node;
233 struct hlist_node node_entry;
234 struct binder_proc *proc;
235 struct binder_node *node;
236 uint32_t desc;
237 int strong;
238 int weak;
239 struct binder_ref_death *death;
240};
241
242struct binder_buffer {
242 struct list_head entry; /* free and allocated entries by address */
244 struct rb_node rb_node; /* free entry by size or allocated entry */
245 /* by address */
246 unsigned free:1;
247 unsigned allow_user_free:1;
248 unsigned async_transaction:1;
249 unsigned debug_id:29;
250
251 struct binder_transaction *transaction;
252
253 struct binder_node *target_node;
254 size_t data_size;
255 size_t offsets_size;
256 uint8_t data[0];
257};
258
259enum binder_deferred_state {
260 BINDER_DEFERRED_PUT_FILES = 0x01,
261 BINDER_DEFERRED_FLUSH = 0x02,
262 BINDER_DEFERRED_RELEASE = 0x04,
263};
264
265struct binder_proc {
266 struct hlist_node proc_node;
267 struct rb_root threads;
268 struct rb_root nodes;
269 struct rb_root refs_by_desc;
270 struct rb_root refs_by_node;
271 int pid;
272 struct vm_area_struct *vma;
273 struct task_struct *tsk;
274 struct files_struct *files;
275 struct hlist_node deferred_work_node;
276 int deferred_work;
277 void *buffer;
278 ptrdiff_t user_buffer_offset;
279
280 struct list_head buffers;
281 struct rb_root free_buffers;
282 struct rb_root allocated_buffers;
283 size_t free_async_space;
284
285 struct page **pages;
286 size_t buffer_size;
287 uint32_t buffer_free;
288 struct list_head todo;
289 wait_queue_head_t wait;
290 struct binder_stats stats;
291 struct list_head delivered_death;
292 int max_threads;
293 int requested_threads;
294 int requested_threads_started;
295 int ready_threads;
296 long default_priority;
297};
298
299enum {
300 BINDER_LOOPER_STATE_REGISTERED = 0x01,
301 BINDER_LOOPER_STATE_ENTERED = 0x02,
302 BINDER_LOOPER_STATE_EXITED = 0x04,
303 BINDER_LOOPER_STATE_INVALID = 0x08,
304 BINDER_LOOPER_STATE_WAITING = 0x10,
305 BINDER_LOOPER_STATE_NEED_RETURN = 0x20
306};
307
308struct binder_thread {
309 struct binder_proc *proc;
310 struct rb_node rb_node;
311 int pid;
312 int looper;
313 struct binder_transaction *transaction_stack;
314 struct list_head todo;
315 uint32_t return_error; /* Write failed, return error code in read buf */
316 uint32_t return_error2; /* Write failed, return error code in read */
317 /* buffer. Used when sending a reply to a dead process that */
318 /* we are also waiting on */
319 wait_queue_head_t wait;
320 struct binder_stats stats;
321};
322
323struct binder_transaction {
324 int debug_id;
325 struct binder_work work;
326 struct binder_thread *from;
327 struct binder_transaction *from_parent;
328 struct binder_proc *to_proc;
329 struct binder_thread *to_thread;
330 struct binder_transaction *to_parent;
331 unsigned need_reply:1;
332 /* unsigned is_dead:1; */ /* not used at the moment */
333
334 struct binder_buffer *buffer;
335 unsigned int code;
336 unsigned int flags;
337 long priority;
338 long saved_priority;
339 uid_t sender_euid;
340};
341
342static void
343binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
344
345/*
346 * copied from get_unused_fd_flags
347 */
348int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
349{
350 struct files_struct *files = proc->files;
351 int fd, error;
352 struct fdtable *fdt;
353 unsigned long rlim_cur;
354 unsigned long irqs;
355
356 if (files == NULL)
357 return -ESRCH;
358
359 error = -EMFILE;
360 spin_lock(&files->file_lock);
361
362repeat:
363 fdt = files_fdtable(files);
364 fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
365 files->next_fd);
366
367 /*
368 * N.B. For clone tasks sharing a files structure, this test
369 * will limit the total number of files that can be opened.
370 */
371 rlim_cur = 0;
372 if (lock_task_sighand(proc->tsk, &irqs)) {
373 rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
374 unlock_task_sighand(proc->tsk, &irqs);
375 }
376 if (fd >= rlim_cur)
377 goto out;
378
379 /* Do we need to expand the fd array or fd set? */
380 error = expand_files(files, fd);
381 if (error < 0)
382 goto out;
383
384 if (error) {
385 /*
386 * If we needed to expand the fs array we
387 * might have blocked - try again.
388 */
389 error = -EMFILE;
390 goto repeat;
391 }
392
393 FD_SET(fd, fdt->open_fds);
394 if (flags & O_CLOEXEC)
395 FD_SET(fd, fdt->close_on_exec);
396 else
397 FD_CLR(fd, fdt->close_on_exec);
398 files->next_fd = fd + 1;
399#if 1
400 /* Sanity check */
401 if (fdt->fd[fd] != NULL) {
402 printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
403 fdt->fd[fd] = NULL;
404 }
405#endif
406 error = fd;
407
408out:
409 spin_unlock(&files->file_lock);
410 return error;
411}
412
413/*
414 * copied from fd_install
415 */
416static void task_fd_install(
417 struct binder_proc *proc, unsigned int fd, struct file *file)
418{
419 struct files_struct *files = proc->files;
420 struct fdtable *fdt;
421
422 if (files == NULL)
423 return;
424
425 spin_lock(&files->file_lock);
426 fdt = files_fdtable(files);
427 BUG_ON(fdt->fd[fd] != NULL);
428 rcu_assign_pointer(fdt->fd[fd], file);
429 spin_unlock(&files->file_lock);
430}
431
432/*
433 * copied from __put_unused_fd in open.c
434 */
435static void __put_unused_fd(struct files_struct *files, unsigned int fd)
436{
437 struct fdtable *fdt = files_fdtable(files);
438 __FD_CLR(fd, fdt->open_fds);
439 if (fd < files->next_fd)
440 files->next_fd = fd;
441}
442
443/*
444 * copied from sys_close
445 */
446static long task_close_fd(struct binder_proc *proc, unsigned int fd)
447{
448 struct file *filp;
449 struct files_struct *files = proc->files;
450 struct fdtable *fdt;
451 int retval;
452
453 if (files == NULL)
454 return -ESRCH;
455
456 spin_lock(&files->file_lock);
457 fdt = files_fdtable(files);
458 if (fd >= fdt->max_fds)
459 goto out_unlock;
460 filp = fdt->fd[fd];
461 if (!filp)
462 goto out_unlock;
463 rcu_assign_pointer(fdt->fd[fd], NULL);
464 FD_CLR(fd, fdt->close_on_exec);
465 __put_unused_fd(files, fd);
466 spin_unlock(&files->file_lock);
467 retval = filp_close(filp, files);
468
469 /* can't restart close syscall because file table entry was cleared */
470 if (unlikely(retval == -ERESTARTSYS ||
471 retval == -ERESTARTNOINTR ||
472 retval == -ERESTARTNOHAND ||
473 retval == -ERESTART_RESTARTBLOCK))
474 retval = -EINTR;
475
476 return retval;
477
478out_unlock:
479 spin_unlock(&files->file_lock);
480 return -EBADF;
481}
482
483static void binder_set_nice(long nice)
484{
485 long min_nice;
486 if (can_nice(current, nice)) {
487 set_user_nice(current, nice);
488 return;
489 }
490 min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
491 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
492 "binder: %d: nice value %ld not allowed use "
493 "%ld instead\n", current->pid, nice, min_nice);
494 set_user_nice(current, min_nice);
495 if (min_nice < 20)
496 return;
497 binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
498}
499
500static size_t binder_buffer_size(struct binder_proc *proc,
501 struct binder_buffer *buffer)
502{
503 if (list_is_last(&buffer->entry, &proc->buffers))
504 return proc->buffer + proc->buffer_size - (void *)buffer->data;
505 else
506 return (size_t)list_entry(buffer->entry.next,
507 struct binder_buffer, entry) - (size_t)buffer->data;
508}
509
510static void binder_insert_free_buffer(struct binder_proc *proc,
511 struct binder_buffer *new_buffer)
512{
513 struct rb_node **p = &proc->free_buffers.rb_node;
514 struct rb_node *parent = NULL;
515 struct binder_buffer *buffer;
516 size_t buffer_size;
517 size_t new_buffer_size;
518
519 BUG_ON(!new_buffer->free);
520
521 new_buffer_size = binder_buffer_size(proc, new_buffer);
522
523 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
524 "binder: %d: add free buffer, size %zd, "
525 "at %p\n", proc->pid, new_buffer_size, new_buffer);
526
527 while (*p) {
528 parent = *p;
529 buffer = rb_entry(parent, struct binder_buffer, rb_node);
530 BUG_ON(!buffer->free);
531
532 buffer_size = binder_buffer_size(proc, buffer);
533
534 if (new_buffer_size < buffer_size)
535 p = &parent->rb_left;
536 else
537 p = &parent->rb_right;
538 }
539 rb_link_node(&new_buffer->rb_node, parent, p);
540 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
541}
542
543static void binder_insert_allocated_buffer(struct binder_proc *proc,
544 struct binder_buffer *new_buffer)
545{
546 struct rb_node **p = &proc->allocated_buffers.rb_node;
547 struct rb_node *parent = NULL;
548 struct binder_buffer *buffer;
549
550 BUG_ON(new_buffer->free);
551
552 while (*p) {
553 parent = *p;
554 buffer = rb_entry(parent, struct binder_buffer, rb_node);
555 BUG_ON(buffer->free);
556
557 if (new_buffer < buffer)
558 p = &parent->rb_left;
559 else if (new_buffer > buffer)
560 p = &parent->rb_right;
561 else
562 BUG();
563 }
564 rb_link_node(&new_buffer->rb_node, parent, p);
565 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
566}
567
568static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
569 void __user *user_ptr)
570{
571 struct rb_node *n = proc->allocated_buffers.rb_node;
572 struct binder_buffer *buffer;
573 struct binder_buffer *kern_ptr;
574
575 kern_ptr = user_ptr - proc->user_buffer_offset
576 - offsetof(struct binder_buffer, data);
577
578 while (n) {
579 buffer = rb_entry(n, struct binder_buffer, rb_node);
580 BUG_ON(buffer->free);
581
582 if (kern_ptr < buffer)
583 n = n->rb_left;
584 else if (kern_ptr > buffer)
585 n = n->rb_right;
586 else
587 return buffer;
588 }
589 return NULL;
590}
591
592static int binder_update_page_range(struct binder_proc *proc, int allocate,
593 void *start, void *end,
594 struct vm_area_struct *vma)
595{
596 void *page_addr;
597 unsigned long user_page_addr;
598 struct vm_struct tmp_area;
599 struct page **page;
600 struct mm_struct *mm;
601
602 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
603 "binder: %d: %s pages %p-%p\n", proc->pid,
604 allocate ? "allocate" : "free", start, end);
605
606 if (end <= start)
607 return 0;
608
609 if (vma)
610 mm = NULL;
611 else
612 mm = get_task_mm(proc->tsk);
613
614 if (mm) {
615 down_write(&mm->mmap_sem);
616 vma = proc->vma;
617 }
618
619 if (allocate == 0)
620 goto free_range;
621
622 if (vma == NULL) {
623 printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
624 "map pages in userspace, no vma\n", proc->pid);
625 goto err_no_vma;
626 }
627
628 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
629 int ret;
630 struct page **page_array_ptr;
631 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
632
633 BUG_ON(*page);
634 *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
635 if (*page == NULL) {
636 printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
637 "for page at %p\n", proc->pid, page_addr);
638 goto err_alloc_page_failed;
639 }
640 tmp_area.addr = page_addr;
641 tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
642 page_array_ptr = page;
643 ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
644 if (ret) {
645 printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
646 "to map page at %p in kernel\n",
647 proc->pid, page_addr);
648 goto err_map_kernel_failed;
649 }
650 user_page_addr =
651 (uintptr_t)page_addr + proc->user_buffer_offset;
652 ret = vm_insert_page(vma, user_page_addr, page[0]);
653 if (ret) {
654 printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
655 "to map page at %lx in userspace\n",
656 proc->pid, user_page_addr);
657 goto err_vm_insert_page_failed;
658 }
659 /* vm_insert_page does not seem to increment the refcount */
660 }
661 if (mm) {
662 up_write(&mm->mmap_sem);
663 mmput(mm);
664 }
665 return 0;
666
667free_range:
668 for (page_addr = end - PAGE_SIZE; page_addr >= start;
669 page_addr -= PAGE_SIZE) {
670 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
671 if (vma)
672 zap_page_range(vma, (uintptr_t)page_addr +
673 proc->user_buffer_offset, PAGE_SIZE, NULL);
674err_vm_insert_page_failed:
675 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
676err_map_kernel_failed:
677 __free_page(*page);
678 *page = NULL;
679err_alloc_page_failed:
680 ;
681 }
682err_no_vma:
683 if (mm) {
684 up_write(&mm->mmap_sem);
685 mmput(mm);
686 }
687 return -ENOMEM;
688}
689
690static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
691 size_t data_size,
692 size_t offsets_size, int is_async)
693{
694 struct rb_node *n = proc->free_buffers.rb_node;
695 struct binder_buffer *buffer;
696 size_t buffer_size;
697 struct rb_node *best_fit = NULL;
698 void *has_page_addr;
699 void *end_page_addr;
700 size_t size;
701
702 if (proc->vma == NULL) {
703 printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
704 proc->pid);
705 return NULL;
706 }
707
708 size = ALIGN(data_size, sizeof(void *)) +
709 ALIGN(offsets_size, sizeof(void *));
710
711 if (size < data_size || size < offsets_size) {
712 binder_user_error("binder: %d: got transaction with invalid "
713 "size %zd-%zd\n", proc->pid, data_size, offsets_size);
714 return NULL;
715 }
716
717 if (is_async &&
718 proc->free_async_space < size + sizeof(struct binder_buffer)) {
719 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
720 "binder: %d: binder_alloc_buf size %zd"
721 "failed, no async space left\n", proc->pid, size);
722 return NULL;
723 }
724
725 while (n) {
726 buffer = rb_entry(n, struct binder_buffer, rb_node);
727 BUG_ON(!buffer->free);
728 buffer_size = binder_buffer_size(proc, buffer);
729
730 if (size < buffer_size) {
731 best_fit = n;
732 n = n->rb_left;
733 } else if (size > buffer_size)
734 n = n->rb_right;
735 else {
736 best_fit = n;
737 break;
738 }
739 }
740 if (best_fit == NULL) {
741 printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
742 "no address space\n", proc->pid, size);
743 return NULL;
744 }
745 if (n == NULL) {
746 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
747 buffer_size = binder_buffer_size(proc, buffer);
748 }
749
750 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
751 "binder: %d: binder_alloc_buf size %zd got buff"
752 "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
753
754 has_page_addr =
755 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
756 if (n == NULL) {
757 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
758 buffer_size = size; /* no room for other buffers */
759 else
760 buffer_size = size + sizeof(struct binder_buffer);
761 }
762 end_page_addr =
763 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
764 if (end_page_addr > has_page_addr)
765 end_page_addr = has_page_addr;
766 if (binder_update_page_range(proc, 1,
767 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
768 return NULL;
769
770 rb_erase(best_fit, &proc->free_buffers);
771 buffer->free = 0;
772 binder_insert_allocated_buffer(proc, buffer);
773 if (buffer_size != size) {
774 struct binder_buffer *new_buffer = (void *)buffer->data + size;
775 list_add(&new_buffer->entry, &buffer->entry);
776 new_buffer->free = 1;
777 binder_insert_free_buffer(proc, new_buffer);
778 }
779 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
780 "binder: %d: binder_alloc_buf size %zd got "
781 "%p\n", proc->pid, size, buffer);
782 buffer->data_size = data_size;
783 buffer->offsets_size = offsets_size;
784 buffer->async_transaction = is_async;
785 if (is_async) {
786 proc->free_async_space -= size + sizeof(struct binder_buffer);
787 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
788 "binder: %d: binder_alloc_buf size %zd "
789 "async free %zd\n", proc->pid, size,
790 proc->free_async_space);
791 }
792
793 return buffer;
794}
795
796static void *buffer_start_page(struct binder_buffer *buffer)
797{
798 return (void *)((uintptr_t)buffer & PAGE_MASK);
799}
800
801static void *buffer_end_page(struct binder_buffer *buffer)
802{
803 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
804}
805
806static void binder_delete_free_buffer(struct binder_proc *proc,
807 struct binder_buffer *buffer)
808{
809 struct binder_buffer *prev, *next = NULL;
810 int free_page_end = 1;
811 int free_page_start = 1;
812
813 BUG_ON(proc->buffers.next == &buffer->entry);
814 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
815 BUG_ON(!prev->free);
816 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
817 free_page_start = 0;
818 if (buffer_end_page(prev) == buffer_end_page(buffer))
819 free_page_end = 0;
820 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
821 "binder: %d: merge free, buffer %p "
822 "share page with %p\n", proc->pid, buffer, prev);
823 }
824
825 if (!list_is_last(&buffer->entry, &proc->buffers)) {
826 next = list_entry(buffer->entry.next,
827 struct binder_buffer, entry);
828 if (buffer_start_page(next) == buffer_end_page(buffer)) {
829 free_page_end = 0;
830 if (buffer_start_page(next) ==
831 buffer_start_page(buffer))
832 free_page_start = 0;
833 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
834 "binder: %d: merge free, buffer"
835 " %p share page with %p\n", proc->pid,
836 buffer, next);
837 }
838 }
839 list_del(&buffer->entry);
840 if (free_page_start || free_page_end) {
841 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
842 "binder: %d: merge free, buffer %p do "
843 "not share page%s%s with with %p or %p\n",
844 proc->pid, buffer, free_page_start ? "" : " end",
845 free_page_end ? "" : " start", prev, next);
846 binder_update_page_range(proc, 0, free_page_start ?
847 buffer_start_page(buffer) : buffer_end_page(buffer),
848 (free_page_end ? buffer_end_page(buffer) :
849 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
850 }
851}
852
853static void binder_free_buf(struct binder_proc *proc,
854 struct binder_buffer *buffer)
855{
856 size_t size, buffer_size;
857
858 buffer_size = binder_buffer_size(proc, buffer);
859
860 size = ALIGN(buffer->data_size, sizeof(void *)) +
861 ALIGN(buffer->offsets_size, sizeof(void *));
862
863 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
864 "binder: %d: binder_free_buf %p size %zd buffer"
865 "_size %zd\n", proc->pid, buffer, size, buffer_size);
866
867 BUG_ON(buffer->free);
868 BUG_ON(size > buffer_size);
869 BUG_ON(buffer->transaction != NULL);
870 BUG_ON((void *)buffer < proc->buffer);
871 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
872
873 if (buffer->async_transaction) {
874 proc->free_async_space += size + sizeof(struct binder_buffer);
875
876 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
877 "binder: %d: binder_free_buf size %zd "
878 "async free %zd\n", proc->pid, size,
879 proc->free_async_space);
880 }
881
882 binder_update_page_range(proc, 0,
883 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
884 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
885 NULL);
886 rb_erase(&buffer->rb_node, &proc->allocated_buffers);
887 buffer->free = 1;
888 if (!list_is_last(&buffer->entry, &proc->buffers)) {
889 struct binder_buffer *next = list_entry(buffer->entry.next,
890 struct binder_buffer, entry);
891 if (next->free) {
892 rb_erase(&next->rb_node, &proc->free_buffers);
893 binder_delete_free_buffer(proc, next);
894 }
895 }
896 if (proc->buffers.next != &buffer->entry) {
897 struct binder_buffer *prev = list_entry(buffer->entry.prev,
898 struct binder_buffer, entry);
899 if (prev->free) {
900 binder_delete_free_buffer(proc, buffer);
901 rb_erase(&prev->rb_node, &proc->free_buffers);
902 buffer = prev;
903 }
904 }
905 binder_insert_free_buffer(proc, buffer);
906}
907
908static struct binder_node *binder_get_node(struct binder_proc *proc,
909 void __user *ptr)
910{
911 struct rb_node *n = proc->nodes.rb_node;
912 struct binder_node *node;
913
914 while (n) {
915 node = rb_entry(n, struct binder_node, rb_node);
916
917 if (ptr < node->ptr)
918 n = n->rb_left;
919 else if (ptr > node->ptr)
920 n = n->rb_right;
921 else
922 return node;
923 }
924 return NULL;
925}
926
927static struct binder_node *binder_new_node(struct binder_proc *proc,
928 void __user *ptr,
929 void __user *cookie)
930{
931 struct rb_node **p = &proc->nodes.rb_node;
932 struct rb_node *parent = NULL;
933 struct binder_node *node;
934
935 while (*p) {
936 parent = *p;
937 node = rb_entry(parent, struct binder_node, rb_node);
938
939 if (ptr < node->ptr)
940 p = &(*p)->rb_left;
941 else if (ptr > node->ptr)
942 p = &(*p)->rb_right;
943 else
944 return NULL;
945 }
946
947 node = kzalloc(sizeof(*node), GFP_KERNEL);
948 if (node == NULL)
949 return NULL;
950 binder_stats_created(BINDER_STAT_NODE);
951 rb_link_node(&node->rb_node, parent, p);
952 rb_insert_color(&node->rb_node, &proc->nodes);
953 node->debug_id = ++binder_last_id;
954 node->proc = proc;
955 node->ptr = ptr;
956 node->cookie = cookie;
957 node->work.type = BINDER_WORK_NODE;
958 INIT_LIST_HEAD(&node->work.entry);
959 INIT_LIST_HEAD(&node->async_todo);
960 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
961 "binder: %d:%d node %d u%p c%p created\n",
962 proc->pid, current->pid, node->debug_id,
963 node->ptr, node->cookie);
964 return node;
965}
966
967static int binder_inc_node(struct binder_node *node, int strong, int internal,
968 struct list_head *target_list)
969{
970 if (strong) {
971 if (internal) {
972 if (target_list == NULL &&
973 node->internal_strong_refs == 0 &&
974 !(node == binder_context_mgr_node &&
975 node->has_strong_ref)) {
976 printk(KERN_ERR "binder: invalid inc strong "
977 "node for %d\n", node->debug_id);
978 return -EINVAL;
979 }
980 node->internal_strong_refs++;
981 } else
982 node->local_strong_refs++;
983 if (!node->has_strong_ref && target_list) {
984 list_del_init(&node->work.entry);
985 list_add_tail(&node->work.entry, target_list);
986 }
987 } else {
988 if (!internal)
989 node->local_weak_refs++;
990 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
991 if (target_list == NULL) {
992 printk(KERN_ERR "binder: invalid inc weak node "
993 "for %d\n", node->debug_id);
994 return -EINVAL;
995 }
996 list_add_tail(&node->work.entry, target_list);
997 }
998 }
999 return 0;
1000}
1001
1002static int binder_dec_node(struct binder_node *node, int strong, int internal)
1003{
1004 if (strong) {
1005 if (internal)
1006 node->internal_strong_refs--;
1007 else
1008 node->local_strong_refs--;
1009 if (node->local_strong_refs || node->internal_strong_refs)
1010 return 0;
1011 } else {
1012 if (!internal)
1013 node->local_weak_refs--;
1014 if (node->local_weak_refs || !hlist_empty(&node->refs))
1015 return 0;
1016 }
1017 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
1018 if (list_empty(&node->work.entry)) {
1019 list_add_tail(&node->work.entry, &node->proc->todo);
1020 wake_up_interruptible(&node->proc->wait);
1021 }
1022 } else {
1023 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1024 !node->local_weak_refs) {
1025 list_del_init(&node->work.entry);
1026 if (node->proc) {
1027 rb_erase(&node->rb_node, &node->proc->nodes);
1028 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1029 "binder: refless node %d deleted\n",
1030 node->debug_id);
1031 } else {
1032 hlist_del(&node->dead_node);
1033 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1034 "binder: dead node %d deleted\n",
1035 node->debug_id);
1036 }
1037 kfree(node);
1038 binder_stats_deleted(BINDER_STAT_NODE);
1039 }
1040 }
1041
1042 return 0;
1043}
1044
1045
1046static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1047 uint32_t desc)
1048{
1049 struct rb_node *n = proc->refs_by_desc.rb_node;
1050 struct binder_ref *ref;
1051
1052 while (n) {
1053 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1054
1055 if (desc < ref->desc)
1056 n = n->rb_left;
1057 else if (desc > ref->desc)
1058 n = n->rb_right;
1059 else
1060 return ref;
1061 }
1062 return NULL;
1063}
1064
1065static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1066 struct binder_node *node)
1067{
1068 struct rb_node *n;
1069 struct rb_node **p = &proc->refs_by_node.rb_node;
1070 struct rb_node *parent = NULL;
1071 struct binder_ref *ref, *new_ref;
1072
1073 while (*p) {
1074 parent = *p;
1075 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1076
1077 if (node < ref->node)
1078 p = &(*p)->rb_left;
1079 else if (node > ref->node)
1080 p = &(*p)->rb_right;
1081 else
1082 return ref;
1083 }
1084 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1085 if (new_ref == NULL)
1086 return NULL;
1087 binder_stats_created(BINDER_STAT_REF);
1088 new_ref->debug_id = ++binder_last_id;
1089 new_ref->proc = proc;
1090 new_ref->node = node;
1091 rb_link_node(&new_ref->rb_node_node, parent, p);
1092 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1093
1094 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
1095 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1096 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1097 if (ref->desc > new_ref->desc)
1098 break;
1099 new_ref->desc = ref->desc + 1;
1100 }
1101
1102 p = &proc->refs_by_desc.rb_node;
1103 while (*p) {
1104 parent = *p;
1105 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1106
1107 if (new_ref->desc < ref->desc)
1108 p = &(*p)->rb_left;
1109 else if (new_ref->desc > ref->desc)
1110 p = &(*p)->rb_right;
1111 else
1112 BUG();
1113 }
1114 rb_link_node(&new_ref->rb_node_desc, parent, p);
1115 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1116 if (node) {
1117 hlist_add_head(&new_ref->node_entry, &node->refs);
1118
1119 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1120 "binder: %d new ref %d desc %d for "
1121 "node %d\n", proc->pid, new_ref->debug_id,
1122 new_ref->desc, node->debug_id);
1123 } else {
1124 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1125 "binder: %d new ref %d desc %d for "
1126 "dead node\n", proc->pid, new_ref->debug_id,
1127 new_ref->desc);
1128 }
1129 return new_ref;
1130}
1131
1132static void binder_delete_ref(struct binder_ref *ref)
1133{
1134 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1135 "binder: %d delete ref %d desc %d for "
1136 "node %d\n", ref->proc->pid, ref->debug_id,
1137 ref->desc, ref->node->debug_id);
1138
1139 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1140 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1141 if (ref->strong)
1142 binder_dec_node(ref->node, 1, 1);
1143 hlist_del(&ref->node_entry);
1144 binder_dec_node(ref->node, 0, 1);
1145 if (ref->death) {
1146 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1147 "binder: %d delete ref %d desc %d "
1148 "has death notification\n", ref->proc->pid,
1149 ref->debug_id, ref->desc);
1150 list_del(&ref->death->work.entry);
1151 kfree(ref->death);
1152 binder_stats_deleted(BINDER_STAT_DEATH);
1153 }
1154 kfree(ref);
1155 binder_stats_deleted(BINDER_STAT_REF);
1156}
1157
1158static int binder_inc_ref(struct binder_ref *ref, int strong,
1159 struct list_head *target_list)
1160{
1161 int ret;
1162 if (strong) {
1163 if (ref->strong == 0) {
1164 ret = binder_inc_node(ref->node, 1, 1, target_list);
1165 if (ret)
1166 return ret;
1167 }
1168 ref->strong++;
1169 } else {
1170 if (ref->weak == 0) {
1171 ret = binder_inc_node(ref->node, 0, 1, target_list);
1172 if (ret)
1173 return ret;
1174 }
1175 ref->weak++;
1176 }
1177 return 0;
1178}
1179
1180
1181static int binder_dec_ref(struct binder_ref *ref, int strong)
1182{
1183 if (strong) {
1184 if (ref->strong == 0) {
1185 binder_user_error("binder: %d invalid dec strong, "
1186 "ref %d desc %d s %d w %d\n",
1187 ref->proc->pid, ref->debug_id,
1188 ref->desc, ref->strong, ref->weak);
1189 return -EINVAL;
1190 }
1191 ref->strong--;
1192 if (ref->strong == 0) {
1193 int ret;
1194 ret = binder_dec_node(ref->node, strong, 1);
1195 if (ret)
1196 return ret;
1197 }
1198 } else {
1199 if (ref->weak == 0) {
1200 binder_user_error("binder: %d invalid dec weak, "
1201 "ref %d desc %d s %d w %d\n",
1202 ref->proc->pid, ref->debug_id,
1203 ref->desc, ref->strong, ref->weak);
1204 return -EINVAL;
1205 }
1206 ref->weak--;
1207 }
1208 if (ref->strong == 0 && ref->weak == 0)
1209 binder_delete_ref(ref);
1210 return 0;
1211}
1212
1213static void binder_pop_transaction(struct binder_thread *target_thread,
1214 struct binder_transaction *t)
1215{
1216 if (target_thread) {
1217 BUG_ON(target_thread->transaction_stack != t);
1218 BUG_ON(target_thread->transaction_stack->from != target_thread);
1219 target_thread->transaction_stack =
1220 target_thread->transaction_stack->from_parent;
1221 t->from = NULL;
1222 }
1223 t->need_reply = 0;
1224 if (t->buffer)
1225 t->buffer->transaction = NULL;
1226 kfree(t);
1227 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1228}
1229
1230static void binder_send_failed_reply(struct binder_transaction *t,
1231 uint32_t error_code)
1232{
1233 struct binder_thread *target_thread;
1234 BUG_ON(t->flags & TF_ONE_WAY);
1235 while (1) {
1236 target_thread = t->from;
1237 if (target_thread) {
1238 if (target_thread->return_error != BR_OK &&
1239 target_thread->return_error2 == BR_OK) {
1240 target_thread->return_error2 =
1241 target_thread->return_error;
1242 target_thread->return_error = BR_OK;
1243 }
1244 if (target_thread->return_error == BR_OK) {
1245 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1246 "binder: send failed reply for "
1247 "transaction %d to %d:%d\n",
1248 t->debug_id, target_thread->proc->pid,
1249 target_thread->pid);
1250
1251 binder_pop_transaction(target_thread, t);
1252 target_thread->return_error = error_code;
1253 wake_up_interruptible(&target_thread->wait);
1254 } else {
1255 printk(KERN_ERR "binder: reply failed, target "
1256 "thread, %d:%d, has error code %d "
1257 "already\n", target_thread->proc->pid,
1258 target_thread->pid,
1259 target_thread->return_error);
1260 }
1261 return;
1262 } else {
1263 struct binder_transaction *next = t->from_parent;
1264
1265 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1266 "binder: send failed reply "
1267 "for transaction %d, target dead\n",
1268 t->debug_id);
1269
1270 binder_pop_transaction(target_thread, t);
1271 if (next == NULL) {
1272 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1273 "binder: reply failed,"
1274 " no target thread at root\n");
1275 return;
1276 }
1277 t = next;
1278 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1279 "binder: reply failed, no target "
1280 "thread -- retry %d\n", t->debug_id);
1281 }
1282 }
1283}
1284
1285static void binder_transaction_buffer_release(struct binder_proc *proc,
1286 struct binder_buffer *buffer,
1287 size_t *failed_at)
1288{
1289 size_t *offp, *off_end;
1290 int debug_id = buffer->debug_id;
1291
1292 binder_debug(BINDER_DEBUG_TRANSACTION,
1293 "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
1294 proc->pid, buffer->debug_id,
1295 buffer->data_size, buffer->offsets_size, failed_at);
1296
1297 if (buffer->target_node)
1298 binder_dec_node(buffer->target_node, 1, 0);
1299
1300 offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
1301 if (failed_at)
1302 off_end = failed_at;
1303 else
1304 off_end = (void *)offp + buffer->offsets_size;
1305 for (; offp < off_end; offp++) {
1306 struct flat_binder_object *fp;
1307 if (*offp > buffer->data_size - sizeof(*fp) ||
1308 buffer->data_size < sizeof(*fp) ||
1309 !IS_ALIGNED(*offp, sizeof(void *))) {
1310 printk(KERN_ERR "binder: transaction release %d bad"
1311 "offset %zd, size %zd\n", debug_id,
1312 *offp, buffer->data_size);
1313 continue;
1314 }
1315 fp = (struct flat_binder_object *)(buffer->data + *offp);
1316 switch (fp->type) {
1317 case BINDER_TYPE_BINDER:
1318 case BINDER_TYPE_WEAK_BINDER: {
1319 struct binder_node *node = binder_get_node(proc, fp->binder);
1320 if (node == NULL) {
1321 printk(KERN_ERR "binder: transaction release %d"
1322 " bad node %p\n", debug_id, fp->binder);
1323 break;
1324 }
1325 binder_debug(BINDER_DEBUG_TRANSACTION,
1326 " node %d u%p\n",
1327 node->debug_id, node->ptr);
1328 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
1329 } break;
1330 case BINDER_TYPE_HANDLE:
1331 case BINDER_TYPE_WEAK_HANDLE: {
1332 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1333 if (ref == NULL) {
1334 printk(KERN_ERR "binder: transaction release %d"
1335 " bad handle %ld\n", debug_id,
1336 fp->handle);
1337 break;
1338 }
1339 binder_debug(BINDER_DEBUG_TRANSACTION,
1340 " ref %d desc %d (node %d)\n",
1341 ref->debug_id, ref->desc, ref->node->debug_id);
1342 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
1343 } break;
1344
1345 case BINDER_TYPE_FD:
1346 binder_debug(BINDER_DEBUG_TRANSACTION,
1347 " fd %ld\n", fp->handle);
1348 if (failed_at)
1349 task_close_fd(proc, fp->handle);
1350 break;
1351
1352 default:
1353 printk(KERN_ERR "binder: transaction release %d bad "
1354 "object type %lx\n", debug_id, fp->type);
1355 break;
1356 }
1357 }
1358}
1359
1360static void binder_transaction(struct binder_proc *proc,
1361 struct binder_thread *thread,
1362 struct binder_transaction_data *tr, int reply)
1363{
1364 struct binder_transaction *t;
1365 struct binder_work *tcomplete;
1366 size_t *offp, *off_end;
1367 struct binder_proc *target_proc;
1368 struct binder_thread *target_thread = NULL;
1369 struct binder_node *target_node = NULL;
1370 struct list_head *target_list;
1371 wait_queue_head_t *target_wait;
1372 struct binder_transaction *in_reply_to = NULL;
1373 struct binder_transaction_log_entry *e;
1374 uint32_t return_error;
1375
1376 e = binder_transaction_log_add(&binder_transaction_log);
1377 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1378 e->from_proc = proc->pid;
1379 e->from_thread = thread->pid;
1380 e->target_handle = tr->target.handle;
1381 e->data_size = tr->data_size;
1382 e->offsets_size = tr->offsets_size;
1383
1384 if (reply) {
1385 in_reply_to = thread->transaction_stack;
1386 if (in_reply_to == NULL) {
1387 binder_user_error("binder: %d:%d got reply transaction "
1388 "with no transaction stack\n",
1389 proc->pid, thread->pid);
1390 return_error = BR_FAILED_REPLY;
1391 goto err_empty_call_stack;
1392 }
1393 binder_set_nice(in_reply_to->saved_priority);
1394 if (in_reply_to->to_thread != thread) {
1395 binder_user_error("binder: %d:%d got reply transaction "
1396 "with bad transaction stack,"
1397 " transaction %d has target %d:%d\n",
1398 proc->pid, thread->pid, in_reply_to->debug_id,
1399 in_reply_to->to_proc ?
1400 in_reply_to->to_proc->pid : 0,
1401 in_reply_to->to_thread ?
1402 in_reply_to->to_thread->pid : 0);
1403 return_error = BR_FAILED_REPLY;
1404 in_reply_to = NULL;
1405 goto err_bad_call_stack;
1406 }
1407 thread->transaction_stack = in_reply_to->to_parent;
1408 target_thread = in_reply_to->from;
1409 if (target_thread == NULL) {
1410 return_error = BR_DEAD_REPLY;
1411 goto err_dead_binder;
1412 }
1413 if (target_thread->transaction_stack != in_reply_to) {
1414 binder_user_error("binder: %d:%d got reply transaction "
1415 "with bad target transaction stack %d, "
1416 "expected %d\n",
1417 proc->pid, thread->pid,
1418 target_thread->transaction_stack ?
1419 target_thread->transaction_stack->debug_id : 0,
1420 in_reply_to->debug_id);
1421 return_error = BR_FAILED_REPLY;
1422 in_reply_to = NULL;
1423 target_thread = NULL;
1424 goto err_dead_binder;
1425 }
1426 target_proc = target_thread->proc;
1427 } else {
1428 if (tr->target.handle) {
1429 struct binder_ref *ref;
1430 ref = binder_get_ref(proc, tr->target.handle);
1431 if (ref == NULL) {
1432 binder_user_error("binder: %d:%d got "
1433 "transaction to invalid handle\n",
1434 proc->pid, thread->pid);
1435 return_error = BR_FAILED_REPLY;
1436 goto err_invalid_target_handle;
1437 }
1438 target_node = ref->node;
1439 } else {
1440 target_node = binder_context_mgr_node;
1441 if (target_node == NULL) {
1442 return_error = BR_DEAD_REPLY;
1443 goto err_no_context_mgr_node;
1444 }
1445 }
1446 e->to_node = target_node->debug_id;
1447 target_proc = target_node->proc;
1448 if (target_proc == NULL) {
1449 return_error = BR_DEAD_REPLY;
1450 goto err_dead_binder;
1451 }
1452 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1453 struct binder_transaction *tmp;
1454 tmp = thread->transaction_stack;
1455 if (tmp->to_thread != thread) {
1456 binder_user_error("binder: %d:%d got new "
1457 "transaction with bad transaction stack"
1458 ", transaction %d has target %d:%d\n",
1459 proc->pid, thread->pid, tmp->debug_id,
1460 tmp->to_proc ? tmp->to_proc->pid : 0,
1461 tmp->to_thread ?
1462 tmp->to_thread->pid : 0);
1463 return_error = BR_FAILED_REPLY;
1464 goto err_bad_call_stack;
1465 }
1466 while (tmp) {
1467 if (tmp->from && tmp->from->proc == target_proc)
1468 target_thread = tmp->from;
1469 tmp = tmp->from_parent;
1470 }
1471 }
1472 }
1473 if (target_thread) {
1474 e->to_thread = target_thread->pid;
1475 target_list = &target_thread->todo;
1476 target_wait = &target_thread->wait;
1477 } else {
1478 target_list = &target_proc->todo;
1479 target_wait = &target_proc->wait;
1480 }
1481 e->to_proc = target_proc->pid;
1482
1483 /* TODO: reuse incoming transaction for reply */
1484 t = kzalloc(sizeof(*t), GFP_KERNEL);
1485 if (t == NULL) {
1486 return_error = BR_FAILED_REPLY;
1487 goto err_alloc_t_failed;
1488 }
1489 binder_stats_created(BINDER_STAT_TRANSACTION);
1490
1491 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1492 if (tcomplete == NULL) {
1493 return_error = BR_FAILED_REPLY;
1494 goto err_alloc_tcomplete_failed;
1495 }
1496 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1497
1498 t->debug_id = ++binder_last_id;
1499 e->debug_id = t->debug_id;
1500
1501 if (reply)
1502 binder_debug(BINDER_DEBUG_TRANSACTION,
1503 "binder: %d:%d BC_REPLY %d -> %d:%d, "
1504 "data %p-%p size %zd-%zd\n",
1505 proc->pid, thread->pid, t->debug_id,
1506 target_proc->pid, target_thread->pid,
1507 tr->data.ptr.buffer, tr->data.ptr.offsets,
1508 tr->data_size, tr->offsets_size);
1509 else
1510 binder_debug(BINDER_DEBUG_TRANSACTION,
1511 "binder: %d:%d BC_TRANSACTION %d -> "
1512 "%d - node %d, data %p-%p size %zd-%zd\n",
1513 proc->pid, thread->pid, t->debug_id,
1514 target_proc->pid, target_node->debug_id,
1515 tr->data.ptr.buffer, tr->data.ptr.offsets,
1516 tr->data_size, tr->offsets_size);
1517
1518 if (!reply && !(tr->flags & TF_ONE_WAY))
1519 t->from = thread;
1520 else
1521 t->from = NULL;
1522 t->sender_euid = proc->tsk->cred->euid;
1523 t->to_proc = target_proc;
1524 t->to_thread = target_thread;
1525 t->code = tr->code;
1526 t->flags = tr->flags;
1527 t->priority = task_nice(current);
1528 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1529 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1530 if (t->buffer == NULL) {
1531 return_error = BR_FAILED_REPLY;
1532 goto err_binder_alloc_buf_failed;
1533 }
1534 t->buffer->allow_user_free = 0;
1535 t->buffer->debug_id = t->debug_id;
1536 t->buffer->transaction = t;
1537 t->buffer->target_node = target_node;
1538 if (target_node)
1539 binder_inc_node(target_node, 1, 0, NULL);
1540
1541 offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
1542
1543 if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
1544 binder_user_error("binder: %d:%d got transaction with invalid "
1545 "data ptr\n", proc->pid, thread->pid);
1546 return_error = BR_FAILED_REPLY;
1547 goto err_copy_data_failed;
1548 }
1549 if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
1550 binder_user_error("binder: %d:%d got transaction with invalid "
1551 "offsets ptr\n", proc->pid, thread->pid);
1552 return_error = BR_FAILED_REPLY;
1553 goto err_copy_data_failed;
1554 }
1555 if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
1556 binder_user_error("binder: %d:%d got transaction with "
1557 "invalid offsets size, %zd\n",
1558 proc->pid, thread->pid, tr->offsets_size);
1559 return_error = BR_FAILED_REPLY;
1560 goto err_bad_offset;
1561 }
1562 off_end = (void *)offp + tr->offsets_size;
1563 for (; offp < off_end; offp++) {
1564 struct flat_binder_object *fp;
1565 if (*offp > t->buffer->data_size - sizeof(*fp) ||
1566 t->buffer->data_size < sizeof(*fp) ||
1567 !IS_ALIGNED(*offp, sizeof(void *))) {
1568 binder_user_error("binder: %d:%d got transaction with "
1569 "invalid offset, %zd\n",
1570 proc->pid, thread->pid, *offp);
1571 return_error = BR_FAILED_REPLY;
1572 goto err_bad_offset;
1573 }
1574 fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1575 switch (fp->type) {
1576 case BINDER_TYPE_BINDER:
1577 case BINDER_TYPE_WEAK_BINDER: {
1578 struct binder_ref *ref;
1579 struct binder_node *node = binder_get_node(proc, fp->binder);
1580 if (node == NULL) {
1581 node = binder_new_node(proc, fp->binder, fp->cookie);
1582 if (node == NULL) {
1583 return_error = BR_FAILED_REPLY;
1584 goto err_binder_new_node_failed;
1585 }
1586 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1587 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1588 }
1589 if (fp->cookie != node->cookie) {
1590 binder_user_error("binder: %d:%d sending u%p "
1591 "node %d, cookie mismatch %p != %p\n",
1592 proc->pid, thread->pid,
1593 fp->binder, node->debug_id,
1594 fp->cookie, node->cookie);
1595 goto err_binder_get_ref_for_node_failed;
1596 }
1597 ref = binder_get_ref_for_node(target_proc, node);
1598 if (ref == NULL) {
1599 return_error = BR_FAILED_REPLY;
1600 goto err_binder_get_ref_for_node_failed;
1601 }
1602 if (fp->type == BINDER_TYPE_BINDER)
1603 fp->type = BINDER_TYPE_HANDLE;
1604 else
1605 fp->type = BINDER_TYPE_WEAK_HANDLE;
1606 fp->handle = ref->desc;
1607 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1608 &thread->todo);
1609
1610 binder_debug(BINDER_DEBUG_TRANSACTION,
1611 " node %d u%p -> ref %d desc %d\n",
1612 node->debug_id, node->ptr, ref->debug_id,
1613 ref->desc);
1614 } break;
1615 case BINDER_TYPE_HANDLE:
1616 case BINDER_TYPE_WEAK_HANDLE: {
1617 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1618 if (ref == NULL) {
1619 binder_user_error("binder: %d:%d got "
1620 "transaction with invalid "
1621 "handle, %ld\n", proc->pid,
1622 thread->pid, fp->handle);
1623 return_error = BR_FAILED_REPLY;
1624 goto err_binder_get_ref_failed;
1625 }
1626 if (ref->node->proc == target_proc) {
1627 if (fp->type == BINDER_TYPE_HANDLE)
1628 fp->type = BINDER_TYPE_BINDER;
1629 else
1630 fp->type = BINDER_TYPE_WEAK_BINDER;
1631 fp->binder = ref->node->ptr;
1632 fp->cookie = ref->node->cookie;
1633 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1634 binder_debug(BINDER_DEBUG_TRANSACTION,
1635 " ref %d desc %d -> node %d u%p\n",
1636 ref->debug_id, ref->desc, ref->node->debug_id,
1637 ref->node->ptr);
1638 } else {
1639 struct binder_ref *new_ref;
1640 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1641 if (new_ref == NULL) {
1642 return_error = BR_FAILED_REPLY;
1643 goto err_binder_get_ref_for_node_failed;
1644 }
1645 fp->handle = new_ref->desc;
1646 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1647 binder_debug(BINDER_DEBUG_TRANSACTION,
1648 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1649 ref->debug_id, ref->desc, new_ref->debug_id,
1650 new_ref->desc, ref->node->debug_id);
1651 }
1652 } break;
1653
1654 case BINDER_TYPE_FD: {
1655 int target_fd;
1656 struct file *file;
1657
1658 if (reply) {
1659 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1660 binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
1661 proc->pid, thread->pid, fp->handle);
1662 return_error = BR_FAILED_REPLY;
1663 goto err_fd_not_allowed;
1664 }
1665 } else if (!target_node->accept_fds) {
1666 binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
1667 proc->pid, thread->pid, fp->handle);
1668 return_error = BR_FAILED_REPLY;
1669 goto err_fd_not_allowed;
1670 }
1671
1672 file = fget(fp->handle);
1673 if (file == NULL) {
1674 binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
1675 proc->pid, thread->pid, fp->handle);
1676 return_error = BR_FAILED_REPLY;
1677 goto err_fget_failed;
1678 }
1679 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1680 if (target_fd < 0) {
1681 fput(file);
1682 return_error = BR_FAILED_REPLY;
1683 goto err_get_unused_fd_failed;
1684 }
1685 task_fd_install(target_proc, target_fd, file);
1686 binder_debug(BINDER_DEBUG_TRANSACTION,
1687 " fd %ld -> %d\n", fp->handle, target_fd);
1688 /* TODO: fput? */
1689 fp->handle = target_fd;
1690 } break;
1691
1692 default:
1693			binder_user_error("binder: %d:%d got transaction with invalid object type, %lx\n",
1695				proc->pid, thread->pid, fp->type);
1696 return_error = BR_FAILED_REPLY;
1697 goto err_bad_object_type;
1698 }
1699 }
1700 if (reply) {
1701 BUG_ON(t->buffer->async_transaction != 0);
1702 binder_pop_transaction(target_thread, in_reply_to);
1703 } else if (!(t->flags & TF_ONE_WAY)) {
1704 BUG_ON(t->buffer->async_transaction != 0);
1705 t->need_reply = 1;
1706 t->from_parent = thread->transaction_stack;
1707 thread->transaction_stack = t;
1708 } else {
1709 BUG_ON(target_node == NULL);
1710 BUG_ON(t->buffer->async_transaction != 1);
1711 if (target_node->has_async_transaction) {
1712 target_list = &target_node->async_todo;
1713 target_wait = NULL;
1714 } else
1715 target_node->has_async_transaction = 1;
1716 }
1717 t->work.type = BINDER_WORK_TRANSACTION;
1718 list_add_tail(&t->work.entry, target_list);
1719 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1720 list_add_tail(&tcomplete->entry, &thread->todo);
1721 if (target_wait)
1722 wake_up_interruptible(target_wait);
1723 return;
1724
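/*
 * Error unwind: the labels below fall through in reverse order of setup,
 * so each entry point releases only what was allocated before the
 * failure; all paths log the failed transaction and propagate
 * return_error back to the sender.
 */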
1725err_get_unused_fd_failed:
1726err_fget_failed:
1727err_fd_not_allowed:
1728err_binder_get_ref_for_node_failed:
1729err_binder_get_ref_failed:
1730err_binder_new_node_failed:
1731err_bad_object_type:
1732err_bad_offset:
1733err_copy_data_failed:
1734 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1735 t->buffer->transaction = NULL;
1736 binder_free_buf(target_proc, t->buffer);
1737err_binder_alloc_buf_failed:
1738 kfree(tcomplete);
1739 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1740err_alloc_tcomplete_failed:
1741 kfree(t);
1742 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1743err_alloc_t_failed:
1744err_bad_call_stack:
1745err_empty_call_stack:
1746err_dead_binder:
1747err_invalid_target_handle:
1748err_no_context_mgr_node:
1749 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1750 "binder: %d:%d transaction failed %d, size %zd-%zd\n",
1751 proc->pid, thread->pid, return_error,
1752 tr->data_size, tr->offsets_size);
1753
1754 {
1755 struct binder_transaction_log_entry *fe;
1756 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1757 *fe = *e;
1758 }
1759
1760 BUG_ON(thread->return_error != BR_OK);
1761 if (in_reply_to) {
1762 thread->return_error = BR_TRANSACTION_COMPLETE;
1763 binder_send_failed_reply(in_reply_to, return_error);
1764 } else
1765 thread->return_error = return_error;
1766}
1767
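/*
 * binder_thread_write() - consume BC_* commands from the user write
 * buffer.  *consumed is advanced after each fully handled command so an
 * interrupted caller can resume where it left off; returns 0, or a
 * -errno value on a malformed buffer.
 */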
1768int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
1769 void __user *buffer, int size, signed long *consumed)
1770{
1771 uint32_t cmd;
1772 void __user *ptr = buffer + *consumed;
1773 void __user *end = buffer + size;
1774
1775 while (ptr < end && thread->return_error == BR_OK) {
1776 if (get_user(cmd, (uint32_t __user *)ptr))
1777 return -EFAULT;
1778 ptr += sizeof(uint32_t);
1779 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1780 binder_stats.bc[_IOC_NR(cmd)]++;
1781 proc->stats.bc[_IOC_NR(cmd)]++;
1782 thread->stats.bc[_IOC_NR(cmd)]++;
1783 }
1784 switch (cmd) {
1785 case BC_INCREFS:
1786 case BC_ACQUIRE:
1787 case BC_RELEASE:
1788 case BC_DECREFS: {
1789 uint32_t target;
1790 struct binder_ref *ref;
1791 const char *debug_string;
1792
1793 if (get_user(target, (uint32_t __user *)ptr))
1794 return -EFAULT;
1795 ptr += sizeof(uint32_t);
1796 if (target == 0 && binder_context_mgr_node &&
1797 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1798 ref = binder_get_ref_for_node(proc,
1799 binder_context_mgr_node);
1800				if (ref && ref->desc != target) {
1801					binder_user_error("binder: %d:%d tried to acquire reference to desc 0, got %d instead\n",
1805						proc->pid, thread->pid,
1806						ref->desc);
1807 }
1808 } else
1809 ref = binder_get_ref(proc, target);
1810 if (ref == NULL) {
1811				binder_user_error("binder: %d:%d refcount change on invalid ref %d\n",
1813					proc->pid, thread->pid, target);
1814 break;
1815 }
1816 switch (cmd) {
1817 case BC_INCREFS:
1818 debug_string = "IncRefs";
1819 binder_inc_ref(ref, 0, NULL);
1820 break;
1821 case BC_ACQUIRE:
1822 debug_string = "Acquire";
1823 binder_inc_ref(ref, 1, NULL);
1824 break;
1825 case BC_RELEASE:
1826 debug_string = "Release";
1827 binder_dec_ref(ref, 1);
1828 break;
1829 case BC_DECREFS:
1830 default:
1831 debug_string = "DecRefs";
1832 binder_dec_ref(ref, 0);
1833 break;
1834 }
1835 binder_debug(BINDER_DEBUG_USER_REFS,
1836 "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
1837 proc->pid, thread->pid, debug_string, ref->debug_id,
1838 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1839 break;
1840 }
1841 case BC_INCREFS_DONE:
1842 case BC_ACQUIRE_DONE: {
1843 void __user *node_ptr;
1844 void *cookie;
1845 struct binder_node *node;
1846
1847 if (get_user(node_ptr, (void * __user *)ptr))
1848 return -EFAULT;
1849 ptr += sizeof(void *);
1850 if (get_user(cookie, (void * __user *)ptr))
1851 return -EFAULT;
1852 ptr += sizeof(void *);
1853 node = binder_get_node(proc, node_ptr);
1854 if (node == NULL) {
1855 binder_user_error("binder: %d:%d "
1856 "%s u%p no match\n",
1857 proc->pid, thread->pid,
1858 cmd == BC_INCREFS_DONE ?
1859 "BC_INCREFS_DONE" :
1860 "BC_ACQUIRE_DONE",
1861 node_ptr);
1862 break;
1863 }
1864 if (cookie != node->cookie) {
1865 binder_user_error("binder: %d:%d %s u%p node %d"
1866 " cookie mismatch %p != %p\n",
1867 proc->pid, thread->pid,
1868 cmd == BC_INCREFS_DONE ?
1869 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1870 node_ptr, node->debug_id,
1871 cookie, node->cookie);
1872 break;
1873 }
1874 if (cmd == BC_ACQUIRE_DONE) {
1875 if (node->pending_strong_ref == 0) {
1876 binder_user_error("binder: %d:%d "
1877 "BC_ACQUIRE_DONE node %d has "
1878 "no pending acquire request\n",
1879 proc->pid, thread->pid,
1880 node->debug_id);
1881 break;
1882 }
1883 node->pending_strong_ref = 0;
1884 } else {
1885 if (node->pending_weak_ref == 0) {
1886 binder_user_error("binder: %d:%d "
1887 "BC_INCREFS_DONE node %d has "
1888 "no pending increfs request\n",
1889 proc->pid, thread->pid,
1890 node->debug_id);
1891 break;
1892 }
1893 node->pending_weak_ref = 0;
1894 }
1895 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1896 binder_debug(BINDER_DEBUG_USER_REFS,
1897 "binder: %d:%d %s node %d ls %d lw %d\n",
1898 proc->pid, thread->pid,
1899 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1900 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1901 break;
1902 }
1903 case BC_ATTEMPT_ACQUIRE:
1904 printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
1905 return -EINVAL;
1906 case BC_ACQUIRE_RESULT:
1907 printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
1908 return -EINVAL;
1909
1910 case BC_FREE_BUFFER: {
1911 void __user *data_ptr;
1912 struct binder_buffer *buffer;
1913
1914 if (get_user(data_ptr, (void * __user *)ptr))
1915 return -EFAULT;
1916 ptr += sizeof(void *);
1917
1918 buffer = binder_buffer_lookup(proc, data_ptr);
1919 if (buffer == NULL) {
1920 binder_user_error("binder: %d:%d "
1921 "BC_FREE_BUFFER u%p no match\n",
1922 proc->pid, thread->pid, data_ptr);
1923 break;
1924 }
1925 if (!buffer->allow_user_free) {
1926 binder_user_error("binder: %d:%d "
1927 "BC_FREE_BUFFER u%p matched "
1928 "unreturned buffer\n",
1929 proc->pid, thread->pid, data_ptr);
1930 break;
1931 }
1932 binder_debug(BINDER_DEBUG_FREE_BUFFER,
1933 "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
1934 proc->pid, thread->pid, data_ptr, buffer->debug_id,
1935 buffer->transaction ? "active" : "finished");
1936
1937 if (buffer->transaction) {
1938 buffer->transaction->buffer = NULL;
1939 buffer->transaction = NULL;
1940 }
1941 if (buffer->async_transaction && buffer->target_node) {
1942 BUG_ON(!buffer->target_node->has_async_transaction);
1943 if (list_empty(&buffer->target_node->async_todo))
1944 buffer->target_node->has_async_transaction = 0;
1945 else
1946 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1947 }
1948 binder_transaction_buffer_release(proc, buffer, NULL);
1949 binder_free_buf(proc, buffer);
1950 break;
1951 }
1952
1953 case BC_TRANSACTION:
1954 case BC_REPLY: {
1955 struct binder_transaction_data tr;
1956
1957 if (copy_from_user(&tr, ptr, sizeof(tr)))
1958 return -EFAULT;
1959 ptr += sizeof(tr);
1960 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1961 break;
1962 }
1963
1964 case BC_REGISTER_LOOPER:
1965 binder_debug(BINDER_DEBUG_THREADS,
1966 "binder: %d:%d BC_REGISTER_LOOPER\n",
1967 proc->pid, thread->pid);
1968 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1969 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1970 binder_user_error("binder: %d:%d ERROR:"
1971 " BC_REGISTER_LOOPER called "
1972 "after BC_ENTER_LOOPER\n",
1973 proc->pid, thread->pid);
1974 } else if (proc->requested_threads == 0) {
1975 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1976 binder_user_error("binder: %d:%d ERROR:"
1977 " BC_REGISTER_LOOPER called "
1978 "without request\n",
1979 proc->pid, thread->pid);
1980 } else {
1981 proc->requested_threads--;
1982 proc->requested_threads_started++;
1983 }
1984 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1985 break;
1986 case BC_ENTER_LOOPER:
1987 binder_debug(BINDER_DEBUG_THREADS,
1988 "binder: %d:%d BC_ENTER_LOOPER\n",
1989 proc->pid, thread->pid);
1990 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1991 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1992 binder_user_error("binder: %d:%d ERROR:"
1993 " BC_ENTER_LOOPER called after "
1994 "BC_REGISTER_LOOPER\n",
1995 proc->pid, thread->pid);
1996 }
1997 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1998 break;
1999 case BC_EXIT_LOOPER:
2000 binder_debug(BINDER_DEBUG_THREADS,
2001 "binder: %d:%d BC_EXIT_LOOPER\n",
2002 proc->pid, thread->pid);
2003 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2004 break;
2005
2006 case BC_REQUEST_DEATH_NOTIFICATION:
2007 case BC_CLEAR_DEATH_NOTIFICATION: {
2008 uint32_t target;
2009 void __user *cookie;
2010 struct binder_ref *ref;
2011 struct binder_ref_death *death;
2012
2013 if (get_user(target, (uint32_t __user *)ptr))
2014 return -EFAULT;
2015 ptr += sizeof(uint32_t);
2016 if (get_user(cookie, (void __user * __user *)ptr))
2017 return -EFAULT;
2018 ptr += sizeof(void *);
2019 ref = binder_get_ref(proc, target);
2020 if (ref == NULL) {
2021 binder_user_error("binder: %d:%d %s "
2022 "invalid ref %d\n",
2023 proc->pid, thread->pid,
2024 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2025 "BC_REQUEST_DEATH_NOTIFICATION" :
2026 "BC_CLEAR_DEATH_NOTIFICATION",
2027 target);
2028 break;
2029 }
2030
2031 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2032 "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
2033 proc->pid, thread->pid,
2034 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2035 "BC_REQUEST_DEATH_NOTIFICATION" :
2036 "BC_CLEAR_DEATH_NOTIFICATION",
2037 cookie, ref->debug_id, ref->desc,
2038 ref->strong, ref->weak, ref->node->debug_id);
2039
2040 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2041 if (ref->death) {
2042					binder_user_error("binder: %d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2046						proc->pid, thread->pid);
2047 break;
2048 }
2049 death = kzalloc(sizeof(*death), GFP_KERNEL);
2050 if (death == NULL) {
2051 thread->return_error = BR_ERROR;
2052 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2053 "binder: %d:%d "
2054 "BC_REQUEST_DEATH_NOTIFICATION failed\n",
2055 proc->pid, thread->pid);
2056 break;
2057 }
2058 binder_stats_created(BINDER_STAT_DEATH);
2059 INIT_LIST_HEAD(&death->work.entry);
2060 death->cookie = cookie;
2061 ref->death = death;
2062 if (ref->node->proc == NULL) {
2063 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2064 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2065 list_add_tail(&ref->death->work.entry, &thread->todo);
2066 } else {
2067 list_add_tail(&ref->death->work.entry, &proc->todo);
2068 wake_up_interruptible(&proc->wait);
2069 }
2070 }
2071 } else {
2072 if (ref->death == NULL) {
2073					binder_user_error("binder: %d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2077						proc->pid, thread->pid);
2078 break;
2079 }
2080 death = ref->death;
2081 if (death->cookie != cookie) {
2082					binder_user_error("binder: %d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n",
2087						proc->pid, thread->pid,
2088						death->cookie, cookie);
2089 break;
2090 }
2091 ref->death = NULL;
2092 if (list_empty(&death->work.entry)) {
2093 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2094 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2095 list_add_tail(&death->work.entry, &thread->todo);
2096 } else {
2097 list_add_tail(&death->work.entry, &proc->todo);
2098 wake_up_interruptible(&proc->wait);
2099 }
2100 } else {
2101 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2102 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2103 }
2104 }
2105 } break;
2106 case BC_DEAD_BINDER_DONE: {
2107 struct binder_work *w;
2108 void __user *cookie;
2109 struct binder_ref_death *death = NULL;
2110 if (get_user(cookie, (void __user * __user *)ptr))
2111 return -EFAULT;
2112
2113 ptr += sizeof(void *);
2114 list_for_each_entry(w, &proc->delivered_death, entry) {
2115 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2116 if (tmp_death->cookie == cookie) {
2117 death = tmp_death;
2118 break;
2119 }
2120 }
2121 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2122 "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
2123 proc->pid, thread->pid, cookie, death);
2124 if (death == NULL) {
2125				binder_user_error("binder: %d:%d BC_DEAD_BINDER_DONE %p not found\n",
2127					proc->pid, thread->pid, cookie);
2128 break;
2129 }
2130
2131 list_del_init(&death->work.entry);
2132 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2133 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2134 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2135 list_add_tail(&death->work.entry, &thread->todo);
2136 } else {
2137 list_add_tail(&death->work.entry, &proc->todo);
2138 wake_up_interruptible(&proc->wait);
2139 }
2140 }
2141 } break;
2142
2143 default:
2144 printk(KERN_ERR "binder: %d:%d unknown command %d\n",
2145 proc->pid, thread->pid, cmd);
2146 return -EINVAL;
2147 }
2148 *consumed = ptr - buffer;
2149 }
2150 return 0;
2151}
2152
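/* Count a BR_* return code in the global, per-process and per-thread stats. */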
2153void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
2154 uint32_t cmd)
2155{
2156 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2157 binder_stats.br[_IOC_NR(cmd)]++;
2158 proc->stats.br[_IOC_NR(cmd)]++;
2159 thread->stats.br[_IOC_NR(cmd)]++;
2160 }
2161}
2162
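/* Wakeup predicates shared by poll() and the blocking read path below. */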
2163static int binder_has_proc_work(struct binder_proc *proc,
2164 struct binder_thread *thread)
2165{
2166 return !list_empty(&proc->todo) ||
2167 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2168}
2169
2170static int binder_has_thread_work(struct binder_thread *thread)
2171{
2172 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2173 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2174}
2175
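/*
 * binder_thread_read() - fill the user read buffer with BR_* work.
 * Unless non_block is set, sleeps until thread-local work (or, for an
 * idle thread, process-wide work) arrives, then translates the queued
 * binder_work items into the commands and transaction data that user
 * space consumes.
 */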
2176static int binder_thread_read(struct binder_proc *proc,
2177 struct binder_thread *thread,
2178 void __user *buffer, int size,
2179 signed long *consumed, int non_block)
2180{
2181 void __user *ptr = buffer + *consumed;
2182 void __user *end = buffer + size;
2183
2184 int ret = 0;
2185 int wait_for_proc_work;
2186
2187 if (*consumed == 0) {
2188 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2189 return -EFAULT;
2190 ptr += sizeof(uint32_t);
2191 }
2192
2193retry:
2194 wait_for_proc_work = thread->transaction_stack == NULL &&
2195 list_empty(&thread->todo);
2196
2197 if (thread->return_error != BR_OK && ptr < end) {
2198 if (thread->return_error2 != BR_OK) {
2199 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2200 return -EFAULT;
2201 ptr += sizeof(uint32_t);
2202 if (ptr == end)
2203 goto done;
2204 thread->return_error2 = BR_OK;
2205 }
2206 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2207 return -EFAULT;
2208 ptr += sizeof(uint32_t);
2209 thread->return_error = BR_OK;
2210 goto done;
2211 }
2212
2213
2214 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2215 if (wait_for_proc_work)
2216 proc->ready_threads++;
2217 mutex_unlock(&binder_lock);
2218 if (wait_for_proc_work) {
2219 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2220 BINDER_LOOPER_STATE_ENTERED))) {
2221			binder_user_error("binder: %d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2224				proc->pid, thread->pid, thread->looper);
2225 wait_event_interruptible(binder_user_error_wait,
2226 binder_stop_on_user_error < 2);
2227 }
2228 binder_set_nice(proc->default_priority);
2229 if (non_block) {
2230 if (!binder_has_proc_work(proc, thread))
2231 ret = -EAGAIN;
2232 } else
2233 ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2234 } else {
2235 if (non_block) {
2236 if (!binder_has_thread_work(thread))
2237 ret = -EAGAIN;
2238 } else
2239 ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
2240 }
2241 mutex_lock(&binder_lock);
2242 if (wait_for_proc_work)
2243 proc->ready_threads--;
2244 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2245
2246 if (ret)
2247 return ret;
2248
2249 while (1) {
2250 uint32_t cmd;
2251 struct binder_transaction_data tr;
2252 struct binder_work *w;
2253 struct binder_transaction *t = NULL;
2254
2255 if (!list_empty(&thread->todo))
2256 w = list_first_entry(&thread->todo, struct binder_work, entry);
2257 else if (!list_empty(&proc->todo) && wait_for_proc_work)
2258 w = list_first_entry(&proc->todo, struct binder_work, entry);
2259 else {
2260 if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
2261 goto retry;
2262 break;
2263 }
2264
2265 if (end - ptr < sizeof(tr) + 4)
2266 break;
2267
2268 switch (w->type) {
2269 case BINDER_WORK_TRANSACTION: {
2270 t = container_of(w, struct binder_transaction, work);
2271 } break;
2272 case BINDER_WORK_TRANSACTION_COMPLETE: {
2273 cmd = BR_TRANSACTION_COMPLETE;
2274 if (put_user(cmd, (uint32_t __user *)ptr))
2275 return -EFAULT;
2276 ptr += sizeof(uint32_t);
2277
2278 binder_stat_br(proc, thread, cmd);
2279 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2280 "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
2281 proc->pid, thread->pid);
2282
2283 list_del(&w->entry);
2284 kfree(w);
2285 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2286 } break;
2287 case BINDER_WORK_NODE: {
2288 struct binder_node *node = container_of(w, struct binder_node, work);
2289 uint32_t cmd = BR_NOOP;
2290 const char *cmd_name;
2291 int strong = node->internal_strong_refs || node->local_strong_refs;
2292 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2293 if (weak && !node->has_weak_ref) {
2294 cmd = BR_INCREFS;
2295 cmd_name = "BR_INCREFS";
2296 node->has_weak_ref = 1;
2297 node->pending_weak_ref = 1;
2298 node->local_weak_refs++;
2299 } else if (strong && !node->has_strong_ref) {
2300 cmd = BR_ACQUIRE;
2301 cmd_name = "BR_ACQUIRE";
2302 node->has_strong_ref = 1;
2303 node->pending_strong_ref = 1;
2304 node->local_strong_refs++;
2305 } else if (!strong && node->has_strong_ref) {
2306 cmd = BR_RELEASE;
2307 cmd_name = "BR_RELEASE";
2308 node->has_strong_ref = 0;
2309 } else if (!weak && node->has_weak_ref) {
2310 cmd = BR_DECREFS;
2311 cmd_name = "BR_DECREFS";
2312 node->has_weak_ref = 0;
2313 }
2314 if (cmd != BR_NOOP) {
2315 if (put_user(cmd, (uint32_t __user *)ptr))
2316 return -EFAULT;
2317 ptr += sizeof(uint32_t);
2318 if (put_user(node->ptr, (void * __user *)ptr))
2319 return -EFAULT;
2320 ptr += sizeof(void *);
2321 if (put_user(node->cookie, (void * __user *)ptr))
2322 return -EFAULT;
2323 ptr += sizeof(void *);
2324
2325 binder_stat_br(proc, thread, cmd);
2326 binder_debug(BINDER_DEBUG_USER_REFS,
2327 "binder: %d:%d %s %d u%p c%p\n",
2328 proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
2329 } else {
2330 list_del_init(&w->entry);
2331 if (!weak && !strong) {
2332 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2333 "binder: %d:%d node %d u%p c%p deleted\n",
2334 proc->pid, thread->pid, node->debug_id,
2335 node->ptr, node->cookie);
2336 rb_erase(&node->rb_node, &proc->nodes);
2337 kfree(node);
2338 binder_stats_deleted(BINDER_STAT_NODE);
2339 } else {
2340 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2341 "binder: %d:%d node %d u%p c%p state unchanged\n",
2342 proc->pid, thread->pid, node->debug_id, node->ptr,
2343 node->cookie);
2344 }
2345 }
2346 } break;
2347 case BINDER_WORK_DEAD_BINDER:
2348 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2349 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2350 struct binder_ref_death *death;
2351 uint32_t cmd;
2352
2353 death = container_of(w, struct binder_ref_death, work);
2354 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2355 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2356 else
2357 cmd = BR_DEAD_BINDER;
2358 if (put_user(cmd, (uint32_t __user *)ptr))
2359 return -EFAULT;
2360 ptr += sizeof(uint32_t);
2361 if (put_user(death->cookie, (void * __user *)ptr))
2362 return -EFAULT;
2363 ptr += sizeof(void *);
2364 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2365 "binder: %d:%d %s %p\n",
2366 proc->pid, thread->pid,
2367 cmd == BR_DEAD_BINDER ?
2368 "BR_DEAD_BINDER" :
2369 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2370 death->cookie);
2371
2372 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2373 list_del(&w->entry);
2374 kfree(death);
2375 binder_stats_deleted(BINDER_STAT_DEATH);
2376 } else
2377 list_move(&w->entry, &proc->delivered_death);
2378 if (cmd == BR_DEAD_BINDER)
2379 goto done; /* DEAD_BINDER notifications can cause transactions */
2380 } break;
2381 }
2382
2383 if (!t)
2384 continue;
2385
2386 BUG_ON(t->buffer == NULL);
2387 if (t->buffer->target_node) {
2388 struct binder_node *target_node = t->buffer->target_node;
2389 tr.target.ptr = target_node->ptr;
2390 tr.cookie = target_node->cookie;
2391 t->saved_priority = task_nice(current);
2392 if (t->priority < target_node->min_priority &&
2393 !(t->flags & TF_ONE_WAY))
2394 binder_set_nice(t->priority);
2395 else if (!(t->flags & TF_ONE_WAY) ||
2396 t->saved_priority > target_node->min_priority)
2397 binder_set_nice(target_node->min_priority);
2398 cmd = BR_TRANSACTION;
2399 } else {
2400 tr.target.ptr = NULL;
2401 tr.cookie = NULL;
2402 cmd = BR_REPLY;
2403 }
2404 tr.code = t->code;
2405 tr.flags = t->flags;
2406 tr.sender_euid = t->sender_euid;
2407
2408 if (t->from) {
2409 struct task_struct *sender = t->from->proc->tsk;
2410 tr.sender_pid = task_tgid_nr_ns(sender,
2411 current->nsproxy->pid_ns);
2412 } else {
2413 tr.sender_pid = 0;
2414 }
2415
2416 tr.data_size = t->buffer->data_size;
2417 tr.offsets_size = t->buffer->offsets_size;
2418 tr.data.ptr.buffer = (void *)t->buffer->data +
2419 proc->user_buffer_offset;
2420 tr.data.ptr.offsets = tr.data.ptr.buffer +
2421 ALIGN(t->buffer->data_size,
2422 sizeof(void *));
2423
2424 if (put_user(cmd, (uint32_t __user *)ptr))
2425 return -EFAULT;
2426 ptr += sizeof(uint32_t);
2427 if (copy_to_user(ptr, &tr, sizeof(tr)))
2428 return -EFAULT;
2429 ptr += sizeof(tr);
2430
2431 binder_stat_br(proc, thread, cmd);
2432 binder_debug(BINDER_DEBUG_TRANSACTION,
2433 "binder: %d:%d %s %d %d:%d, cmd %d"
2434 "size %zd-%zd ptr %p-%p\n",
2435 proc->pid, thread->pid,
2436 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2437 "BR_REPLY",
2438 t->debug_id, t->from ? t->from->proc->pid : 0,
2439 t->from ? t->from->pid : 0, cmd,
2440 t->buffer->data_size, t->buffer->offsets_size,
2441 tr.data.ptr.buffer, tr.data.ptr.offsets);
2442
2443 list_del(&t->work.entry);
2444 t->buffer->allow_user_free = 1;
2445 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2446 t->to_parent = thread->transaction_stack;
2447 t->to_thread = thread;
2448 thread->transaction_stack = t;
2449 } else {
2450 t->buffer->transaction = NULL;
2451 kfree(t);
2452 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2453 }
2454 break;
2455 }
2456
2457done:
2458
2459 *consumed = ptr - buffer;
2460 if (proc->requested_threads + proc->ready_threads == 0 &&
2461 proc->requested_threads_started < proc->max_threads &&
2462	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2463	     BINDER_LOOPER_STATE_ENTERED))
2464	     /* user space fails to spawn a new looper thread if this check is left out */) {
2465 proc->requested_threads++;
2466 binder_debug(BINDER_DEBUG_THREADS,
2467 "binder: %d:%d BR_SPAWN_LOOPER\n",
2468 proc->pid, thread->pid);
2469 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2470 return -EFAULT;
2471 }
2472 return 0;
2473}
2474
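/*
 * Drain a work list that no thread will ever read again, sending
 * BR_DEAD_REPLY for transactions that still expect an answer.
 */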
2475static void binder_release_work(struct list_head *list)
2476{
2477 struct binder_work *w;
2478 while (!list_empty(list)) {
2479 w = list_first_entry(list, struct binder_work, entry);
2480 list_del_init(&w->entry);
2481 switch (w->type) {
2482 case BINDER_WORK_TRANSACTION: {
2483 struct binder_transaction *t;
2484
2485 t = container_of(w, struct binder_transaction, work);
2486 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
2487 binder_send_failed_reply(t, BR_DEAD_REPLY);
2488 } break;
2489 case BINDER_WORK_TRANSACTION_COMPLETE: {
2490 kfree(w);
2491 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2492 } break;
2493 default:
2494 break;
2495 }
2496 }
2497
2498}
2499
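/*
 * Find the binder_thread for current in the process's thread rbtree,
 * creating and inserting one on first use; returns NULL only when that
 * allocation fails.
 */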
2500static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2501{
2502 struct binder_thread *thread = NULL;
2503 struct rb_node *parent = NULL;
2504 struct rb_node **p = &proc->threads.rb_node;
2505
2506 while (*p) {
2507 parent = *p;
2508 thread = rb_entry(parent, struct binder_thread, rb_node);
2509
2510 if (current->pid < thread->pid)
2511 p = &(*p)->rb_left;
2512 else if (current->pid > thread->pid)
2513 p = &(*p)->rb_right;
2514 else
2515 break;
2516 }
2517 if (*p == NULL) {
2518 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2519 if (thread == NULL)
2520 return NULL;
2521 binder_stats_created(BINDER_STAT_THREAD);
2522 thread->proc = proc;
2523 thread->pid = current->pid;
2524 init_waitqueue_head(&thread->wait);
2525 INIT_LIST_HEAD(&thread->todo);
2526 rb_link_node(&thread->rb_node, parent, p);
2527 rb_insert_color(&thread->rb_node, &proc->threads);
2528 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2529 thread->return_error = BR_OK;
2530 thread->return_error2 = BR_OK;
2531 }
2532 return thread;
2533}
2534
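/*
 * Tear down a thread: unlink it from its transaction stack, fail any
 * reply a peer is still waiting on, release queued work and free the
 * structure.  Returns the number of still-active transactions seen.
 */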
2535static int binder_free_thread(struct binder_proc *proc,
2536 struct binder_thread *thread)
2537{
2538 struct binder_transaction *t;
2539 struct binder_transaction *send_reply = NULL;
2540 int active_transactions = 0;
2541
2542 rb_erase(&thread->rb_node, &proc->threads);
2543 t = thread->transaction_stack;
2544 if (t && t->to_thread == thread)
2545 send_reply = t;
2546 while (t) {
2547 active_transactions++;
2548 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2549 "binder: release %d:%d transaction %d "
2550 "%s, still active\n", proc->pid, thread->pid,
2551 t->debug_id,
2552 (t->to_thread == thread) ? "in" : "out");
2553
2554 if (t->to_thread == thread) {
2555 t->to_proc = NULL;
2556 t->to_thread = NULL;
2557 if (t->buffer) {
2558 t->buffer->transaction = NULL;
2559 t->buffer = NULL;
2560 }
2561 t = t->to_parent;
2562 } else if (t->from == thread) {
2563 t->from = NULL;
2564 t = t->from_parent;
2565 } else
2566 BUG();
2567 }
2568 if (send_reply)
2569 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2570 binder_release_work(&thread->todo);
2571 kfree(thread);
2572 binder_stats_deleted(BINDER_STAT_THREAD);
2573 return active_transactions;
2574}
2575
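/*
 * poll() support: an idle thread waits on the process-wide queue, any
 * other thread on its own; POLLIN means a read would not block.
 */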
2576static unsigned int binder_poll(struct file *filp,
2577 struct poll_table_struct *wait)
2578{
2579 struct binder_proc *proc = filp->private_data;
2580 struct binder_thread *thread = NULL;
2581 int wait_for_proc_work;
2582
2583 mutex_lock(&binder_lock);
2584 thread = binder_get_thread(proc);
2585
2586 wait_for_proc_work = thread->transaction_stack == NULL &&
2587 list_empty(&thread->todo) && thread->return_error == BR_OK;
2588 mutex_unlock(&binder_lock);
2589
2590 if (wait_for_proc_work) {
2591 if (binder_has_proc_work(proc, thread))
2592 return POLLIN;
2593 poll_wait(filp, &proc->wait, wait);
2594 if (binder_has_proc_work(proc, thread))
2595 return POLLIN;
2596 } else {
2597 if (binder_has_thread_work(thread))
2598 return POLLIN;
2599 poll_wait(filp, &thread->wait, wait);
2600 if (binder_has_thread_work(thread))
2601 return POLLIN;
2602 }
2603 return 0;
2604}
2605
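/*
 * binder_ioctl() - top-level command dispatcher.  BINDER_WRITE_READ is
 * the workhorse: the write buffer is fed to binder_thread_write() and
 * then, if a read buffer was supplied, the thread blocks in
 * binder_thread_read().  A minimal user-space caller (a sketch only,
 * error handling omitted) drives it roughly like:
 *
 *	struct binder_write_read bwr = {0};
 *	bwr.write_size = sizeof(cmds);
 *	bwr.write_buffer = (unsigned long)cmds;
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */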
2606static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2607{
2608 int ret;
2609 struct binder_proc *proc = filp->private_data;
2610 struct binder_thread *thread;
2611 unsigned int size = _IOC_SIZE(cmd);
2612 void __user *ubuf = (void __user *)arg;
2613
2614 /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
2615
2616 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2617 if (ret)
2618 return ret;
2619
2620 mutex_lock(&binder_lock);
2621 thread = binder_get_thread(proc);
2622 if (thread == NULL) {
2623 ret = -ENOMEM;
2624 goto err;
2625 }
2626
2627 switch (cmd) {
2628 case BINDER_WRITE_READ: {
2629 struct binder_write_read bwr;
2630 if (size != sizeof(struct binder_write_read)) {
2631 ret = -EINVAL;
2632 goto err;
2633 }
2634 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2635 ret = -EFAULT;
2636 goto err;
2637 }
2638 binder_debug(BINDER_DEBUG_READ_WRITE,
2639 "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
2640 proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
2641 bwr.read_size, bwr.read_buffer);
2642
2643 if (bwr.write_size > 0) {
2644 ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
2645 if (ret < 0) {
2646 bwr.read_consumed = 0;
2647 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2648 ret = -EFAULT;
2649 goto err;
2650 }
2651 }
2652 if (bwr.read_size > 0) {
2653 ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
2654 if (!list_empty(&proc->todo))
2655 wake_up_interruptible(&proc->wait);
2656 if (ret < 0) {
2657 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2658 ret = -EFAULT;
2659 goto err;
2660 }
2661 }
2662 binder_debug(BINDER_DEBUG_READ_WRITE,
2663 "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
2664 proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
2665 bwr.read_consumed, bwr.read_size);
2666 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
2667 ret = -EFAULT;
2668 goto err;
2669 }
2670 break;
2671 }
2672 case BINDER_SET_MAX_THREADS:
2673 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2674			ret = -EFAULT;
2675 goto err;
2676 }
2677 break;
2678 case BINDER_SET_CONTEXT_MGR:
2679 if (binder_context_mgr_node != NULL) {
2680 printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
2681 ret = -EBUSY;
2682 goto err;
2683 }
2684 if (binder_context_mgr_uid != -1) {
2685 if (binder_context_mgr_uid != current->cred->euid) {
2686				printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2688					current->cred->euid,
2689					binder_context_mgr_uid);
2690 ret = -EPERM;
2691 goto err;
2692 }
2693 } else
2694 binder_context_mgr_uid = current->cred->euid;
2695 binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
2696 if (binder_context_mgr_node == NULL) {
2697 ret = -ENOMEM;
2698 goto err;
2699 }
2700 binder_context_mgr_node->local_weak_refs++;
2701 binder_context_mgr_node->local_strong_refs++;
2702 binder_context_mgr_node->has_strong_ref = 1;
2703 binder_context_mgr_node->has_weak_ref = 1;
2704 break;
2705 case BINDER_THREAD_EXIT:
2706 binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
2707 proc->pid, thread->pid);
2708 binder_free_thread(proc, thread);
2709 thread = NULL;
2710 break;
2711 case BINDER_VERSION:
2712 if (size != sizeof(struct binder_version)) {
2713 ret = -EINVAL;
2714 goto err;
2715 }
2716 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
2717			ret = -EFAULT;
2718 goto err;
2719 }
2720 break;
2721 default:
2722 ret = -EINVAL;
2723 goto err;
2724 }
2725 ret = 0;
2726err:
2727 if (thread)
2728 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2729 mutex_unlock(&binder_lock);
2730 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2731 if (ret && ret != -ERESTARTSYS)
2732 printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2733 return ret;
2734}
2735
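/*
 * VMA lifetime hooks: open is only logged (with a stack dump); close
 * clears proc->vma and defers releasing the cached files_struct.
 */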
2736static void binder_vma_open(struct vm_area_struct *vma)
2737{
2738 struct binder_proc *proc = vma->vm_private_data;
2739 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2740 "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2741 proc->pid, vma->vm_start, vma->vm_end,
2742 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2743 (unsigned long)pgprot_val(vma->vm_page_prot));
2744 dump_stack();
2745}
2746
2747static void binder_vma_close(struct vm_area_struct *vma)
2748{
2749 struct binder_proc *proc = vma->vm_private_data;
2750 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2751 "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2752 proc->pid, vma->vm_start, vma->vm_end,
2753 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2754 (unsigned long)pgprot_val(vma->vm_page_prot));
2755 proc->vma = NULL;
2756 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2757}
2758
2759static struct vm_operations_struct binder_vm_ops = {
2760 .open = binder_vma_open,
2761 .close = binder_vma_close,
2762};
2763
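/*
 * binder_mmap() - set up the shared buffer area, capped at 4MB.  The
 * kernel vmalloc area and the user VMA alias the same pages; only the
 * first page is populated here, the rest are filled in on demand via
 * binder_update_page_range() as buffers get allocated.
 */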
2764static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2765{
2766 int ret;
2767 struct vm_struct *area;
2768 struct binder_proc *proc = filp->private_data;
2769 const char *failure_string;
2770 struct binder_buffer *buffer;
2771
2772 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2773 vma->vm_end = vma->vm_start + SZ_4M;
2774
2775 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2776 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2777 proc->pid, vma->vm_start, vma->vm_end,
2778 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2779 (unsigned long)pgprot_val(vma->vm_page_prot));
2780
2781 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2782 ret = -EPERM;
2783 failure_string = "bad vm_flags";
2784 goto err_bad_arg;
2785 }
2786 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2787
2788 if (proc->buffer) {
2789 ret = -EBUSY;
2790 failure_string = "already mapped";
2791 goto err_already_mapped;
2792 }
2793
2794 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2795 if (area == NULL) {
2796 ret = -ENOMEM;
2797 failure_string = "get_vm_area";
2798 goto err_get_vm_area_failed;
2799 }
2800 proc->buffer = area->addr;
2801 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2802
2803#ifdef CONFIG_CPU_CACHE_VIPT
2804 if (cache_is_vipt_aliasing()) {
2805 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2806 printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2807 vma->vm_start += PAGE_SIZE;
2808 }
2809 }
2810#endif
2811 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2812 if (proc->pages == NULL) {
2813 ret = -ENOMEM;
2814 failure_string = "alloc page array";
2815 goto err_alloc_pages_failed;
2816 }
2817 proc->buffer_size = vma->vm_end - vma->vm_start;
2818
2819 vma->vm_ops = &binder_vm_ops;
2820 vma->vm_private_data = proc;
2821
2822 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2823 ret = -ENOMEM;
2824 failure_string = "alloc small buf";
2825 goto err_alloc_small_buf_failed;
2826 }
2827 buffer = proc->buffer;
2828 INIT_LIST_HEAD(&proc->buffers);
2829 list_add(&buffer->entry, &proc->buffers);
2830 buffer->free = 1;
2831 binder_insert_free_buffer(proc, buffer);
2832 proc->free_async_space = proc->buffer_size / 2;
2833 barrier();
2834 proc->files = get_files_struct(current);
2835 proc->vma = vma;
2836
2837 /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
2838 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2839 return 0;
2840
2841err_alloc_small_buf_failed:
2842 kfree(proc->pages);
2843 proc->pages = NULL;
2844err_alloc_pages_failed:
2845 vfree(proc->buffer);
2846 proc->buffer = NULL;
2847err_get_vm_area_failed:
2848err_already_mapped:
2849err_bad_arg:
2850 printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
2851 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
2852 return ret;
2853}
2854
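/*
 * binder_open() - allocate the per-process binder_proc, add it to the
 * global list and publish a /proc read entry for its state.
 */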
2855static int binder_open(struct inode *nodp, struct file *filp)
2856{
2857 struct binder_proc *proc;
2858
2859 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2860 current->group_leader->pid, current->pid);
2861
2862 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2863 if (proc == NULL)
2864 return -ENOMEM;
2865 get_task_struct(current);
2866 proc->tsk = current;
2867 INIT_LIST_HEAD(&proc->todo);
2868 init_waitqueue_head(&proc->wait);
2869 proc->default_priority = task_nice(current);
2870 mutex_lock(&binder_lock);
2871 binder_stats_created(BINDER_STAT_PROC);
2872 hlist_add_head(&proc->proc_node, &binder_procs);
2873 proc->pid = current->group_leader->pid;
2874 INIT_LIST_HEAD(&proc->delivered_death);
2875 filp->private_data = proc;
2876 mutex_unlock(&binder_lock);
2877
2878 if (binder_proc_dir_entry_proc) {
2879 char strbuf[11];
2880 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2881 remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
2882 create_proc_read_entry(strbuf, S_IRUGO,
2883 binder_proc_dir_entry_proc,
2884 binder_read_proc_proc, proc);
2885 }
2886
2887 return 0;
2888}
2889
2890static int binder_flush(struct file *filp, fl_owner_t id)
2891{
2892 struct binder_proc *proc = filp->private_data;
2893
2894 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2895
2896 return 0;
2897}
2898
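/*
 * Kick every waiting thread of the process back to user space so a
 * flush is observed promptly.
 */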
2899static void binder_deferred_flush(struct binder_proc *proc)
2900{
2901 struct rb_node *n;
2902 int wake_count = 0;
2903 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2904 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2905 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2906 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
2907 wake_up_interruptible(&thread->wait);
2908 wake_count++;
2909 }
2910 }
2911 wake_up_interruptible_all(&proc->wait);
2912
2913 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2914 "binder_flush: %d woke %d threads\n", proc->pid,
2915 wake_count);
2916}
2917
2918static int binder_release(struct inode *nodp, struct file *filp)
2919{
2920 struct binder_proc *proc = filp->private_data;
2921 if (binder_proc_dir_entry_proc) {
2922 char strbuf[11];
2923 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2924 remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
2925 }
2926
2927 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
2928
2929 return 0;
2930}
2931
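/*
 * binder_deferred_release() - final teardown after the fd is released:
 * free threads, move still-referenced nodes onto the dead-node list
 * (queueing death notifications for their holders), drop refs, reclaim
 * buffers and pages, then free the binder_proc itself.
 */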
2932static void binder_deferred_release(struct binder_proc *proc)
2933{
2934 struct hlist_node *pos;
2935 struct binder_transaction *t;
2936 struct rb_node *n;
2937 int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
2938
2939 BUG_ON(proc->vma);
2940 BUG_ON(proc->files);
2941
2942 hlist_del(&proc->proc_node);
2943 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
2944 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2945 "binder_release: %d context_mgr_node gone\n",
2946 proc->pid);
2947 binder_context_mgr_node = NULL;
2948 }
2949
2950 threads = 0;
2951 active_transactions = 0;
2952 while ((n = rb_first(&proc->threads))) {
2953 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2954 threads++;
2955 active_transactions += binder_free_thread(proc, thread);
2956 }
2957 nodes = 0;
2958 incoming_refs = 0;
2959 while ((n = rb_first(&proc->nodes))) {
2960 struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
2961
2962 nodes++;
2963 rb_erase(&node->rb_node, &proc->nodes);
2964 list_del_init(&node->work.entry);
2965 if (hlist_empty(&node->refs)) {
2966 kfree(node);
2967 binder_stats_deleted(BINDER_STAT_NODE);
2968 } else {
2969 struct binder_ref *ref;
2970 int death = 0;
2971
2972 node->proc = NULL;
2973 node->local_strong_refs = 0;
2974 node->local_weak_refs = 0;
2975 hlist_add_head(&node->dead_node, &binder_dead_nodes);
2976
2977 hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
2978 incoming_refs++;
2979 if (ref->death) {
2980 death++;
2981 if (list_empty(&ref->death->work.entry)) {
2982 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2983 list_add_tail(&ref->death->work.entry, &ref->proc->todo);
2984 wake_up_interruptible(&ref->proc->wait);
2985 } else
2986 BUG();
2987 }
2988 }
2989 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2990 "binder: node %d now dead, "
2991 "refs %d, death %d\n", node->debug_id,
2992 incoming_refs, death);
2993 }
2994 }
2995 outgoing_refs = 0;
2996 while ((n = rb_first(&proc->refs_by_desc))) {
2997 struct binder_ref *ref = rb_entry(n, struct binder_ref,
2998 rb_node_desc);
2999 outgoing_refs++;
3000 binder_delete_ref(ref);
3001 }
3002 binder_release_work(&proc->todo);
3003 buffers = 0;
3004
3005 while ((n = rb_first(&proc->allocated_buffers))) {
3006 struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
3007 rb_node);
3008 t = buffer->transaction;
3009 if (t) {
3010 t->buffer = NULL;
3011 buffer->transaction = NULL;
3012 printk(KERN_ERR "binder: release proc %d, "
3013 "transaction %d, not freed\n",
3014 proc->pid, t->debug_id);
3015 /*BUG();*/
3016 }
3017 binder_free_buf(proc, buffer);
3018 buffers++;
3019 }
3020
3021 binder_stats_deleted(BINDER_STAT_PROC);
3022
3023 page_count = 0;
3024 if (proc->pages) {
3025 int i;
3026 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3027 if (proc->pages[i]) {
3028 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3029 "binder_release: %d: "
3030 "page %d at %p not freed\n",
3031 proc->pid, i,
3032 proc->buffer + i * PAGE_SIZE);
3033 __free_page(proc->pages[i]);
3034 page_count++;
3035 }
3036 }
3037 kfree(proc->pages);
3038 vfree(proc->buffer);
3039 }
3040
3041 put_task_struct(proc->tsk);
3042
3043 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3044 "binder_release: %d threads %d, nodes %d (ref %d), "
3045 "refs %d, active transactions %d, buffers %d, "
3046 "pages %d\n",
3047 proc->pid, threads, nodes, incoming_refs, outgoing_refs,
3048 active_transactions, buffers, page_count);
3049
3050 kfree(proc);
3051}
3052
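/*
 * Workqueue handler that drains binder_deferred_list, running whatever
 * combination of put-files/flush/release work each process has pending;
 * put_files_struct() happens outside binder_lock.
 */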
3053static void binder_deferred_func(struct work_struct *work)
3054{
3055 struct binder_proc *proc;
3056 struct files_struct *files;
3057
3058 int defer;
3059 do {
3060 mutex_lock(&binder_lock);
3061 mutex_lock(&binder_deferred_lock);
3062 if (!hlist_empty(&binder_deferred_list)) {
3063 proc = hlist_entry(binder_deferred_list.first,
3064 struct binder_proc, deferred_work_node);
3065 hlist_del_init(&proc->deferred_work_node);
3066 defer = proc->deferred_work;
3067 proc->deferred_work = 0;
3068 } else {
3069 proc = NULL;
3070 defer = 0;
3071 }
3072 mutex_unlock(&binder_deferred_lock);
3073
3074 files = NULL;
3075 if (defer & BINDER_DEFERRED_PUT_FILES) {
3076 files = proc->files;
3077 if (files)
3078 proc->files = NULL;
3079 }
3080
3081 if (defer & BINDER_DEFERRED_FLUSH)
3082 binder_deferred_flush(proc);
3083
3084 if (defer & BINDER_DEFERRED_RELEASE)
3085 binder_deferred_release(proc); /* frees proc */
3086
3087 mutex_unlock(&binder_lock);
3088 if (files)
3089 put_files_struct(files);
3090 } while (proc);
3091}
3092static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3093
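/* Mark deferred work pending for @proc and schedule the shared work item. */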
3094static void
3095binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3096{
3097 mutex_lock(&binder_deferred_lock);
3098 proc->deferred_work |= defer;
3099 if (hlist_unhashed(&proc->deferred_work_node)) {
3100 hlist_add_head(&proc->deferred_work_node,
3101 &binder_deferred_list);
3102 schedule_work(&binder_deferred_work);
3103 }
3104 mutex_unlock(&binder_deferred_lock);
3105}
3106
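/*
 * The print_binder_*() helpers below format state into a bounded buffer
 * for the /proc files; each returns the advanced buffer pointer, and
 * callers stop once a page is full.
 */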
3107static char *print_binder_transaction(char *buf, char *end, const char *prefix,
3108 struct binder_transaction *t)
3109{
3110 buf += snprintf(buf, end - buf,
3111 "%s %d: %p from %d:%d to %d:%d code %x "
3112 "flags %x pri %ld r%d",
3113 prefix, t->debug_id, t,
3114 t->from ? t->from->proc->pid : 0,
3115 t->from ? t->from->pid : 0,
3116 t->to_proc ? t->to_proc->pid : 0,
3117 t->to_thread ? t->to_thread->pid : 0,
3118 t->code, t->flags, t->priority, t->need_reply);
3119 if (buf >= end)
3120 return buf;
3121 if (t->buffer == NULL) {
3122 buf += snprintf(buf, end - buf, " buffer free\n");
3123 return buf;
3124 }
3125 if (t->buffer->target_node) {
3126 buf += snprintf(buf, end - buf, " node %d",
3127 t->buffer->target_node->debug_id);
3128 if (buf >= end)
3129 return buf;
3130 }
3131 buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n",
3132 t->buffer->data_size, t->buffer->offsets_size,
3133 t->buffer->data);
3134 return buf;
3135}
3136
3137static char *print_binder_buffer(char *buf, char *end, const char *prefix,
3138 struct binder_buffer *buffer)
3139{
3140 buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n",
3141 prefix, buffer->debug_id, buffer->data,
3142 buffer->data_size, buffer->offsets_size,
3143 buffer->transaction ? "active" : "delivered");
3144 return buf;
3145}
3146
3147static char *print_binder_work(char *buf, char *end, const char *prefix,
3148 const char *transaction_prefix,
3149 struct binder_work *w)
3150{
3151 struct binder_node *node;
3152 struct binder_transaction *t;
3153
3154 switch (w->type) {
3155 case BINDER_WORK_TRANSACTION:
3156 t = container_of(w, struct binder_transaction, work);
3157 buf = print_binder_transaction(buf, end, transaction_prefix, t);
3158 break;
3159 case BINDER_WORK_TRANSACTION_COMPLETE:
3160 buf += snprintf(buf, end - buf,
3161 "%stransaction complete\n", prefix);
3162 break;
3163 case BINDER_WORK_NODE:
3164 node = container_of(w, struct binder_node, work);
3165 buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n",
3166 prefix, node->debug_id, node->ptr,
3167 node->cookie);
3168 break;
3169 case BINDER_WORK_DEAD_BINDER:
3170 buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix);
3171 break;
3172 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3173 buf += snprintf(buf, end - buf,
3174 "%shas cleared dead binder\n", prefix);
3175 break;
3176 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3177 buf += snprintf(buf, end - buf,
3178 "%shas cleared death notification\n", prefix);
3179 break;
3180 default:
3181 buf += snprintf(buf, end - buf, "%sunknown work: type %d\n",
3182 prefix, w->type);
3183 break;
3184 }
3185 return buf;
3186}
3187
3188static char *print_binder_thread(char *buf, char *end,
3189 struct binder_thread *thread,
3190 int print_always)
3191{
3192 struct binder_transaction *t;
3193 struct binder_work *w;
3194 char *start_buf = buf;
3195 char *header_buf;
3196
3197 buf += snprintf(buf, end - buf, " thread %d: l %02x\n",
3198 thread->pid, thread->looper);
3199 header_buf = buf;
3200 t = thread->transaction_stack;
3201 while (t) {
3202 if (buf >= end)
3203 break;
3204 if (t->from == thread) {
3205 buf = print_binder_transaction(buf, end,
3206 " outgoing transaction", t);
3207 t = t->from_parent;
3208 } else if (t->to_thread == thread) {
3209 buf = print_binder_transaction(buf, end,
3210 " incoming transaction", t);
3211 t = t->to_parent;
3212 } else {
3213 buf = print_binder_transaction(buf, end,
3214 " bad transaction", t);
3215 t = NULL;
3216 }
3217 }
3218 list_for_each_entry(w, &thread->todo, entry) {
3219 if (buf >= end)
3220 break;
3221 buf = print_binder_work(buf, end, " ",
3222 " pending transaction", w);
3223 }
3224 if (!print_always && buf == header_buf)
3225 buf = start_buf;
3226 return buf;
3227}
3228
3229static char *print_binder_node(char *buf, char *end, struct binder_node *node)
3230{
3231 struct binder_ref *ref;
3232 struct hlist_node *pos;
3233 struct binder_work *w;
3234 int count;
3235
3236 count = 0;
3237 hlist_for_each_entry(ref, pos, &node->refs, node_entry)
3238 count++;
3239
3240 buf += snprintf(buf, end - buf,
3241 " node %d: u%p c%p hs %d hw %d ls %d lw %d "
3242 "is %d iw %d",
3243 node->debug_id, node->ptr, node->cookie,
3244 node->has_strong_ref, node->has_weak_ref,
3245 node->local_strong_refs, node->local_weak_refs,
3246 node->internal_strong_refs, count);
3247 if (buf >= end)
3248 return buf;
3249 if (count) {
3250 buf += snprintf(buf, end - buf, " proc");
3251 if (buf >= end)
3252 return buf;
3253 hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
3254 buf += snprintf(buf, end - buf, " %d", ref->proc->pid);
3255 if (buf >= end)
3256 return buf;
3257 }
3258 }
3259 buf += snprintf(buf, end - buf, "\n");
3260 list_for_each_entry(w, &node->async_todo, entry) {
3261 if (buf >= end)
3262 break;
3263 buf = print_binder_work(buf, end, " ",
3264 " pending async transaction", w);
3265 }
3266 return buf;
3267}
3268
3269static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref)
3270{
3271 buf += snprintf(buf, end - buf,
3272 " ref %d: desc %d %snode %d s %d w %d d %p\n",
3273 ref->debug_id, ref->desc,
3274 ref->node->proc ? "" : "dead ", ref->node->debug_id,
3275 ref->strong, ref->weak, ref->death);
3276 return buf;
3277}
3278
3279static char *print_binder_proc(char *buf, char *end,
3280 struct binder_proc *proc, int print_all)
3281{
3282 struct binder_work *w;
3283 struct rb_node *n;
3284 char *start_buf = buf;
3285 char *header_buf;
3286
3287 buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
3288 header_buf = buf;
3289
3290 for (n = rb_first(&proc->threads);
3291 n != NULL && buf < end;
3292 n = rb_next(n))
3293 buf = print_binder_thread(buf, end,
3294 rb_entry(n, struct binder_thread,
3295 rb_node), print_all);
3296 for (n = rb_first(&proc->nodes);
3297 n != NULL && buf < end;
3298 n = rb_next(n)) {
3299 struct binder_node *node = rb_entry(n, struct binder_node,
3300 rb_node);
3301 if (print_all || node->has_async_transaction)
3302 buf = print_binder_node(buf, end, node);
3303 }
3304 if (print_all) {
3305 for (n = rb_first(&proc->refs_by_desc);
3306 n != NULL && buf < end;
3307 n = rb_next(n))
3308 buf = print_binder_ref(buf, end,
3309 rb_entry(n, struct binder_ref,
3310 rb_node_desc));
3311 }
3312 for (n = rb_first(&proc->allocated_buffers);
3313 n != NULL && buf < end;
3314 n = rb_next(n))
3315 buf = print_binder_buffer(buf, end, " buffer",
3316 rb_entry(n, struct binder_buffer,
3317 rb_node));
3318 list_for_each_entry(w, &proc->todo, entry) {
3319 if (buf >= end)
3320 break;
3321 buf = print_binder_work(buf, end, " ",
3322 " pending transaction", w);
3323 }
3324 list_for_each_entry(w, &proc->delivered_death, entry) {
3325 if (buf >= end)
3326 break;
3327 buf += snprintf(buf, end - buf,
3328 " has delivered dead binder\n");
3329 break;
3330 }
3331 if (!print_all && buf == header_buf)
3332 buf = start_buf;
3333 return buf;
3334}
3335
3336static const char *binder_return_strings[] = {
3337 "BR_ERROR",
3338 "BR_OK",
3339 "BR_TRANSACTION",
3340 "BR_REPLY",
3341 "BR_ACQUIRE_RESULT",
3342 "BR_DEAD_REPLY",
3343 "BR_TRANSACTION_COMPLETE",
3344 "BR_INCREFS",
3345 "BR_ACQUIRE",
3346 "BR_RELEASE",
3347 "BR_DECREFS",
3348 "BR_ATTEMPT_ACQUIRE",
3349 "BR_NOOP",
3350 "BR_SPAWN_LOOPER",
3351 "BR_FINISHED",
3352 "BR_DEAD_BINDER",
3353 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3354 "BR_FAILED_REPLY"
3355};
3356
3357static const char *binder_command_strings[] = {
3358 "BC_TRANSACTION",
3359 "BC_REPLY",
3360 "BC_ACQUIRE_RESULT",
3361 "BC_FREE_BUFFER",
3362 "BC_INCREFS",
3363 "BC_ACQUIRE",
3364 "BC_RELEASE",
3365 "BC_DECREFS",
3366 "BC_INCREFS_DONE",
3367 "BC_ACQUIRE_DONE",
3368 "BC_ATTEMPT_ACQUIRE",
3369 "BC_REGISTER_LOOPER",
3370 "BC_ENTER_LOOPER",
3371 "BC_EXIT_LOOPER",
3372 "BC_REQUEST_DEATH_NOTIFICATION",
3373 "BC_CLEAR_DEATH_NOTIFICATION",
3374 "BC_DEAD_BINDER_DONE"
3375};
3376
3377static const char *binder_objstat_strings[] = {
3378 "proc",
3379 "thread",
3380 "node",
3381 "ref",
3382 "death",
3383 "transaction",
3384 "transaction_complete"
3385};
3386
3387static char *print_binder_stats(char *buf, char *end, const char *prefix,
3388 struct binder_stats *stats)
3389{
3390 int i;
3391
3392 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3393 ARRAY_SIZE(binder_command_strings));
3394 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3395 if (stats->bc[i])
3396 buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
3397 binder_command_strings[i],
3398 stats->bc[i]);
3399 if (buf >= end)
3400 return buf;
3401 }
3402
3403 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3404 ARRAY_SIZE(binder_return_strings));
3405 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3406 if (stats->br[i])
3407 buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
3408 binder_return_strings[i], stats->br[i]);
3409 if (buf >= end)
3410 return buf;
3411 }
3412
3413 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3414 ARRAY_SIZE(binder_objstat_strings));
3415 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3416 ARRAY_SIZE(stats->obj_deleted));
3417 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3418 if (stats->obj_created[i] || stats->obj_deleted[i])
3419 buf += snprintf(buf, end - buf,
3420 "%s%s: active %d total %d\n", prefix,
3421 binder_objstat_strings[i],
3422 stats->obj_created[i] -
3423 stats->obj_deleted[i],
3424 stats->obj_created[i]);
3425 if (buf >= end)
3426 return buf;
3427 }
3428 return buf;
3429}
3430
3431static char *print_binder_proc_stats(char *buf, char *end,
3432 struct binder_proc *proc)
3433{
3434 struct binder_work *w;
3435 struct rb_node *n;
3436 int count, strong, weak;
3437
3438 buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
3439 if (buf >= end)
3440 return buf;
3441 count = 0;
3442 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3443 count++;
3444 buf += snprintf(buf, end - buf, " threads: %d\n", count);
3445 if (buf >= end)
3446 return buf;
3447 buf += snprintf(buf, end - buf, " requested threads: %d+%d/%d\n"
3448 " ready threads %d\n"
3449 " free async space %zd\n", proc->requested_threads,
3450 proc->requested_threads_started, proc->max_threads,
3451 proc->ready_threads, proc->free_async_space);
3452 if (buf >= end)
3453 return buf;
3454 count = 0;
3455 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3456 count++;
3457 buf += snprintf(buf, end - buf, " nodes: %d\n", count);
3458 if (buf >= end)
3459 return buf;
3460 count = 0;
3461 strong = 0;
3462 weak = 0;
3463 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3464 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3465 rb_node_desc);
3466 count++;
3467 strong += ref->strong;
3468 weak += ref->weak;
3469 }
3470 buf += snprintf(buf, end - buf, " refs: %d s %d w %d\n",
3471 count, strong, weak);
3472 if (buf >= end)
3473 return buf;
3474
3475 count = 0;
3476 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3477 count++;
3478 buf += snprintf(buf, end - buf, " buffers: %d\n", count);
3479 if (buf >= end)
3480 return buf;
3481
3482 count = 0;
3483 list_for_each_entry(w, &proc->todo, entry) {
3484 switch (w->type) {
3485 case BINDER_WORK_TRANSACTION:
3486 count++;
3487 break;
3488 default:
3489 break;
3490 }
3491 }
3492 buf += snprintf(buf, end - buf, " pending transactions: %d\n", count);
3493 if (buf >= end)
3494 return buf;
3495
3496 buf = print_binder_stats(buf, end, " ", &proc->stats);
3497
3498 return buf;
3499}
3500
3501
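/*
 * The read_proc handlers below render at most one page per call and
 * treat any non-zero offset as end-of-file (the legacy single-shot
 * /proc read convention used throughout this driver).
 */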
3502static int binder_read_proc_state(char *page, char **start, off_t off,
3503 int count, int *eof, void *data)
3504{
3505 struct binder_proc *proc;
3506 struct hlist_node *pos;
3507 struct binder_node *node;
3508 int len = 0;
3509 char *buf = page;
3510 char *end = page + PAGE_SIZE;
3511 int do_lock = !binder_debug_no_lock;
3512
3513 if (off)
3514 return 0;
3515
3516 if (do_lock)
3517 mutex_lock(&binder_lock);
3518
3519 buf += snprintf(buf, end - buf, "binder state:\n");
3520
3521 if (!hlist_empty(&binder_dead_nodes))
3522 buf += snprintf(buf, end - buf, "dead nodes:\n");
3523 hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) {
3524 if (buf >= end)
3525 break;
3526 buf = print_binder_node(buf, end, node);
3527 }
3528
3529 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
3530 if (buf >= end)
3531 break;
3532 buf = print_binder_proc(buf, end, proc, 1);
3533 }
3534 if (do_lock)
3535 mutex_unlock(&binder_lock);
3536 if (buf > page + PAGE_SIZE)
3537 buf = page + PAGE_SIZE;
3538
3539 *start = page + off;
3540
3541 len = buf - page;
3542 if (len > off)
3543 len -= off;
3544 else
3545 len = 0;
3546
3547 return len < count ? len : count;
3548}
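
All of the read handlers in this file follow the legacy procfs read_proc contract: fill the caller's page, point *start at the bytes the caller should consume, and return at most count bytes. A minimal standalone sketch of that contract, with an illustrative payload (not driver code):

	static int example_read_proc(char *page, char **start, off_t off,
				     int count, int *eof, void *data)
	{
		int len = snprintf(page, PAGE_SIZE, "hello from procfs\n");

		if (off >= len) {		/* everything already consumed */
			*eof = 1;
			return 0;
		}
		*start = page + off;		/* hand back only the unread tail */
		len -= off;
		if (len <= count)
			*eof = 1;
		return len < count ? len : count;
	}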
3549
3550static int binder_read_proc_stats(char *page, char **start, off_t off,
3551 int count, int *eof, void *data)
3552{
3553 struct binder_proc *proc;
3554 struct hlist_node *pos;
3555 int len = 0;
3556 char *p = page;
3557 int do_lock = !binder_debug_no_lock;
3558
3559 if (off)
3560 return 0;
3561
3562 if (do_lock)
3563 mutex_lock(&binder_lock);
3564
3565 p += snprintf(p, PAGE_SIZE, "binder stats:\n");
3566
3567 p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats);
3568
3569 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
3570 if (p >= page + PAGE_SIZE)
3571 break;
3572 p = print_binder_proc_stats(p, page + PAGE_SIZE, proc);
3573 }
3574 if (do_lock)
3575 mutex_unlock(&binder_lock);
3576 if (p > page + PAGE_SIZE)
3577 p = page + PAGE_SIZE;
3578
3579 *start = page + off;
3580
3581 len = p - page;
3582 if (len > off)
3583 len -= off;
3584 else
3585 len = 0;
3586
3587 return len < count ? len : count;
3588}
3589
3590static int binder_read_proc_transactions(char *page, char **start, off_t off,
3591 int count, int *eof, void *data)
3592{
3593 struct binder_proc *proc;
3594 struct hlist_node *pos;
3595 int len = 0;
3596 char *buf = page;
3597 char *end = page + PAGE_SIZE;
3598 int do_lock = !binder_debug_no_lock;
3599
3600 if (off)
3601 return 0;
3602
3603 if (do_lock)
3604 mutex_lock(&binder_lock);
3605
3606 buf += snprintf(buf, end - buf, "binder transactions:\n");
3607 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
3608 if (buf >= end)
3609 break;
3610 buf = print_binder_proc(buf, end, proc, 0);
3611 }
3612 if (do_lock)
3613 mutex_unlock(&binder_lock);
3614 if (buf > page + PAGE_SIZE)
3615 buf = page + PAGE_SIZE;
3616
3617 *start = page + off;
3618
3619 len = buf - page;
3620 if (len > off)
3621 len -= off;
3622 else
3623 len = 0;
3624
3625 return len < count ? len : count;
3626}
3627
3628static int binder_read_proc_proc(char *page, char **start, off_t off,
3629 int count, int *eof, void *data)
3630{
3631 struct binder_proc *proc = data;
3632 int len = 0;
3633 char *p = page;
3634 int do_lock = !binder_debug_no_lock;
3635
3636 if (off)
3637 return 0;
3638
3639 if (do_lock)
3640 mutex_lock(&binder_lock);
3641 p += snprintf(p, PAGE_SIZE, "binder proc state:\n");
3642 p = print_binder_proc(p, page + PAGE_SIZE, proc, 1);
3643 if (do_lock)
3644 mutex_unlock(&binder_lock);
3645
3646 if (p > page + PAGE_SIZE)
3647 p = page + PAGE_SIZE;
3648 *start = page + off;
3649
3650 len = p - page;
3651 if (len > off)
3652 len -= off;
3653 else
3654 len = 0;
3655
3656 return len < count ? len : count;
3657}
3658
3659static char *print_binder_transaction_log_entry(char *buf, char *end,
3660 struct binder_transaction_log_entry *e)
3661{
3662 buf += snprintf(buf, end - buf,
3663 "%d: %s from %d:%d to %d:%d node %d handle %d "
3664 "size %d:%d\n",
3665 e->debug_id, (e->call_type == 2) ? "reply" :
3666 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3667 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3668 e->target_handle, e->data_size, e->offsets_size);
3669 return buf;
3670}
3671
3672static int binder_read_proc_transaction_log(
3673 char *page, char **start, off_t off, int count, int *eof, void *data)
3674{
3675 struct binder_transaction_log *log = data;
3676 int len = 0;
3677 int i;
3678 char *buf = page;
3679 char *end = page + PAGE_SIZE;
3680
3681 if (off)
3682 return 0;
3683
3684 if (log->full) {
3685 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) {
3686 if (buf >= end)
3687 break;
3688 buf = print_binder_transaction_log_entry(buf, end,
3689 &log->entry[i]);
3690 }
3691 }
3692 for (i = 0; i < log->next; i++) {
3693 if (buf >= end)
3694 break;
3695 buf = print_binder_transaction_log_entry(buf, end,
3696 &log->entry[i]);
3697 }
3698
3699 *start = page + off;
3700
3701 len = buf - page;
3702 if (len > off)
3703 len -= off;
3704 else
3705 len = 0;
3706
3707 return len < count ? len : count;
3708}
3709
3710static const struct file_operations binder_fops = {
3711 .owner = THIS_MODULE,
3712 .poll = binder_poll,
3713 .unlocked_ioctl = binder_ioctl,
3714 .mmap = binder_mmap,
3715 .open = binder_open,
3716 .flush = binder_flush,
3717 .release = binder_release,
3718};
3719
3720static struct miscdevice binder_miscdev = {
3721 .minor = MISC_DYNAMIC_MINOR,
3722 .name = "binder",
3723 .fops = &binder_fops
3724};
3725
3726static int __init binder_init(void)
3727{
3728 int ret;
3729
3730 binder_proc_dir_entry_root = proc_mkdir("binder", NULL);
3731 if (binder_proc_dir_entry_root)
3732 binder_proc_dir_entry_proc = proc_mkdir("proc",
3733 binder_proc_dir_entry_root);
3734 ret = misc_register(&binder_miscdev);
3735 if (binder_proc_dir_entry_root) {
3736 create_proc_read_entry("state",
3737 S_IRUGO,
3738 binder_proc_dir_entry_root,
3739 binder_read_proc_state,
3740 NULL);
3741 create_proc_read_entry("stats",
3742 S_IRUGO,
3743 binder_proc_dir_entry_root,
3744 binder_read_proc_stats,
3745 NULL);
3746 create_proc_read_entry("transactions",
3747 S_IRUGO,
3748 binder_proc_dir_entry_root,
3749 binder_read_proc_transactions,
3750 NULL);
3751 create_proc_read_entry("transaction_log",
3752 S_IRUGO,
3753 binder_proc_dir_entry_root,
3754 binder_read_proc_transaction_log,
3755 &binder_transaction_log);
3756 create_proc_read_entry("failed_transaction_log",
3757 S_IRUGO,
3758 binder_proc_dir_entry_root,
3759 binder_read_proc_transaction_log,
3760 &binder_transaction_log_failed);
3761 }
3762 return ret;
3763}
3764
3765device_initcall(binder_init);
3766
3767MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
deleted file mode 100644
index 863ae1ad5d55..000000000000
--- a/drivers/staging/android/binder.h
+++ /dev/null
@@ -1,330 +0,0 @@
1/*
2 * Copyright (C) 2008 Google, Inc.
3 *
4 * Based on, but no longer compatible with, the original
5 * OpenBinder.org binder driver interface, which is:
6 *
7 * Copyright (c) 2005 Palmsource, Inc.
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#ifndef _LINUX_BINDER_H
21#define _LINUX_BINDER_H
22
23#include <linux/ioctl.h>
24
25#define B_PACK_CHARS(c1, c2, c3, c4) \
26 ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
27#define B_TYPE_LARGE 0x85
28
29enum {
30 BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
31 BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
32 BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
33 BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
34 BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
35};
36
37enum {
38 FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
39 FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
40};
41
42/*
43 * This is the flattened representation of a Binder object for transfer
44 * between processes. The 'offsets' supplied as part of a binder transaction
45 * contains offsets into the data where these structures occur. The Binder
46 * driver takes care of re-writing the structure type and data as it moves
47 * between processes.
48 */
49struct flat_binder_object {
50 /* 8 bytes for large_flat_header. */
51 unsigned long type;
52 unsigned long flags;
53
54 /* 8 bytes of data. */
55 union {
56 void *binder; /* local object */
57 signed long handle; /* remote object */
58 };
59
60 /* extra data associated with local object */
61 void *cookie;
62};
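
As a rough sketch of the comment above: userspace embeds each flat_binder_object directly in the transaction payload and records where it sits in a parallel offsets array. Everything below (the payload layout, some_local_object) is illustrative, assuming only the definitions in this header:

	#include <stddef.h>

	static int some_local_object;		/* stand-in for a real service object */

	static struct {
		struct flat_binder_object obj;	/* payload byte 0 */
	} txn_data;

	static size_t txn_offsets[] = { 0 };	/* obj starts at offset 0 of the data */

	static void build_example(void)
	{
		txn_data.obj.type   = BINDER_TYPE_BINDER;	/* local object */
		txn_data.obj.flags  = FLAT_BINDER_FLAG_ACCEPTS_FDS;
		txn_data.obj.binder = &some_local_object;
		txn_data.obj.cookie = NULL;
	}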
63
64/*
65 * On 64-bit platforms where user code may run in 32-bit mode, the driver must
66 * translate the buffer (and local binder) addresses appropriately.
67 */
68
69struct binder_write_read {
70 signed long write_size; /* bytes to write */
71 signed long write_consumed; /* bytes consumed by driver */
72 unsigned long write_buffer;
73 signed long read_size; /* bytes to read */
74 signed long read_consumed; /* bytes consumed by driver */
75 unsigned long read_buffer;
76};
77
78/* Use with BINDER_VERSION, driver fills in fields. */
79struct binder_version {
80 /* driver protocol version -- increment with incompatible change */
81 signed long protocol_version;
82};
83
84/* This is the current protocol version. */
85#define BINDER_CURRENT_PROTOCOL_VERSION 7
86
87#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
88#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
89#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
90#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
91#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
92#define BINDER_THREAD_EXIT _IOW('b', 8, int)
93#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
94
95/*
96 * NOTE: Two special error codes you should check for when calling
97 * in to the driver are:
98 *
99 * EINTR -- The operation has been interrupted.  This should be
100 * handled by retrying the ioctl() until a different error code
101 * is returned.
102 *
103 * ECONNREFUSED -- The driver is no longer accepting operations
104 * from your process. That is, the process is being destroyed.
105 * You should handle this by exiting from your process. Note
106 * that once this error code is returned, all further calls to
107 * the driver from any thread will return this same code.
108 */
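
A hedged userspace sketch of the retry discipline this note asks for, wrapped around BINDER_WRITE_READ (assumes an already-open fd on the binder device and the definitions in this header; error handling abbreviated):

	#include <sys/ioctl.h>
	#include <errno.h>
	#include <unistd.h>
	/* plus the BINDER_* definitions from this header */

	static int binder_write_read_retry(int fd, struct binder_write_read *bwr)
	{
		int ret;

		do {				/* EINTR: just try again */
			ret = ioctl(fd, BINDER_WRITE_READ, bwr);
		} while (ret < 0 && errno == EINTR);

		if (ret < 0 && errno == ECONNREFUSED)
			_exit(1);		/* driver has cut this process off */

		return ret;
	}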
109
110enum transaction_flags {
111 TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
112 TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
113 TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
114 TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
115};
116
117struct binder_transaction_data {
118 /* The first two are only used for bcTRANSACTION and brTRANSACTION,
119 * identifying the target and contents of the transaction.
120 */
121 union {
122 size_t handle; /* target descriptor of command transaction */
123 void *ptr; /* target descriptor of return transaction */
124 } target;
125 void *cookie; /* target object cookie */
126 unsigned int code; /* transaction command */
127
128 /* General information about the transaction. */
129 unsigned int flags;
130 pid_t sender_pid;
131 uid_t sender_euid;
132 size_t data_size; /* number of bytes of data */
133 size_t offsets_size; /* number of bytes of offsets */
134
135 /* If this transaction is inline, the data immediately
136 * follows here; otherwise, it ends with a pointer to
137 * the data buffer.
138 */
139 union {
140 struct {
141 /* transaction data */
142 const void *buffer;
143 /* offsets from buffer to flat_binder_object structs */
144 const void *offsets;
145 } ptr;
146 uint8_t buf[8];
147 } data;
148};
149
150struct binder_ptr_cookie {
151 void *ptr;
152 void *cookie;
153};
154
155struct binder_pri_desc {
156 int priority;
157 int desc;
158};
159
160struct binder_pri_ptr_cookie {
161 int priority;
162 void *ptr;
163 void *cookie;
164};
165
166enum BinderDriverReturnProtocol {
167 BR_ERROR = _IOR('r', 0, int),
168 /*
169 * int: error code
170 */
171
172 BR_OK = _IO('r', 1),
173 /* No parameters! */
174
175 BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
176 BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
177 /*
178 * binder_transaction_data: the received command.
179 */
180
181 BR_ACQUIRE_RESULT = _IOR('r', 4, int),
182 /*
183 * not currently supported
184 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
185 * Else the remote object has acquired a primary reference.
186 */
187
188 BR_DEAD_REPLY = _IO('r', 5),
189 /*
190 * The target of the last transaction (either a bcTRANSACTION or
191 * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
192 */
193
194 BR_TRANSACTION_COMPLETE = _IO('r', 6),
195 /*
196 * No parameters... always refers to the last transaction requested
197 * (including replies). Note that this will be sent even for
198 * asynchronous transactions.
199 */
200
201 BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
202 BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
203 BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
204 BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
205 /*
206 * void *: ptr to binder
207 * void *: cookie for binder
208 */
209
210 BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
211 /*
212 * not currently supported
213 * int: priority
214 * void *: ptr to binder
215 * void *: cookie for binder
216 */
217
218 BR_NOOP = _IO('r', 12),
219 /*
220 * No parameters. Do nothing and examine the next command. It exists
221 * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
222 */
223
224 BR_SPAWN_LOOPER = _IO('r', 13),
225 /*
226 * No parameters. The driver has determined that a process has no
227 * threads waiting to service incoming transactions.  When a process
228 * receives this command, it must spawn a new service thread and
229 * register it via bcENTER_LOOPER.
230 */
231
232 BR_FINISHED = _IO('r', 14),
233 /*
234 * not currently supported
235 * stop threadpool thread
236 */
237
238 BR_DEAD_BINDER = _IOR('r', 15, void *),
239 /*
240 * void *: cookie
241 */
242 BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
243 /*
244 * void *: cookie
245 */
246
247 BR_FAILED_REPLY = _IO('r', 17),
248 /*
249 * The last transaction (either a bcTRANSACTION or
250 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
251 */
252};
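
Putting the return protocol together: after BINDER_WRITE_READ completes, read_consumed bytes of the read buffer hold a stream of command words, each followed by the payload its _IO encoding declares. A simplified walker, assuming the definitions above (a real looper would also act on each payload rather than skip it):

	#include <stdint.h>
	#include <stddef.h>
	#include <linux/ioctl.h>	/* _IOC_SIZE */

	static void handle_commands(const char *buf, size_t size)
	{
		const char *ptr = buf, *end = buf + size;

		while (ptr + sizeof(uint32_t) <= end) {
			uint32_t cmd = *(const uint32_t *)ptr;
			ptr += sizeof(uint32_t);

			switch (cmd) {
			case BR_NOOP:			/* filler, skip */
			case BR_TRANSACTION_COMPLETE:	/* ack, nothing to parse */
				break;
			case BR_TRANSACTION:
			case BR_REPLY:
				/* payload: struct binder_transaction_data */
				ptr += sizeof(struct binder_transaction_data);
				break;
			default:
				/* payload size is encoded in the command word */
				ptr += _IOC_SIZE(cmd);
				break;
			}
		}
	}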
253
254enum BinderDriverCommandProtocol {
255 BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
256 BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
257 /*
258 * binder_transaction_data: the sent command.
259 */
260
261 BC_ACQUIRE_RESULT = _IOW('c', 2, int),
262 /*
263 * not currently supported
264 * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
265 * Else you have acquired a primary reference on the object.
266 */
267
268 BC_FREE_BUFFER = _IOW('c', 3, int),
269 /*
270 * void *: ptr to transaction data received on a read
271 */
272
273 BC_INCREFS = _IOW('c', 4, int),
274 BC_ACQUIRE = _IOW('c', 5, int),
275 BC_RELEASE = _IOW('c', 6, int),
276 BC_DECREFS = _IOW('c', 7, int),
277 /*
278 * int: descriptor
279 */
280
281 BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
282 BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
283 /*
284 * void *: ptr to binder
285 * void *: cookie for binder
286 */
287
288 BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
289 /*
290 * not currently supported
291 * int: priority
292 * int: descriptor
293 */
294
295 BC_REGISTER_LOOPER = _IO('c', 11),
296 /*
297 * No parameters.
298 * Register a spawned looper thread with the device.
299 */
300
301 BC_ENTER_LOOPER = _IO('c', 12),
302 BC_EXIT_LOOPER = _IO('c', 13),
303 /*
304 * No parameters.
305 * These two commands are sent as an application-level thread
306 * enters and exits the binder loop, respectively. They are
307 * used so the binder can have an accurate count of the number
308 * of looping threads it has available.
309 */
310
311 BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
312 /*
313 * void *: ptr to binder
314 * void *: cookie
315 */
316
317 BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
318 /*
319 * void *: ptr to binder
320 * void *: cookie
321 */
322
323 BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
324 /*
325 * void *: cookie
326 */
327};
328
329#endif /* _LINUX_BINDER_H */
330
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
deleted file mode 100644
index 6c10b456c6cc..000000000000
--- a/drivers/staging/android/logger.c
+++ /dev/null
@@ -1,607 +0,0 @@
1/*
2 * drivers/misc/logger.c
3 *
4 * A Logging Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 *
8 * Robert Love <rlove@google.com>
9 *
10 * This software is licensed under the terms of the GNU General Public
11 * License version 2, as published by the Free Software Foundation, and
12 * may be copied, distributed, and modified under those terms.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#include <linux/module.h>
21#include <linux/fs.h>
22#include <linux/miscdevice.h>
23#include <linux/uaccess.h>
24#include <linux/poll.h>
25#include <linux/time.h>
26#include "logger.h"
27
28#include <asm/ioctls.h>
29
30/*
31 * struct logger_log - represents a specific log, such as 'main' or 'radio'
32 *
33 * This structure lives from module insertion until module removal, so it does
34 * not need additional reference counting. The structure is protected by the
35 * mutex 'mutex'.
36 */
37struct logger_log {
38 unsigned char *buffer;/* the ring buffer itself */
39 struct miscdevice misc; /* misc device representing the log */
40 wait_queue_head_t wq; /* wait queue for readers */
41 struct list_head readers; /* this log's readers */
42 struct mutex mutex; /* mutex protecting buffer */
43 size_t w_off; /* current write head offset */
44 size_t head; /* new readers start here */
45 size_t size; /* size of the log */
46};
47
48/*
49 * struct logger_reader - a logging device open for reading
50 *
51 * This object lives from open to release, so we don't need additional
52 * reference counting. The structure is protected by log->mutex.
53 */
54struct logger_reader {
55 struct logger_log *log; /* associated log */
56 struct list_head list; /* entry in logger_log's list */
57 size_t r_off; /* current read head offset */
58};
59
60/* logger_offset - returns index 'n' into the log via (optimized) modulus */
61#define logger_offset(n) ((n) & (log->size - 1))
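
The mask works because every log size below is a power of two: when size == 2^k, (n & (size - 1)) equals n % size with no division. A throwaway check of the identity:

	#include <assert.h>

	int main(void)
	{
		const unsigned size = 8;	/* any power of two, e.g. 2^3 */

		for (unsigned n = 0; n < 1000; n++)
			assert((n & (size - 1)) == n % size);
		return 0;
	}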
62
63/*
64 * file_get_log - Given a file structure, return the associated log
65 *
66 * This isn't aesthetic. We have several goals:
67 *
68 * 1) Need to quickly obtain the associated log during an I/O operation
69 * 2) Readers need to maintain state (logger_reader)
70 * 3) Writers need to be very fast (open() should be a near no-op)
71 *
72 * In the reader case, we can trivially go file->logger_reader->logger_log.
73 * For a writer, we don't want to maintain a logger_reader, so we just go
74 * file->logger_log. Thus what file->private_data points at depends on whether
75 * or not the file was opened for reading. This function hides that dirtiness.
76 */
77static inline struct logger_log *file_get_log(struct file *file)
78{
79 if (file->f_mode & FMODE_READ) {
80 struct logger_reader *reader = file->private_data;
81 return reader->log;
82 } else
83 return file->private_data;
84}
85
86/*
87 * get_entry_len - Grabs the length of the payload of the next entry starting
88 * from 'off'.
89 *
90 * Caller needs to hold log->mutex.
91 */
92static __u32 get_entry_len(struct logger_log *log, size_t off)
93{
94 __u16 val;
95
96 switch (log->size - off) {
97 case 1:
98 memcpy(&val, log->buffer + off, 1);
99 memcpy(((char *) &val) + 1, log->buffer, 1);
100 break;
101 default:
102 memcpy(&val, log->buffer + off, 2);
103 }
104
105 return sizeof(struct logger_entry) + val;
106}
107
108/*
109 * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
110 * user-space buffer 'buf'. Returns 'count' on success.
111 *
112 * Caller must hold log->mutex.
113 */
114static ssize_t do_read_log_to_user(struct logger_log *log,
115 struct logger_reader *reader,
116 char __user *buf,
117 size_t count)
118{
119 size_t len;
120
121 /*
122 * We read from the log in two disjoint operations. First, we read from
123 * the current read head offset up to 'count' bytes or to the end of
124 * the log, whichever comes first.
125 */
126 len = min(count, log->size - reader->r_off);
127 if (copy_to_user(buf, log->buffer + reader->r_off, len))
128 return -EFAULT;
129
130 /*
131 * Second, we read any remaining bytes, starting back at the head of
132 * the log.
133 */
134 if (count != len)
135 if (copy_to_user(buf + len, log->buffer, count - len))
136 return -EFAULT;
137
138 reader->r_off = logger_offset(reader->r_off + count);
139
140 return count;
141}
142
143/*
144 * logger_read - our log's read() method
145 *
146 * Behavior:
147 *
148 * - O_NONBLOCK works
149 * - If there are no log entries to read, blocks until log is written to
150 * - Atomically reads exactly one log entry
151 *
152 * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
153 * buffer is insufficient to hold next entry.
154 */
155static ssize_t logger_read(struct file *file, char __user *buf,
156 size_t count, loff_t *pos)
157{
158 struct logger_reader *reader = file->private_data;
159 struct logger_log *log = reader->log;
160 ssize_t ret;
161 DEFINE_WAIT(wait);
162
163start:
164 while (1) {
165 prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
166
167 mutex_lock(&log->mutex);
168 ret = (log->w_off == reader->r_off);
169 mutex_unlock(&log->mutex);
170 if (!ret)
171 break;
172
173 if (file->f_flags & O_NONBLOCK) {
174 ret = -EAGAIN;
175 break;
176 }
177
178 if (signal_pending(current)) {
179 ret = -EINTR;
180 break;
181 }
182
183 schedule();
184 }
185
186 finish_wait(&log->wq, &wait);
187 if (ret)
188 return ret;
189
190 mutex_lock(&log->mutex);
191
192 /* is there still something to read or did we race? */
193 if (unlikely(log->w_off == reader->r_off)) {
194 mutex_unlock(&log->mutex);
195 goto start;
196 }
197
198 /* get the size of the next entry */
199 ret = get_entry_len(log, reader->r_off);
200 if (count < ret) {
201 ret = -EINVAL;
202 goto out;
203 }
204
205 /* get exactly one entry from the log */
206 ret = do_read_log_to_user(log, reader, buf, ret);
207
208out:
209 mutex_unlock(&log->mutex);
210
211 return ret;
212}
213
214/*
215 * get_next_entry - return the offset of the first valid entry at least 'len'
216 * bytes after 'off'.
217 *
218 * Caller must hold log->mutex.
219 */
220static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
221{
222 size_t count = 0;
223
224 do {
225 size_t nr = get_entry_len(log, off);
226 off = logger_offset(off + nr);
227 count += nr;
228 } while (count < len);
229
230 return off;
231}
232
233/*
234 * clock_interval - is a < c < b in mod-space? Put another way, does the line
235 * from a to b cross c?
236 */
237static inline int clock_interval(size_t a, size_t b, size_t c)
238{
239 if (b < a) {
240 if (a < c || b >= c)
241 return 1;
242 } else {
243 if (a < c && b >= c)
244 return 1;
245 }
246
247 return 0;
248}
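
Worked example: with a 16-byte log, advancing the writer from a = 14 to b = 2 wraps past the end and therefore crosses offset 0, while a non-wrapping advance from 4 to 10 does not. A standalone check of both branches (function body copied from above):

	#include <assert.h>
	#include <stddef.h>

	static int clock_interval(size_t a, size_t b, size_t c)
	{
		if (b < a) {
			if (a < c || b >= c)
				return 1;
		} else {
			if (a < c && b >= c)
				return 1;
		}
		return 0;
	}

	int main(void)
	{
		assert(clock_interval(14, 2, 0));	/* wrapped, 0 was crossed */
		assert(!clock_interval(4, 10, 0));	/* no wrap, 0 untouched */
		assert(clock_interval(4, 10, 7));	/* 7 lies in (4, 10] */
		return 0;
	}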
249
250/*
251 * fix_up_readers - walk the list of all readers and "fix up" any who were
252 * lapped by the writer; also do the same for the default "start head".
253 * We do this by "pulling forward" the readers and start head to the first
254 * entry after the new write head.
255 *
256 * The caller needs to hold log->mutex.
257 */
258static void fix_up_readers(struct logger_log *log, size_t len)
259{
260 size_t old = log->w_off;
261 size_t new = logger_offset(old + len);
262 struct logger_reader *reader;
263
264 if (clock_interval(old, new, log->head))
265 log->head = get_next_entry(log, log->head, len);
266
267 list_for_each_entry(reader, &log->readers, list)
268 if (clock_interval(old, new, reader->r_off))
269 reader->r_off = get_next_entry(log, reader->r_off, len);
270}
271
272/*
273 * do_write_log - writes 'count' bytes from 'buf' to 'log'
274 *
275 * The caller needs to hold log->mutex.
276 */
277static void do_write_log(struct logger_log *log, const void *buf, size_t count)
278{
279 size_t len;
280
281 len = min(count, log->size - log->w_off);
282 memcpy(log->buffer + log->w_off, buf, len);
283
284 if (count != len)
285 memcpy(log->buffer, buf + len, count - len);
286
287 log->w_off = logger_offset(log->w_off + count);
288
289}
290
291/*
292 * do_write_log_from_user - writes 'count' bytes from the user-space buffer
293 * 'buf' to the log 'log'
294 *
295 * The caller needs to hold log->mutex.
296 *
297 * Returns 'count' on success, negative error code on failure.
298 */
299static ssize_t do_write_log_from_user(struct logger_log *log,
300 const void __user *buf, size_t count)
301{
302 size_t len;
303
304 len = min(count, log->size - log->w_off);
305 if (len && copy_from_user(log->buffer + log->w_off, buf, len))
306 return -EFAULT;
307
308 if (count != len)
309 if (copy_from_user(log->buffer, buf + len, count - len))
310 return -EFAULT;
311
312 log->w_off = logger_offset(log->w_off + count);
313
314 return count;
315}
316
317/*
318 * logger_aio_write - our write method, implementing support for write(),
319 * writev(), and aio_write(). Writes are our fast path, and we try to optimize
320 * them above all else.
321 */
322ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
323 unsigned long nr_segs, loff_t ppos)
324{
325 struct logger_log *log = file_get_log(iocb->ki_filp);
326 size_t orig = log->w_off;
327 struct logger_entry header;
328 struct timespec now;
329 ssize_t ret = 0;
330
331 now = current_kernel_time();
332
333 header.pid = current->tgid;
334 header.tid = current->pid;
335 header.sec = now.tv_sec;
336 header.nsec = now.tv_nsec;
337 header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
338
339 /* null writes succeed, return zero */
340 if (unlikely(!header.len))
341 return 0;
342
343 mutex_lock(&log->mutex);
344
345 /*
346 * Fix up any readers, pulling them forward to the first readable
347 * entry after (what will be) the new write offset. We do this now
348 * because if we partially fail, we can end up with clobbered log
349 * entries that encroach on readable buffer.
350 */
351 fix_up_readers(log, sizeof(struct logger_entry) + header.len);
352
353 do_write_log(log, &header, sizeof(struct logger_entry));
354
355 while (nr_segs-- > 0) {
356 size_t len;
357 ssize_t nr;
358
359 /* figure out how much of this vector we can keep */
360 len = min_t(size_t, iov->iov_len, header.len - ret);
361
362 /* write out this segment's payload */
363 nr = do_write_log_from_user(log, iov->iov_base, len);
364 if (unlikely(nr < 0)) {
365 log->w_off = orig;
366 mutex_unlock(&log->mutex);
367 return nr;
368 }
369
370 iov++;
371 ret += nr;
372 }
373
374 mutex_unlock(&log->mutex);
375
376 /* wake up any blocked readers */
377 wake_up_interruptible(&log->wq);
378
379 return ret;
380}
381
382static struct logger_log *get_log_from_minor(int);
383
384/*
385 * logger_open - the log's open() file operation
386 *
387 * Note how near a no-op this is in the write-only case. Keep it that way!
388 */
389static int logger_open(struct inode *inode, struct file *file)
390{
391 struct logger_log *log;
392 int ret;
393
394 ret = nonseekable_open(inode, file);
395 if (ret)
396 return ret;
397
398 log = get_log_from_minor(MINOR(inode->i_rdev));
399 if (!log)
400 return -ENODEV;
401
402 if (file->f_mode & FMODE_READ) {
403 struct logger_reader *reader;
404
405 reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
406 if (!reader)
407 return -ENOMEM;
408
409 reader->log = log;
410 INIT_LIST_HEAD(&reader->list);
411
412 mutex_lock(&log->mutex);
413 reader->r_off = log->head;
414 list_add_tail(&reader->list, &log->readers);
415 mutex_unlock(&log->mutex);
416
417 file->private_data = reader;
418 } else
419 file->private_data = log;
420
421 return 0;
422}
423
424/*
425 * logger_release - the log's release file operation
426 *
427 * Note this is a total no-op in the write-only case. Keep it that way!
428 */
429static int logger_release(struct inode *ignored, struct file *file)
430{
431 if (file->f_mode & FMODE_READ) {
432 struct logger_reader *reader = file->private_data;
433 list_del(&reader->list);
434 kfree(reader);
435 }
436
437 return 0;
438}
439
440/*
441 * logger_poll - the log's poll file operation, for poll/select/epoll
442 *
443 * Note we always return POLLOUT, because you can always write() to the log.
444 * Note also that, strictly speaking, a return value of POLLIN does not
445 * guarantee that the log is readable without blocking, as there is a small
446 * chance that the writer can lap the reader in the interim between poll()
447 * returning and the read() request.
448 */
449static unsigned int logger_poll(struct file *file, poll_table *wait)
450{
451 struct logger_reader *reader;
452 struct logger_log *log;
453 unsigned int ret = POLLOUT | POLLWRNORM;
454
455 if (!(file->f_mode & FMODE_READ))
456 return ret;
457
458 reader = file->private_data;
459 log = reader->log;
460
461 poll_wait(file, &log->wq, wait);
462
463 mutex_lock(&log->mutex);
464 if (log->w_off != reader->r_off)
465 ret |= POLLIN | POLLRDNORM;
466 mutex_unlock(&log->mutex);
467
468 return ret;
469}
470
471static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
472{
473 struct logger_log *log = file_get_log(file);
474 struct logger_reader *reader;
475 long ret = -ENOTTY;
476
477 mutex_lock(&log->mutex);
478
479 switch (cmd) {
480 case LOGGER_GET_LOG_BUF_SIZE:
481 ret = log->size;
482 break;
483 case LOGGER_GET_LOG_LEN:
484 if (!(file->f_mode & FMODE_READ)) {
485 ret = -EBADF;
486 break;
487 }
488 reader = file->private_data;
489 if (log->w_off >= reader->r_off)
490 ret = log->w_off - reader->r_off;
491 else
492 ret = (log->size - reader->r_off) + log->w_off;
493 break;
494 case LOGGER_GET_NEXT_ENTRY_LEN:
495 if (!(file->f_mode & FMODE_READ)) {
496 ret = -EBADF;
497 break;
498 }
499 reader = file->private_data;
500 if (log->w_off != reader->r_off)
501 ret = get_entry_len(log, reader->r_off);
502 else
503 ret = 0;
504 break;
505 case LOGGER_FLUSH_LOG:
506 if (!(file->f_mode & FMODE_WRITE)) {
507 ret = -EBADF;
508 break;
509 }
510 list_for_each_entry(reader, &log->readers, list)
511 reader->r_off = log->w_off;
512 log->head = log->w_off;
513 ret = 0;
514 break;
515 }
516
517 mutex_unlock(&log->mutex);
518
519 return ret;
520}
521
522static const struct file_operations logger_fops = {
523 .owner = THIS_MODULE,
524 .read = logger_read,
525 .aio_write = logger_aio_write,
526 .poll = logger_poll,
527 .unlocked_ioctl = logger_ioctl,
528 .compat_ioctl = logger_ioctl,
529 .open = logger_open,
530 .release = logger_release,
531};
532
533/*
534 * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
535 * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
536 * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
537 */
538#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
539static unsigned char _buf_ ## VAR[SIZE]; \
540static struct logger_log VAR = { \
541 .buffer = _buf_ ## VAR, \
542 .misc = { \
543 .minor = MISC_DYNAMIC_MINOR, \
544 .name = NAME, \
545 .fops = &logger_fops, \
546 .parent = NULL, \
547 }, \
548 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
549 .readers = LIST_HEAD_INIT(VAR .readers), \
550 .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
551 .w_off = 0, \
552 .head = 0, \
553 .size = SIZE, \
554};
555
556DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024)
557DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
558DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024)
559
560static struct logger_log *get_log_from_minor(int minor)
561{
562 if (log_main.misc.minor == minor)
563 return &log_main;
564 if (log_events.misc.minor == minor)
565 return &log_events;
566 if (log_radio.misc.minor == minor)
567 return &log_radio;
568 return NULL;
569}
570
571static int __init init_log(struct logger_log *log)
572{
573 int ret;
574
575 ret = misc_register(&log->misc);
576 if (unlikely(ret)) {
577 printk(KERN_ERR "logger: failed to register misc "
578 "device for log '%s'!\n", log->misc.name);
579 return ret;
580 }
581
582 printk(KERN_INFO "logger: created %luK log '%s'\n",
583 (unsigned long) log->size >> 10, log->misc.name);
584
585 return 0;
586}
587
588static int __init logger_init(void)
589{
590 int ret;
591
592 ret = init_log(&log_main);
593 if (unlikely(ret))
594 goto out;
595
596 ret = init_log(&log_events);
597 if (unlikely(ret))
598 goto out;
599
600 ret = init_log(&log_radio);
601 if (unlikely(ret))
602 goto out;
603
604out:
605 return ret;
606}
607device_initcall(logger_init);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
deleted file mode 100644
index a562434d7419..000000000000
--- a/drivers/staging/android/logger.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/* include/linux/logger.h
2 *
3 * Copyright (C) 2007-2008 Google, Inc.
4 * Author: Robert Love <rlove@android.com>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#ifndef _LINUX_LOGGER_H
18#define _LINUX_LOGGER_H
19
20#include <linux/types.h>
21#include <linux/ioctl.h>
22
23struct logger_entry {
24 __u16 len; /* length of the payload */
25 __u16 __pad; /* no matter what, we get 2 bytes of padding */
26 __s32 pid; /* generating process's pid */
27 __s32 tid; /* generating process's tid */
28 __s32 sec; /* seconds since Epoch */
29 __s32 nsec; /* nanoseconds */
30 char msg[0]; /* the entry's payload */
31};
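
Since logger_read hands back exactly one entry per read(), a userspace consumer can parse records directly against this struct. A sketch dumping one payload; the /dev/log_main node name follows the misc-device name below but is an assumption, and error handling is abbreviated:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	/* plus the definitions from this header */

	int main(void)
	{
		static char buf[LOGGER_ENTRY_MAX_LEN];
		struct logger_entry *e = (struct logger_entry *)buf;
		int fd = open("/dev/log_main", O_RDONLY);	/* assumed node name */

		if (fd < 0)
			return 1;
		if (read(fd, buf, sizeof(buf)) > 0)	/* one whole entry */
			printf("pid %d tid %d: %.*s\n",
			       e->pid, e->tid, e->len, e->msg);
		close(fd);
		return 0;
	}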
32
33#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
34#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
35#define LOGGER_LOG_MAIN "log_main" /* everything else */
36
37#define LOGGER_ENTRY_MAX_LEN (4*1024)
38#define LOGGER_ENTRY_MAX_PAYLOAD \
39 (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
40
41#define __LOGGERIO 0xAE
42
43#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
44#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
45#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
46#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
47
48#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
deleted file mode 100644
index 935d281a201a..000000000000
--- a/drivers/staging/android/lowmemorykiller.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/* drivers/misc/lowmemorykiller.c
2 *
3 * The lowmemorykiller driver lets user-space specify a set of memory thresholds
4 * where processes with a range of oom_adj values will get killed. Specify the
5 * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
6 * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
7 * files take a comma separated list of numbers in ascending order.
8 *
9 * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
10 * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes
11 * with an oom_adj value of 8 or higher when the free memory drops below 4096 pages
12 * and kill processes with an oom_adj value of 0 or higher when the free memory
13 * drops below 1024 pages.
14 *
15 * The driver considers memory used for caches to be free, but if a large
16 * percentage of the cached memory is locked this can be very inaccurate
17 * and processes may not get killed until the normal oom killer is triggered.
18 *
19 * Copyright (C) 2007-2008 Google, Inc.
20 *
21 * This software is licensed under the terms of the GNU General Public
22 * License version 2, as published by the Free Software Foundation, and
23 * may be copied, distributed, and modified under those terms.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 */
31
32#include <linux/module.h>
33#include <linux/kernel.h>
34#include <linux/mm.h>
35#include <linux/oom.h>
36#include <linux/sched.h>
37
38static uint32_t lowmem_debug_level = 2;
39static int lowmem_adj[6] = {
40 0,
41 1,
42 6,
43 12,
44};
45static int lowmem_adj_size = 4;
46static size_t lowmem_minfree[6] = {
47 3 * 512, /* 6MB */
48 2 * 1024, /* 8MB */
49 4 * 1024, /* 16MB */
50 16 * 1024, /* 64MB */
51};
52static int lowmem_minfree_size = 4;
53
54#define lowmem_print(level, x...) \
55 do { \
56 if (lowmem_debug_level >= (level)) \
57 printk(x); \
58 } while (0)
59
60static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
61{
62 struct task_struct *p;
63 struct task_struct *selected = NULL;
64 int rem = 0;
65 int tasksize;
66 int i;
67 int min_adj = OOM_ADJUST_MAX + 1;
68 int selected_tasksize = 0;
69 int selected_oom_adj;
70 int array_size = ARRAY_SIZE(lowmem_adj);
71 int other_free = global_page_state(NR_FREE_PAGES);
72 int other_file = global_page_state(NR_FILE_PAGES);
73
74 if (lowmem_adj_size < array_size)
75 array_size = lowmem_adj_size;
76 if (lowmem_minfree_size < array_size)
77 array_size = lowmem_minfree_size;
78 for (i = 0; i < array_size; i++) {
79 if (other_free < lowmem_minfree[i] &&
80 other_file < lowmem_minfree[i]) {
81 min_adj = lowmem_adj[i];
82 break;
83 }
84 }
85 if (nr_to_scan > 0)
86 lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n",
87 nr_to_scan, gfp_mask, other_free, other_file,
88 min_adj);
89 rem = global_page_state(NR_ACTIVE_ANON) +
90 global_page_state(NR_ACTIVE_FILE) +
91 global_page_state(NR_INACTIVE_ANON) +
92 global_page_state(NR_INACTIVE_FILE);
93 if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
94 lowmem_print(5, "lowmem_shrink %d, %x, return %d\n",
95 nr_to_scan, gfp_mask, rem);
96 return rem;
97 }
98 selected_oom_adj = min_adj;
99
100 read_lock(&tasklist_lock);
101 for_each_process(p) {
102 struct mm_struct *mm;
103 int oom_adj;
104
105 task_lock(p);
106 mm = p->mm;
107 if (!mm) {
108 task_unlock(p);
109 continue;
110 }
111 oom_adj = mm->oom_adj;
112 if (oom_adj < min_adj) {
113 task_unlock(p);
114 continue;
115 }
116 tasksize = get_mm_rss(mm);
117 task_unlock(p);
118 if (tasksize <= 0)
119 continue;
120 if (selected) {
121 if (oom_adj < selected_oom_adj)
122 continue;
123 if (oom_adj == selected_oom_adj &&
124 tasksize <= selected_tasksize)
125 continue;
126 }
127 selected = p;
128 selected_tasksize = tasksize;
129 selected_oom_adj = oom_adj;
130 lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
131 p->pid, p->comm, oom_adj, tasksize);
132 }
133 if (selected) {
134 lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
135 selected->pid, selected->comm,
136 selected_oom_adj, selected_tasksize);
137 force_sig(SIGKILL, selected);
138 rem -= selected_tasksize;
139 }
140 lowmem_print(4, "lowmem_shrink %d, %x, return %d\n",
141 nr_to_scan, gfp_mask, rem);
142 read_unlock(&tasklist_lock);
143 return rem;
144}
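
The pairing of the two module-parameter arrays drives everything: minfree[i] is the free-page threshold at which tasks at or above lowmem_adj[i] become killable. A standalone sketch of just that selection step, collapsing the free/file-page pair into one count for brevity (defaults copied from above; 15 is OOM_ADJUST_MAX in this era):

	#include <stdio.h>

	static const int adj[]     = { 0, 1, 6, 12 };
	static const int minfree[] = { 3 * 512, 2 * 1024, 4 * 1024, 16 * 1024 };

	static int min_killable_adj(int free_pages)
	{
		for (int i = 0; i < 4; i++)
			if (free_pages < minfree[i])
				return adj[i];
		return 15 + 1;		/* OOM_ADJUST_MAX + 1: nothing is killable */
	}

	int main(void)
	{
		/* 3000 free pages: below 4096 but not 2048 -> adj 6 and up die */
		printf("%d\n", min_killable_adj(3000));
		return 0;
	}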
145
146static struct shrinker lowmem_shrinker = {
147 .shrink = lowmem_shrink,
148 .seeks = DEFAULT_SEEKS * 16
149};
150
151static int __init lowmem_init(void)
152{
153 register_shrinker(&lowmem_shrinker);
154 return 0;
155}
156
157static void __exit lowmem_exit(void)
158{
159 unregister_shrinker(&lowmem_shrinker);
160}
161
162module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
163module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
164 S_IRUGO | S_IWUSR);
165module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
166 S_IRUGO | S_IWUSR);
167module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
168
169module_init(lowmem_init);
170module_exit(lowmem_exit);
171
172MODULE_LICENSE("GPL");
173
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
deleted file mode 100644
index 8f18a59744cd..000000000000
--- a/drivers/staging/android/ram_console.c
+++ /dev/null
@@ -1,410 +0,0 @@
1/* drivers/android/ram_console.c
2 *
3 * Copyright (C) 2007-2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include <linux/console.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/proc_fs.h>
21#include <linux/string.h>
22#include <linux/uaccess.h>
23#include <linux/io.h>
24
25#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
26#include <linux/rslib.h>
27#endif
28
29struct ram_console_buffer {
30 uint32_t sig;
31 uint32_t start;
32 uint32_t size;
33 uint8_t data[0];
34};
35
36#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
37
38#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
39static char __initdata
40 ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
41#endif
42static char *ram_console_old_log;
43static size_t ram_console_old_log_size;
44
45static struct ram_console_buffer *ram_console_buffer;
46static size_t ram_console_buffer_size;
47#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
48static char *ram_console_par_buffer;
49static struct rs_control *ram_console_rs_decoder;
50static int ram_console_corrected_bytes;
51static int ram_console_bad_blocks;
52#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
53#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
54#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
55#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
56#endif
57
58#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
59static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
60{
61 int i;
62 uint16_t par[ECC_SIZE];
63 /* Initialize the parity buffer */
64 memset(par, 0, sizeof(par));
65 encode_rs8(ram_console_rs_decoder, data, len, par, 0);
66 for (i = 0; i < ECC_SIZE; i++)
67 ecc[i] = par[i];
68}
69
70static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
71{
72 int i;
73 uint16_t par[ECC_SIZE];
74 for (i = 0; i < ECC_SIZE; i++)
75 par[i] = ecc[i];
76 return decode_rs8(ram_console_rs_decoder, data, par, len,
77 NULL, 0, NULL, 0, NULL);
78}
79#endif
80
81static void ram_console_update(const char *s, unsigned int count)
82{
83 struct ram_console_buffer *buffer = ram_console_buffer;
84#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
85 uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
86 uint8_t *block;
87 uint8_t *par;
88 int size = ECC_BLOCK_SIZE;
89#endif
90 memcpy(buffer->data + buffer->start, s, count);
91#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
92 block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
93 par = ram_console_par_buffer +
94 (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
95 do {
96 if (block + ECC_BLOCK_SIZE > buffer_end)
97 size = buffer_end - block;
98 ram_console_encode_rs8(block, size, par);
99 block += ECC_BLOCK_SIZE;
100 par += ECC_SIZE;
101 } while (block < buffer->data + buffer->start + count);
102#endif
103}
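
The parity area holds one ECC_SIZE record per ECC_BLOCK_SIZE bytes of data (plus a final record for the header, written by ram_console_update_header below), so the record covering a write at a given offset starts at (offset / ECC_BLOCK_SIZE) * ECC_SIZE. A worked sketch with assumed sizes, not the Kconfig defaults:

	#include <stdio.h>
	#include <stddef.h>

	#define ECC_BLOCK_SIZE 128	/* assumed; really from Kconfig */
	#define ECC_SIZE 16		/* assumed; really from Kconfig */

	int main(void)
	{
		size_t write_off = 300;	/* a write landing in data block 2 */
		size_t par_off = (write_off / ECC_BLOCK_SIZE) * ECC_SIZE;

		printf("parity record at byte %zu\n", par_off);	/* 32 */
		return 0;
	}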
104
105static void ram_console_update_header(void)
106{
107#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
108 struct ram_console_buffer *buffer = ram_console_buffer;
109 uint8_t *par;
110 par = ram_console_par_buffer +
111 DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
112 ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
113#endif
114}
115
116static void
117ram_console_write(struct console *console, const char *s, unsigned int count)
118{
119 int rem;
120 struct ram_console_buffer *buffer = ram_console_buffer;
121
122 if (count > ram_console_buffer_size) {
123 s += count - ram_console_buffer_size;
124 count = ram_console_buffer_size;
125 }
126 rem = ram_console_buffer_size - buffer->start;
127 if (rem < count) {
128 ram_console_update(s, rem);
129 s += rem;
130 count -= rem;
131 buffer->start = 0;
132 buffer->size = ram_console_buffer_size;
133 }
134 ram_console_update(s, count);
135
136 buffer->start += count;
137 if (buffer->size < ram_console_buffer_size)
138 buffer->size += count;
139 ram_console_update_header();
140}
141
142static struct console ram_console = {
143 .name = "ram",
144 .write = ram_console_write,
145 .flags = CON_PRINTBUFFER | CON_ENABLED,
146 .index = -1,
147};
148
149static void __init
150ram_console_save_old(struct ram_console_buffer *buffer, char *dest)
151{
152 size_t old_log_size = buffer->size;
153#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
154 uint8_t *block;
155 uint8_t *par;
156 char strbuf[80];
157 int strbuf_len;
158
159 block = buffer->data;
160 par = ram_console_par_buffer;
161 while (block < buffer->data + buffer->size) {
162 int numerr;
163 int size = ECC_BLOCK_SIZE;
164 if (block + size > buffer->data + ram_console_buffer_size)
165 size = buffer->data + ram_console_buffer_size - block;
166 numerr = ram_console_decode_rs8(block, size, par);
167 if (numerr > 0) {
168#if 0
169 printk(KERN_INFO "ram_console: error in block %p, %d\n",
170 block, numerr);
171#endif
172 ram_console_corrected_bytes += numerr;
173 } else if (numerr < 0) {
174#if 0
175 printk(KERN_INFO "ram_console: uncorrectable error in "
176 "block %p\n", block);
177#endif
178 ram_console_bad_blocks++;
179 }
180 block += ECC_BLOCK_SIZE;
181 par += ECC_SIZE;
182 }
183 if (ram_console_corrected_bytes || ram_console_bad_blocks)
184 strbuf_len = snprintf(strbuf, sizeof(strbuf),
185 "\n%d Corrected bytes, %d unrecoverable blocks\n",
186 ram_console_corrected_bytes, ram_console_bad_blocks);
187 else
188 strbuf_len = snprintf(strbuf, sizeof(strbuf),
189 "\nNo errors detected\n");
190 if (strbuf_len >= sizeof(strbuf))
191 strbuf_len = sizeof(strbuf) - 1;
192 old_log_size += strbuf_len;
193#endif
194
195 if (dest == NULL) {
196 dest = kmalloc(old_log_size, GFP_KERNEL);
197 if (dest == NULL) {
198 printk(KERN_ERR
199 "ram_console: failed to allocate buffer\n");
200 return;
201 }
202 }
203
204 ram_console_old_log = dest;
205 ram_console_old_log_size = old_log_size;
206 memcpy(ram_console_old_log,
207 &buffer->data[buffer->start], buffer->size - buffer->start);
208 memcpy(ram_console_old_log + buffer->size - buffer->start,
209 &buffer->data[0], buffer->start);
210#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
211 memcpy(ram_console_old_log + old_log_size - strbuf_len,
212 strbuf, strbuf_len);
213#endif
214}
215
216static int __init ram_console_init(struct ram_console_buffer *buffer,
217 size_t buffer_size, char *old_buf)
218{
219#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
220 int numerr;
221 uint8_t *par;
222#endif
223 ram_console_buffer = buffer;
224 ram_console_buffer_size =
225 buffer_size - sizeof(struct ram_console_buffer);
226
227 if (ram_console_buffer_size > buffer_size) {
228 pr_err("ram_console: buffer %p, invalid size %zu, "
229 "datasize %zu\n", buffer, buffer_size,
230 ram_console_buffer_size);
231 return 0;
232 }
233
234#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
235 ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
236 ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
237
238 if (ram_console_buffer_size > buffer_size) {
239 pr_err("ram_console: buffer %p, invalid size %zu, "
240 "non-ecc datasize %zu\n",
241 buffer, buffer_size, ram_console_buffer_size);
242 return 0;
243 }
244
245 ram_console_par_buffer = buffer->data + ram_console_buffer_size;
246
247
248 /* first consecutive root is 0
249 * primitive element to generate roots = 1
250 */
251 ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
252 if (ram_console_rs_decoder == NULL) {
253 printk(KERN_INFO "ram_console: init_rs failed\n");
254 return 0;
255 }
256
257 ram_console_corrected_bytes = 0;
258 ram_console_bad_blocks = 0;
259
260 par = ram_console_par_buffer +
261 DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
262
263 numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
264 if (numerr > 0) {
265 printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
266 ram_console_corrected_bytes += numerr;
267 } else if (numerr < 0) {
268 printk(KERN_INFO
269 "ram_console: uncorrectable error in header\n");
270 ram_console_bad_blocks++;
271 }
272#endif
273
274 if (buffer->sig == RAM_CONSOLE_SIG) {
275 if (buffer->size > ram_console_buffer_size
276 || buffer->start > buffer->size)
277 printk(KERN_INFO "ram_console: found existing invalid "
278 "buffer, size %d, start %d\n",
279 buffer->size, buffer->start);
280 else {
281 printk(KERN_INFO "ram_console: found existing buffer, "
282 "size %d, start %d\n",
283 buffer->size, buffer->start);
284 ram_console_save_old(buffer, old_buf);
285 }
286 } else {
287 printk(KERN_INFO "ram_console: no valid data in buffer "
288 "(sig = 0x%08x)\n", buffer->sig);
289 }
290
291 buffer->sig = RAM_CONSOLE_SIG;
292 buffer->start = 0;
293 buffer->size = 0;
294
295 register_console(&ram_console);
296#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
297 console_verbose();
298#endif
299 return 0;
300}
301
302#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
303static int __init ram_console_early_init(void)
304{
305 return ram_console_init((struct ram_console_buffer *)
306 CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
307 CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
308 ram_console_old_log_init_buffer);
309}
310#else
311static int ram_console_driver_probe(struct platform_device *pdev)
312{
313 struct resource *res = pdev->resource;
314 size_t start;
315 size_t buffer_size;
316 void *buffer;
317
318 if (res == NULL || pdev->num_resources != 1 ||
319 !(res->flags & IORESOURCE_MEM)) {
320 printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
321 "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
322 return -ENXIO;
323 }
324 buffer_size = res->end - res->start + 1;
325 start = res->start;
326 printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
327 start, buffer_size);
328 buffer = ioremap(res->start, buffer_size);
329 if (buffer == NULL) {
330 printk(KERN_ERR "ram_console: failed to map memory\n");
331 return -ENOMEM;
332 }
333
334 return ram_console_init(buffer, buffer_size, NULL/* allocate */);
335}
336
337static struct platform_driver ram_console_driver = {
338 .probe = ram_console_driver_probe,
339 .driver = {
340 .name = "ram_console",
341 },
342};
343
344static int __init ram_console_module_init(void)
345{
346 int err;
347 err = platform_driver_register(&ram_console_driver);
348 return err;
349}
350#endif
351
352static ssize_t ram_console_read_old(struct file *file, char __user *buf,
353 size_t len, loff_t *offset)
354{
355 loff_t pos = *offset;
356 ssize_t count;
357
358 if (pos >= ram_console_old_log_size)
359 return 0;
360
361 count = min(len, (size_t)(ram_console_old_log_size - pos));
362 if (copy_to_user(buf, ram_console_old_log + pos, count))
363 return -EFAULT;
364
365 *offset += count;
366 return count;
367}
368
369static const struct file_operations ram_console_file_ops = {
370 .owner = THIS_MODULE,
371 .read = ram_console_read_old,
372};
373
374static int __init ram_console_late_init(void)
375{
376 struct proc_dir_entry *entry;
377
378 if (ram_console_old_log == NULL)
379 return 0;
380#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
381 ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
382 if (ram_console_old_log == NULL) {
383 printk(KERN_ERR
384 "ram_console: failed to allocate buffer for old log\n");
385 ram_console_old_log_size = 0;
386 return 0;
387 }
388 memcpy(ram_console_old_log,
389 ram_console_old_log_init_buffer, ram_console_old_log_size);
390#endif
391 entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
392 if (!entry) {
393 printk(KERN_ERR "ram_console: failed to create proc entry\n");
394 kfree(ram_console_old_log);
395 ram_console_old_log = NULL;
396 return 0;
397 }
398
399 entry->proc_fops = &ram_console_file_ops;
400 entry->size = ram_console_old_log_size;
401 return 0;
402}
403
404#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
405console_initcall(ram_console_early_init);
406#else
407module_init(ram_console_module_init);
408#endif
409late_initcall(ram_console_late_init);
410
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
deleted file mode 100644
index be7cdaa783ae..000000000000
--- a/drivers/staging/android/timed_gpio.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/* drivers/misc/timed_gpio.c
2 *
3 * Copyright (C) 2008 Google, Inc.
4 * Author: Mike Lockwood <lockwood@android.com>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/hrtimer.h>
20#include <linux/err.h>
21#include <linux/gpio.h>
22
23#include "timed_output.h"
24#include "timed_gpio.h"
25
26
27struct timed_gpio_data {
28 struct timed_output_dev dev;
29 struct hrtimer timer;
30 spinlock_t lock;
31 unsigned gpio;
32 int max_timeout;
33 u8 active_low;
34};
35
36static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
37{
38 struct timed_gpio_data *data =
39 container_of(timer, struct timed_gpio_data, timer);
40
41 gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
42 return HRTIMER_NORESTART;
43}
44
45static int gpio_get_time(struct timed_output_dev *dev)
46{
47 struct timed_gpio_data *data =
48 container_of(dev, struct timed_gpio_data, dev);
49
50 if (hrtimer_active(&data->timer)) {
51 ktime_t r = hrtimer_get_remaining(&data->timer);
52 struct timeval t = ktime_to_timeval(r);
53 return t.tv_sec * 1000 + t.tv_usec / 1000;
54 } else
55 return 0;
56}
57
58static void gpio_enable(struct timed_output_dev *dev, int value)
59{
60 struct timed_gpio_data *data =
61 container_of(dev, struct timed_gpio_data, dev);
62 unsigned long flags;
63
64 spin_lock_irqsave(&data->lock, flags);
65
66 /* cancel previous timer and set GPIO according to value */
67 hrtimer_cancel(&data->timer);
68 gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
69
70 if (value > 0) {
71 if (value > data->max_timeout)
72 value = data->max_timeout;
73
74 hrtimer_start(&data->timer,
75 ktime_set(value / 1000, (value % 1000) * 1000000),
76 HRTIMER_MODE_REL);
77 }
78
79 spin_unlock_irqrestore(&data->lock, flags);
80}
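
gpio_enable interprets value as milliseconds and splits it into the (seconds, nanoseconds) pair that ktime_set expects. A throwaway check of the arithmetic, outside the kernel:

	#include <assert.h>

	int main(void)
	{
		int value = 2500;			/* ms requested */
		long sec  = value / 1000;		/* 2 */
		long nsec = (value % 1000) * 1000000L;	/* 500000000 */

		assert(sec == 2 && nsec == 500000000L);
		return 0;
	}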
81
82static int timed_gpio_probe(struct platform_device *pdev)
83{
84 struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
85 struct timed_gpio *cur_gpio;
86 struct timed_gpio_data *gpio_data, *gpio_dat;
87 int i, j, ret = 0;
88
89 if (!pdata)
90 return -EBUSY;
91
92 gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
93 GFP_KERNEL);
94 if (!gpio_data)
95 return -ENOMEM;
96
97 for (i = 0; i < pdata->num_gpios; i++) {
98 cur_gpio = &pdata->gpios[i];
99 gpio_dat = &gpio_data[i];
100
101 hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
102 HRTIMER_MODE_REL);
103 gpio_dat->timer.function = gpio_timer_func;
104 spin_lock_init(&gpio_dat->lock);
105
106 gpio_dat->dev.name = cur_gpio->name;
107 gpio_dat->dev.get_time = gpio_get_time;
108 gpio_dat->dev.enable = gpio_enable;
109 ret = timed_output_dev_register(&gpio_dat->dev);
110 if (ret < 0) {
111 for (j = 0; j < i; j++)
112				timed_output_dev_unregister(&gpio_data[j].dev);
113 kfree(gpio_data);
114 return ret;
115 }
116
117 gpio_dat->gpio = cur_gpio->gpio;
118 gpio_dat->max_timeout = cur_gpio->max_timeout;
119 gpio_dat->active_low = cur_gpio->active_low;
120 gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
121 }
122
123 platform_set_drvdata(pdev, gpio_data);
124
125 return 0;
126}
127
128static int timed_gpio_remove(struct platform_device *pdev)
129{
130 struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
131 struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
132 int i;
133
134 for (i = 0; i < pdata->num_gpios; i++)
135 timed_output_dev_unregister(&gpio_data[i].dev);
136
137 kfree(gpio_data);
138
139 return 0;
140}
141
142static struct platform_driver timed_gpio_driver = {
143 .probe = timed_gpio_probe,
144 .remove = timed_gpio_remove,
145 .driver = {
146 .name = TIMED_GPIO_NAME,
147 .owner = THIS_MODULE,
148 },
149};
150
151static int __init timed_gpio_init(void)
152{
153 return platform_driver_register(&timed_gpio_driver);
154}
155
156static void __exit timed_gpio_exit(void)
157{
158 platform_driver_unregister(&timed_gpio_driver);
159}
160
161module_init(timed_gpio_init);
162module_exit(timed_gpio_exit);
163
164MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
165MODULE_DESCRIPTION("timed GPIO driver");
166MODULE_LICENSE("GPL");
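For reference, the millisecond handling in gpio_enable() and gpio_get_time() above reduces to a pair of conversions between a millisecond count and the hrtimer's ktime representation. The following is a sketch for illustration only: the helper names ms_to_ktime_value() and remaining_ms() are invented, but the expressions are the ones used by the driver.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* milliseconds -> ktime, as passed to hrtimer_start() above */
static ktime_t ms_to_ktime_value(int ms)
{
	return ktime_set(ms / 1000, (ms % 1000) * 1000000);
}

/* remaining ktime -> milliseconds, as computed in gpio_get_time() */
static int remaining_ms(struct hrtimer *timer)
{
	struct timeval t = ktime_to_timeval(hrtimer_get_remaining(timer));

	return t.tv_sec * 1000 + t.tv_usec / 1000;
}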
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
deleted file mode 100644
index a0e15f8be3f7..000000000000
--- a/drivers/staging/android/timed_gpio.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/* include/linux/timed_gpio.h
2 *
3 * Copyright (C) 2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14*/
15
16#ifndef _LINUX_TIMED_GPIO_H
17#define _LINUX_TIMED_GPIO_H
18
19#define TIMED_GPIO_NAME "timed-gpio"
20
21struct timed_gpio {
22 const char *name;
23 unsigned gpio;
24 int max_timeout;
25 u8 active_low;
26};
27
28struct timed_gpio_platform_data {
29 int num_gpios;
30 struct timed_gpio *gpios;
31};
32
33#endif
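The header above is consumed by board code: a machine file declares its timed GPIOs and registers a platform device named TIMED_GPIO_NAME so that timed_gpio_probe() finds the array in pdev->dev.platform_data. A hypothetical sketch follows; all board_* names, the "vibrator" device name, and the GPIO number are invented for illustration.

#include <linux/kernel.h>
#include <linux/platform_device.h>
/* plus the timed_gpio.h header shown above */

static struct timed_gpio board_timed_gpios[] = {
	{
		.name		= "vibrator",	/* becomes /sys/class/timed_output/vibrator */
		.gpio		= 57,		/* hypothetical GPIO number */
		.max_timeout	= 15000,	/* clamp requests to 15 s */
		.active_low	= 0,
	},
};

static struct timed_gpio_platform_data board_timed_gpio_pdata = {
	.num_gpios	= ARRAY_SIZE(board_timed_gpios),
	.gpios		= board_timed_gpios,
};

static struct platform_device board_timed_gpio_device = {
	.name	= TIMED_GPIO_NAME,	/* "timed-gpio", matched by the driver */
	.id	= -1,
	.dev	= {
		.platform_data	= &board_timed_gpio_pdata,
	},
};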
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
deleted file mode 100644
index 62e79180421b..000000000000
--- a/drivers/staging/android/timed_output.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/* drivers/misc/timed_output.c
2 *
3 * Copyright (C) 2009 Google, Inc.
4 * Author: Mike Lockwood <lockwood@android.com>
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/device.h>
20#include <linux/fs.h>
21#include <linux/err.h>
22
23#include "timed_output.h"
24
25static struct class *timed_output_class;
26static atomic_t device_count;
27
28static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
29 char *buf)
30{
31 struct timed_output_dev *tdev = dev_get_drvdata(dev);
32 int remaining = tdev->get_time(tdev);
33
34 return sprintf(buf, "%d\n", remaining);
35}
36
37static ssize_t enable_store(
38 struct device *dev, struct device_attribute *attr,
39 const char *buf, size_t size)
40{
41 struct timed_output_dev *tdev = dev_get_drvdata(dev);
42 int value;
43	if (sscanf(buf, "%d", &value) != 1)
44		return -EINVAL;
45 tdev->enable(tdev, value);
46
47 return size;
48}
49
50static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
51
52static int create_timed_output_class(void)
53{
54 if (!timed_output_class) {
55 timed_output_class = class_create(THIS_MODULE, "timed_output");
56 if (IS_ERR(timed_output_class))
57 return PTR_ERR(timed_output_class);
58 atomic_set(&device_count, 0);
59 }
60
61 return 0;
62}
63
64int timed_output_dev_register(struct timed_output_dev *tdev)
65{
66 int ret;
67
68 if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
69 return -EINVAL;
70
71 ret = create_timed_output_class();
72 if (ret < 0)
73 return ret;
74
75 tdev->index = atomic_inc_return(&device_count);
76 tdev->dev = device_create(timed_output_class, NULL,
77 MKDEV(0, tdev->index), NULL, tdev->name);
78 if (IS_ERR(tdev->dev))
79 return PTR_ERR(tdev->dev);
80
81	dev_set_drvdata(tdev->dev, tdev);
82	tdev->state = 0;
83
84	ret = device_create_file(tdev->dev, &dev_attr_enable);
85	if (ret < 0)
86		goto err_create_file;
87 return 0;
88
89err_create_file:
90 device_destroy(timed_output_class, MKDEV(0, tdev->index));
91 printk(KERN_ERR "timed_output: Failed to register driver %s\n",
92 tdev->name);
93
94 return ret;
95}
96EXPORT_SYMBOL_GPL(timed_output_dev_register);
97
98void timed_output_dev_unregister(struct timed_output_dev *tdev)
99{
100 device_remove_file(tdev->dev, &dev_attr_enable);
101 device_destroy(timed_output_class, MKDEV(0, tdev->index));
102 dev_set_drvdata(tdev->dev, NULL);
103}
104EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
105
106static int __init timed_output_init(void)
107{
108 return create_timed_output_class();
109}
110
111static void __exit timed_output_exit(void)
112{
113 class_destroy(timed_output_class);
114}
115
116module_init(timed_output_init);
117module_exit(timed_output_exit);
118
119MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
120MODULE_DESCRIPTION("timed output class driver");
121MODULE_LICENSE("GPL");
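Each registered device appears as /sys/class/timed_output/<name>/enable: writing a millisecond count starts the output for that long, and reading back reports the milliseconds remaining (see enable_store() and enable_show() above). A minimal userspace sketch, assuming a device registered under the hypothetical name "vibrator":

#include <stdio.h>

int main(void)
{
	/* "vibrator" is a hypothetical device name chosen at registration */
	FILE *f = fopen("/sys/class/timed_output/vibrator/enable", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 200);	/* run the output for ~200 ms */
	fclose(f);
	return 0;
}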
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
deleted file mode 100644
index ec907ab2ff54..000000000000
--- a/drivers/staging/android/timed_output.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/* include/linux/timed_output.h
2 *
3 * Copyright (C) 2008 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14*/
15
16#ifndef _LINUX_TIMED_OUTPUT_H
17#define _LINUX_TIMED_OUTPUT_H
18
19struct timed_output_dev {
20 const char *name;
21
22 /* enable the output and set the timer */
23 void (*enable)(struct timed_output_dev *sdev, int timeout);
24
25 /* returns the current number of milliseconds remaining on the timer */
26 int (*get_time)(struct timed_output_dev *sdev);
27
28 /* private data */
29 struct device *dev;
30 int index;
31 int state;
32};
33
34extern int timed_output_dev_register(struct timed_output_dev *dev);
35extern void timed_output_dev_unregister(struct timed_output_dev *dev);
36
37#endif
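Putting the interface together, a client of this class only needs to fill in name, enable() and get_time() and call timed_output_dev_register(). The following is a minimal sketch of such a module; the my_vibrator_* names are invented, and the hardware interaction is stubbed out rather than taken from any real driver.

#include <linux/module.h>
#include "timed_output.h"

static int my_vibrator_remaining;	/* illustrative state only */

static void my_vibrator_enable(struct timed_output_dev *sdev, int timeout)
{
	/* a real driver would start the hardware and arm a timer here */
	my_vibrator_remaining = timeout;
}

static int my_vibrator_get_time(struct timed_output_dev *sdev)
{
	/* a real driver would query its timer for the time left */
	return my_vibrator_remaining;
}

static struct timed_output_dev my_vibrator_dev = {
	.name		= "vibrator",
	.enable		= my_vibrator_enable,
	.get_time	= my_vibrator_get_time,
};

static int __init my_vibrator_init(void)
{
	return timed_output_dev_register(&my_vibrator_dev);
}

static void __exit my_vibrator_exit(void)
{
	timed_output_dev_unregister(&my_vibrator_dev);
}

module_init(my_vibrator_init);
module_exit(my_vibrator_exit);

MODULE_LICENSE("GPL");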