Diffstat (limited to 'kernel')
 kernel/exit.c              |  4
 kernel/fork.c              |  4
 kernel/kallsyms.c          | 11
 kernel/kprobes.c           |  9
 kernel/notifier.c          |  1
 kernel/params.c            |  2
 kernel/power/Kconfig       |  9
 kernel/printk.c            | 36
 kernel/ptrace.c            | 11
 kernel/relay.c             | 24
 kernel/signal.c            | 35
 kernel/srcu.c              |  3
 kernel/stop_machine.c      |  6
 kernel/sys.c               | 22
 kernel/sysctl.c            |  8
 kernel/test_kprobes.c      | 16
 kernel/time.c              | 13
 kernel/time/clocksource.c  | 19
 kernel/timer.c             | 10
 19 files changed, 149 insertions(+), 94 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 9d3d0f0b27d9..eb9934a82fc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1590,8 +1590,6 @@ repeat:
 				goto repeat;
 			if (retval != 0) /* He released the lock.  */
 				goto end;
-		} else if (p->exit_state == EXIT_DEAD) {
-			continue;
 		} else if (p->exit_state == EXIT_ZOMBIE) {
 			/*
 			 * Eligible but we cannot release it yet:
@@ -1606,7 +1604,7 @@ repeat:
 			/* He released the lock.  */
 			if (retval != 0)
 				goto end;
-		} else {
+		} else if (p->exit_state != EXIT_DEAD) {
 check_continued:
 			/*
 			 * It's running now, so it might later
diff --git a/kernel/fork.c b/kernel/fork.c
index 2b55b74cd999..3995297567a9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1399,7 +1399,7 @@ fork_out:
 	return ERR_PTR(retval);
 }
 
-noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 	return regs;
@@ -1510,7 +1510,7 @@ long do_fork(unsigned long clone_flags,
 		if (!(clone_flags & CLONE_STOPPED))
 			wake_up_new_task(p, clone_flags);
 		else
-			p->state = TASK_STOPPED;
+			__set_task_state(p, TASK_STOPPED);
 
 		if (unlikely (trace)) {
 			current->ptrace_message = nr;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7dadc71ce516..f091d13def00 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -53,14 +53,6 @@ static inline int is_kernel_inittext(unsigned long addr)
 	return 0;
 }
 
-static inline int is_kernel_extratext(unsigned long addr)
-{
-	if (addr >= (unsigned long)_sextratext
-	    && addr <= (unsigned long)_eextratext)
-		return 1;
-	return 0;
-}
-
 static inline int is_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
@@ -80,8 +72,7 @@ static int is_ksym_addr(unsigned long addr)
 	if (all_var)
 		return is_kernel(addr);
 
-	return is_kernel_text(addr) || is_kernel_inittext(addr) ||
-		is_kernel_extratext(addr);
+	return is_kernel_text(addr) || is_kernel_inittext(addr);
 }
 
 /* expand a compressed symbol data into the resulting uncompressed string,
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d0493eafea3e..7a86e6432338 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -699,6 +699,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 				 struct kretprobe_instance, uflist);
 		ri->rp = rp;
 		ri->task = current;
+
+		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+			spin_unlock_irqrestore(&kretprobe_lock, flags);
+			return 0;
+		}
+
 		arch_prepare_kretprobe(ri, regs);
 
 		/* XXX(hch): why is there no hlist_move_head? */
@@ -745,7 +751,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 	INIT_HLIST_HEAD(&rp->used_instances);
 	INIT_HLIST_HEAD(&rp->free_instances);
 	for (i = 0; i < rp->maxactive; i++) {
-		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+		inst = kmalloc(sizeof(struct kretprobe_instance) +
+			       rp->data_size, GFP_KERNEL);
 		if (inst == NULL) {
 			free_rp_inst(rp);
 			return -ENOMEM;
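
The two kprobes hunks above add an optional entry_handler and a per-instance data area (data_size bytes appended to each kretprobe_instance) to the kretprobe interface. A minimal, hypothetical consumer of that interface, sketched here for illustration only (struct my_data, both handlers and the "do_fork" target are made up, and ri->data assumes the per-instance data member supplied by the matching include/linux/kprobes.h change, which is not part of this kernel/-only diffstat):

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ktime.h>

struct my_data {
	ktime_t entry_stamp;			/* filled in at function entry */
};

static int my_entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_data *data = (struct my_data *)ri->data;

	data->entry_stamp = ktime_get();
	return 0;				/* nonzero would skip this hit */
}

static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_data *data = (struct my_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_stamp));

	printk(KERN_INFO "probed function took %lld ns\n", delta);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= my_ret_handler,
	.entry_handler	= my_entry_handler,	  /* new field */
	.data_size	= sizeof(struct my_data), /* new field */
	.kp.symbol_name	= "do_fork",		  /* illustrative target */
	.maxactive	= 20,
};

static int __init my_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit my_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
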
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4253f472f060..643360d1bb14 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -4,6 +4,7 @@
 #include <linux/notifier.h>
 #include <linux/rcupdate.h>
 #include <linux/vmalloc.h>
+#include <linux/reboot.h>
 
 /*
  * Notifier list for kernel code which wants to be called
diff --git a/kernel/params.c b/kernel/params.c
index 42fe5e6126c0..e28c70628bb7 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -272,7 +272,7 @@ static int param_array(const char *name,
 		       unsigned int min, unsigned int max,
 		       void *elem, int elemsize,
 		       int (*set)(const char *, struct kernel_param *kp),
-		       int *num)
+		       unsigned int *num)
 {
 	int ret;
 	struct kernel_param kp;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index ef9b802738a5..79833170bb9c 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -74,8 +74,8 @@ config PM_TRACE_RTC
 	  RTC across reboots, so that you can debug a machine that just hangs
 	  during suspend (or more commonly, during resume).
 
-	  To use this debugging feature you should attempt to suspend the machine,
-	  then reboot it, then run
+	  To use this debugging feature you should attempt to suspend the
+	  machine, reboot it and then run
 
 	    dmesg -s 1000000 | grep 'hash matches'
 
@@ -123,7 +123,10 @@ config HIBERNATION
 	  called "hibernation" in user interfaces. STD checkpoints the
 	  system and powers it off; and restores that checkpoint on reboot.
 
-	  You can suspend your machine with 'echo disk > /sys/power/state'.
+	  You can suspend your machine with 'echo disk > /sys/power/state'
+	  after placing resume=/dev/swappartition on the kernel command line
+	  in your bootloader's configuration file.
+
 	  Alternatively, you can use the additional userland tools available
 	  from <http://suspend.sf.net>.
 
diff --git a/kernel/printk.c b/kernel/printk.c
index 29ae1e99cde0..4a090621f379 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -93,16 +93,16 @@ static int console_locked, console_suspended;
  */
 static DEFINE_SPINLOCK(logbuf_lock);
 
 #define LOG_BUF_MASK	(log_buf_len-1)
 #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
 
 /*
  * The indices into log_buf are not constrained to log_buf_len - they
  * must be masked before subscripting
  */
-static unsigned long log_start;	/* Index into log_buf: next char to be read by syslog() */
-static unsigned long con_start;	/* Index into log_buf: next char to be sent to consoles */
-static unsigned long log_end;	/* Index into log_buf: most-recently-written-char + 1 */
+static unsigned log_start;	/* Index into log_buf: next char to be read by syslog() */
+static unsigned con_start;	/* Index into log_buf: next char to be sent to consoles */
+static unsigned log_end;	/* Index into log_buf: most-recently-written-char + 1 */
 
 /*
  * Array of consoles built from command line options (console=)
@@ -128,17 +128,17 @@ static int console_may_schedule;
 static char __log_buf[__LOG_BUF_LEN];
 static char *log_buf = __log_buf;
 static int log_buf_len = __LOG_BUF_LEN;
-static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
+static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
 
 static int __init log_buf_len_setup(char *str)
 {
-	unsigned long size = memparse(str, &str);
+	unsigned size = memparse(str, &str);
 	unsigned long flags;
 
 	if (size)
 		size = roundup_pow_of_two(size);
 	if (size > log_buf_len) {
-		unsigned long start, dest_idx, offset;
+		unsigned start, dest_idx, offset;
 		char *new_log_buf;
 
 		new_log_buf = alloc_bootmem(size);
@@ -295,7 +295,7 @@ int log_buf_read(int idx)
  */
 int do_syslog(int type, char __user *buf, int len)
 {
-	unsigned long i, j, limit, count;
+	unsigned i, j, limit, count;
 	int do_clear = 0;
 	char c;
 	int error = 0;
@@ -436,7 +436,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
 /*
  * Call the console drivers on a range of log_buf
  */
-static void __call_console_drivers(unsigned long start, unsigned long end)
+static void __call_console_drivers(unsigned start, unsigned end)
 {
 	struct console *con;
 
@@ -463,8 +463,8 @@ early_param("ignore_loglevel", ignore_loglevel_setup);
 /*
  * Write out chars from start to end - 1 inclusive
  */
-static void _call_console_drivers(unsigned long start,
-				unsigned long end, int msg_log_level)
+static void _call_console_drivers(unsigned start,
+				unsigned end, int msg_log_level)
 {
 	if ((msg_log_level < console_loglevel || ignore_loglevel) &&
 			console_drivers && start != end) {
@@ -484,12 +484,12 @@ static void _call_console_drivers(unsigned long start,
  * log_buf[start] to log_buf[end - 1].
  * The console_sem must be held.
  */
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
 {
-	unsigned long cur_index, start_print;
+	unsigned cur_index, start_print;
 	static int msg_level = -1;
 
-	BUG_ON(((long)(start - end)) > 0);
+	BUG_ON(((int)(start - end)) > 0);
 
 	cur_index = start;
 	start_print = start;
@@ -790,7 +790,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
 	return -ENOSYS;
 }
 
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
 {
 }
 
@@ -983,8 +983,8 @@ void wake_up_klogd(void)
 void release_console_sem(void)
 {
 	unsigned long flags;
-	unsigned long _con_start, _log_end;
-	unsigned long wake_klogd = 0;
+	unsigned _con_start, _log_end;
+	unsigned wake_klogd = 0;
 
 	if (console_suspended) {
 		up(&secondary_console_sem);
@@ -1275,7 +1275,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
 int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
 {
 	static DEFINE_SPINLOCK(ratelimit_lock);
-	static unsigned long toks = 10 * 5 * HZ;
+	static unsigned toks = 10 * 5 * HZ;
 	static unsigned long last_msg;
 	static int missed;
 	unsigned long flags;
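
The printk hunks above switch the log buffer indices from unsigned long to unsigned int. The indices run freely (they are never wrapped back), get masked with LOG_BUF_MASK only when used as subscripts, and ordering is checked through a signed cast of their difference so that wraparound of the unsigned type is harmless. A standalone userspace sketch of that indexing scheme, illustrative only and not kernel code:

#include <stdio.h>

#define BUF_LEN  16u			/* must be a power of two */
#define BUF_MASK (BUF_LEN - 1)

static char buf[BUF_LEN];
static unsigned start, end;		/* free-running, never wrapped back */

static void put_char(char c)
{
	buf[end++ & BUF_MASK] = c;	/* mask only when subscripting */
	if (end - start > BUF_LEN)	/* oldest data was overwritten */
		start = end - BUF_LEN;
}

int main(void)
{
	unsigned a = 0xfffffff0u, b = 0x00000010u;	/* b has wrapped past a */
	char c;

	for (c = 'a'; c <= 'z'; c++)
		put_char(c);
	printf("oldest retained char: %c\n", buf[start & BUF_MASK]);
	/* (int)(a - b) is negative, so a still sorts before b after wraparound */
	printf("a precedes b: %d\n", (int)(a - b) < 0);
	return 0;
}
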
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b0d4ab4dfd3d..628b03ab88a5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -20,6 +20,7 @@
 #include <linux/signal.h>
 #include <linux/audit.h>
 #include <linux/pid_namespace.h>
+#include <linux/syscalls.h>
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -53,7 +54,7 @@ void ptrace_untrace(struct task_struct *child)
 	spin_lock(&child->sighand->siglock);
 	if (task_is_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
-			child->state = TASK_STOPPED;
+			__set_task_state(child, TASK_STOPPED);
 		} else {
 			signal_wake_up(child, 1);
 		}
@@ -103,18 +104,16 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (task_is_stopped(child)) {
+		if (task_is_stopped(child))
 			child->state = TASK_TRACED;
-		} else if (!task_is_traced(child) && !kill) {
+		else if (!task_is_traced(child) && !kill)
 			ret = -ESRCH;
-		}
 		spin_unlock_irq(&child->sighand->siglock);
 	}
 	read_unlock(&tasklist_lock);
 
-	if (!ret && !kill) {
+	if (!ret && !kill)
 		wait_task_inactive(child);
-	}
 
 	/* All systems go.. */
 	return ret;
diff --git a/kernel/relay.c b/kernel/relay.c
index 7c0373322f18..d080b9d161a7 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -37,37 +37,31 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
 }
 
 /*
- * nopage() vm_op implementation for relay file mapping.
+ * fault() vm_op implementation for relay file mapping.
  */
-static struct page *relay_buf_nopage(struct vm_area_struct *vma,
-				     unsigned long address,
-				     int *type)
+static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page;
 	struct rchan_buf *buf = vma->vm_private_data;
-	unsigned long offset = address - vma->vm_start;
+	pgoff_t pgoff = vmf->pgoff;
 
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS; /* Disallow mremap */
 	if (!buf)
-		return NOPAGE_OOM;
+		return VM_FAULT_OOM;
 
-	page = vmalloc_to_page(buf->start + offset);
+	page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
 	if (!page)
-		return NOPAGE_OOM;
+		return VM_FAULT_SIGBUS;
 	get_page(page);
+	vmf->page = page;
 
-	if (type)
-		*type = VM_FAULT_MINOR;
-
-	return page;
+	return 0;
 }
 
 /*
  * vm_ops for relay file mappings.
  */
 static struct vm_operations_struct relay_file_mmap_ops = {
-	.nopage = relay_buf_nopage,
+	.fault = relay_buf_fault,
 	.close = relay_file_mmap_close,
 };
 
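
The relay change above is a conversion from the old ->nopage callback to the ->fault interface: the handler now receives the faulting page offset in struct vm_fault, returns a VM_FAULT_* code, and hands the referenced page back through vmf->page. A condensed sketch of the same pattern for any vmalloc()-backed mapping in this era of the API (my_buf and my_buf_pages are illustrative names, and the explicit bounds check is an addition for clarity rather than something the relay handler does):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *my_buf;			/* vmalloc()ed backing store */
static unsigned long my_buf_pages;	/* its size in pages */

static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	if (vmf->pgoff >= my_buf_pages)
		return VM_FAULT_SIGBUS;		/* fault beyond the buffer */

	page = vmalloc_to_page(my_buf + (vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);				/* reference handed to the VM */
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct my_vm_ops = {
	.fault = my_vm_fault,
	/* .open/.close as needed */
};
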
diff --git a/kernel/signal.c b/kernel/signal.c
index 6a5f97cd337a..5d30ff561847 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1578,6 +1578,17 @@ static inline int may_ptrace_stop(void)
 }
 
 /*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+	return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+		 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+		!unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
+/*
  * This must be called with current->sighand->siglock held.
  *
  * This should be the path for all ptrace stops.
@@ -1590,6 +1601,26 @@ static inline int may_ptrace_stop(void)
  */
 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 {
+	int killed = 0;
+
+	if (arch_ptrace_stop_needed(exit_code, info)) {
+		/*
+		 * The arch code has something special to do before a
+		 * ptrace stop.  This is allowed to block, e.g. for faults
+		 * on user stack pages.  We can't keep the siglock while
+		 * calling arch_ptrace_stop, so we must release it now.
+		 * To preserve proper semantics, we must do this before
+		 * any signal bookkeeping like checking group_stop_count.
+		 * Meanwhile, a SIGKILL could come in before we retake the
+		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
+		 * So after regaining the lock, we must check for SIGKILL.
+		 */
+		spin_unlock_irq(&current->sighand->siglock);
+		arch_ptrace_stop(exit_code, info);
+		spin_lock_irq(&current->sighand->siglock);
+		killed = sigkill_pending(current);
+	}
+
 	/*
 	 * If there is a group stop in progress,
 	 * we must participate in the bookkeeping.
@@ -1601,11 +1632,11 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	current->exit_code = exit_code;
 
 	/* Let the debugger run.  */
-	set_current_state(TASK_TRACED);
+	__set_current_state(TASK_TRACED);
 	spin_unlock_irq(&current->sighand->siglock);
 	try_to_freeze();
 	read_lock(&tasklist_lock);
-	if (may_ptrace_stop()) {
+	if (!unlikely(killed) && may_ptrace_stop()) {
 		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
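
The logic added to ptrace_stop() above is an instance of a general locking idiom: a potentially blocking operation (arch_ptrace_stop) cannot run under the siglock, so the lock is dropped, the slow work is done, the lock is retaken, and the state that could have changed in the window (a pending SIGKILL) is re-checked before committing to sleep in TASK_TRACED. A userspace pthreads sketch of the same drop-and-recheck idiom, with made-up names and a placeholder for the blocking work:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool cancelled;			/* may be set by another thread */

static void slow_preparation(void)
{
	/* stands in for arch_ptrace_stop(): may sleep or fault */
}

static void prepare_then_commit(void)
{
	bool killed;

	pthread_mutex_lock(&lock);

	/* Cannot block with the lock held: drop it around the slow part. */
	pthread_mutex_unlock(&lock);
	slow_preparation();
	pthread_mutex_lock(&lock);

	/* Re-check state that may have changed while the lock was dropped. */
	killed = cancelled;
	if (!killed)
		puts("committing (the analogue of sleeping in TASK_TRACED)");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	prepare_then_commit();
	return 0;
}
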
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 3507cabe963b..b0aeeaf22ce4 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -74,7 +74,7 @@ static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
  * severe errors when invoked on an active srcu_struct.  That said, it
  * can be useful as an error check at cleanup time.
  */
-int srcu_readers_active(struct srcu_struct *sp)
+static int srcu_readers_active(struct srcu_struct *sp)
 {
 	return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1);
 }
@@ -255,4 +255,3 @@ EXPORT_SYMBOL_GPL(srcu_read_lock);
 EXPORT_SYMBOL_GPL(srcu_read_unlock);
 EXPORT_SYMBOL_GPL(synchronize_srcu);
 EXPORT_SYMBOL_GPL(srcu_batches_completed);
-EXPORT_SYMBOL_GPL(srcu_readers_active);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 51b5ee53571a..6f4e0e13f70c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,7 +29,6 @@ enum stopmachine_state {
 static enum stopmachine_state stopmachine_state;
 static unsigned int stopmachine_num_threads;
 static atomic_t stopmachine_thread_ack;
-static DECLARE_MUTEX(stopmachine_mutex);
 
 static int stopmachine(void *cpu)
 {
@@ -170,6 +169,7 @@ static int do_stop(void *_smdata)
 struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 				       unsigned int cpu)
 {
+	static DEFINE_MUTEX(stopmachine_mutex);
 	struct stop_machine_data smdata;
 	struct task_struct *p;
 
@@ -177,7 +177,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 	smdata.data = data;
 	init_completion(&smdata.done);
 
-	down(&stopmachine_mutex);
+	mutex_lock(&stopmachine_mutex);
 
 	/* If they don't care which CPU fn runs on, bind to any online one. */
 	if (cpu == NR_CPUS)
@@ -193,7 +193,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 		wake_up_process(p);
 		wait_for_completion(&smdata.done);
 	}
-	up(&stopmachine_mutex);
+	mutex_unlock(&stopmachine_mutex);
 	return p;
 }
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 53de35fc8245..e3c08d4324de 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1145,16 +1145,16 @@ static int groups_to_user(gid_t __user *grouplist,
 			  struct group_info *group_info)
 {
 	int i;
-	int count = group_info->ngroups;
+	unsigned int count = group_info->ngroups;
 
 	for (i = 0; i < group_info->nblocks; i++) {
-		int cp_count = min(NGROUPS_PER_BLOCK, count);
-		int off = i * NGROUPS_PER_BLOCK;
-		int len = cp_count * sizeof(*grouplist);
+		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+		unsigned int len = cp_count * sizeof(*grouplist);
 
-		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
+		if (copy_to_user(grouplist, group_info->blocks[i], len))
 			return -EFAULT;
 
+		grouplist += NGROUPS_PER_BLOCK;
 		count -= cp_count;
 	}
 	return 0;
@@ -1165,16 +1165,16 @@ static int groups_from_user(struct group_info *group_info,
 			    gid_t __user *grouplist)
 {
 	int i;
-	int count = group_info->ngroups;
+	unsigned int count = group_info->ngroups;
 
 	for (i = 0; i < group_info->nblocks; i++) {
-		int cp_count = min(NGROUPS_PER_BLOCK, count);
-		int off = i * NGROUPS_PER_BLOCK;
-		int len = cp_count * sizeof(*grouplist);
+		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+		unsigned int len = cp_count * sizeof(*grouplist);
 
-		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
+		if (copy_from_user(group_info->blocks[i], grouplist, len))
 			return -EFAULT;
 
+		grouplist += NGROUPS_PER_BLOCK;
 		count -= cp_count;
 	}
 	return 0;
@@ -1472,7 +1472,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
 	    !capable(CAP_SYS_RESOURCE))
 		return -EPERM;
-	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
+	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
 		return -EPERM;
 
 	retval = security_task_setrlimit(resource, &new_rlim);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 5e2ad5bf88e2..86daaa26d120 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1203,6 +1203,14 @@ static struct ctl_table fs_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "nr_open",
+		.data		= &sysctl_nr_open,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
 		.ctl_name	= FS_DENTRY,
 		.procname	= "dentry-state",
 		.data		= &dentry_stat,
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 88cdb109e13c..06b6395b45b2 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -135,6 +135,12 @@ static int test_jprobe(void)
 #ifdef CONFIG_KRETPROBES
 static u32 krph_val;
 
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+	krph_val = (rand1 / div_factor);
+	return 0;
+}
+
 static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
 	unsigned long ret = regs_return_value(regs);
@@ -144,13 +150,19 @@ static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 		printk(KERN_ERR "Kprobe smoke test failed: "
 				"incorrect value in kretprobe handler\n");
 	}
+	if (krph_val == 0) {
+		handler_errors++;
+		printk(KERN_ERR "Kprobe smoke test failed: "
+				"call to kretprobe entry handler failed\n");
+	}
 
-	krph_val = (rand1 / div_factor);
+	krph_val = rand1;
 	return 0;
 }
 
 static struct kretprobe rp = {
 	.handler = return_handler,
+	.entry_handler = entry_handler,
 	.kp.symbol_name = "kprobe_target"
 };
 
@@ -167,7 +179,7 @@ static int test_kretprobe(void)
 
 	ret = kprobe_target(rand1);
 	unregister_kretprobe(&rp);
-	if (krph_val == 0) {
+	if (krph_val != rand1) {
 		printk(KERN_ERR "Kprobe smoke test failed: "
 				"kretprobe handler not called\n");
 		handler_errors++;
diff --git a/kernel/time.c b/kernel/time.c
index 4064c0566e77..33af3e55570d 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -566,7 +566,11 @@ EXPORT_SYMBOL(jiffies_to_timeval);
 clock_t jiffies_to_clock_t(long x)
 {
 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+	return x * (USER_HZ / HZ);
+# else
 	return x / (HZ / USER_HZ);
+# endif
 #else
 	u64 tmp = (u64)x * TICK_NSEC;
 	do_div(tmp, (NSEC_PER_SEC / USER_HZ));
@@ -599,7 +603,14 @@ EXPORT_SYMBOL(clock_t_to_jiffies);
 u64 jiffies_64_to_clock_t(u64 x)
 {
 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+	x *= USER_HZ;
+	do_div(x, HZ);
+# elif HZ > USER_HZ
 	do_div(x, HZ / USER_HZ);
+# else
+	/* Nothing to do */
+# endif
 #else
 	/*
 	 * There are better ways that don't overflow early,
@@ -611,7 +622,6 @@ u64 jiffies_64_to_clock_t(u64 x)
 #endif
 	return x;
 }
-
 EXPORT_SYMBOL(jiffies_64_to_clock_t);
 
 u64 nsec_to_clock_t(u64 x)
@@ -646,7 +656,6 @@ u64 get_jiffies_64(void)
 	} while (read_seqretry(&xtime_lock, seq));
 	return ret;
 }
-
 EXPORT_SYMBOL(get_jiffies_64);
 #endif
 
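
The new HZ < USER_HZ branches above exist because the old expression x / (HZ / USER_HZ) computes an integer ratio of 0, and hence a division by zero, whenever HZ is smaller than USER_HZ; in that case the conversion has to multiply instead. A quick userspace check of the arithmetic, mirroring only the exact-ratio branch with a hypothetical HZ of 50 against USER_HZ of 100 (hz and user_hz are plain parameters here purely for illustration):

#include <stdio.h>

static long to_clock_t(long x, long hz, long user_hz)
{
	if (hz < user_hz)
		return x * (user_hz / hz);	/* new branch */
	return x / (hz / user_hz);		/* old behaviour, HZ >= USER_HZ */
}

int main(void)
{
	/* 48 ticks at HZ=50 is 0.96 s, i.e. 96 clock ticks at USER_HZ=100 */
	printf("%ld\n", to_clock_t(48, 50, 100));	/* prints 96 */
	/* 250 ticks at HZ=1000 is 0.25 s, i.e. 25 clock ticks at USER_HZ=100 */
	printf("%ld\n", to_clock_t(250, 1000, 100));	/* prints 25 */
	return 0;
}
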
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6e9259a5d501..81afb3927ecc 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -363,15 +363,13 @@ void clocksource_unregister(struct clocksource *cs)
 static ssize_t
 sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
 {
-	char *curr = buf;
+	ssize_t count = 0;
 
 	spin_lock_irq(&clocksource_lock);
-	curr += sprintf(curr, "%s ", curr_clocksource->name);
+	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
 	spin_unlock_irq(&clocksource_lock);
 
-	curr += sprintf(curr, "\n");
-
-	return curr - buf;
+	return count;
 }
 
 /**
@@ -439,17 +437,20 @@ static ssize_t
 sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
 {
 	struct clocksource *src;
-	char *curr = buf;
+	ssize_t count = 0;
 
 	spin_lock_irq(&clocksource_lock);
 	list_for_each_entry(src, &clocksource_list, list) {
-		curr += sprintf(curr, "%s ", src->name);
+		count += snprintf(buf + count,
+				  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
+				  "%s ", src->name);
 	}
 	spin_unlock_irq(&clocksource_lock);
 
-	curr += sprintf(curr, "\n");
+	count += snprintf(buf + count,
+			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
 
-	return curr - buf;
+	return count;
 }
 
 /*
diff --git a/kernel/timer.c b/kernel/timer.c
index 9fbb472b8cf0..70b29b59343f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -818,12 +818,14 @@ unsigned long next_timer_interrupt(void)
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 void account_process_tick(struct task_struct *p, int user_tick)
 {
+	cputime_t one_jiffy = jiffies_to_cputime(1);
+
 	if (user_tick) {
-		account_user_time(p, jiffies_to_cputime(1));
-		account_user_time_scaled(p, jiffies_to_cputime(1));
+		account_user_time(p, one_jiffy);
+		account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
 	} else {
-		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
-		account_system_time_scaled(p, jiffies_to_cputime(1));
+		account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
+		account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
 	}
 }
 #endif