Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c             |  4
-rw-r--r--  kernel/configs.c           |  2
-rw-r--r--  kernel/cpu.c               | 83
-rw-r--r--  kernel/fork.c              |  7
-rw-r--r--  kernel/futex.c             | 21
-rw-r--r--  kernel/irq/manage.c        | 15
-rw-r--r--  kernel/kprobes.c           | 36
-rw-r--r--  kernel/params.c            |  2
-rw-r--r--  kernel/posix-cpu-timers.c  |  2
-rw-r--r--  kernel/power/main.c        | 21
-rw-r--r--  kernel/printk.c            |  2
-rw-r--r--  kernel/ptrace.c            |  3
-rw-r--r--  kernel/rcupdate.c          | 59
-rw-r--r--  kernel/rcutorture.c        |  3
-rw-r--r--  kernel/sys.c               |  3
-rw-r--r--  kernel/sysctl.c            | 29
-rw-r--r--  kernel/time.c              | 22
-rw-r--r--  kernel/workqueue.c         | 12
18 files changed, 229 insertions(+), 97 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 0c56320d38dc..32fa03ad1984 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -291,8 +291,10 @@ int kauditd_thread(void *dummy)
 		set_current_state(TASK_INTERRUPTIBLE);
 		add_wait_queue(&kauditd_wait, &wait);
 
-		if (!skb_queue_len(&audit_skb_queue))
+		if (!skb_queue_len(&audit_skb_queue)) {
+			try_to_freeze();
 			schedule();
+		}
 
 		__set_current_state(TASK_RUNNING);
 		remove_wait_queue(&kauditd_wait, &wait);
diff --git a/kernel/configs.c b/kernel/configs.c
index 986f7af31e0a..009e1ebdcb88 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -3,7 +3,7 @@
  * Echo the kernel .config file used to build the kernel
  *
  * Copyright (C) 2002 Khalid Aziz <khalid_aziz@hp.com>
- * Copyright (C) 2002 Randy Dunlap <rddunlap@osdl.org>
+ * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
  * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
  * Copyright (C) 2002 Hewlett-Packard Company
  *
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34e5..e882c6babf41 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
 #include <asm/semaphore.h>
 
 /* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
 
 static struct notifier_block *cpu_chain;
 
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
 
-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
 {
-	return (current->flags & PF_HOTPLUG_CPU);
+	int ret = 0;
+
+	if (lock_cpu_hotplug_owner != current) {
+		if (interruptible)
+			ret = down_interruptible(&cpucontrol);
+		else
+			down(&cpucontrol);
+	}
+
+	/*
+	 * Set only if we succeed in locking
+	 */
+	if (!ret) {
+		lock_cpu_hotplug_depth++;
+		lock_cpu_hotplug_owner = current;
+	}
+
+	return ret;
 }
 
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+void lock_cpu_hotplug(void)
+{
+	__lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
 
+void unlock_cpu_hotplug(void)
+{
+	if (--lock_cpu_hotplug_depth == 0) {
+		lock_cpu_hotplug_owner = NULL;
+		up(&cpucontrol);
+	}
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+	return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif	/* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 	ret = notifier_chain_register(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	down(&cpucontrol);
+	lock_cpu_hotplug();
 	notifier_chain_unregister(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug. Callers can check if cpucontrol
-	 * is already acquired by current thread, and if so not cause
-	 * a dead lock by not acquiring the lock
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
 	unlock_cpu_hotplug();
 	return err;
 }
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
 	int ret;
 	void *hcpu = (void *)(long)cpu;
 
-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 
 	if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out;
 	}
 
-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug.
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
 	if (ret != 0)
 		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
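
The new hotplug lock is owner-recursive: if the current task already holds cpucontrol, a nested lock_cpu_hotplug() only bumps a depth count instead of deadlocking, which is what used to require the PF_HOTPLUG_CPU flag checks removed above. A minimal illustrative sketch of a caller (hypothetical function, not part of this diff):

/* Reached both directly and from a CPU hotplug notifier; with the
 * recursive lock the same code is safe in either context. */
static void example_update_policy(void)
{
	lock_cpu_hotplug();	/* nests if cpu_up()/cpu_down() already holds it */
	/* ... walk cpu_online_map, update per-CPU state ... */
	unlock_cpu_hotplug();
}
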
diff --git a/kernel/fork.c b/kernel/fork.c
index e0d0b77343f8..fb8572a42297 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -263,7 +263,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		rb_parent = &tmp->vm_rb;
 
 		mm->map_count++;
-		retval = copy_page_range(mm, oldmm, tmp);
+		retval = copy_page_range(mm, oldmm, mpnt);
 
 		if (tmp->vm_ops && tmp->vm_ops->open)
 			tmp->vm_ops->open(tmp);
@@ -1124,8 +1124,6 @@ static task_t *copy_process(unsigned long clone_flags,
 	if (unlikely(p->ptrace & PT_PTRACED))
 		__ptrace_link(p, current->parent);
 
-	cpuset_fork(p);
-
 	attach_pid(p, PIDTYPE_PID, p->pid);
 	attach_pid(p, PIDTYPE_TGID, p->tgid);
 	if (thread_group_leader(p)) {
@@ -1135,13 +1133,14 @@ static task_t *copy_process(unsigned long clone_flags,
 		__get_cpu_var(process_counts)++;
 	}
 
-	proc_fork_connector(p);
 	if (!current->signal->tty && p->signal->tty)
 		p->signal->tty = NULL;
 
 	nr_threads++;
 	total_forks++;
 	write_unlock_irq(&tasklist_lock);
+	proc_fork_connector(p);
+	cpuset_fork(p);
 	retval = 0;
 
 fork_out:
diff --git a/kernel/futex.c b/kernel/futex.c
index aca8d10704f6..5e71a6bf6f6b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -201,21 +201,6 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
 	 * from swap. But that's a lot of code to duplicate here
 	 * for a rare case, so we simply fetch the page.
 	 */
-
-	/*
-	 * Do a quick atomic lookup first - this is the fastpath.
-	 */
-	page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
-	if (likely(page != NULL)) {
-		key->shared.pgoff =
-			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-		put_page(page);
-		return 0;
-	}
-
-	/*
-	 * Do it the general way.
-	 */
 	err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
 	if (err >= 0) {
 		key->shared.pgoff =
@@ -285,7 +270,13 @@ static void wake_futex(struct futex_q *q)
 	/*
 	 * The waiting task can free the futex_q as soon as this is written,
 	 * without taking any locks.  This must come last.
+	 *
+	 * A memory barrier is required here to prevent the following store
+	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
+	 * at the end of wake_up_all() does not prevent this store from
+	 * moving.
 	 */
+	wmb();
 	q->lock_ptr = NULL;
 }
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3bd7226d15fa..81c49a4d679e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -36,6 +36,9 @@ void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_desc + irq;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	while (desc->status & IRQ_INPROGRESS)
 		cpu_relax();
 }
@@ -60,6 +63,9 @@ void disable_irq_nosync(unsigned int irq)
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	if (!desc->depth++) {
 		desc->status |= IRQ_DISABLED;
@@ -86,6 +92,9 @@ void disable_irq(unsigned int irq)
 {
 	irq_desc_t *desc = irq_desc + irq;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	disable_irq_nosync(irq);
 	if (desc->action)
 		synchronize_irq(irq);
@@ -108,6 +117,9 @@ void enable_irq(unsigned int irq)
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
 
+	if (irq >= NR_IRQS)
+		return;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	switch (desc->depth) {
 	case 0:
@@ -163,6 +175,9 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 	unsigned long flags;
 	int shared = 0;
 
+	if (irq >= NR_IRQS)
+		return -EINVAL;
+
 	if (desc->handler == &no_irq_type)
 		return -ENOSYS;
 	/*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5beda378cc75..3bb71e63a37e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -246,6 +246,19 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return ret;
 }
 
+/* Walks the list and increments nmissed count for multiprobe case */
+void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
+{
+	struct kprobe *kp;
+	if (p->pre_handler != aggr_pre_handler) {
+		p->nmissed++;
+	} else {
+		list_for_each_entry_rcu(kp, &p->list, list)
+			kp->nmissed++;
+	}
+	return;
+}
+
 /* Called with kretprobe_lock held */
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
@@ -399,10 +412,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	INIT_LIST_HEAD(&ap->list);
 	list_add_rcu(&p->list, &ap->list);
 
-	INIT_HLIST_NODE(&ap->hlist);
-	hlist_del_rcu(&p->hlist);
-	hlist_add_head_rcu(&ap->hlist,
-		&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+	hlist_replace_rcu(&p->hlist, &ap->hlist);
 }
 
 /*
@@ -462,9 +472,16 @@ int __kprobes register_kprobe(struct kprobe *p)
 	int ret = 0;
 	unsigned long flags = 0;
 	struct kprobe *old_p;
+	struct module *mod;
+
+	if ((!kernel_text_address((unsigned long) p->addr)) ||
+		in_kprobes_functions((unsigned long) p->addr))
+		return -EINVAL;
+
+	if ((mod = module_text_address((unsigned long) p->addr)) &&
+		(unlikely(!try_module_get(mod))))
+		return -EINVAL;
 
-	if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
-		return ret;
 	if ((ret = arch_prepare_kprobe(p)) != 0)
 		goto rm_kprobe;
 
@@ -488,6 +505,8 @@ out:
 rm_kprobe:
 	if (ret == -EEXIST)
 		arch_remove_kprobe(p);
+	if (ret && mod)
+		module_put(mod);
 	return ret;
 }
 
@@ -495,6 +514,7 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	unsigned long flags;
 	struct kprobe *old_p;
+	struct module *mod;
 
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
@@ -506,6 +526,10 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 	cleanup_kprobe(p, flags);
 
 	synchronize_sched();
+
+	if ((mod = module_text_address((unsigned long)p->addr)))
+		module_put(mod);
+
 	if (old_p->pre_handler == aggr_pre_handler &&
 		list_empty(&old_p->list))
 		kfree(old_p);
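
register_kprobe() now pins the module that owns the probed address, so its text cannot be unloaded while the probe is armed. The pin/unpin pattern, as a brief sketch with hypothetical surrounding code (illustrative only):

/* addr is the probed address (hypothetical caller). */
struct module *mod = module_text_address((unsigned long)addr);

if (mod && unlikely(!try_module_get(mod)))
	return -EINVAL;		/* module is already on its way out */
/* ... probe stays armed; the reference keeps the module text mapped ... */
if (mod)
	module_put(mod);	/* dropped again when the probe is unregistered */
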
diff --git a/kernel/params.c b/kernel/params.c
index 47ba69547945..c76ad25e6a21 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -619,7 +619,7 @@ static void __init param_sysfs_builtin(void)
 
 
 /* module-related sysfs stuff */
-#ifdef CONFIG_MODULES
+#ifdef CONFIG_SYSFS
 
 #define to_module_attr(n) container_of(n, struct module_attribute, attr);
 #define to_module_kobject(n) container_of(n, struct module_kobject, kobj);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 84af54c39e1b..cae4f5728997 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
 	union cpu_time_count ret;
 	ret.sched = 0;		/* high half always zero when .cpu used */
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
 	} else {
 		ret.cpu = timespec_to_cputime(tp);
 	}
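
The cast matters because tv_sec * NSEC_PER_SEC is otherwise evaluated in the type of tv_sec, which is a 32-bit long on many architectures and wraps for intervals of more than about two seconds. A standalone user-space sketch of the difference (illustrative only, not kernel code):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long sec = 5;
	/* With a 32-bit long the first product wraps; the widened one does not. */
	long narrow = sec * NSEC_PER_SEC;
	unsigned long long wide = (unsigned long long)sec * NSEC_PER_SEC;

	printf("narrow=%ld wide=%llu\n", narrow, wide);
	return 0;
}
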
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6ee2cad530e8..d253f3ae2fa5 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -24,7 +24,7 @@
 
 DECLARE_MUTEX(pm_sem);
 
-struct pm_ops * pm_ops = NULL;
+struct pm_ops *pm_ops;
 suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
 
 /**
@@ -151,6 +151,18 @@ static char *pm_states[PM_SUSPEND_MAX] = {
 #endif
 };
 
+static inline int valid_state(suspend_state_t state)
+{
+	/* Suspend-to-disk does not really need low-level support.
+	 * It can work with reboot if needed. */
+	if (state == PM_SUSPEND_DISK)
+		return 1;
+
+	if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+		return 0;
+	return 1;
+}
+
 
 /**
  * enter_state - Do common work of entering low-power state.
@@ -167,7 +179,7 @@ static int enter_state(suspend_state_t state)
 {
 	int error;
 
-	if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+	if (!valid_state(state))
 		return -ENODEV;
 	if (down_trylock(&pm_sem))
 		return -EBUSY;
@@ -238,9 +250,8 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
 	char * s = buf;
 
 	for (i = 0; i < PM_SUSPEND_MAX; i++) {
-		if (pm_states[i] && pm_ops && (!pm_ops->valid
-			||(pm_ops->valid && pm_ops->valid(i))))
-			s += sprintf(s,"%s ",pm_states[i]);
+		if (pm_states[i] && valid_state(i))
+			s += sprintf(s,"%s ", pm_states[i]);
 	}
 	s += sprintf(s,"\n");
 	return (s - buf);
diff --git a/kernel/printk.c b/kernel/printk.c
index ac8a08f36207..5287be83e3e7 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -956,7 +956,7 @@ int unregister_console(struct console *console)
 	if (console_drivers == console) {
 		console_drivers=console->next;
 		res = 0;
-	} else {
+	} else if (console_drivers) {
 		for (a=console_drivers->next, b=console_drivers ;
 		     a; b=a, a=b->next) {
 			if (a == console) {
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 17ee7e5a3451..656476eedb1b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -241,7 +241,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 		if (write) {
 			copy_to_user_page(vma, page, addr,
 					  maddr + offset, buf, bytes);
-			set_page_dirty_lock(page);
+			if (!PageCompound(page))
+				set_page_dirty_lock(page);
 		} else {
 			copy_from_user_page(vma, page, addr,
 					    buf, maddr + offset, bytes);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index c4d159a21e04..48d3bce465b8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -116,6 +116,10 @@ void fastcall call_rcu(struct rcu_head *head,
 	local_irq_restore(flags);
 }
 
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
 /**
  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -162,6 +166,42 @@ long rcu_batches_completed(void)
 	return rcu_ctrlblk.completed;
 }
 
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *notused)
+{
+	int cpu = smp_processor_id();
+	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+	struct rcu_head *head;
+
+	head = &rdp->barrier;
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu(head, rcu_barrier_callback);
+}
+
+/**
+ * rcu_barrier - Wait until all the in-flight RCUs are complete.
+ */
+void rcu_barrier(void)
+{
+	BUG_ON(in_interrupt());
+	/* Take cpucontrol semaphore to protect against CPU hotplug */
+	down(&rcu_barrier_sema);
+	init_completion(&rcu_barrier_completion);
+	atomic_set(&rcu_barrier_cpu_count, 0);
+	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	wait_for_completion(&rcu_barrier_completion);
+	up(&rcu_barrier_sema);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
 /*
  * Invoke the completed RCU callbacks. They are expected to be in
  * a per-cpu list.
@@ -217,15 +257,23 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
 
 	if (rcp->next_pending &&
 			rcp->completed == rcp->cur) {
-		/* Can't change, since spin lock held. */
-		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
-
 		rcp->next_pending = 0;
-		/* next_pending == 0 must be visible in __rcu_process_callbacks()
-		 * before it can see new value of cur.
+		/*
+		 * next_pending == 0 must be visible in
+		 * __rcu_process_callbacks() before it can see new value of cur.
 		 */
 		smp_wmb();
 		rcp->cur++;
+
+		/*
+		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+		 * Barrier  Otherwise it can cause tickless idle CPUs to be
+		 * included in rsp->cpumask, which will extend graceperiods
+		 * unnecessarily.
+		 */
+		smp_mb();
+		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);
+
 	}
 }
 
@@ -457,6 +505,7 @@ static struct notifier_block __devinitdata rcu_nb = {
  */
 void __init rcu_init(void)
 {
+	sema_init(&rcu_barrier_sema, 1);
 	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
 	/* Register notifier for non-boot CPUs */
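
rcu_barrier() waits for every already-queued call_rcu() callback to run, which is exactly what unload paths need before tearing down the memory those callbacks touch. A minimal usage sketch (hypothetical module, not part of this diff):

static void example_module_exit(void)
{
	/* ... stop queueing new call_rcu() callbacks first ... */
	rcu_barrier();				/* wait for all in-flight callbacks to finish */
	kmem_cache_destroy(example_cache);	/* hypothetical cache those callbacks freed into */
}
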
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 88c28d476550..49fbbeff201c 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -409,9 +409,8 @@ rcu_torture_cleanup(void)
 	stats_task = NULL;
 
 	/* Wait for all RCU callbacks to fire. */
+	rcu_barrier();
 
-	for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
-		synchronize_rcu();
 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
 	printk(KERN_ALERT TORTURE_FLAG
 	       "--- End of test: %s\n",
diff --git a/kernel/sys.c b/kernel/sys.c
index bce933ebb29f..eecf84526afe 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -32,6 +32,7 @@
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -168,7 +169,7 @@ EXPORT_SYMBOL(notifier_chain_unregister);
  * of the last notifier function called.
  */
 
-int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+int __kprobes notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
 {
 	int ret=NOTIFY_DONE;
 	struct notifier_block *nb = *n;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9990e10192e8..b53115b882e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2192,29 +2192,32 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
 		  void __user *oldval, size_t __user *oldlenp,
 		  void __user *newval, size_t newlen, void **context)
 {
-	size_t l, len;
-
 	if (!table->data || !table->maxlen)
 		return -ENOTDIR;
 
 	if (oldval && oldlenp) {
-		if (get_user(len, oldlenp))
+		size_t bufsize;
+		if (get_user(bufsize, oldlenp))
 			return -EFAULT;
-		if (len) {
-			l = strlen(table->data);
-			if (len > l) len = l;
-			if (len >= table->maxlen)
+		if (bufsize) {
+			size_t len = strlen(table->data), copied;
+
+			/* This shouldn't trigger for a well-formed sysctl */
+			if (len > table->maxlen)
 				len = table->maxlen;
-			if(copy_to_user(oldval, table->data, len))
-				return -EFAULT;
-			if(put_user(0, ((char __user *) oldval) + len))
+
+			/* Copy up to a max of bufsize-1 bytes of the string */
+			copied = (len >= bufsize) ? bufsize - 1 : len;
+
+			if (copy_to_user(oldval, table->data, copied) ||
+			    put_user(0, (char __user *)(oldval + copied)))
 				return -EFAULT;
-			if(put_user(len, oldlenp))
+			if (put_user(len, oldlenp))
 				return -EFAULT;
 		}
 	}
 	if (newval && newlen) {
-		len = newlen;
+		size_t len = newlen;
 		if (len > table->maxlen)
 			len = table->maxlen;
 		if(copy_from_user(table->data, newval, len))
@@ -2223,7 +2226,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
 			len--;
 		((char *) table->data)[len] = 0;
 	}
-	return 0;
+	return 1;
 }
 
 /*
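
The rewritten copy-out behaves like strlcpy: at most bufsize-1 bytes are copied, the result is always NUL-terminated, and the caller still gets the string length back so truncation is detectable. A user-space sketch of the same semantics (illustrative only, hypothetical helper name):

#include <stdio.h>
#include <string.h>

static size_t copy_out(char *dst, size_t bufsize, const char *src)
{
	size_t len = strlen(src);
	size_t copied;

	if (!bufsize)
		return len;
	copied = (len >= bufsize) ? bufsize - 1 : len;
	memcpy(dst, src, copied);
	dst[copied] = '\0';
	return len;		/* string length, not the truncated count */
}

int main(void)
{
	char buf[8];
	size_t len = copy_out(buf, sizeof(buf), "a long sysctl string");

	printf("copied \"%s\", full length %zu\n", buf, len);
	return 0;
}
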
diff --git a/kernel/time.c b/kernel/time.c
index 245d595a13cb..b94bfa8c03e0 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -561,6 +561,28 @@ void getnstimeofday(struct timespec *tv)
 EXPORT_SYMBOL_GPL(getnstimeofday);
 #endif
 
+void getnstimestamp(struct timespec *ts)
+{
+	unsigned int seq;
+	struct timespec wall2mono;
+
+	/* synchronize with settimeofday() changes */
+	do {
+		seq = read_seqbegin(&xtime_lock);
+		getnstimeofday(ts);
+		wall2mono = wall_to_monotonic;
+	} while(unlikely(read_seqretry(&xtime_lock, seq)));
+
+	/* adjust to monotonicaly-increasing values */
+	ts->tv_sec += wall2mono.tv_sec;
+	ts->tv_nsec += wall2mono.tv_nsec;
+	while (unlikely(ts->tv_nsec >= NSEC_PER_SEC)) {
+		ts->tv_nsec -= NSEC_PER_SEC;
+		ts->tv_sec++;
+	}
+}
+EXPORT_SYMBOL_GPL(getnstimestamp);
+
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
 {
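
getnstimestamp() reads the wall clock and the wall_to_monotonic offset under the xtime seqlock, so the result keeps increasing even across settimeofday() adjustments. A brief usage sketch (hypothetical caller, not part of this diff):

struct timespec ts;

getnstimestamp(&ts);
printk(KERN_DEBUG "event at %ld.%09ld (monotonic)\n", ts.tv_sec, ts.tv_nsec);
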
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d7fad2..2bd5aee1c736 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		if (unlikely(is_single_threaded(wq)))
-			cpu = 0;
+			cpu = any_online_cpu(cpu_online_map);
 		BUG_ON(!list_empty(&work->entry));
 		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 		ret = 1;
@@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
-		cpu = 0;
+		cpu = any_online_cpu(cpu_online_map);
 
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 
 	if (is_single_threaded(wq)) {
-		/* Always use cpu 0's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+		/* Always use first cpu's area. */
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
 	} else {
 		int cpu;
 
@@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	lock_cpu_hotplug();
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, 0);
+		p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 		if (!p)
 			destroy = 1;
 		else
@@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* We don't need the distraction of CPUs appearing and vanishing. */
 	lock_cpu_hotplug();
 	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, 0);
+		cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);