Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                       2
-rw-r--r--  kernel/exit.c                        53
-rw-r--r--  kernel/fork.c                        76
-rw-r--r--  kernel/irq/chip.c                    12
-rw-r--r--  kernel/irq/manage.c                   3
-rw-r--r--  kernel/kexec.c                      104
-rw-r--r--  kernel/kthread.c                      2
-rw-r--r--  kernel/power/main.c                   7
-rw-r--r--  kernel/power/power.h                  2
-rw-r--r--  kernel/ptrace.c                       2
-rw-r--r--  kernel/relay.c                      170
-rw-r--r--  kernel/sched.c                       34
-rw-r--r--  kernel/signal.c                      99
-rw-r--r--  kernel/smp.c                          4
-rw-r--r--  kernel/softirq.c                      3
-rw-r--r--  kernel/softlockup.c                  25
-rw-r--r--  kernel/sys.c                         31
-rw-r--r--  kernel/trace/trace.c                  3
-rw-r--r--  kernel/trace/trace_irqsoff.c          8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    27
20 files changed, 455 insertions, 212 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 66ec9fd21e0c..89bd6fb7894f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1529,7 +1529,7 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
1529 return cft->read_seq_string(state->cgroup, cft, m); 1529 return cft->read_seq_string(state->cgroup, cft, m);
1530} 1530}
1531 1531
1532int cgroup_seqfile_release(struct inode *inode, struct file *file) 1532static int cgroup_seqfile_release(struct inode *inode, struct file *file)
1533{ 1533{
1534 struct seq_file *seq = file->private_data; 1534 struct seq_file *seq = file->private_data;
1535 kfree(seq->private); 1535 kfree(seq->private);
diff --git a/kernel/exit.c b/kernel/exit.c
index ad933bb29ec7..6cdf60712bd2 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
46#include <linux/resource.h> 46#include <linux/resource.h>
47#include <linux/blkdev.h> 47#include <linux/blkdev.h>
48#include <linux/task_io_accounting_ops.h> 48#include <linux/task_io_accounting_ops.h>
49#include <linux/tracehook.h>
49 50
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51#include <asm/unistd.h> 52#include <asm/unistd.h>
@@ -162,27 +163,17 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
162 put_task_struct(container_of(rhp, struct task_struct, rcu)); 163 put_task_struct(container_of(rhp, struct task_struct, rcu));
163} 164}
164 165
165/*
166 * Do final ptrace-related cleanup of a zombie being reaped.
167 *
168 * Called with write_lock(&tasklist_lock) held.
169 */
170static void ptrace_release_task(struct task_struct *p)
171{
172 BUG_ON(!list_empty(&p->ptraced));
173 ptrace_unlink(p);
174 BUG_ON(!list_empty(&p->ptrace_entry));
175}
176 166
177void release_task(struct task_struct * p) 167void release_task(struct task_struct * p)
178{ 168{
179 struct task_struct *leader; 169 struct task_struct *leader;
180 int zap_leader; 170 int zap_leader;
181repeat: 171repeat:
172 tracehook_prepare_release_task(p);
182 atomic_dec(&p->user->processes); 173 atomic_dec(&p->user->processes);
183 proc_flush_task(p); 174 proc_flush_task(p);
184 write_lock_irq(&tasklist_lock); 175 write_lock_irq(&tasklist_lock);
185 ptrace_release_task(p); 176 tracehook_finish_release_task(p);
186 __exit_signal(p); 177 __exit_signal(p);
187 178
188 /* 179 /*
@@ -204,6 +195,13 @@ repeat:
204 * that case. 195 * that case.
205 */ 196 */
206 zap_leader = task_detached(leader); 197 zap_leader = task_detached(leader);
198
199 /*
200 * This maintains the invariant that release_task()
201 * only runs on a task in EXIT_DEAD, just for sanity.
202 */
203 if (zap_leader)
204 leader->exit_state = EXIT_DEAD;
207 } 205 }
208 206
209 write_unlock_irq(&tasklist_lock); 207 write_unlock_irq(&tasklist_lock);
@@ -887,7 +885,8 @@ static void forget_original_parent(struct task_struct *father)
887 */ 885 */
888static void exit_notify(struct task_struct *tsk, int group_dead) 886static void exit_notify(struct task_struct *tsk, int group_dead)
889{ 887{
890 int state; 888 int signal;
889 void *cookie;
891 890
892 /* 891 /*
893 * This does two things: 892 * This does two things:
@@ -924,22 +923,11 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
924 !capable(CAP_KILL)) 923 !capable(CAP_KILL))
925 tsk->exit_signal = SIGCHLD; 924 tsk->exit_signal = SIGCHLD;
926 925
927 /* If something other than our normal parent is ptracing us, then 926 signal = tracehook_notify_death(tsk, &cookie, group_dead);
928 * send it a SIGCHLD instead of honoring exit_signal. exit_signal 927 if (signal > 0)
929 * only has special meaning to our real parent. 928 signal = do_notify_parent(tsk, signal);
930 */
931 if (!task_detached(tsk) && thread_group_empty(tsk)) {
932 int signal = ptrace_reparented(tsk) ?
933 SIGCHLD : tsk->exit_signal;
934 do_notify_parent(tsk, signal);
935 } else if (tsk->ptrace) {
936 do_notify_parent(tsk, SIGCHLD);
937 }
938 929
939 state = EXIT_ZOMBIE; 930 tsk->exit_state = signal < 0 ? EXIT_DEAD : EXIT_ZOMBIE;
940 if (task_detached(tsk) && likely(!tsk->ptrace))
941 state = EXIT_DEAD;
942 tsk->exit_state = state;
943 931
944 /* mt-exec, de_thread() is waiting for us */ 932 /* mt-exec, de_thread() is waiting for us */
945 if (thread_group_leader(tsk) && 933 if (thread_group_leader(tsk) &&
@@ -949,8 +937,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
949 937
950 write_unlock_irq(&tasklist_lock); 938 write_unlock_irq(&tasklist_lock);
951 939
940 tracehook_report_death(tsk, signal, cookie, group_dead);
941
952 /* If the process is dead, release it - nobody will wait for it */ 942 /* If the process is dead, release it - nobody will wait for it */
953 if (state == EXIT_DEAD) 943 if (signal < 0)
954 release_task(tsk); 944 release_task(tsk);
955} 945}
956 946
@@ -1029,10 +1019,7 @@ NORET_TYPE void do_exit(long code)
1029 if (unlikely(!tsk->pid)) 1019 if (unlikely(!tsk->pid))
1030 panic("Attempted to kill the idle task!"); 1020 panic("Attempted to kill the idle task!");
1031 1021
1032 if (unlikely(current->ptrace & PT_TRACE_EXIT)) { 1022 tracehook_report_exit(&code);
1033 current->ptrace_message = code;
1034 ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
1035 }
1036 1023
1037 /* 1024 /*
1038 * We're taking recursive faults here in do_exit. Safest is to just 1025 * We're taking recursive faults here in do_exit. Safest is to just
diff --git a/kernel/fork.c b/kernel/fork.c
index b99d73e971a4..abb3ed6298f6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,6 +37,7 @@
37#include <linux/swap.h> 37#include <linux/swap.h>
38#include <linux/syscalls.h> 38#include <linux/syscalls.h>
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/tracehook.h>
40#include <linux/futex.h> 41#include <linux/futex.h>
41#include <linux/task_io_accounting_ops.h> 42#include <linux/task_io_accounting_ops.h>
42#include <linux/rcupdate.h> 43#include <linux/rcupdate.h>
@@ -865,8 +866,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
865 866
866 new_flags &= ~PF_SUPERPRIV; 867 new_flags &= ~PF_SUPERPRIV;
867 new_flags |= PF_FORKNOEXEC; 868 new_flags |= PF_FORKNOEXEC;
868 if (!(clone_flags & CLONE_PTRACE)) 869 new_flags |= PF_STARTING;
869 p->ptrace = 0;
870 p->flags = new_flags; 870 p->flags = new_flags;
871 clear_freeze_flag(p); 871 clear_freeze_flag(p);
872} 872}
@@ -907,7 +907,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
907 struct pt_regs *regs, 907 struct pt_regs *regs,
908 unsigned long stack_size, 908 unsigned long stack_size,
909 int __user *child_tidptr, 909 int __user *child_tidptr,
910 struct pid *pid) 910 struct pid *pid,
911 int trace)
911{ 912{
912 int retval; 913 int retval;
913 struct task_struct *p; 914 struct task_struct *p;
@@ -1163,8 +1164,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1163 */ 1164 */
1164 p->group_leader = p; 1165 p->group_leader = p;
1165 INIT_LIST_HEAD(&p->thread_group); 1166 INIT_LIST_HEAD(&p->thread_group);
1166 INIT_LIST_HEAD(&p->ptrace_entry);
1167 INIT_LIST_HEAD(&p->ptraced);
1168 1167
1169 /* Now that the task is set up, run cgroup callbacks if 1168 /* Now that the task is set up, run cgroup callbacks if
1170 * necessary. We need to run them before the task is visible 1169 * necessary. We need to run them before the task is visible
@@ -1195,7 +1194,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1195 p->real_parent = current->real_parent; 1194 p->real_parent = current->real_parent;
1196 else 1195 else
1197 p->real_parent = current; 1196 p->real_parent = current;
1198 p->parent = p->real_parent;
1199 1197
1200 spin_lock(&current->sighand->siglock); 1198 spin_lock(&current->sighand->siglock);
1201 1199
@@ -1237,8 +1235,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1237 1235
1238 if (likely(p->pid)) { 1236 if (likely(p->pid)) {
1239 list_add_tail(&p->sibling, &p->real_parent->children); 1237 list_add_tail(&p->sibling, &p->real_parent->children);
1240 if (unlikely(p->ptrace & PT_PTRACED)) 1238 tracehook_finish_clone(p, clone_flags, trace);
1241 __ptrace_link(p, current->parent);
1242 1239
1243 if (thread_group_leader(p)) { 1240 if (thread_group_leader(p)) {
1244 if (clone_flags & CLONE_NEWPID) 1241 if (clone_flags & CLONE_NEWPID)
@@ -1323,29 +1320,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
1323 struct pt_regs regs; 1320 struct pt_regs regs;
1324 1321
1325 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, 1322 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1326 &init_struct_pid); 1323 &init_struct_pid, 0);
1327 if (!IS_ERR(task)) 1324 if (!IS_ERR(task))
1328 init_idle(task, cpu); 1325 init_idle(task, cpu);
1329 1326
1330 return task; 1327 return task;
1331} 1328}
1332 1329
1333static int fork_traceflag(unsigned clone_flags)
1334{
1335 if (clone_flags & CLONE_UNTRACED)
1336 return 0;
1337 else if (clone_flags & CLONE_VFORK) {
1338 if (current->ptrace & PT_TRACE_VFORK)
1339 return PTRACE_EVENT_VFORK;
1340 } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1341 if (current->ptrace & PT_TRACE_CLONE)
1342 return PTRACE_EVENT_CLONE;
1343 } else if (current->ptrace & PT_TRACE_FORK)
1344 return PTRACE_EVENT_FORK;
1345
1346 return 0;
1347}
1348
1349/* 1330/*
1350 * Ok, this is the main fork-routine. 1331 * Ok, this is the main fork-routine.
1351 * 1332 *
@@ -1380,14 +1361,14 @@ long do_fork(unsigned long clone_flags,
1380 } 1361 }
1381 } 1362 }
1382 1363
1383 if (unlikely(current->ptrace)) { 1364 /*
1384 trace = fork_traceflag (clone_flags); 1365 * When called from kernel_thread, don't do user tracing stuff.
1385 if (trace) 1366 */
1386 clone_flags |= CLONE_PTRACE; 1367 if (likely(user_mode(regs)))
1387 } 1368 trace = tracehook_prepare_clone(clone_flags);
1388 1369
1389 p = copy_process(clone_flags, stack_start, regs, stack_size, 1370 p = copy_process(clone_flags, stack_start, regs, stack_size,
1390 child_tidptr, NULL); 1371 child_tidptr, NULL, trace);
1391 /* 1372 /*
1392 * Do this prior waking up the new thread - the thread pointer 1373 * Do this prior waking up the new thread - the thread pointer
1393 * might get invalid after that point, if the thread exits quickly. 1374 * might get invalid after that point, if the thread exits quickly.
@@ -1405,32 +1386,35 @@ long do_fork(unsigned long clone_flags,
1405 init_completion(&vfork); 1386 init_completion(&vfork);
1406 } 1387 }
1407 1388
1408 if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { 1389 tracehook_report_clone(trace, regs, clone_flags, nr, p);
1390
1391 /*
1392 * We set PF_STARTING at creation in case tracing wants to
1393 * use this to distinguish a fully live task from one that
1394 * hasn't gotten to tracehook_report_clone() yet. Now we
1395 * clear it and set the child going.
1396 */
1397 p->flags &= ~PF_STARTING;
1398
1399 if (unlikely(clone_flags & CLONE_STOPPED)) {
1409 /* 1400 /*
1410 * We'll start up with an immediate SIGSTOP. 1401 * We'll start up with an immediate SIGSTOP.
1411 */ 1402 */
1412 sigaddset(&p->pending.signal, SIGSTOP); 1403 sigaddset(&p->pending.signal, SIGSTOP);
1413 set_tsk_thread_flag(p, TIF_SIGPENDING); 1404 set_tsk_thread_flag(p, TIF_SIGPENDING);
1414 }
1415
1416 if (!(clone_flags & CLONE_STOPPED))
1417 wake_up_new_task(p, clone_flags);
1418 else
1419 __set_task_state(p, TASK_STOPPED); 1405 __set_task_state(p, TASK_STOPPED);
1420 1406 } else {
1421 if (unlikely (trace)) { 1407 wake_up_new_task(p, clone_flags);
1422 current->ptrace_message = nr;
1423 ptrace_notify ((trace << 8) | SIGTRAP);
1424 } 1408 }
1425 1409
1410 tracehook_report_clone_complete(trace, regs,
1411 clone_flags, nr, p);
1412
1426 if (clone_flags & CLONE_VFORK) { 1413 if (clone_flags & CLONE_VFORK) {
1427 freezer_do_not_count(); 1414 freezer_do_not_count();
1428 wait_for_completion(&vfork); 1415 wait_for_completion(&vfork);
1429 freezer_count(); 1416 freezer_count();
1430 if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { 1417 tracehook_report_vfork_done(p, nr);
1431 current->ptrace_message = nr;
1432 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
1433 }
1434 } 1418 }
1435 } else { 1419 } else {
1436 nr = PTR_ERR(p); 1420 nr = PTR_ERR(p);
@@ -1442,7 +1426,7 @@ long do_fork(unsigned long clone_flags,
1442#define ARCH_MIN_MMSTRUCT_ALIGN 0 1426#define ARCH_MIN_MMSTRUCT_ALIGN 0
1443#endif 1427#endif
1444 1428
1445static void sighand_ctor(struct kmem_cache *cachep, void *data) 1429static void sighand_ctor(void *data)
1446{ 1430{
1447 struct sighand_struct *sighand = data; 1431 struct sighand_struct *sighand = data;
1448 1432
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 964964baefa2..3cd441ebf5d2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -28,8 +28,7 @@ void dynamic_irq_init(unsigned int irq)
28 unsigned long flags; 28 unsigned long flags;
29 29
30 if (irq >= NR_IRQS) { 30 if (irq >= NR_IRQS) {
31 printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); 31 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
32 WARN_ON(1);
33 return; 32 return;
34 } 33 }
35 34
@@ -62,8 +61,7 @@ void dynamic_irq_cleanup(unsigned int irq)
62 unsigned long flags; 61 unsigned long flags;
63 62
64 if (irq >= NR_IRQS) { 63 if (irq >= NR_IRQS) {
65 printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); 64 WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
66 WARN_ON(1);
67 return; 65 return;
68 } 66 }
69 67
@@ -71,9 +69,8 @@ void dynamic_irq_cleanup(unsigned int irq)
71 spin_lock_irqsave(&desc->lock, flags); 69 spin_lock_irqsave(&desc->lock, flags);
72 if (desc->action) { 70 if (desc->action) {
73 spin_unlock_irqrestore(&desc->lock, flags); 71 spin_unlock_irqrestore(&desc->lock, flags);
74 printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n", 72 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
75 irq); 73 irq);
76 WARN_ON(1);
77 return; 74 return;
78 } 75 }
79 desc->msi_desc = NULL; 76 desc->msi_desc = NULL;
@@ -96,8 +93,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
96 unsigned long flags; 93 unsigned long flags;
97 94
98 if (irq >= NR_IRQS) { 95 if (irq >= NR_IRQS) {
99 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq); 96 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
100 WARN_ON(1);
101 return -EINVAL; 97 return -EINVAL;
102 } 98 }
103 99
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f8914b92b664..152abfd3589f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -177,8 +177,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
177{ 177{
178 switch (desc->depth) { 178 switch (desc->depth) {
179 case 0: 179 case 0:
180 printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 180 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
181 WARN_ON(1);
182 break; 181 break;
183 case 1: { 182 case 1: {
184 unsigned int status = desc->status & ~IRQ_DISABLED; 183 unsigned int status = desc->status & ~IRQ_DISABLED;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 1c5fcacbcf33..c8a4370e2a34 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -24,6 +24,12 @@
24#include <linux/utsrelease.h> 24#include <linux/utsrelease.h>
25#include <linux/utsname.h> 25#include <linux/utsname.h>
26#include <linux/numa.h> 26#include <linux/numa.h>
27#include <linux/suspend.h>
28#include <linux/device.h>
29#include <linux/freezer.h>
30#include <linux/pm.h>
31#include <linux/cpu.h>
32#include <linux/console.h>
27 33
28#include <asm/page.h> 34#include <asm/page.h>
29#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -242,6 +248,12 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
242 goto out; 248 goto out;
243 } 249 }
244 250
251 image->swap_page = kimage_alloc_control_pages(image, 0);
252 if (!image->swap_page) {
253 printk(KERN_ERR "Could not allocate swap buffer\n");
254 goto out;
255 }
256
245 result = 0; 257 result = 0;
246 out: 258 out:
247 if (result == 0) 259 if (result == 0)
@@ -589,14 +601,12 @@ static void kimage_free_extra_pages(struct kimage *image)
589 kimage_free_page_list(&image->unuseable_pages); 601 kimage_free_page_list(&image->unuseable_pages);
590 602
591} 603}
592static int kimage_terminate(struct kimage *image) 604static void kimage_terminate(struct kimage *image)
593{ 605{
594 if (*image->entry != 0) 606 if (*image->entry != 0)
595 image->entry++; 607 image->entry++;
596 608
597 *image->entry = IND_DONE; 609 *image->entry = IND_DONE;
598
599 return 0;
600} 610}
601 611
602#define for_each_kimage_entry(image, ptr, entry) \ 612#define for_each_kimage_entry(image, ptr, entry) \
@@ -988,6 +998,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
988 if (result) 998 if (result)
989 goto out; 999 goto out;
990 1000
1001 if (flags & KEXEC_PRESERVE_CONTEXT)
1002 image->preserve_context = 1;
991 result = machine_kexec_prepare(image); 1003 result = machine_kexec_prepare(image);
992 if (result) 1004 if (result)
993 goto out; 1005 goto out;
@@ -997,9 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
997 if (result) 1009 if (result)
998 goto out; 1010 goto out;
999 } 1011 }
1000 result = kimage_terminate(image); 1012 kimage_terminate(image);
1001 if (result)
1002 goto out;
1003 } 1013 }
1004 /* Install the new kernel, and Uninstall the old */ 1014 /* Install the new kernel, and Uninstall the old */
1005 image = xchg(dest_image, image); 1015 image = xchg(dest_image, image);
@@ -1415,3 +1425,85 @@ static int __init crash_save_vmcoreinfo_init(void)
1415} 1425}
1416 1426
1417module_init(crash_save_vmcoreinfo_init) 1427module_init(crash_save_vmcoreinfo_init)
1428
1429/**
1430 * kernel_kexec - reboot the system
1431 *
1432 * Move into place and start executing a preloaded standalone
1433 * executable. If nothing was preloaded return an error.
1434 */
1435int kernel_kexec(void)
1436{
1437 int error = 0;
1438
1439 if (xchg(&kexec_lock, 1))
1440 return -EBUSY;
1441 if (!kexec_image) {
1442 error = -EINVAL;
1443 goto Unlock;
1444 }
1445
1446 if (kexec_image->preserve_context) {
1447#ifdef CONFIG_KEXEC_JUMP
1448 mutex_lock(&pm_mutex);
1449 pm_prepare_console();
1450 error = freeze_processes();
1451 if (error) {
1452 error = -EBUSY;
1453 goto Restore_console;
1454 }
1455 suspend_console();
1456 error = device_suspend(PMSG_FREEZE);
1457 if (error)
1458 goto Resume_console;
1459 error = disable_nonboot_cpus();
1460 if (error)
1461 goto Resume_devices;
1462 local_irq_disable();
1463 /* At this point, device_suspend() has been called,
1464 * but *not* device_power_down(). We *must*
1465 * device_power_down() now. Otherwise, drivers for
1466 * some devices (e.g. interrupt controllers) become
1467 * desynchronized with the actual state of the
1468 * hardware at resume time, and evil weirdness ensues.
1469 */
1470 error = device_power_down(PMSG_FREEZE);
1471 if (error)
1472 goto Enable_irqs;
1473 save_processor_state();
1474#endif
1475 } else {
1476 blocking_notifier_call_chain(&reboot_notifier_list,
1477 SYS_RESTART, NULL);
1478 system_state = SYSTEM_RESTART;
1479 device_shutdown();
1480 sysdev_shutdown();
1481 printk(KERN_EMERG "Starting new kernel\n");
1482 machine_shutdown();
1483 }
1484
1485 machine_kexec(kexec_image);
1486
1487 if (kexec_image->preserve_context) {
1488#ifdef CONFIG_KEXEC_JUMP
1489 restore_processor_state();
1490 device_power_up(PMSG_RESTORE);
1491 Enable_irqs:
1492 local_irq_enable();
1493 enable_nonboot_cpus();
1494 Resume_devices:
1495 device_resume(PMSG_RESTORE);
1496 Resume_console:
1497 resume_console();
1498 thaw_processes();
1499 Restore_console:
1500 pm_restore_console();
1501 mutex_unlock(&pm_mutex);
1502#endif
1503 }
1504
1505 Unlock:
1506 xchg(&kexec_lock, 0);
1507
1508 return error;
1509}
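
The new kernel_kexec() above acquires resources in order (pm console, devices, non-boot CPUs, IRQs, processor state) and, in the preserve_context case, unwinds them in reverse through the chain of labels after machine_kexec() returns. As a minimal illustration of that goto-unwind idiom only, here is a self-contained user-space sketch; the resource names are hypothetical and this is not kernel code:

#include <stdio.h>

/* Hypothetical resources standing in for console, devices and IRQs. */
static int acquire(const char *name)
{
    printf("acquire %s\n", name);
    return 0;               /* a real acquire could fail and return -errno */
}

static void release(const char *name)
{
    printf("release %s\n", name);
}

static int do_transition(void)
{
    int error;

    error = acquire("console");
    if (error)
        goto out;
    error = acquire("devices");
    if (error)
        goto release_console;
    error = acquire("irqs");
    if (error)
        goto release_devices;

    printf("critical section\n");   /* machine_kexec() runs at this point */

    release("irqs");
release_devices:
    release("devices");
release_console:
    release("console");
out:
    return error;
}

int main(void)
{
    return do_transition();
}

Each label releases exactly the resources acquired before the failing step, which is why the labels appear in reverse acquisition order and why the success path falls through the same sequence.
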
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6111c27491b1..96cff2f8710b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
176 return; 176 return;
177 } 177 }
178 /* Must have done schedule() in kthread() before we set_task_cpu */ 178 /* Must have done schedule() in kthread() before we set_task_cpu */
179 wait_task_inactive(k); 179 wait_task_inactive(k, 0);
180 set_task_cpu(k, cpu); 180 set_task_cpu(k, cpu);
181 k->cpus_allowed = cpumask_of_cpu(cpu); 181 k->cpus_allowed = cpumask_of_cpu(cpu);
182 k->rt.nr_cpus_allowed = 1; 182 k->rt.nr_cpus_allowed = 1;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 95bff23ecdaa..0b7476f5d2a6 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -635,6 +635,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
635 } 635 }
636 if (status < 0) 636 if (status < 0)
637 printk(err_suspend, status); 637 printk(err_suspend, status);
638
639 /* Some platforms can't detect that the alarm triggered the
640 * wakeup, or (accordingly) disable it after it afterwards.
641 * It's supposed to give oneshot behavior; cope.
642 */
643 alm.enabled = false;
644 rtc_set_alarm(rtc, &alm);
638} 645}
639 646
640static int __init has_wakealarm(struct device *dev, void *name_ptr) 647static int __init has_wakealarm(struct device *dev, void *name_ptr)
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 700f44ec8406..acc0c101dbd5 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -53,8 +53,6 @@ extern int hibernation_platform_enter(void);
53 53
54extern int pfn_is_nosave(unsigned long); 54extern int pfn_is_nosave(unsigned long);
55 55
56extern struct mutex pm_mutex;
57
58#define power_attr(_name) \ 56#define power_attr(_name) \
59static struct kobj_attribute _name##_attr = { \ 57static struct kobj_attribute _name##_attr = { \
60 .attr = { \ 58 .attr = { \
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8392a9da6450..082b3fcb32a0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
107 read_unlock(&tasklist_lock); 107 read_unlock(&tasklist_lock);
108 108
109 if (!ret && !kill) 109 if (!ret && !kill)
110 wait_task_inactive(child); 110 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
111 111
112 /* All systems go.. */ 112 /* All systems go.. */
113 return ret; 113 return ret;
diff --git a/kernel/relay.c b/kernel/relay.c
index 7de644cdec43..04006ef970b8 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -407,6 +407,35 @@ void relay_reset(struct rchan *chan)
407} 407}
408EXPORT_SYMBOL_GPL(relay_reset); 408EXPORT_SYMBOL_GPL(relay_reset);
409 409
410static inline void relay_set_buf_dentry(struct rchan_buf *buf,
411 struct dentry *dentry)
412{
413 buf->dentry = dentry;
414 buf->dentry->d_inode->i_size = buf->early_bytes;
415}
416
417static struct dentry *relay_create_buf_file(struct rchan *chan,
418 struct rchan_buf *buf,
419 unsigned int cpu)
420{
421 struct dentry *dentry;
422 char *tmpname;
423
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 return NULL;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 /* Create file in fs */
430 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
431 S_IRUSR, buf,
432 &chan->is_global);
433
434 kfree(tmpname);
435
436 return dentry;
437}
438
410/* 439/*
411 * relay_open_buf - create a new relay channel buffer 440 * relay_open_buf - create a new relay channel buffer
412 * 441 *
@@ -416,45 +445,34 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
416{ 445{
417 struct rchan_buf *buf = NULL; 446 struct rchan_buf *buf = NULL;
418 struct dentry *dentry; 447 struct dentry *dentry;
419 char *tmpname;
420 448
421 if (chan->is_global) 449 if (chan->is_global)
422 return chan->buf[0]; 450 return chan->buf[0];
423 451
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 goto end;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 buf = relay_create_buf(chan); 452 buf = relay_create_buf(chan);
430 if (!buf) 453 if (!buf)
431 goto free_name; 454 return NULL;
455
456 if (chan->has_base_filename) {
457 dentry = relay_create_buf_file(chan, buf, cpu);
458 if (!dentry)
459 goto free_buf;
460 relay_set_buf_dentry(buf, dentry);
461 }
432 462
433 buf->cpu = cpu; 463 buf->cpu = cpu;
434 __relay_reset(buf, 1); 464 __relay_reset(buf, 1);
435 465
436 /* Create file in fs */
437 dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
438 buf, &chan->is_global);
439 if (!dentry)
440 goto free_buf;
441
442 buf->dentry = dentry;
443
444 if(chan->is_global) { 466 if(chan->is_global) {
445 chan->buf[0] = buf; 467 chan->buf[0] = buf;
446 buf->cpu = 0; 468 buf->cpu = 0;
447 } 469 }
448 470
449 goto free_name; 471 return buf;
450 472
451free_buf: 473free_buf:
452 relay_destroy_buf(buf); 474 relay_destroy_buf(buf);
453 buf = NULL; 475 return NULL;
454free_name:
455 kfree(tmpname);
456end:
457 return buf;
458} 476}
459 477
460/** 478/**
@@ -537,8 +555,8 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
537 555
538/** 556/**
539 * relay_open - create a new relay channel 557 * relay_open - create a new relay channel
540 * @base_filename: base name of files to create 558 * @base_filename: base name of files to create, %NULL for buffering only
541 * @parent: dentry of parent directory, %NULL for root directory 559 * @parent: dentry of parent directory, %NULL for root directory or buffer
542 * @subbuf_size: size of sub-buffers 560 * @subbuf_size: size of sub-buffers
543 * @n_subbufs: number of sub-buffers 561 * @n_subbufs: number of sub-buffers
544 * @cb: client callback functions 562 * @cb: client callback functions
@@ -560,8 +578,6 @@ struct rchan *relay_open(const char *base_filename,
560{ 578{
561 unsigned int i; 579 unsigned int i;
562 struct rchan *chan; 580 struct rchan *chan;
563 if (!base_filename)
564 return NULL;
565 581
566 if (!(subbuf_size && n_subbufs)) 582 if (!(subbuf_size && n_subbufs))
567 return NULL; 583 return NULL;
@@ -576,7 +592,10 @@ struct rchan *relay_open(const char *base_filename,
576 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); 592 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
577 chan->parent = parent; 593 chan->parent = parent;
578 chan->private_data = private_data; 594 chan->private_data = private_data;
579 strlcpy(chan->base_filename, base_filename, NAME_MAX); 595 if (base_filename) {
596 chan->has_base_filename = 1;
597 strlcpy(chan->base_filename, base_filename, NAME_MAX);
598 }
580 setup_callbacks(chan, cb); 599 setup_callbacks(chan, cb);
581 kref_init(&chan->kref); 600 kref_init(&chan->kref);
582 601
@@ -604,6 +623,94 @@ free_bufs:
604} 623}
605EXPORT_SYMBOL_GPL(relay_open); 624EXPORT_SYMBOL_GPL(relay_open);
606 625
626struct rchan_percpu_buf_dispatcher {
627 struct rchan_buf *buf;
628 struct dentry *dentry;
629};
630
631/* Called in atomic context. */
632static void __relay_set_buf_dentry(void *info)
633{
634 struct rchan_percpu_buf_dispatcher *p = info;
635
636 relay_set_buf_dentry(p->buf, p->dentry);
637}
638
639/**
640 * relay_late_setup_files - triggers file creation
641 * @chan: channel to operate on
642 * @base_filename: base name of files to create
643 * @parent: dentry of parent directory, %NULL for root directory
644 *
645 * Returns 0 if successful, non-zero otherwise.
646 *
647 * Use to setup files for a previously buffer-only channel.
648 * Useful to do early tracing in kernel, before VFS is up, for example.
649 */
650int relay_late_setup_files(struct rchan *chan,
651 const char *base_filename,
652 struct dentry *parent)
653{
654 int err = 0;
655 unsigned int i, curr_cpu;
656 unsigned long flags;
657 struct dentry *dentry;
658 struct rchan_percpu_buf_dispatcher disp;
659
660 if (!chan || !base_filename)
661 return -EINVAL;
662
663 strlcpy(chan->base_filename, base_filename, NAME_MAX);
664
665 mutex_lock(&relay_channels_mutex);
666 /* Is chan already set up? */
667 if (unlikely(chan->has_base_filename))
668 return -EEXIST;
669 chan->has_base_filename = 1;
670 chan->parent = parent;
671 curr_cpu = get_cpu();
672 /*
673 * The CPU hotplug notifier ran before us and created buffers with
674 * no files associated. So it's safe to call relay_setup_buf_file()
675 * on all currently online CPUs.
676 */
677 for_each_online_cpu(i) {
678 if (unlikely(!chan->buf[i])) {
679 printk(KERN_ERR "relay_late_setup_files: CPU %u "
680 "has no buffer, it must have!\n", i);
681 BUG();
682 err = -EINVAL;
683 break;
684 }
685
686 dentry = relay_create_buf_file(chan, chan->buf[i], i);
687 if (unlikely(!dentry)) {
688 err = -EINVAL;
689 break;
690 }
691
692 if (curr_cpu == i) {
693 local_irq_save(flags);
694 relay_set_buf_dentry(chan->buf[i], dentry);
695 local_irq_restore(flags);
696 } else {
697 disp.buf = chan->buf[i];
698 disp.dentry = dentry;
699 smp_mb();
700 /* relay_channels_mutex must be held, so wait. */
701 err = smp_call_function_single(i,
702 __relay_set_buf_dentry,
703 &disp, 1);
704 }
705 if (unlikely(err))
706 break;
707 }
708 put_cpu();
709 mutex_unlock(&relay_channels_mutex);
710
711 return err;
712}
713
607/** 714/**
608 * relay_switch_subbuf - switch to a new sub-buffer 715 * relay_switch_subbuf - switch to a new sub-buffer
609 * @buf: channel buffer 716 * @buf: channel buffer
@@ -627,8 +734,13 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
627 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; 734 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
628 buf->padding[old_subbuf] = buf->prev_padding; 735 buf->padding[old_subbuf] = buf->prev_padding;
629 buf->subbufs_produced++; 736 buf->subbufs_produced++;
630 buf->dentry->d_inode->i_size += buf->chan->subbuf_size - 737 if (buf->dentry)
631 buf->padding[old_subbuf]; 738 buf->dentry->d_inode->i_size +=
739 buf->chan->subbuf_size -
740 buf->padding[old_subbuf];
741 else
742 buf->early_bytes += buf->chan->subbuf_size -
743 buf->padding[old_subbuf];
632 smp_mb(); 744 smp_mb();
633 if (waitqueue_active(&buf->read_wait)) 745 if (waitqueue_active(&buf->read_wait))
634 /* 746 /*
@@ -1237,4 +1349,4 @@ static __init int relay_init(void)
1237 return 0; 1349 return 0;
1238} 1350}
1239 1351
1240module_init(relay_init); 1352early_initcall(relay_init);
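
The relay_late_setup_files() addition above lets a channel be created in buffer-only mode (relay_open() with a NULL base_filename) before any filesystem is available, with the files attached later. Below is a rough sketch of how a client might use this, assuming the create_buf_file()/remove_buf_file() callback signatures visible in this diff, a debugfs-backed channel and hypothetical sizes and names; it is an illustration, not meant as authoritative driver code:

#include <linux/relay.h>
#include <linux/debugfs.h>

/* Hypothetical client of the buffer-only relay mode (sketch only). */
static struct dentry *my_create_buf_file(const char *filename,
                                         struct dentry *parent, int mode,
                                         struct rchan_buf *buf, int *is_global)
{
    return debugfs_create_file(filename, mode, parent, buf,
                               &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
    debugfs_remove(dentry);
    return 0;
}

static struct rchan_callbacks my_relay_cb = {
    .create_buf_file  = my_create_buf_file,
    .remove_buf_file  = my_remove_buf_file,
};

static struct rchan *early_chan;

/* Called very early: no filesystem yet, so open the channel buffer-only. */
static void my_early_init(void)
{
    early_chan = relay_open(NULL, NULL, 256 * 1024, 4, &my_relay_cb, NULL);
}

/* Called later, once debugfs is up: attach files to the existing buffers. */
static int my_late_init(struct dentry *debugfs_dir)
{
    return relay_late_setup_files(early_chan, "my_trace", debugfs_dir);
}

Note how the relay_switch_subbuf() hunk above accounts data produced before the files exist in buf->early_bytes, which relay_set_buf_dentry() later copies into the new dentry's inode size.
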
diff --git a/kernel/sched.c b/kernel/sched.c
index 0047bd9b96aa..0236958addcb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1867/* 1867/*
1868 * wait_task_inactive - wait for a thread to unschedule. 1868 * wait_task_inactive - wait for a thread to unschedule.
1869 * 1869 *
1870 * If @match_state is nonzero, it's the @p->state value just checked and
1871 * not expected to change. If it changes, i.e. @p might have woken up,
1872 * then return zero. When we succeed in waiting for @p to be off its CPU,
1873 * we return a positive number (its total switch count). If a second call
1874 * a short while later returns the same number, the caller can be sure that
1875 * @p has remained unscheduled the whole time.
1876 *
1870 * The caller must ensure that the task *will* unschedule sometime soon, 1877 * The caller must ensure that the task *will* unschedule sometime soon,
1871 * else this function might spin for a *long* time. This function can't 1878 * else this function might spin for a *long* time. This function can't
1872 * be called with interrupts off, or it may introduce deadlock with 1879 * be called with interrupts off, or it may introduce deadlock with
1873 * smp_call_function() if an IPI is sent by the same process we are 1880 * smp_call_function() if an IPI is sent by the same process we are
1874 * waiting to become inactive. 1881 * waiting to become inactive.
1875 */ 1882 */
1876void wait_task_inactive(struct task_struct *p) 1883unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1877{ 1884{
1878 unsigned long flags; 1885 unsigned long flags;
1879 int running, on_rq; 1886 int running, on_rq;
1887 unsigned long ncsw;
1880 struct rq *rq; 1888 struct rq *rq;
1881 1889
1882 for (;;) { 1890 for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
1899 * return false if the runqueue has changed and p 1907 * return false if the runqueue has changed and p
1900 * is actually now running somewhere else! 1908 * is actually now running somewhere else!
1901 */ 1909 */
1902 while (task_running(rq, p)) 1910 while (task_running(rq, p)) {
1911 if (match_state && unlikely(p->state != match_state))
1912 return 0;
1903 cpu_relax(); 1913 cpu_relax();
1914 }
1904 1915
1905 /* 1916 /*
1906 * Ok, time to look more closely! We need the rq 1917 * Ok, time to look more closely! We need the rq
@@ -1910,9 +1921,21 @@ void wait_task_inactive(struct task_struct *p)
1910 rq = task_rq_lock(p, &flags); 1921 rq = task_rq_lock(p, &flags);
1911 running = task_running(rq, p); 1922 running = task_running(rq, p);
1912 on_rq = p->se.on_rq; 1923 on_rq = p->se.on_rq;
1924 ncsw = 0;
1925 if (!match_state || p->state == match_state) {
1926 ncsw = p->nivcsw + p->nvcsw;
1927 if (unlikely(!ncsw))
1928 ncsw = 1;
1929 }
1913 task_rq_unlock(rq, &flags); 1930 task_rq_unlock(rq, &flags);
1914 1931
1915 /* 1932 /*
1933 * If it changed from the expected state, bail out now.
1934 */
1935 if (unlikely(!ncsw))
1936 break;
1937
1938 /*
1916 * Was it really running after all now that we 1939 * Was it really running after all now that we
1917 * checked with the proper locks actually held? 1940 * checked with the proper locks actually held?
1918 * 1941 *
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
1944 */ 1967 */
1945 break; 1968 break;
1946 } 1969 }
1970
1971 return ncsw;
1947} 1972}
1948 1973
1949/*** 1974/***
@@ -6389,7 +6414,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
6389 .priority = 10 6414 .priority = 10
6390}; 6415};
6391 6416
6392void __init migration_init(void) 6417static int __init migration_init(void)
6393{ 6418{
6394 void *cpu = (void *)(long)smp_processor_id(); 6419 void *cpu = (void *)(long)smp_processor_id();
6395 int err; 6420 int err;
@@ -6399,7 +6424,10 @@ void __init migration_init(void)
6399 BUG_ON(err == NOTIFY_BAD); 6424 BUG_ON(err == NOTIFY_BAD);
6400 migration_call(&migration_notifier, CPU_ONLINE, cpu); 6425 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6401 register_cpu_notifier(&migration_notifier); 6426 register_cpu_notifier(&migration_notifier);
6427
6428 return err;
6402} 6429}
6430early_initcall(migration_init);
6403#endif 6431#endif
6404 6432
6405#ifdef CONFIG_SMP 6433#ifdef CONFIG_SMP
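
The reworked wait_task_inactive() above returns the task's total context-switch count when it succeeds in waiting with a matching @match_state, and zero if the state changed underneath it; the ptrace.c hunk earlier in this series passes TASK_TRACED that way, and a caller can compare two successive return values to confirm the task never ran in between. The toy user-space model below only illustrates that two-call contract; the task structure is invented and this is not kernel code:

#include <stdio.h>

/* Toy stand-in for a task: scheduling state and context-switch counters. */
struct toy_task {
    long state;
    unsigned long nvcsw;    /* voluntary context switches */
    unsigned long nivcsw;   /* involuntary context switches */
    int on_cpu;
};

/*
 * Modeled on the new contract: return 0 if the task's state no longer
 * matches, otherwise a non-zero switch count once the task is off the CPU.
 */
static unsigned long toy_wait_task_inactive(struct toy_task *p, long match_state)
{
    unsigned long ncsw;

    if (match_state && p->state != match_state)
        return 0;
    while (p->on_cpu)
        ;                   /* the real code spins here with cpu_relax() */
    ncsw = p->nvcsw + p->nivcsw;
    return ncsw ? ncsw : 1;
}

int main(void)
{
    /* 8 merely stands in for a state value such as TASK_TRACED. */
    struct toy_task t = { .state = 8, .nvcsw = 41, .nivcsw = 3, .on_cpu = 0 };
    unsigned long ncsw, again;

    ncsw = toy_wait_task_inactive(&t, 8);
    /* ... do work that must not let the task run ... */
    again = toy_wait_task_inactive(&t, 8);

    if (ncsw && ncsw == again)
        printf("task stayed off the CPU (cookie %lu)\n", ncsw);
    return 0;
}
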
diff --git a/kernel/signal.c b/kernel/signal.c
index 82c3545596c5..954f77d7e3bc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/signalfd.h> 24#include <linux/signalfd.h>
25#include <linux/tracehook.h>
25#include <linux/capability.h> 26#include <linux/capability.h>
26#include <linux/freezer.h> 27#include <linux/freezer.h>
27#include <linux/pid_namespace.h> 28#include <linux/pid_namespace.h>
@@ -39,24 +40,21 @@
39 40
40static struct kmem_cache *sigqueue_cachep; 41static struct kmem_cache *sigqueue_cachep;
41 42
42static int __sig_ignored(struct task_struct *t, int sig) 43static void __user *sig_handler(struct task_struct *t, int sig)
43{ 44{
44 void __user *handler; 45 return t->sighand->action[sig - 1].sa.sa_handler;
46}
45 47
48static int sig_handler_ignored(void __user *handler, int sig)
49{
46 /* Is it explicitly or implicitly ignored? */ 50 /* Is it explicitly or implicitly ignored? */
47
48 handler = t->sighand->action[sig - 1].sa.sa_handler;
49 return handler == SIG_IGN || 51 return handler == SIG_IGN ||
50 (handler == SIG_DFL && sig_kernel_ignore(sig)); 52 (handler == SIG_DFL && sig_kernel_ignore(sig));
51} 53}
52 54
53static int sig_ignored(struct task_struct *t, int sig) 55static int sig_ignored(struct task_struct *t, int sig)
54{ 56{
55 /* 57 void __user *handler;
56 * Tracers always want to know about signals..
57 */
58 if (t->ptrace & PT_PTRACED)
59 return 0;
60 58
61 /* 59 /*
62 * Blocked signals are never ignored, since the 60 * Blocked signals are never ignored, since the
@@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig)
66 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 64 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
67 return 0; 65 return 0;
68 66
69 return __sig_ignored(t, sig); 67 handler = sig_handler(t, sig);
68 if (!sig_handler_ignored(handler, sig))
69 return 0;
70
71 /*
72 * Tracers may want to know about even ignored signals.
73 */
74 return !tracehook_consider_ignored_signal(t, sig, handler);
70} 75}
71 76
72/* 77/*
@@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
129 134
130void recalc_sigpending(void) 135void recalc_sigpending(void)
131{ 136{
132 if (!recalc_sigpending_tsk(current) && !freezing(current)) 137 if (unlikely(tracehook_force_sigpending()))
138 set_thread_flag(TIF_SIGPENDING);
139 else if (!recalc_sigpending_tsk(current) && !freezing(current))
133 clear_thread_flag(TIF_SIGPENDING); 140 clear_thread_flag(TIF_SIGPENDING);
134 141
135} 142}
@@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default)
295 302
296int unhandled_signal(struct task_struct *tsk, int sig) 303int unhandled_signal(struct task_struct *tsk, int sig)
297{ 304{
305 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
298 if (is_global_init(tsk)) 306 if (is_global_init(tsk))
299 return 1; 307 return 1;
300 if (tsk->ptrace & PT_PTRACED) 308 if (handler != SIG_IGN && handler != SIG_DFL)
301 return 0; 309 return 0;
302 return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) || 310 return !tracehook_consider_fatal_signal(tsk, sig, handler);
303 (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
304} 311}
305 312
306 313
@@ -591,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info,
591 return security_task_kill(t, info, sig, 0); 598 return security_task_kill(t, info, sig, 0);
592} 599}
593 600
594/* forward decl */
595static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
596
597/* 601/*
598 * Handle magic process-wide effects of stop/continue signals. Unlike 602 * Handle magic process-wide effects of stop/continue signals. Unlike
599 * the signal actions, these happen immediately at signal-generation 603 * the signal actions, these happen immediately at signal-generation
@@ -756,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group)
756 if (sig_fatal(p, sig) && 760 if (sig_fatal(p, sig) &&
757 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 761 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
758 !sigismember(&t->real_blocked, sig) && 762 !sigismember(&t->real_blocked, sig) &&
759 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { 763 (sig == SIGKILL ||
764 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
760 /* 765 /*
761 * This signal will be fatal to the whole group. 766 * This signal will be fatal to the whole group.
762 */ 767 */
@@ -1323,9 +1328,11 @@ static inline void __wake_up_parent(struct task_struct *p,
1323/* 1328/*
1324 * Let a parent know about the death of a child. 1329 * Let a parent know about the death of a child.
1325 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1330 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1331 *
1332 * Returns -1 if our parent ignored us and so we've switched to
1333 * self-reaping, or else @sig.
1326 */ 1334 */
1327 1335int do_notify_parent(struct task_struct *tsk, int sig)
1328void do_notify_parent(struct task_struct *tsk, int sig)
1329{ 1336{
1330 struct siginfo info; 1337 struct siginfo info;
1331 unsigned long flags; 1338 unsigned long flags;
@@ -1396,12 +1403,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1396 */ 1403 */
1397 tsk->exit_signal = -1; 1404 tsk->exit_signal = -1;
1398 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1405 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1399 sig = 0; 1406 sig = -1;
1400 } 1407 }
1401 if (valid_signal(sig) && sig > 0) 1408 if (valid_signal(sig) && sig > 0)
1402 __group_send_sig_info(sig, &info, tsk->parent); 1409 __group_send_sig_info(sig, &info, tsk->parent);
1403 __wake_up_parent(tsk, tsk->parent); 1410 __wake_up_parent(tsk, tsk->parent);
1404 spin_unlock_irqrestore(&psig->siglock, flags); 1411 spin_unlock_irqrestore(&psig->siglock, flags);
1412
1413 return sig;
1405} 1414}
1406 1415
1407static void do_notify_parent_cldstop(struct task_struct *tsk, int why) 1416static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
@@ -1599,7 +1608,7 @@ finish_stop(int stop_count)
1599 * a group stop in progress and we are the last to stop, 1608 * a group stop in progress and we are the last to stop,
1600 * report to the parent. When ptraced, every thread reports itself. 1609 * report to the parent. When ptraced, every thread reports itself.
1601 */ 1610 */
1602 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { 1611 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1603 read_lock(&tasklist_lock); 1612 read_lock(&tasklist_lock);
1604 do_notify_parent_cldstop(current, CLD_STOPPED); 1613 do_notify_parent_cldstop(current, CLD_STOPPED);
1605 read_unlock(&tasklist_lock); 1614 read_unlock(&tasklist_lock);
@@ -1735,6 +1744,9 @@ relock:
1735 signal->flags &= ~SIGNAL_CLD_MASK; 1744 signal->flags &= ~SIGNAL_CLD_MASK;
1736 spin_unlock_irq(&sighand->siglock); 1745 spin_unlock_irq(&sighand->siglock);
1737 1746
1747 if (unlikely(!tracehook_notify_jctl(1, why)))
1748 goto relock;
1749
1738 read_lock(&tasklist_lock); 1750 read_lock(&tasklist_lock);
1739 do_notify_parent_cldstop(current->group_leader, why); 1751 do_notify_parent_cldstop(current->group_leader, why);
1740 read_unlock(&tasklist_lock); 1752 read_unlock(&tasklist_lock);
@@ -1748,17 +1760,33 @@ relock:
1748 do_signal_stop(0)) 1760 do_signal_stop(0))
1749 goto relock; 1761 goto relock;
1750 1762
1751 signr = dequeue_signal(current, &current->blocked, info); 1763 /*
1752 if (!signr) 1764 * Tracing can induce an artifical signal and choose sigaction.
1753 break; /* will return 0 */ 1765 * The return value in @signr determines the default action,
1766 * but @info->si_signo is the signal number we will report.
1767 */
1768 signr = tracehook_get_signal(current, regs, info, return_ka);
1769 if (unlikely(signr < 0))
1770 goto relock;
1771 if (unlikely(signr != 0))
1772 ka = return_ka;
1773 else {
1774 signr = dequeue_signal(current, &current->blocked,
1775 info);
1754 1776
1755 if (signr != SIGKILL) {
1756 signr = ptrace_signal(signr, info, regs, cookie);
1757 if (!signr) 1777 if (!signr)
1758 continue; 1778 break; /* will return 0 */
1779
1780 if (signr != SIGKILL) {
1781 signr = ptrace_signal(signr, info,
1782 regs, cookie);
1783 if (!signr)
1784 continue;
1785 }
1786
1787 ka = &sighand->action[signr-1];
1759 } 1788 }
1760 1789
1761 ka = &sighand->action[signr-1];
1762 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1790 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1763 continue; 1791 continue;
1764 if (ka->sa.sa_handler != SIG_DFL) { 1792 if (ka->sa.sa_handler != SIG_DFL) {
@@ -1806,7 +1834,7 @@ relock:
1806 spin_lock_irq(&sighand->siglock); 1834 spin_lock_irq(&sighand->siglock);
1807 } 1835 }
1808 1836
1809 if (likely(do_signal_stop(signr))) { 1837 if (likely(do_signal_stop(info->si_signo))) {
1810 /* It released the siglock. */ 1838 /* It released the siglock. */
1811 goto relock; 1839 goto relock;
1812 } 1840 }
@@ -1827,7 +1855,7 @@ relock:
1827 1855
1828 if (sig_kernel_coredump(signr)) { 1856 if (sig_kernel_coredump(signr)) {
1829 if (print_fatal_signals) 1857 if (print_fatal_signals)
1830 print_fatal_signal(regs, signr); 1858 print_fatal_signal(regs, info->si_signo);
1831 /* 1859 /*
1832 * If it was able to dump core, this kills all 1860 * If it was able to dump core, this kills all
1833 * other threads in the group and synchronizes with 1861 * other threads in the group and synchronizes with
@@ -1836,13 +1864,13 @@ relock:
1836 * first and our do_group_exit call below will use 1864 * first and our do_group_exit call below will use
1837 * that value and ignore the one we pass it. 1865 * that value and ignore the one we pass it.
1838 */ 1866 */
1839 do_coredump((long)signr, signr, regs); 1867 do_coredump(info->si_signo, info->si_signo, regs);
1840 } 1868 }
1841 1869
1842 /* 1870 /*
1843 * Death signals, no core dump. 1871 * Death signals, no core dump.
1844 */ 1872 */
1845 do_group_exit(signr); 1873 do_group_exit(info->si_signo);
1846 /* NOTREACHED */ 1874 /* NOTREACHED */
1847 } 1875 }
1848 spin_unlock_irq(&sighand->siglock); 1876 spin_unlock_irq(&sighand->siglock);
@@ -1884,7 +1912,7 @@ void exit_signals(struct task_struct *tsk)
1884out: 1912out:
1885 spin_unlock_irq(&tsk->sighand->siglock); 1913 spin_unlock_irq(&tsk->sighand->siglock);
1886 1914
1887 if (unlikely(group_stop)) { 1915 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1888 read_lock(&tasklist_lock); 1916 read_lock(&tasklist_lock);
1889 do_notify_parent_cldstop(tsk, CLD_STOPPED); 1917 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1890 read_unlock(&tasklist_lock); 1918 read_unlock(&tasklist_lock);
@@ -1895,7 +1923,6 @@ EXPORT_SYMBOL(recalc_sigpending);
1895EXPORT_SYMBOL_GPL(dequeue_signal); 1923EXPORT_SYMBOL_GPL(dequeue_signal);
1896EXPORT_SYMBOL(flush_signals); 1924EXPORT_SYMBOL(flush_signals);
1897EXPORT_SYMBOL(force_sig); 1925EXPORT_SYMBOL(force_sig);
1898EXPORT_SYMBOL(ptrace_notify);
1899EXPORT_SYMBOL(send_sig); 1926EXPORT_SYMBOL(send_sig);
1900EXPORT_SYMBOL(send_sig_info); 1927EXPORT_SYMBOL(send_sig_info);
1901EXPORT_SYMBOL(sigprocmask); 1928EXPORT_SYMBOL(sigprocmask);
@@ -2299,7 +2326,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2299 * (for example, SIGCHLD), shall cause the pending signal to 2326 * (for example, SIGCHLD), shall cause the pending signal to
2300 * be discarded, whether or not it is blocked" 2327 * be discarded, whether or not it is blocked"
2301 */ 2328 */
2302 if (__sig_ignored(t, sig)) { 2329 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2303 sigemptyset(&mask); 2330 sigemptyset(&mask);
2304 sigaddset(&mask, sig); 2331 sigaddset(&mask, sig);
2305 rm_from_queue_full(&mask, &t->signal->shared_pending); 2332 rm_from_queue_full(&mask, &t->signal->shared_pending);
diff --git a/kernel/smp.c b/kernel/smp.c
index 462c785ca1ee..96fc7c0edc59 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,7 +33,7 @@ struct call_single_queue {
33 spinlock_t lock; 33 spinlock_t lock;
34}; 34};
35 35
36void __cpuinit init_call_single_data(void) 36static int __cpuinit init_call_single_data(void)
37{ 37{
38 int i; 38 int i;
39 39
@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void)
43 spin_lock_init(&q->lock); 43 spin_lock_init(&q->lock);
44 INIT_LIST_HEAD(&q->list); 44 INIT_LIST_HEAD(&q->list);
45 } 45 }
46 return 0;
46} 47}
48early_initcall(init_call_single_data);
47 49
48static void csd_flag_wait(struct call_single_data *data) 50static void csd_flag_wait(struct call_single_data *data)
49{ 51{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f6b03d56c2bf..c506f266a6b9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -630,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
630 .notifier_call = cpu_callback 630 .notifier_call = cpu_callback
631}; 631};
632 632
633__init int spawn_ksoftirqd(void) 633static __init int spawn_ksoftirqd(void)
634{ 634{
635 void *cpu = (void *)(long)smp_processor_id(); 635 void *cpu = (void *)(long)smp_processor_id();
636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
@@ -640,6 +640,7 @@ __init int spawn_ksoftirqd(void)
640 register_cpu_notifier(&cpu_nfb); 640 register_cpu_notifier(&cpu_nfb);
641 return 0; 641 return 0;
642} 642}
643early_initcall(spawn_ksoftirqd);
643 644
644#ifdef CONFIG_SMP 645#ifdef CONFIG_SMP
645/* 646/*
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 7bd8d1aadd5d..b75b492fbfcf 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -338,14 +338,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
338 .notifier_call = cpu_callback 338 .notifier_call = cpu_callback
339}; 339};
340 340
341__init void spawn_softlockup_task(void) 341static int __initdata nosoftlockup;
342
343static int __init nosoftlockup_setup(char *str)
344{
345 nosoftlockup = 1;
346 return 1;
347}
348__setup("nosoftlockup", nosoftlockup_setup);
349
350static int __init spawn_softlockup_task(void)
342{ 351{
343 void *cpu = (void *)(long)smp_processor_id(); 352 void *cpu = (void *)(long)smp_processor_id();
344 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 353 int err;
345 354
346 BUG_ON(err == NOTIFY_BAD); 355 if (nosoftlockup)
356 return 0;
357
358 err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
359 if (err == NOTIFY_BAD) {
360 BUG();
361 return 1;
362 }
347 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); 363 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
348 register_cpu_notifier(&cpu_nfb); 364 register_cpu_notifier(&cpu_nfb);
349 365
350 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 366 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
367
368 return 0;
351} 369}
370early_initcall(spawn_softlockup_task);
diff --git a/kernel/sys.c b/kernel/sys.c
index 0c9d3fa1f5ff..c01858090a98 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -301,26 +301,6 @@ void kernel_restart(char *cmd)
301} 301}
302EXPORT_SYMBOL_GPL(kernel_restart); 302EXPORT_SYMBOL_GPL(kernel_restart);
303 303
304/**
305 * kernel_kexec - reboot the system
306 *
307 * Move into place and start executing a preloaded standalone
308 * executable. If nothing was preloaded return an error.
309 */
310static void kernel_kexec(void)
311{
312#ifdef CONFIG_KEXEC
313 struct kimage *image;
314 image = xchg(&kexec_image, NULL);
315 if (!image)
316 return;
317 kernel_restart_prepare(NULL);
318 printk(KERN_EMERG "Starting new kernel\n");
319 machine_shutdown();
320 machine_kexec(image);
321#endif
322}
323
324static void kernel_shutdown_prepare(enum system_states state) 304static void kernel_shutdown_prepare(enum system_states state)
325{ 305{
326 blocking_notifier_call_chain(&reboot_notifier_list, 306 blocking_notifier_call_chain(&reboot_notifier_list,
@@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
425 kernel_restart(buffer); 405 kernel_restart(buffer);
426 break; 406 break;
427 407
408#ifdef CONFIG_KEXEC
428 case LINUX_REBOOT_CMD_KEXEC: 409 case LINUX_REBOOT_CMD_KEXEC:
429 kernel_kexec(); 410 {
430 unlock_kernel(); 411 int ret;
431 return -EINVAL; 412 ret = kernel_kexec();
413 unlock_kernel();
414 return ret;
415 }
416#endif
432 417
433#ifdef CONFIG_HIBERNATION 418#ifdef CONFIG_HIBERNATION
434 case LINUX_REBOOT_CMD_SW_SUSPEND: 419 case LINUX_REBOOT_CMD_SW_SUSPEND:
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 868e121c8e38..fc20e09a6cb1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1203,9 +1203,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1203 1203
1204 iter->pos = *pos; 1204 iter->pos = *pos;
1205 1205
1206 if (last_ent && !ent)
1207 seq_puts(m, "\n\nvim:ft=help\n");
1208
1209 return ent; 1206 return ent;
1210} 1207}
1211 1208
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 421d6fe3650e..ece6cfb649fa 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -253,12 +253,14 @@ void start_critical_timings(void)
253 if (preempt_trace() || irq_trace()) 253 if (preempt_trace() || irq_trace())
254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
255} 255}
256EXPORT_SYMBOL_GPL(start_critical_timings);
256 257
257void stop_critical_timings(void) 258void stop_critical_timings(void)
258{ 259{
259 if (preempt_trace() || irq_trace()) 260 if (preempt_trace() || irq_trace())
260 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 261 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
261} 262}
263EXPORT_SYMBOL_GPL(stop_critical_timings);
262 264
263#ifdef CONFIG_IRQSOFF_TRACER 265#ifdef CONFIG_IRQSOFF_TRACER
264#ifdef CONFIG_PROVE_LOCKING 266#ifdef CONFIG_PROVE_LOCKING
@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
337#ifdef CONFIG_PREEMPT_TRACER 339#ifdef CONFIG_PREEMPT_TRACER
338void trace_preempt_on(unsigned long a0, unsigned long a1) 340void trace_preempt_on(unsigned long a0, unsigned long a1)
339{ 341{
340 stop_critical_timing(a0, a1); 342 if (preempt_trace())
343 stop_critical_timing(a0, a1);
341} 344}
342 345
343void trace_preempt_off(unsigned long a0, unsigned long a1) 346void trace_preempt_off(unsigned long a0, unsigned long a1)
344{ 347{
345 start_critical_timing(a0, a1); 348 if (preempt_trace())
349 start_critical_timing(a0, a1);
346} 350}
347#endif /* CONFIG_PREEMPT_TRACER */ 351#endif /* CONFIG_PREEMPT_TRACER */
348 352
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3c8d61df4474..e303ccb62cdf 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
26static int wakeup_cpu; 26static int wakeup_cpu;
27static unsigned wakeup_prio = -1; 27static unsigned wakeup_prio = -1;
28 28
29static DEFINE_SPINLOCK(wakeup_lock); 29static raw_spinlock_t wakeup_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
30 31
31static void __wakeup_reset(struct trace_array *tr); 32static void __wakeup_reset(struct trace_array *tr);
32 33
@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
56 if (unlikely(disabled != 1)) 57 if (unlikely(disabled != 1))
57 goto out; 58 goto out;
58 59
59 spin_lock_irqsave(&wakeup_lock, flags); 60 local_irq_save(flags);
61 __raw_spin_lock(&wakeup_lock);
60 62
61 if (unlikely(!wakeup_task)) 63 if (unlikely(!wakeup_task))
62 goto unlock; 64 goto unlock;
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
71 trace_function(tr, data, ip, parent_ip, flags); 73 trace_function(tr, data, ip, parent_ip, flags);
72 74
73 unlock: 75 unlock:
74 spin_unlock_irqrestore(&wakeup_lock, flags); 76 __raw_spin_unlock(&wakeup_lock);
77 local_irq_restore(flags);
75 78
76 out: 79 out:
77 atomic_dec(&data->disabled); 80 atomic_dec(&data->disabled);
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
145 if (likely(disabled != 1)) 148 if (likely(disabled != 1))
146 goto out; 149 goto out;
147 150
148 spin_lock_irqsave(&wakeup_lock, flags); 151 local_irq_save(flags);
152 __raw_spin_lock(&wakeup_lock);
149 153
150 /* We could race with grabbing wakeup_lock */ 154 /* We could race with grabbing wakeup_lock */
151 if (unlikely(!tracer_enabled || next != wakeup_task)) 155 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
174 178
175out_unlock: 179out_unlock:
176 __wakeup_reset(tr); 180 __wakeup_reset(tr);
177 spin_unlock_irqrestore(&wakeup_lock, flags); 181 __raw_spin_unlock(&wakeup_lock);
182 local_irq_restore(flags);
178out: 183out:
179 atomic_dec(&tr->data[cpu]->disabled); 184 atomic_dec(&tr->data[cpu]->disabled);
180} 185}
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
209 struct trace_array_cpu *data; 214 struct trace_array_cpu *data;
210 int cpu; 215 int cpu;
211 216
212 assert_spin_locked(&wakeup_lock);
213
214 for_each_possible_cpu(cpu) { 217 for_each_possible_cpu(cpu) {
215 data = tr->data[cpu]; 218 data = tr->data[cpu];
216 tracing_reset(data); 219 tracing_reset(data);
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
229{ 232{
230 unsigned long flags; 233 unsigned long flags;
231 234
232 spin_lock_irqsave(&wakeup_lock, flags); 235 local_irq_save(flags);
236 __raw_spin_lock(&wakeup_lock);
233 __wakeup_reset(tr); 237 __wakeup_reset(tr);
234 spin_unlock_irqrestore(&wakeup_lock, flags); 238 __raw_spin_unlock(&wakeup_lock);
239 local_irq_restore(flags);
235} 240}
236 241
237static void 242static void
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
252 goto out; 257 goto out;
253 258
254 /* interrupts should be off from try_to_wake_up */ 259 /* interrupts should be off from try_to_wake_up */
255 spin_lock(&wakeup_lock); 260 __raw_spin_lock(&wakeup_lock);
256 261
257 /* check for races. */ 262 /* check for races. */
258 if (!tracer_enabled || p->prio >= wakeup_prio) 263 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
274 CALLER_ADDR1, CALLER_ADDR2, flags); 279 CALLER_ADDR1, CALLER_ADDR2, flags);
275 280
276out_locked: 281out_locked:
277 spin_unlock(&wakeup_lock); 282 __raw_spin_unlock(&wakeup_lock);
278out: 283out:
279 atomic_dec(&tr->data[cpu]->disabled); 284 atomic_dec(&tr->data[cpu]->disabled);
280} 285}