Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                      3
-rw-r--r--  kernel/exec_domain.c                 1
-rw-r--r--  kernel/exit.c                       85
-rw-r--r--  kernel/fork.c                       98
-rw-r--r--  kernel/irq/chip.c                   12
-rw-r--r--  kernel/irq/manage.c                  3
-rw-r--r--  kernel/kexec.c                     104
-rw-r--r--  kernel/kthread.c                     2
-rw-r--r--  kernel/power/main.c                  7
-rw-r--r--  kernel/power/power.h                 2
-rw-r--r--  kernel/ptrace.c                      2
-rw-r--r--  kernel/relay.c                     170
-rw-r--r--  kernel/sched.c                      34
-rw-r--r--  kernel/signal.c                     99
-rw-r--r--  kernel/smp.c                         4
-rw-r--r--  kernel/softirq.c                     3
-rw-r--r--  kernel/softlockup.c                 25
-rw-r--r--  kernel/sys.c                        31
-rw-r--r--  kernel/sysctl.c                    168
-rw-r--r--  kernel/trace/trace.c                 4
-rw-r--r--  kernel/trace/trace_irqsoff.c         8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   27
-rw-r--r--  kernel/tsacct.c                      8
23 files changed, 617 insertions, 283 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 66ec9fd21e0c..657f8f8d93a5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -45,6 +45,7 @@
45#include <linux/delayacct.h> 45#include <linux/delayacct.h>
46#include <linux/cgroupstats.h> 46#include <linux/cgroupstats.h>
47#include <linux/hash.h> 47#include <linux/hash.h>
48#include <linux/namei.h>
48 49
49#include <asm/atomic.h> 50#include <asm/atomic.h>
50 51
@@ -1529,7 +1530,7 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
1529 return cft->read_seq_string(state->cgroup, cft, m); 1530 return cft->read_seq_string(state->cgroup, cft, m);
1530} 1531}
1531 1532
1532int cgroup_seqfile_release(struct inode *inode, struct file *file) 1533static int cgroup_seqfile_release(struct inode *inode, struct file *file)
1533{ 1534{
1534 struct seq_file *seq = file->private_data; 1535 struct seq_file *seq = file->private_data;
1535 kfree(seq->private); 1536 kfree(seq->private);
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index c1ef192aa655..0d407e886735 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -168,7 +168,6 @@ __set_personality(u_long personality)
168 current->personality = personality; 168 current->personality = personality;
169 oep = current_thread_info()->exec_domain; 169 oep = current_thread_info()->exec_domain;
170 current_thread_info()->exec_domain = ep; 170 current_thread_info()->exec_domain = ep;
171 set_fs_altroot();
172 171
173 module_put(oep->module); 172 module_put(oep->module);
174 return 0; 173 return 0;
diff --git a/kernel/exit.c b/kernel/exit.c
index ad933bb29ec7..eb4d6470d1d0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
46#include <linux/resource.h> 46#include <linux/resource.h>
47#include <linux/blkdev.h> 47#include <linux/blkdev.h>
48#include <linux/task_io_accounting_ops.h> 48#include <linux/task_io_accounting_ops.h>
49#include <linux/tracehook.h>
49 50
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51#include <asm/unistd.h> 52#include <asm/unistd.h>
@@ -120,18 +121,7 @@ static void __exit_signal(struct task_struct *tsk)
120 sig->nivcsw += tsk->nivcsw; 121 sig->nivcsw += tsk->nivcsw;
121 sig->inblock += task_io_get_inblock(tsk); 122 sig->inblock += task_io_get_inblock(tsk);
122 sig->oublock += task_io_get_oublock(tsk); 123 sig->oublock += task_io_get_oublock(tsk);
123#ifdef CONFIG_TASK_XACCT 124 task_io_accounting_add(&sig->ioac, &tsk->ioac);
124 sig->rchar += tsk->rchar;
125 sig->wchar += tsk->wchar;
126 sig->syscr += tsk->syscr;
127 sig->syscw += tsk->syscw;
128#endif /* CONFIG_TASK_XACCT */
129#ifdef CONFIG_TASK_IO_ACCOUNTING
130 sig->ioac.read_bytes += tsk->ioac.read_bytes;
131 sig->ioac.write_bytes += tsk->ioac.write_bytes;
132 sig->ioac.cancelled_write_bytes +=
133 tsk->ioac.cancelled_write_bytes;
134#endif /* CONFIG_TASK_IO_ACCOUNTING */
135 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 125 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
136 sig = NULL; /* Marker for below. */ 126 sig = NULL; /* Marker for below. */
137 } 127 }
@@ -162,27 +152,17 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
162 put_task_struct(container_of(rhp, struct task_struct, rcu)); 152 put_task_struct(container_of(rhp, struct task_struct, rcu));
163} 153}
164 154
165/*
166 * Do final ptrace-related cleanup of a zombie being reaped.
167 *
168 * Called with write_lock(&tasklist_lock) held.
169 */
170static void ptrace_release_task(struct task_struct *p)
171{
172 BUG_ON(!list_empty(&p->ptraced));
173 ptrace_unlink(p);
174 BUG_ON(!list_empty(&p->ptrace_entry));
175}
176 155
177void release_task(struct task_struct * p) 156void release_task(struct task_struct * p)
178{ 157{
179 struct task_struct *leader; 158 struct task_struct *leader;
180 int zap_leader; 159 int zap_leader;
181repeat: 160repeat:
161 tracehook_prepare_release_task(p);
182 atomic_dec(&p->user->processes); 162 atomic_dec(&p->user->processes);
183 proc_flush_task(p); 163 proc_flush_task(p);
184 write_lock_irq(&tasklist_lock); 164 write_lock_irq(&tasklist_lock);
185 ptrace_release_task(p); 165 tracehook_finish_release_task(p);
186 __exit_signal(p); 166 __exit_signal(p);
187 167
188 /* 168 /*
@@ -204,6 +184,13 @@ repeat:
204 * that case. 184 * that case.
205 */ 185 */
206 zap_leader = task_detached(leader); 186 zap_leader = task_detached(leader);
187
188 /*
189 * This maintains the invariant that release_task()
190 * only runs on a task in EXIT_DEAD, just for sanity.
191 */
192 if (zap_leader)
193 leader->exit_state = EXIT_DEAD;
207 } 194 }
208 195
209 write_unlock_irq(&tasklist_lock); 196 write_unlock_irq(&tasklist_lock);
@@ -567,8 +554,6 @@ void put_fs_struct(struct fs_struct *fs)
567 if (atomic_dec_and_test(&fs->count)) { 554 if (atomic_dec_and_test(&fs->count)) {
568 path_put(&fs->root); 555 path_put(&fs->root);
569 path_put(&fs->pwd); 556 path_put(&fs->pwd);
570 if (fs->altroot.dentry)
571 path_put(&fs->altroot);
572 kmem_cache_free(fs_cachep, fs); 557 kmem_cache_free(fs_cachep, fs);
573 } 558 }
574} 559}
@@ -887,7 +872,8 @@ static void forget_original_parent(struct task_struct *father)
887 */ 872 */
888static void exit_notify(struct task_struct *tsk, int group_dead) 873static void exit_notify(struct task_struct *tsk, int group_dead)
889{ 874{
890 int state; 875 int signal;
876 void *cookie;
891 877
892 /* 878 /*
893 * This does two things: 879 * This does two things:
@@ -924,22 +910,11 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
924 !capable(CAP_KILL)) 910 !capable(CAP_KILL))
925 tsk->exit_signal = SIGCHLD; 911 tsk->exit_signal = SIGCHLD;
926 912
927 /* If something other than our normal parent is ptracing us, then 913 signal = tracehook_notify_death(tsk, &cookie, group_dead);
928 * send it a SIGCHLD instead of honoring exit_signal. exit_signal 914 if (signal > 0)
929 * only has special meaning to our real parent. 915 signal = do_notify_parent(tsk, signal);
930 */
931 if (!task_detached(tsk) && thread_group_empty(tsk)) {
932 int signal = ptrace_reparented(tsk) ?
933 SIGCHLD : tsk->exit_signal;
934 do_notify_parent(tsk, signal);
935 } else if (tsk->ptrace) {
936 do_notify_parent(tsk, SIGCHLD);
937 }
938 916
939 state = EXIT_ZOMBIE; 917 tsk->exit_state = signal < 0 ? EXIT_DEAD : EXIT_ZOMBIE;
940 if (task_detached(tsk) && likely(!tsk->ptrace))
941 state = EXIT_DEAD;
942 tsk->exit_state = state;
943 918
944 /* mt-exec, de_thread() is waiting for us */ 919 /* mt-exec, de_thread() is waiting for us */
945 if (thread_group_leader(tsk) && 920 if (thread_group_leader(tsk) &&
@@ -949,8 +924,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
949 924
950 write_unlock_irq(&tasklist_lock); 925 write_unlock_irq(&tasklist_lock);
951 926
927 tracehook_report_death(tsk, signal, cookie, group_dead);
928
952 /* If the process is dead, release it - nobody will wait for it */ 929 /* If the process is dead, release it - nobody will wait for it */
953 if (state == EXIT_DEAD) 930 if (signal < 0)
954 release_task(tsk); 931 release_task(tsk);
955} 932}
956 933
@@ -1029,10 +1006,7 @@ NORET_TYPE void do_exit(long code)
1029 if (unlikely(!tsk->pid)) 1006 if (unlikely(!tsk->pid))
1030 panic("Attempted to kill the idle task!"); 1007 panic("Attempted to kill the idle task!");
1031 1008
1032 if (unlikely(current->ptrace & PT_TRACE_EXIT)) { 1009 tracehook_report_exit(&code);
1033 current->ptrace_message = code;
1034 ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
1035 }
1036 1010
1037 /* 1011 /*
1038 * We're taking recursive faults here in do_exit. Safest is to just 1012 * We're taking recursive faults here in do_exit. Safest is to just
@@ -1378,21 +1352,8 @@ static int wait_task_zombie(struct task_struct *p, int options,
1378 psig->coublock += 1352 psig->coublock +=
1379 task_io_get_oublock(p) + 1353 task_io_get_oublock(p) +
1380 sig->oublock + sig->coublock; 1354 sig->oublock + sig->coublock;
1381#ifdef CONFIG_TASK_XACCT 1355 task_io_accounting_add(&psig->ioac, &p->ioac);
1382 psig->rchar += p->rchar + sig->rchar; 1356 task_io_accounting_add(&psig->ioac, &sig->ioac);
1383 psig->wchar += p->wchar + sig->wchar;
1384 psig->syscr += p->syscr + sig->syscr;
1385 psig->syscw += p->syscw + sig->syscw;
1386#endif /* CONFIG_TASK_XACCT */
1387#ifdef CONFIG_TASK_IO_ACCOUNTING
1388 psig->ioac.read_bytes +=
1389 p->ioac.read_bytes + sig->ioac.read_bytes;
1390 psig->ioac.write_bytes +=
1391 p->ioac.write_bytes + sig->ioac.write_bytes;
1392 psig->ioac.cancelled_write_bytes +=
1393 p->ioac.cancelled_write_bytes +
1394 sig->ioac.cancelled_write_bytes;
1395#endif /* CONFIG_TASK_IO_ACCOUNTING */
1396 spin_unlock_irq(&p->parent->sighand->siglock); 1357 spin_unlock_irq(&p->parent->sighand->siglock);
1397 } 1358 }
1398 1359
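
The two exit.c hunks above collapse the open-coded CONFIG_TASK_XACCT and CONFIG_TASK_IO_ACCOUNTING blocks into a single task_io_accounting_add() call. A minimal sketch of what that helper amounts to, assuming it simply folds together the same counters the removed lines touched (the real helper lives in <linux/task_io_accounting_ops.h> and compiles individual fields in or out per config option):

/* Sketch, not the verbatim header: aggregate one task's I/O counters
 * into an accumulator such as signal_struct.ioac. */
static inline void task_io_accounting_add(struct task_io_accounting *dst,
					  struct task_io_accounting *src)
{
	dst->rchar += src->rchar;	/* bytes read */
	dst->wchar += src->wchar;	/* bytes written */
	dst->syscr += src->syscr;	/* read syscalls */
	dst->syscw += src->syscw;	/* write syscalls */
	dst->read_bytes += src->read_bytes;
	dst->write_bytes += src->write_bytes;
	dst->cancelled_write_bytes += src->cancelled_write_bytes;
}
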
diff --git a/kernel/fork.c b/kernel/fork.c
index b99d73e971a4..8214ba7c8bb1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,6 +37,7 @@
37#include <linux/swap.h> 37#include <linux/swap.h>
38#include <linux/syscalls.h> 38#include <linux/syscalls.h>
39#include <linux/jiffies.h> 39#include <linux/jiffies.h>
40#include <linux/tracehook.h>
40#include <linux/futex.h> 41#include <linux/futex.h>
41#include <linux/task_io_accounting_ops.h> 42#include <linux/task_io_accounting_ops.h>
42#include <linux/rcupdate.h> 43#include <linux/rcupdate.h>
@@ -656,13 +657,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
656 path_get(&old->root); 657 path_get(&old->root);
657 fs->pwd = old->pwd; 658 fs->pwd = old->pwd;
658 path_get(&old->pwd); 659 path_get(&old->pwd);
659 if (old->altroot.dentry) {
660 fs->altroot = old->altroot;
661 path_get(&old->altroot);
662 } else {
663 fs->altroot.mnt = NULL;
664 fs->altroot.dentry = NULL;
665 }
666 read_unlock(&old->lock); 660 read_unlock(&old->lock);
667 } 661 }
668 return fs; 662 return fs;
@@ -812,12 +806,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
812 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; 806 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
813 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; 807 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
814 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; 808 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
815#ifdef CONFIG_TASK_XACCT 809 task_io_accounting_init(&sig->ioac);
816 sig->rchar = sig->wchar = sig->syscr = sig->syscw = 0;
817#endif
818#ifdef CONFIG_TASK_IO_ACCOUNTING
819 memset(&sig->ioac, 0, sizeof(sig->ioac));
820#endif
821 sig->sum_sched_runtime = 0; 810 sig->sum_sched_runtime = 0;
822 INIT_LIST_HEAD(&sig->cpu_timers[0]); 811 INIT_LIST_HEAD(&sig->cpu_timers[0]);
823 INIT_LIST_HEAD(&sig->cpu_timers[1]); 812 INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -865,8 +854,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
865 854
866 new_flags &= ~PF_SUPERPRIV; 855 new_flags &= ~PF_SUPERPRIV;
867 new_flags |= PF_FORKNOEXEC; 856 new_flags |= PF_FORKNOEXEC;
868 if (!(clone_flags & CLONE_PTRACE)) 857 new_flags |= PF_STARTING;
869 p->ptrace = 0;
870 p->flags = new_flags; 858 p->flags = new_flags;
871 clear_freeze_flag(p); 859 clear_freeze_flag(p);
872} 860}
@@ -907,7 +895,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
907 struct pt_regs *regs, 895 struct pt_regs *regs,
908 unsigned long stack_size, 896 unsigned long stack_size,
909 int __user *child_tidptr, 897 int __user *child_tidptr,
910 struct pid *pid) 898 struct pid *pid,
899 int trace)
911{ 900{
912 int retval; 901 int retval;
913 struct task_struct *p; 902 struct task_struct *p;
@@ -1000,13 +989,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1000 p->last_switch_timestamp = 0; 989 p->last_switch_timestamp = 0;
1001#endif 990#endif
1002 991
1003#ifdef CONFIG_TASK_XACCT 992 task_io_accounting_init(&p->ioac);
1004 p->rchar = 0; /* I/O counter: bytes read */
1005 p->wchar = 0; /* I/O counter: bytes written */
1006 p->syscr = 0; /* I/O counter: read syscalls */
1007 p->syscw = 0; /* I/O counter: write syscalls */
1008#endif
1009 task_io_accounting_init(p);
1010 acct_clear_integrals(p); 993 acct_clear_integrals(p);
1011 994
1012 p->it_virt_expires = cputime_zero; 995 p->it_virt_expires = cputime_zero;
@@ -1163,8 +1146,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1163 */ 1146 */
1164 p->group_leader = p; 1147 p->group_leader = p;
1165 INIT_LIST_HEAD(&p->thread_group); 1148 INIT_LIST_HEAD(&p->thread_group);
1166 INIT_LIST_HEAD(&p->ptrace_entry);
1167 INIT_LIST_HEAD(&p->ptraced);
1168 1149
1169 /* Now that the task is set up, run cgroup callbacks if 1150 /* Now that the task is set up, run cgroup callbacks if
1170 * necessary. We need to run them before the task is visible 1151 * necessary. We need to run them before the task is visible
@@ -1195,7 +1176,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1195 p->real_parent = current->real_parent; 1176 p->real_parent = current->real_parent;
1196 else 1177 else
1197 p->real_parent = current; 1178 p->real_parent = current;
1198 p->parent = p->real_parent;
1199 1179
1200 spin_lock(&current->sighand->siglock); 1180 spin_lock(&current->sighand->siglock);
1201 1181
@@ -1237,8 +1217,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1237 1217
1238 if (likely(p->pid)) { 1218 if (likely(p->pid)) {
1239 list_add_tail(&p->sibling, &p->real_parent->children); 1219 list_add_tail(&p->sibling, &p->real_parent->children);
1240 if (unlikely(p->ptrace & PT_PTRACED)) 1220 tracehook_finish_clone(p, clone_flags, trace);
1241 __ptrace_link(p, current->parent);
1242 1221
1243 if (thread_group_leader(p)) { 1222 if (thread_group_leader(p)) {
1244 if (clone_flags & CLONE_NEWPID) 1223 if (clone_flags & CLONE_NEWPID)
@@ -1323,29 +1302,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
1323 struct pt_regs regs; 1302 struct pt_regs regs;
1324 1303
1325 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, 1304 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1326 &init_struct_pid); 1305 &init_struct_pid, 0);
1327 if (!IS_ERR(task)) 1306 if (!IS_ERR(task))
1328 init_idle(task, cpu); 1307 init_idle(task, cpu);
1329 1308
1330 return task; 1309 return task;
1331} 1310}
1332 1311
1333static int fork_traceflag(unsigned clone_flags)
1334{
1335 if (clone_flags & CLONE_UNTRACED)
1336 return 0;
1337 else if (clone_flags & CLONE_VFORK) {
1338 if (current->ptrace & PT_TRACE_VFORK)
1339 return PTRACE_EVENT_VFORK;
1340 } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1341 if (current->ptrace & PT_TRACE_CLONE)
1342 return PTRACE_EVENT_CLONE;
1343 } else if (current->ptrace & PT_TRACE_FORK)
1344 return PTRACE_EVENT_FORK;
1345
1346 return 0;
1347}
1348
1349/* 1312/*
1350 * Ok, this is the main fork-routine. 1313 * Ok, this is the main fork-routine.
1351 * 1314 *
@@ -1380,14 +1343,14 @@ long do_fork(unsigned long clone_flags,
1380 } 1343 }
1381 } 1344 }
1382 1345
1383 if (unlikely(current->ptrace)) { 1346 /*
1384 trace = fork_traceflag (clone_flags); 1347 * When called from kernel_thread, don't do user tracing stuff.
1385 if (trace) 1348 */
1386 clone_flags |= CLONE_PTRACE; 1349 if (likely(user_mode(regs)))
1387 } 1350 trace = tracehook_prepare_clone(clone_flags);
1388 1351
1389 p = copy_process(clone_flags, stack_start, regs, stack_size, 1352 p = copy_process(clone_flags, stack_start, regs, stack_size,
1390 child_tidptr, NULL); 1353 child_tidptr, NULL, trace);
1391 /* 1354 /*
1392 * Do this prior waking up the new thread - the thread pointer 1355 * Do this prior waking up the new thread - the thread pointer
1393 * might get invalid after that point, if the thread exits quickly. 1356 * might get invalid after that point, if the thread exits quickly.
@@ -1405,32 +1368,35 @@ long do_fork(unsigned long clone_flags,
1405 init_completion(&vfork); 1368 init_completion(&vfork);
1406 } 1369 }
1407 1370
1408 if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { 1371 tracehook_report_clone(trace, regs, clone_flags, nr, p);
1372
1373 /*
1374 * We set PF_STARTING at creation in case tracing wants to
1375 * use this to distinguish a fully live task from one that
1376 * hasn't gotten to tracehook_report_clone() yet. Now we
1377 * clear it and set the child going.
1378 */
1379 p->flags &= ~PF_STARTING;
1380
1381 if (unlikely(clone_flags & CLONE_STOPPED)) {
1409 /* 1382 /*
1410 * We'll start up with an immediate SIGSTOP. 1383 * We'll start up with an immediate SIGSTOP.
1411 */ 1384 */
1412 sigaddset(&p->pending.signal, SIGSTOP); 1385 sigaddset(&p->pending.signal, SIGSTOP);
1413 set_tsk_thread_flag(p, TIF_SIGPENDING); 1386 set_tsk_thread_flag(p, TIF_SIGPENDING);
1414 }
1415
1416 if (!(clone_flags & CLONE_STOPPED))
1417 wake_up_new_task(p, clone_flags);
1418 else
1419 __set_task_state(p, TASK_STOPPED); 1387 __set_task_state(p, TASK_STOPPED);
1420 1388 } else {
1421 if (unlikely (trace)) { 1389 wake_up_new_task(p, clone_flags);
1422 current->ptrace_message = nr;
1423 ptrace_notify ((trace << 8) | SIGTRAP);
1424 } 1390 }
1425 1391
1392 tracehook_report_clone_complete(trace, regs,
1393 clone_flags, nr, p);
1394
1426 if (clone_flags & CLONE_VFORK) { 1395 if (clone_flags & CLONE_VFORK) {
1427 freezer_do_not_count(); 1396 freezer_do_not_count();
1428 wait_for_completion(&vfork); 1397 wait_for_completion(&vfork);
1429 freezer_count(); 1398 freezer_count();
1430 if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { 1399 tracehook_report_vfork_done(p, nr);
1431 current->ptrace_message = nr;
1432 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
1433 }
1434 } 1400 }
1435 } else { 1401 } else {
1436 nr = PTR_ERR(p); 1402 nr = PTR_ERR(p);
@@ -1442,7 +1408,7 @@ long do_fork(unsigned long clone_flags,
1442#define ARCH_MIN_MMSTRUCT_ALIGN 0 1408#define ARCH_MIN_MMSTRUCT_ALIGN 0
1443#endif 1409#endif
1444 1410
1445static void sighand_ctor(struct kmem_cache *cachep, void *data) 1411static void sighand_ctor(void *data)
1446{ 1412{
1447 struct sighand_struct *sighand = data; 1413 struct sighand_struct *sighand = data;
1448 1414
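
The fork.c conversion removes fork_traceflag() and the CLONE_PTRACE handling and defers those decisions to tracehook_prepare_clone(), tracehook_finish_clone() and tracehook_report_clone(). The default tracehook_prepare_clone() is expected to reproduce the deleted logic behind a hook; a hedged sketch mirroring the removed fork_traceflag():

/* Sketch of the default hook in <linux/tracehook.h>, assuming it is a
 * straight transplant of the fork_traceflag() test deleted above. */
static inline int tracehook_prepare_clone(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;

	if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

The returned PTRACE_EVENT_* value is what do_fork() now threads through copy_process() as the new trace argument.
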
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 964964baefa2..3cd441ebf5d2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -28,8 +28,7 @@ void dynamic_irq_init(unsigned int irq)
28 unsigned long flags; 28 unsigned long flags;
29 29
30 if (irq >= NR_IRQS) { 30 if (irq >= NR_IRQS) {
31 printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); 31 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
32 WARN_ON(1);
33 return; 32 return;
34 } 33 }
35 34
@@ -62,8 +61,7 @@ void dynamic_irq_cleanup(unsigned int irq)
62 unsigned long flags; 61 unsigned long flags;
63 62
64 if (irq >= NR_IRQS) { 63 if (irq >= NR_IRQS) {
65 printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); 64 WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
66 WARN_ON(1);
67 return; 65 return;
68 } 66 }
69 67
@@ -71,9 +69,8 @@ void dynamic_irq_cleanup(unsigned int irq)
71 spin_lock_irqsave(&desc->lock, flags); 69 spin_lock_irqsave(&desc->lock, flags);
72 if (desc->action) { 70 if (desc->action) {
73 spin_unlock_irqrestore(&desc->lock, flags); 71 spin_unlock_irqrestore(&desc->lock, flags);
74 printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n", 72 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
75 irq); 73 irq);
76 WARN_ON(1);
77 return; 74 return;
78 } 75 }
79 desc->msi_desc = NULL; 76 desc->msi_desc = NULL;
@@ -96,8 +93,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
96 unsigned long flags; 93 unsigned long flags;
97 94
98 if (irq >= NR_IRQS) { 95 if (irq >= NR_IRQS) {
99 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq); 96 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
100 WARN_ON(1);
101 return -EINVAL; 97 return -EINVAL;
102 } 98 }
103 99
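
The chip.c hunks (and the manage.c one just below) are a mechanical conversion from a printk() plus WARN_ON(1) pair to a single WARN() call, so the formatted message is printed as part of the warning itself. The shape of the conversion on a hypothetical check, with bad_irq() as a placeholder for the real test:

/* Before: message and backtrace come from two separate statements. */
if (bad_irq(irq)) {
	printk(KERN_ERR "Trying to use invalid IRQ%d\n", irq);
	WARN_ON(1);
	return;
}

/* After: WARN(cond, fmt, ...) emits the message together with the
 * backtrace, one statement instead of two. */
if (bad_irq(irq)) {
	WARN(1, KERN_ERR "Trying to use invalid IRQ%d\n", irq);
	return;
}
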
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f8914b92b664..152abfd3589f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -177,8 +177,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
177{ 177{
178 switch (desc->depth) { 178 switch (desc->depth) {
179 case 0: 179 case 0:
180 printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 180 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
181 WARN_ON(1);
182 break; 181 break;
183 case 1: { 182 case 1: {
184 unsigned int status = desc->status & ~IRQ_DISABLED; 183 unsigned int status = desc->status & ~IRQ_DISABLED;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 1c5fcacbcf33..c8a4370e2a34 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -24,6 +24,12 @@
24#include <linux/utsrelease.h> 24#include <linux/utsrelease.h>
25#include <linux/utsname.h> 25#include <linux/utsname.h>
26#include <linux/numa.h> 26#include <linux/numa.h>
27#include <linux/suspend.h>
28#include <linux/device.h>
29#include <linux/freezer.h>
30#include <linux/pm.h>
31#include <linux/cpu.h>
32#include <linux/console.h>
27 33
28#include <asm/page.h> 34#include <asm/page.h>
29#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -242,6 +248,12 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
242 goto out; 248 goto out;
243 } 249 }
244 250
251 image->swap_page = kimage_alloc_control_pages(image, 0);
252 if (!image->swap_page) {
253 printk(KERN_ERR "Could not allocate swap buffer\n");
254 goto out;
255 }
256
245 result = 0; 257 result = 0;
246 out: 258 out:
247 if (result == 0) 259 if (result == 0)
@@ -589,14 +601,12 @@ static void kimage_free_extra_pages(struct kimage *image)
589 kimage_free_page_list(&image->unuseable_pages); 601 kimage_free_page_list(&image->unuseable_pages);
590 602
591} 603}
592static int kimage_terminate(struct kimage *image) 604static void kimage_terminate(struct kimage *image)
593{ 605{
594 if (*image->entry != 0) 606 if (*image->entry != 0)
595 image->entry++; 607 image->entry++;
596 608
597 *image->entry = IND_DONE; 609 *image->entry = IND_DONE;
598
599 return 0;
600} 610}
601 611
602#define for_each_kimage_entry(image, ptr, entry) \ 612#define for_each_kimage_entry(image, ptr, entry) \
@@ -988,6 +998,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
988 if (result) 998 if (result)
989 goto out; 999 goto out;
990 1000
1001 if (flags & KEXEC_PRESERVE_CONTEXT)
1002 image->preserve_context = 1;
991 result = machine_kexec_prepare(image); 1003 result = machine_kexec_prepare(image);
992 if (result) 1004 if (result)
993 goto out; 1005 goto out;
@@ -997,9 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
997 if (result) 1009 if (result)
998 goto out; 1010 goto out;
999 } 1011 }
1000 result = kimage_terminate(image); 1012 kimage_terminate(image);
1001 if (result)
1002 goto out;
1003 } 1013 }
1004 /* Install the new kernel, and Uninstall the old */ 1014 /* Install the new kernel, and Uninstall the old */
1005 image = xchg(dest_image, image); 1015 image = xchg(dest_image, image);
@@ -1415,3 +1425,85 @@ static int __init crash_save_vmcoreinfo_init(void)
1415} 1425}
1416 1426
1417module_init(crash_save_vmcoreinfo_init) 1427module_init(crash_save_vmcoreinfo_init)
1428
1429/**
1430 * kernel_kexec - reboot the system
1431 *
1432 * Move into place and start executing a preloaded standalone
1433 * executable. If nothing was preloaded return an error.
1434 */
1435int kernel_kexec(void)
1436{
1437 int error = 0;
1438
1439 if (xchg(&kexec_lock, 1))
1440 return -EBUSY;
1441 if (!kexec_image) {
1442 error = -EINVAL;
1443 goto Unlock;
1444 }
1445
1446 if (kexec_image->preserve_context) {
1447#ifdef CONFIG_KEXEC_JUMP
1448 mutex_lock(&pm_mutex);
1449 pm_prepare_console();
1450 error = freeze_processes();
1451 if (error) {
1452 error = -EBUSY;
1453 goto Restore_console;
1454 }
1455 suspend_console();
1456 error = device_suspend(PMSG_FREEZE);
1457 if (error)
1458 goto Resume_console;
1459 error = disable_nonboot_cpus();
1460 if (error)
1461 goto Resume_devices;
1462 local_irq_disable();
1463 /* At this point, device_suspend() has been called,
1464 * but *not* device_power_down(). We *must*
1465 * device_power_down() now. Otherwise, drivers for
1466 * some devices (e.g. interrupt controllers) become
1467 * desynchronized with the actual state of the
1468 * hardware at resume time, and evil weirdness ensues.
1469 */
1470 error = device_power_down(PMSG_FREEZE);
1471 if (error)
1472 goto Enable_irqs;
1473 save_processor_state();
1474#endif
1475 } else {
1476 blocking_notifier_call_chain(&reboot_notifier_list,
1477 SYS_RESTART, NULL);
1478 system_state = SYSTEM_RESTART;
1479 device_shutdown();
1480 sysdev_shutdown();
1481 printk(KERN_EMERG "Starting new kernel\n");
1482 machine_shutdown();
1483 }
1484
1485 machine_kexec(kexec_image);
1486
1487 if (kexec_image->preserve_context) {
1488#ifdef CONFIG_KEXEC_JUMP
1489 restore_processor_state();
1490 device_power_up(PMSG_RESTORE);
1491 Enable_irqs:
1492 local_irq_enable();
1493 enable_nonboot_cpus();
1494 Resume_devices:
1495 device_resume(PMSG_RESTORE);
1496 Resume_console:
1497 resume_console();
1498 thaw_processes();
1499 Restore_console:
1500 pm_restore_console();
1501 mutex_unlock(&pm_mutex);
1502#endif
1503 }
1504
1505 Unlock:
1506 xchg(&kexec_lock, 0);
1507
1508 return error;
1509}
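
kernel_kexec() moves here from sys.c and grows the kexec-jump path: when the image was loaded with KEXEC_PRESERVE_CONTEXT it suspends devices, disables non-boot CPUs and saves processor state so the old kernel can be returned to. From userspace the feature would be reached roughly as below; this is a hedged sketch only, it assumes KEXEC_PRESERVE_CONTEXT and struct kexec_segment are visible through <linux/kexec.h> (kexec-tools traditionally carries its own copies), and segment setup plus error handling are elided:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>
#include <linux/reboot.h>

/* Load a context-preserving image, then jump to it via sys_reboot().
 * With this patch LINUX_REBOOT_CMD_KEXEC returns kernel_kexec()'s
 * result instead of always failing with -EINVAL (see the sys.c hunk
 * further down). */
static int kexec_jump(unsigned long entry,
		      struct kexec_segment *segs, unsigned long nr_segs)
{
	if (syscall(SYS_kexec_load, entry, nr_segs, segs,
		    KEXEC_ARCH_DEFAULT | KEXEC_PRESERVE_CONTEXT) < 0)
		return -1;

	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_KEXEC, NULL);
}
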
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6111c27491b1..96cff2f8710b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
176 return; 176 return;
177 } 177 }
178 /* Must have done schedule() in kthread() before we set_task_cpu */ 178 /* Must have done schedule() in kthread() before we set_task_cpu */
179 wait_task_inactive(k); 179 wait_task_inactive(k, 0);
180 set_task_cpu(k, cpu); 180 set_task_cpu(k, cpu);
181 k->cpus_allowed = cpumask_of_cpu(cpu); 181 k->cpus_allowed = cpumask_of_cpu(cpu);
182 k->rt.nr_cpus_allowed = 1; 182 k->rt.nr_cpus_allowed = 1;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 95bff23ecdaa..0b7476f5d2a6 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -635,6 +635,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
635 } 635 }
636 if (status < 0) 636 if (status < 0)
637 printk(err_suspend, status); 637 printk(err_suspend, status);
638
639 /* Some platforms can't detect that the alarm triggered the
640 * wakeup, or (accordingly) disable it after it afterwards.
641 * It's supposed to give oneshot behavior; cope.
642 */
643 alm.enabled = false;
644 rtc_set_alarm(rtc, &alm);
638} 645}
639 646
640static int __init has_wakealarm(struct device *dev, void *name_ptr) 647static int __init has_wakealarm(struct device *dev, void *name_ptr)
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 700f44ec8406..acc0c101dbd5 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -53,8 +53,6 @@ extern int hibernation_platform_enter(void);
53 53
54extern int pfn_is_nosave(unsigned long); 54extern int pfn_is_nosave(unsigned long);
55 55
56extern struct mutex pm_mutex;
57
58#define power_attr(_name) \ 56#define power_attr(_name) \
59static struct kobj_attribute _name##_attr = { \ 57static struct kobj_attribute _name##_attr = { \
60 .attr = { \ 58 .attr = { \
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8392a9da6450..082b3fcb32a0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
107 read_unlock(&tasklist_lock); 107 read_unlock(&tasklist_lock);
108 108
109 if (!ret && !kill) 109 if (!ret && !kill)
110 wait_task_inactive(child); 110 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
111 111
112 /* All systems go.. */ 112 /* All systems go.. */
113 return ret; 113 return ret;
diff --git a/kernel/relay.c b/kernel/relay.c
index 7de644cdec43..04006ef970b8 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -407,6 +407,35 @@ void relay_reset(struct rchan *chan)
407} 407}
408EXPORT_SYMBOL_GPL(relay_reset); 408EXPORT_SYMBOL_GPL(relay_reset);
409 409
410static inline void relay_set_buf_dentry(struct rchan_buf *buf,
411 struct dentry *dentry)
412{
413 buf->dentry = dentry;
414 buf->dentry->d_inode->i_size = buf->early_bytes;
415}
416
417static struct dentry *relay_create_buf_file(struct rchan *chan,
418 struct rchan_buf *buf,
419 unsigned int cpu)
420{
421 struct dentry *dentry;
422 char *tmpname;
423
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 return NULL;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 /* Create file in fs */
430 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
431 S_IRUSR, buf,
432 &chan->is_global);
433
434 kfree(tmpname);
435
436 return dentry;
437}
438
410/* 439/*
411 * relay_open_buf - create a new relay channel buffer 440 * relay_open_buf - create a new relay channel buffer
412 * 441 *
@@ -416,45 +445,34 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
416{ 445{
417 struct rchan_buf *buf = NULL; 446 struct rchan_buf *buf = NULL;
418 struct dentry *dentry; 447 struct dentry *dentry;
419 char *tmpname;
420 448
421 if (chan->is_global) 449 if (chan->is_global)
422 return chan->buf[0]; 450 return chan->buf[0];
423 451
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 goto end;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 buf = relay_create_buf(chan); 452 buf = relay_create_buf(chan);
430 if (!buf) 453 if (!buf)
431 goto free_name; 454 return NULL;
455
456 if (chan->has_base_filename) {
457 dentry = relay_create_buf_file(chan, buf, cpu);
458 if (!dentry)
459 goto free_buf;
460 relay_set_buf_dentry(buf, dentry);
461 }
432 462
433 buf->cpu = cpu; 463 buf->cpu = cpu;
434 __relay_reset(buf, 1); 464 __relay_reset(buf, 1);
435 465
436 /* Create file in fs */
437 dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
438 buf, &chan->is_global);
439 if (!dentry)
440 goto free_buf;
441
442 buf->dentry = dentry;
443
444 if(chan->is_global) { 466 if(chan->is_global) {
445 chan->buf[0] = buf; 467 chan->buf[0] = buf;
446 buf->cpu = 0; 468 buf->cpu = 0;
447 } 469 }
448 470
449 goto free_name; 471 return buf;
450 472
451free_buf: 473free_buf:
452 relay_destroy_buf(buf); 474 relay_destroy_buf(buf);
453 buf = NULL; 475 return NULL;
454free_name:
455 kfree(tmpname);
456end:
457 return buf;
458} 476}
459 477
460/** 478/**
@@ -537,8 +555,8 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
537 555
538/** 556/**
539 * relay_open - create a new relay channel 557 * relay_open - create a new relay channel
540 * @base_filename: base name of files to create 558 * @base_filename: base name of files to create, %NULL for buffering only
541 * @parent: dentry of parent directory, %NULL for root directory 559 * @parent: dentry of parent directory, %NULL for root directory or buffer
542 * @subbuf_size: size of sub-buffers 560 * @subbuf_size: size of sub-buffers
543 * @n_subbufs: number of sub-buffers 561 * @n_subbufs: number of sub-buffers
544 * @cb: client callback functions 562 * @cb: client callback functions
@@ -560,8 +578,6 @@ struct rchan *relay_open(const char *base_filename,
560{ 578{
561 unsigned int i; 579 unsigned int i;
562 struct rchan *chan; 580 struct rchan *chan;
563 if (!base_filename)
564 return NULL;
565 581
566 if (!(subbuf_size && n_subbufs)) 582 if (!(subbuf_size && n_subbufs))
567 return NULL; 583 return NULL;
@@ -576,7 +592,10 @@ struct rchan *relay_open(const char *base_filename,
576 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); 592 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
577 chan->parent = parent; 593 chan->parent = parent;
578 chan->private_data = private_data; 594 chan->private_data = private_data;
579 strlcpy(chan->base_filename, base_filename, NAME_MAX); 595 if (base_filename) {
596 chan->has_base_filename = 1;
597 strlcpy(chan->base_filename, base_filename, NAME_MAX);
598 }
580 setup_callbacks(chan, cb); 599 setup_callbacks(chan, cb);
581 kref_init(&chan->kref); 600 kref_init(&chan->kref);
582 601
@@ -604,6 +623,94 @@ free_bufs:
604} 623}
605EXPORT_SYMBOL_GPL(relay_open); 624EXPORT_SYMBOL_GPL(relay_open);
606 625
626struct rchan_percpu_buf_dispatcher {
627 struct rchan_buf *buf;
628 struct dentry *dentry;
629};
630
631/* Called in atomic context. */
632static void __relay_set_buf_dentry(void *info)
633{
634 struct rchan_percpu_buf_dispatcher *p = info;
635
636 relay_set_buf_dentry(p->buf, p->dentry);
637}
638
639/**
640 * relay_late_setup_files - triggers file creation
641 * @chan: channel to operate on
642 * @base_filename: base name of files to create
643 * @parent: dentry of parent directory, %NULL for root directory
644 *
645 * Returns 0 if successful, non-zero otherwise.
646 *
647 * Use to setup files for a previously buffer-only channel.
648 * Useful to do early tracing in kernel, before VFS is up, for example.
649 */
650int relay_late_setup_files(struct rchan *chan,
651 const char *base_filename,
652 struct dentry *parent)
653{
654 int err = 0;
655 unsigned int i, curr_cpu;
656 unsigned long flags;
657 struct dentry *dentry;
658 struct rchan_percpu_buf_dispatcher disp;
659
660 if (!chan || !base_filename)
661 return -EINVAL;
662
663 strlcpy(chan->base_filename, base_filename, NAME_MAX);
664
665 mutex_lock(&relay_channels_mutex);
666 /* Is chan already set up? */
667 if (unlikely(chan->has_base_filename))
668 return -EEXIST;
669 chan->has_base_filename = 1;
670 chan->parent = parent;
671 curr_cpu = get_cpu();
672 /*
673 * The CPU hotplug notifier ran before us and created buffers with
674 * no files associated. So it's safe to call relay_setup_buf_file()
675 * on all currently online CPUs.
676 */
677 for_each_online_cpu(i) {
678 if (unlikely(!chan->buf[i])) {
679 printk(KERN_ERR "relay_late_setup_files: CPU %u "
680 "has no buffer, it must have!\n", i);
681 BUG();
682 err = -EINVAL;
683 break;
684 }
685
686 dentry = relay_create_buf_file(chan, chan->buf[i], i);
687 if (unlikely(!dentry)) {
688 err = -EINVAL;
689 break;
690 }
691
692 if (curr_cpu == i) {
693 local_irq_save(flags);
694 relay_set_buf_dentry(chan->buf[i], dentry);
695 local_irq_restore(flags);
696 } else {
697 disp.buf = chan->buf[i];
698 disp.dentry = dentry;
699 smp_mb();
700 /* relay_channels_mutex must be held, so wait. */
701 err = smp_call_function_single(i,
702 __relay_set_buf_dentry,
703 &disp, 1);
704 }
705 if (unlikely(err))
706 break;
707 }
708 put_cpu();
709 mutex_unlock(&relay_channels_mutex);
710
711 return err;
712}
713
607/** 714/**
608 * relay_switch_subbuf - switch to a new sub-buffer 715 * relay_switch_subbuf - switch to a new sub-buffer
609 * @buf: channel buffer 716 * @buf: channel buffer
@@ -627,8 +734,13 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
627 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; 734 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
628 buf->padding[old_subbuf] = buf->prev_padding; 735 buf->padding[old_subbuf] = buf->prev_padding;
629 buf->subbufs_produced++; 736 buf->subbufs_produced++;
630 buf->dentry->d_inode->i_size += buf->chan->subbuf_size - 737 if (buf->dentry)
631 buf->padding[old_subbuf]; 738 buf->dentry->d_inode->i_size +=
739 buf->chan->subbuf_size -
740 buf->padding[old_subbuf];
741 else
742 buf->early_bytes += buf->chan->subbuf_size -
743 buf->padding[old_subbuf];
632 smp_mb(); 744 smp_mb();
633 if (waitqueue_active(&buf->read_wait)) 745 if (waitqueue_active(&buf->read_wait))
634 /* 746 /*
@@ -1237,4 +1349,4 @@ static __init int relay_init(void)
1237 return 0; 1349 return 0;
1238} 1350}
1239 1351
1240module_init(relay_init); 1352early_initcall(relay_init);
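
With has_base_filename and relay_late_setup_files(), a relay channel can now be opened with a NULL filename before any filesystem exists and get its per-cpu files attached later. A hedged usage sketch; my_callbacks, the buffer sizes and the debugfs directory are placeholders, not part of this patch:

/* Early boot: buffer-only channel, output accumulates in the per-cpu
 * buffers and the produced size is tracked in buf->early_bytes. */
static struct rchan *chan;

static int __init early_trace_init(void)
{
	chan = relay_open(NULL, NULL, 64 * 1024, 8, &my_callbacks, NULL);
	return chan ? 0 : -ENOMEM;
}

/* Later, once debugfs is mounted: create cpu0, cpu1, ... under a
 * directory and retroactively size the inodes from early_bytes. */
static int late_trace_init(void)
{
	struct dentry *dir = debugfs_create_dir("early_trace", NULL);

	return relay_late_setup_files(chan, "cpu", dir);
}
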
diff --git a/kernel/sched.c b/kernel/sched.c
index 0047bd9b96aa..0236958addcb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1867/* 1867/*
1868 * wait_task_inactive - wait for a thread to unschedule. 1868 * wait_task_inactive - wait for a thread to unschedule.
1869 * 1869 *
1870 * If @match_state is nonzero, it's the @p->state value just checked and
1871 * not expected to change. If it changes, i.e. @p might have woken up,
1872 * then return zero. When we succeed in waiting for @p to be off its CPU,
1873 * we return a positive number (its total switch count). If a second call
1874 * a short while later returns the same number, the caller can be sure that
1875 * @p has remained unscheduled the whole time.
1876 *
1870 * The caller must ensure that the task *will* unschedule sometime soon, 1877 * The caller must ensure that the task *will* unschedule sometime soon,
1871 * else this function might spin for a *long* time. This function can't 1878 * else this function might spin for a *long* time. This function can't
1872 * be called with interrupts off, or it may introduce deadlock with 1879 * be called with interrupts off, or it may introduce deadlock with
1873 * smp_call_function() if an IPI is sent by the same process we are 1880 * smp_call_function() if an IPI is sent by the same process we are
1874 * waiting to become inactive. 1881 * waiting to become inactive.
1875 */ 1882 */
1876void wait_task_inactive(struct task_struct *p) 1883unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1877{ 1884{
1878 unsigned long flags; 1885 unsigned long flags;
1879 int running, on_rq; 1886 int running, on_rq;
1887 unsigned long ncsw;
1880 struct rq *rq; 1888 struct rq *rq;
1881 1889
1882 for (;;) { 1890 for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
1899 * return false if the runqueue has changed and p 1907 * return false if the runqueue has changed and p
1900 * is actually now running somewhere else! 1908 * is actually now running somewhere else!
1901 */ 1909 */
1902 while (task_running(rq, p)) 1910 while (task_running(rq, p)) {
1911 if (match_state && unlikely(p->state != match_state))
1912 return 0;
1903 cpu_relax(); 1913 cpu_relax();
1914 }
1904 1915
1905 /* 1916 /*
1906 * Ok, time to look more closely! We need the rq 1917 * Ok, time to look more closely! We need the rq
@@ -1910,9 +1921,21 @@ void wait_task_inactive(struct task_struct *p)
1910 rq = task_rq_lock(p, &flags); 1921 rq = task_rq_lock(p, &flags);
1911 running = task_running(rq, p); 1922 running = task_running(rq, p);
1912 on_rq = p->se.on_rq; 1923 on_rq = p->se.on_rq;
1924 ncsw = 0;
1925 if (!match_state || p->state == match_state) {
1926 ncsw = p->nivcsw + p->nvcsw;
1927 if (unlikely(!ncsw))
1928 ncsw = 1;
1929 }
1913 task_rq_unlock(rq, &flags); 1930 task_rq_unlock(rq, &flags);
1914 1931
1915 /* 1932 /*
1933 * If it changed from the expected state, bail out now.
1934 */
1935 if (unlikely(!ncsw))
1936 break;
1937
1938 /*
1916 * Was it really running after all now that we 1939 * Was it really running after all now that we
1917 * checked with the proper locks actually held? 1940 * checked with the proper locks actually held?
1918 * 1941 *
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
1944 */ 1967 */
1945 break; 1968 break;
1946 } 1969 }
1970
1971 return ncsw;
1947} 1972}
1948 1973
1949/*** 1974/***
@@ -6389,7 +6414,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
6389 .priority = 10 6414 .priority = 10
6390}; 6415};
6391 6416
6392void __init migration_init(void) 6417static int __init migration_init(void)
6393{ 6418{
6394 void *cpu = (void *)(long)smp_processor_id(); 6419 void *cpu = (void *)(long)smp_processor_id();
6395 int err; 6420 int err;
@@ -6399,7 +6424,10 @@ void __init migration_init(void)
6399 BUG_ON(err == NOTIFY_BAD); 6424 BUG_ON(err == NOTIFY_BAD);
6400 migration_call(&migration_notifier, CPU_ONLINE, cpu); 6425 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6401 register_cpu_notifier(&migration_notifier); 6426 register_cpu_notifier(&migration_notifier);
6427
6428 return err;
6402} 6429}
6430early_initcall(migration_init);
6403#endif 6431#endif
6404 6432
6405#ifdef CONFIG_SMP 6433#ifdef CONFIG_SMP
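
wait_task_inactive() now takes the task state the caller just observed and returns the task's context-switch count, or zero if the state changed before the task got off the CPU. The kthread.c hunk passes 0 to keep the old unconditional behaviour, while ptrace_check_attach() above treats a zero return as -ESRCH. A hedged caller sketch showing how the switch count can also prove the task never ran between two checks:

/* Sketch of the new calling convention; "child" is assumed to be a
 * traced task that has just been asked to stop. */
static int child_really_stayed_stopped(struct task_struct *child)
{
	unsigned long ncsw = wait_task_inactive(child, TASK_TRACED);

	if (!ncsw)
		return -ESRCH;	/* it woke up or died under us */

	/* ... inspect the stopped child ... */

	/* Per the comment block added above: a second call returning the
	 * same switch count means the task stayed unscheduled throughout. */
	if (wait_task_inactive(child, TASK_TRACED) != ncsw)
		return -ESRCH;

	return 0;
}
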
diff --git a/kernel/signal.c b/kernel/signal.c
index 82c3545596c5..954f77d7e3bc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/signalfd.h> 24#include <linux/signalfd.h>
25#include <linux/tracehook.h>
25#include <linux/capability.h> 26#include <linux/capability.h>
26#include <linux/freezer.h> 27#include <linux/freezer.h>
27#include <linux/pid_namespace.h> 28#include <linux/pid_namespace.h>
@@ -39,24 +40,21 @@
39 40
40static struct kmem_cache *sigqueue_cachep; 41static struct kmem_cache *sigqueue_cachep;
41 42
42static int __sig_ignored(struct task_struct *t, int sig) 43static void __user *sig_handler(struct task_struct *t, int sig)
43{ 44{
44 void __user *handler; 45 return t->sighand->action[sig - 1].sa.sa_handler;
46}
45 47
48static int sig_handler_ignored(void __user *handler, int sig)
49{
46 /* Is it explicitly or implicitly ignored? */ 50 /* Is it explicitly or implicitly ignored? */
47
48 handler = t->sighand->action[sig - 1].sa.sa_handler;
49 return handler == SIG_IGN || 51 return handler == SIG_IGN ||
50 (handler == SIG_DFL && sig_kernel_ignore(sig)); 52 (handler == SIG_DFL && sig_kernel_ignore(sig));
51} 53}
52 54
53static int sig_ignored(struct task_struct *t, int sig) 55static int sig_ignored(struct task_struct *t, int sig)
54{ 56{
55 /* 57 void __user *handler;
56 * Tracers always want to know about signals..
57 */
58 if (t->ptrace & PT_PTRACED)
59 return 0;
60 58
61 /* 59 /*
62 * Blocked signals are never ignored, since the 60 * Blocked signals are never ignored, since the
@@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig)
66 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 64 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
67 return 0; 65 return 0;
68 66
69 return __sig_ignored(t, sig); 67 handler = sig_handler(t, sig);
68 if (!sig_handler_ignored(handler, sig))
69 return 0;
70
71 /*
72 * Tracers may want to know about even ignored signals.
73 */
74 return !tracehook_consider_ignored_signal(t, sig, handler);
70} 75}
71 76
72/* 77/*
@@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
129 134
130void recalc_sigpending(void) 135void recalc_sigpending(void)
131{ 136{
132 if (!recalc_sigpending_tsk(current) && !freezing(current)) 137 if (unlikely(tracehook_force_sigpending()))
138 set_thread_flag(TIF_SIGPENDING);
139 else if (!recalc_sigpending_tsk(current) && !freezing(current))
133 clear_thread_flag(TIF_SIGPENDING); 140 clear_thread_flag(TIF_SIGPENDING);
134 141
135} 142}
@@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default)
295 302
296int unhandled_signal(struct task_struct *tsk, int sig) 303int unhandled_signal(struct task_struct *tsk, int sig)
297{ 304{
305 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
298 if (is_global_init(tsk)) 306 if (is_global_init(tsk))
299 return 1; 307 return 1;
300 if (tsk->ptrace & PT_PTRACED) 308 if (handler != SIG_IGN && handler != SIG_DFL)
301 return 0; 309 return 0;
302 return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) || 310 return !tracehook_consider_fatal_signal(tsk, sig, handler);
303 (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
304} 311}
305 312
306 313
@@ -591,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info,
591 return security_task_kill(t, info, sig, 0); 598 return security_task_kill(t, info, sig, 0);
592} 599}
593 600
594/* forward decl */
595static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
596
597/* 601/*
598 * Handle magic process-wide effects of stop/continue signals. Unlike 602 * Handle magic process-wide effects of stop/continue signals. Unlike
599 * the signal actions, these happen immediately at signal-generation 603 * the signal actions, these happen immediately at signal-generation
@@ -756,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group)
756 if (sig_fatal(p, sig) && 760 if (sig_fatal(p, sig) &&
757 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 761 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
758 !sigismember(&t->real_blocked, sig) && 762 !sigismember(&t->real_blocked, sig) &&
759 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { 763 (sig == SIGKILL ||
764 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
760 /* 765 /*
761 * This signal will be fatal to the whole group. 766 * This signal will be fatal to the whole group.
762 */ 767 */
@@ -1323,9 +1328,11 @@ static inline void __wake_up_parent(struct task_struct *p,
1323/* 1328/*
1324 * Let a parent know about the death of a child. 1329 * Let a parent know about the death of a child.
1325 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1330 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1331 *
1332 * Returns -1 if our parent ignored us and so we've switched to
1333 * self-reaping, or else @sig.
1326 */ 1334 */
1327 1335int do_notify_parent(struct task_struct *tsk, int sig)
1328void do_notify_parent(struct task_struct *tsk, int sig)
1329{ 1336{
1330 struct siginfo info; 1337 struct siginfo info;
1331 unsigned long flags; 1338 unsigned long flags;
@@ -1396,12 +1403,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1396 */ 1403 */
1397 tsk->exit_signal = -1; 1404 tsk->exit_signal = -1;
1398 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1405 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1399 sig = 0; 1406 sig = -1;
1400 } 1407 }
1401 if (valid_signal(sig) && sig > 0) 1408 if (valid_signal(sig) && sig > 0)
1402 __group_send_sig_info(sig, &info, tsk->parent); 1409 __group_send_sig_info(sig, &info, tsk->parent);
1403 __wake_up_parent(tsk, tsk->parent); 1410 __wake_up_parent(tsk, tsk->parent);
1404 spin_unlock_irqrestore(&psig->siglock, flags); 1411 spin_unlock_irqrestore(&psig->siglock, flags);
1412
1413 return sig;
1405} 1414}
1406 1415
1407static void do_notify_parent_cldstop(struct task_struct *tsk, int why) 1416static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
@@ -1599,7 +1608,7 @@ finish_stop(int stop_count)
1599 * a group stop in progress and we are the last to stop, 1608 * a group stop in progress and we are the last to stop,
1600 * report to the parent. When ptraced, every thread reports itself. 1609 * report to the parent. When ptraced, every thread reports itself.
1601 */ 1610 */
1602 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { 1611 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1603 read_lock(&tasklist_lock); 1612 read_lock(&tasklist_lock);
1604 do_notify_parent_cldstop(current, CLD_STOPPED); 1613 do_notify_parent_cldstop(current, CLD_STOPPED);
1605 read_unlock(&tasklist_lock); 1614 read_unlock(&tasklist_lock);
@@ -1735,6 +1744,9 @@ relock:
1735 signal->flags &= ~SIGNAL_CLD_MASK; 1744 signal->flags &= ~SIGNAL_CLD_MASK;
1736 spin_unlock_irq(&sighand->siglock); 1745 spin_unlock_irq(&sighand->siglock);
1737 1746
1747 if (unlikely(!tracehook_notify_jctl(1, why)))
1748 goto relock;
1749
1738 read_lock(&tasklist_lock); 1750 read_lock(&tasklist_lock);
1739 do_notify_parent_cldstop(current->group_leader, why); 1751 do_notify_parent_cldstop(current->group_leader, why);
1740 read_unlock(&tasklist_lock); 1752 read_unlock(&tasklist_lock);
@@ -1748,17 +1760,33 @@ relock:
1748 do_signal_stop(0)) 1760 do_signal_stop(0))
1749 goto relock; 1761 goto relock;
1750 1762
1751 signr = dequeue_signal(current, &current->blocked, info); 1763 /*
1752 if (!signr) 1764 * Tracing can induce an artifical signal and choose sigaction.
1753 break; /* will return 0 */ 1765 * The return value in @signr determines the default action,
1766 * but @info->si_signo is the signal number we will report.
1767 */
1768 signr = tracehook_get_signal(current, regs, info, return_ka);
1769 if (unlikely(signr < 0))
1770 goto relock;
1771 if (unlikely(signr != 0))
1772 ka = return_ka;
1773 else {
1774 signr = dequeue_signal(current, &current->blocked,
1775 info);
1754 1776
1755 if (signr != SIGKILL) {
1756 signr = ptrace_signal(signr, info, regs, cookie);
1757 if (!signr) 1777 if (!signr)
1758 continue; 1778 break; /* will return 0 */
1779
1780 if (signr != SIGKILL) {
1781 signr = ptrace_signal(signr, info,
1782 regs, cookie);
1783 if (!signr)
1784 continue;
1785 }
1786
1787 ka = &sighand->action[signr-1];
1759 } 1788 }
1760 1789
1761 ka = &sighand->action[signr-1];
1762 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1790 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1763 continue; 1791 continue;
1764 if (ka->sa.sa_handler != SIG_DFL) { 1792 if (ka->sa.sa_handler != SIG_DFL) {
@@ -1806,7 +1834,7 @@ relock:
1806 spin_lock_irq(&sighand->siglock); 1834 spin_lock_irq(&sighand->siglock);
1807 } 1835 }
1808 1836
1809 if (likely(do_signal_stop(signr))) { 1837 if (likely(do_signal_stop(info->si_signo))) {
1810 /* It released the siglock. */ 1838 /* It released the siglock. */
1811 goto relock; 1839 goto relock;
1812 } 1840 }
@@ -1827,7 +1855,7 @@ relock:
1827 1855
1828 if (sig_kernel_coredump(signr)) { 1856 if (sig_kernel_coredump(signr)) {
1829 if (print_fatal_signals) 1857 if (print_fatal_signals)
1830 print_fatal_signal(regs, signr); 1858 print_fatal_signal(regs, info->si_signo);
1831 /* 1859 /*
1832 * If it was able to dump core, this kills all 1860 * If it was able to dump core, this kills all
1833 * other threads in the group and synchronizes with 1861 * other threads in the group and synchronizes with
@@ -1836,13 +1864,13 @@ relock:
1836 * first and our do_group_exit call below will use 1864 * first and our do_group_exit call below will use
1837 * that value and ignore the one we pass it. 1865 * that value and ignore the one we pass it.
1838 */ 1866 */
1839 do_coredump((long)signr, signr, regs); 1867 do_coredump(info->si_signo, info->si_signo, regs);
1840 } 1868 }
1841 1869
1842 /* 1870 /*
1843 * Death signals, no core dump. 1871 * Death signals, no core dump.
1844 */ 1872 */
1845 do_group_exit(signr); 1873 do_group_exit(info->si_signo);
1846 /* NOTREACHED */ 1874 /* NOTREACHED */
1847 } 1875 }
1848 spin_unlock_irq(&sighand->siglock); 1876 spin_unlock_irq(&sighand->siglock);
@@ -1884,7 +1912,7 @@ void exit_signals(struct task_struct *tsk)
1884out: 1912out:
1885 spin_unlock_irq(&tsk->sighand->siglock); 1913 spin_unlock_irq(&tsk->sighand->siglock);
1886 1914
1887 if (unlikely(group_stop)) { 1915 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1888 read_lock(&tasklist_lock); 1916 read_lock(&tasklist_lock);
1889 do_notify_parent_cldstop(tsk, CLD_STOPPED); 1917 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1890 read_unlock(&tasklist_lock); 1918 read_unlock(&tasklist_lock);
@@ -1895,7 +1923,6 @@ EXPORT_SYMBOL(recalc_sigpending);
1895EXPORT_SYMBOL_GPL(dequeue_signal); 1923EXPORT_SYMBOL_GPL(dequeue_signal);
1896EXPORT_SYMBOL(flush_signals); 1924EXPORT_SYMBOL(flush_signals);
1897EXPORT_SYMBOL(force_sig); 1925EXPORT_SYMBOL(force_sig);
1898EXPORT_SYMBOL(ptrace_notify);
1899EXPORT_SYMBOL(send_sig); 1926EXPORT_SYMBOL(send_sig);
1900EXPORT_SYMBOL(send_sig_info); 1927EXPORT_SYMBOL(send_sig_info);
1901EXPORT_SYMBOL(sigprocmask); 1928EXPORT_SYMBOL(sigprocmask);
@@ -2299,7 +2326,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2299 * (for example, SIGCHLD), shall cause the pending signal to 2326 * (for example, SIGCHLD), shall cause the pending signal to
2300 * be discarded, whether or not it is blocked" 2327 * be discarded, whether or not it is blocked"
2301 */ 2328 */
2302 if (__sig_ignored(t, sig)) { 2329 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2303 sigemptyset(&mask); 2330 sigemptyset(&mask);
2304 sigaddset(&mask, sig); 2331 sigaddset(&mask, sig);
2305 rm_from_queue_full(&mask, &t->signal->shared_pending); 2332 rm_from_queue_full(&mask, &t->signal->shared_pending);
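
sig_ignored() is split so the handler lookup (sig_handler()) and the ignore test (sig_handler_ignored()) can be reused by do_sigaction(), and the old blanket "tracers see everything" check becomes a tracehook query consulted only after the signal would otherwise be discarded. The default hook is expected to reduce to the ptrace test that was removed; a hedged sketch:

/* Sketch of the default in <linux/tracehook.h>: keep an otherwise
 * ignored signal pending only if the task is ptraced. */
static inline int tracehook_consider_ignored_signal(struct task_struct *task,
						    int sig,
						    void __user *handler)
{
	return (task->ptrace & PT_PTRACED) != 0;
}
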
diff --git a/kernel/smp.c b/kernel/smp.c
index 462c785ca1ee..96fc7c0edc59 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,7 +33,7 @@ struct call_single_queue {
33 spinlock_t lock; 33 spinlock_t lock;
34}; 34};
35 35
36void __cpuinit init_call_single_data(void) 36static int __cpuinit init_call_single_data(void)
37{ 37{
38 int i; 38 int i;
39 39
@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void)
43 spin_lock_init(&q->lock); 43 spin_lock_init(&q->lock);
44 INIT_LIST_HEAD(&q->list); 44 INIT_LIST_HEAD(&q->list);
45 } 45 }
46 return 0;
46} 47}
48early_initcall(init_call_single_data);
47 49
48static void csd_flag_wait(struct call_single_data *data) 50static void csd_flag_wait(struct call_single_data *data)
49{ 51{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f6b03d56c2bf..c506f266a6b9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -630,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
630 .notifier_call = cpu_callback 630 .notifier_call = cpu_callback
631}; 631};
632 632
633__init int spawn_ksoftirqd(void) 633static __init int spawn_ksoftirqd(void)
634{ 634{
635 void *cpu = (void *)(long)smp_processor_id(); 635 void *cpu = (void *)(long)smp_processor_id();
636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
@@ -640,6 +640,7 @@ __init int spawn_ksoftirqd(void)
640 register_cpu_notifier(&cpu_nfb); 640 register_cpu_notifier(&cpu_nfb);
641 return 0; 641 return 0;
642} 642}
643early_initcall(spawn_ksoftirqd);
643 644
644#ifdef CONFIG_SMP 645#ifdef CONFIG_SMP
645/* 646/*
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 7bd8d1aadd5d..b75b492fbfcf 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -338,14 +338,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
338 .notifier_call = cpu_callback 338 .notifier_call = cpu_callback
339}; 339};
340 340
341__init void spawn_softlockup_task(void) 341static int __initdata nosoftlockup;
342
343static int __init nosoftlockup_setup(char *str)
344{
345 nosoftlockup = 1;
346 return 1;
347}
348__setup("nosoftlockup", nosoftlockup_setup);
349
350static int __init spawn_softlockup_task(void)
342{ 351{
343 void *cpu = (void *)(long)smp_processor_id(); 352 void *cpu = (void *)(long)smp_processor_id();
344 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 353 int err;
345 354
346 BUG_ON(err == NOTIFY_BAD); 355 if (nosoftlockup)
356 return 0;
357
358 err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
359 if (err == NOTIFY_BAD) {
360 BUG();
361 return 1;
362 }
347 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); 363 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
348 register_cpu_notifier(&cpu_nfb); 364 register_cpu_notifier(&cpu_nfb);
349 365
350 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 366 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
367
368 return 0;
351} 369}
370early_initcall(spawn_softlockup_task);
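
smp.c, softirq.c and softlockup.c (like migration_init() and relay_init() earlier) all follow the same pattern: a setup function that init/main.c used to call explicitly becomes static, returns an int, and registers itself with early_initcall(), which runs it early in boot without needing an external declaration. The generic shape, with my_subsystem_init as a placeholder name:

static int __init my_subsystem_init(void)
{
	/* former body of the void __init setup function */
	return 0;
}
early_initcall(my_subsystem_init);
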
diff --git a/kernel/sys.c b/kernel/sys.c
index 0c9d3fa1f5ff..c01858090a98 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -301,26 +301,6 @@ void kernel_restart(char *cmd)
301} 301}
302EXPORT_SYMBOL_GPL(kernel_restart); 302EXPORT_SYMBOL_GPL(kernel_restart);
303 303
304/**
305 * kernel_kexec - reboot the system
306 *
307 * Move into place and start executing a preloaded standalone
308 * executable. If nothing was preloaded return an error.
309 */
310static void kernel_kexec(void)
311{
312#ifdef CONFIG_KEXEC
313 struct kimage *image;
314 image = xchg(&kexec_image, NULL);
315 if (!image)
316 return;
317 kernel_restart_prepare(NULL);
318 printk(KERN_EMERG "Starting new kernel\n");
319 machine_shutdown();
320 machine_kexec(image);
321#endif
322}
323
324static void kernel_shutdown_prepare(enum system_states state) 304static void kernel_shutdown_prepare(enum system_states state)
325{ 305{
326 blocking_notifier_call_chain(&reboot_notifier_list, 306 blocking_notifier_call_chain(&reboot_notifier_list,
@@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
425 kernel_restart(buffer); 405 kernel_restart(buffer);
426 break; 406 break;
427 407
408#ifdef CONFIG_KEXEC
428 case LINUX_REBOOT_CMD_KEXEC: 409 case LINUX_REBOOT_CMD_KEXEC:
429 kernel_kexec(); 410 {
430 unlock_kernel(); 411 int ret;
431 return -EINVAL; 412 ret = kernel_kexec();
413 unlock_kernel();
414 return ret;
415 }
416#endif
432 417
433#ifdef CONFIG_HIBERNATION 418#ifdef CONFIG_HIBERNATION
434 case LINUX_REBOOT_CMD_SW_SUSPEND: 419 case LINUX_REBOOT_CMD_SW_SUSPEND:
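
kernel_kexec() is dropped from sys.c (its definition presumably moves in with the rest of the kexec code in this series), and the LINUX_REBOOT_CMD_KEXEC case now returns whatever kernel_kexec() reports instead of a hard-coded -EINVAL; the whole case is compiled out when CONFIG_KEXEC is off. For reference, a hedged userspace sketch of the call that exercises this path (requires CAP_SYS_BOOT):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
        /* Only succeeds if an image was preloaded with kexec_load();
         * otherwise the error code from kernel_kexec() comes back here. */
        long ret = syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
                           LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_KEXEC, NULL);
        if (ret < 0)
                perror("reboot(LINUX_REBOOT_CMD_KEXEC)");
        return ret < 0;
}
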
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 35a50db9b6ce..fe4713347275 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -160,12 +160,13 @@ static struct ctl_table root_table[];
160static struct ctl_table_root sysctl_table_root; 160static struct ctl_table_root sysctl_table_root;
161static struct ctl_table_header root_table_header = { 161static struct ctl_table_header root_table_header = {
162 .ctl_table = root_table, 162 .ctl_table = root_table,
163 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.header_list), 163 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
164 .root = &sysctl_table_root, 164 .root = &sysctl_table_root,
165 .set = &sysctl_table_root.default_set,
165}; 166};
166static struct ctl_table_root sysctl_table_root = { 167static struct ctl_table_root sysctl_table_root = {
167 .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), 168 .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list),
168 .header_list = LIST_HEAD_INIT(root_table_header.ctl_entry), 169 .default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry),
169}; 170};
170 171
171static struct ctl_table kern_table[]; 172static struct ctl_table kern_table[];
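
The root header is rehomed from a flat header_list onto a per-set list: sysctl_table_root now embeds a default_set, and headers record which set they belong to. Pieced together from the fields this patch touches (list, parent, is_seen, default_set, lookup), the new types look roughly like the sketch below; field order and anything omitted are assumptions:

struct ctl_table_set {
        struct list_head list;                  /* headers registered in this set */
        struct ctl_table_set *parent;           /* falls back to the default set */
        int (*is_seen)(struct ctl_table_set *); /* visibility check, may be NULL */
};

struct ctl_table_root {
        struct list_head root_list;
        struct ctl_table_set default_set;
        struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
                                        struct nsproxy *namespaces);
        /* ... */
};
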
@@ -1386,6 +1387,9 @@ static void start_unregistering(struct ctl_table_header *p)
1386 spin_unlock(&sysctl_lock); 1387 spin_unlock(&sysctl_lock);
1387 wait_for_completion(&wait); 1388 wait_for_completion(&wait);
1388 spin_lock(&sysctl_lock); 1389 spin_lock(&sysctl_lock);
1390 } else {
1391 /* anything non-NULL; we'll never dereference it */
1392 p->unregistering = ERR_PTR(-EINVAL);
1389 } 1393 }
1390 /* 1394 /*
1391 * do not remove from the list until nobody holds it; walking the 1395 * do not remove from the list until nobody holds it; walking the
@@ -1394,6 +1398,32 @@ static void start_unregistering(struct ctl_table_header *p)
1394 list_del_init(&p->ctl_entry); 1398 list_del_init(&p->ctl_entry);
1395} 1399}
1396 1400
1401void sysctl_head_get(struct ctl_table_header *head)
1402{
1403 spin_lock(&sysctl_lock);
1404 head->count++;
1405 spin_unlock(&sysctl_lock);
1406}
1407
1408void sysctl_head_put(struct ctl_table_header *head)
1409{
1410 spin_lock(&sysctl_lock);
1411 if (!--head->count)
1412 kfree(head);
1413 spin_unlock(&sysctl_lock);
1414}
1415
1416struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head)
1417{
1418 if (!head)
1419 BUG();
1420 spin_lock(&sysctl_lock);
1421 if (!use_table(head))
1422 head = ERR_PTR(-ENOENT);
1423 spin_unlock(&sysctl_lock);
1424 return head;
1425}
1426
1397void sysctl_head_finish(struct ctl_table_header *head) 1427void sysctl_head_finish(struct ctl_table_header *head)
1398{ 1428{
1399 if (!head) 1429 if (!head)
@@ -1403,14 +1433,20 @@ void sysctl_head_finish(struct ctl_table_header *head)
1403 spin_unlock(&sysctl_lock); 1433 spin_unlock(&sysctl_lock);
1404} 1434}
1405 1435
1436static struct ctl_table_set *
1437lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces)
1438{
1439 struct ctl_table_set *set = &root->default_set;
1440 if (root->lookup)
1441 set = root->lookup(root, namespaces);
1442 return set;
1443}
1444
1406static struct list_head * 1445static struct list_head *
1407lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) 1446lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces)
1408{ 1447{
1409 struct list_head *header_list; 1448 struct ctl_table_set *set = lookup_header_set(root, namespaces);
1410 header_list = &root->header_list; 1449 return &set->list;
1411 if (root->lookup)
1412 header_list = root->lookup(root, namespaces);
1413 return header_list;
1414} 1450}
1415 1451
1416struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, 1452struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
@@ -1480,9 +1516,9 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
1480 int op = 0, rc; 1516 int op = 0, rc;
1481 1517
1482 if (oldval) 1518 if (oldval)
1483 op |= 004; 1519 op |= MAY_READ;
1484 if (newval) 1520 if (newval)
1485 op |= 002; 1521 op |= MAY_WRITE;
1486 if (sysctl_perm(root, table, op)) 1522 if (sysctl_perm(root, table, op))
1487 return -EPERM; 1523 return -EPERM;
1488 1524
@@ -1524,7 +1560,7 @@ repeat:
1524 if (n == table->ctl_name) { 1560 if (n == table->ctl_name) {
1525 int error; 1561 int error;
1526 if (table->child) { 1562 if (table->child) {
1527 if (sysctl_perm(root, table, 001)) 1563 if (sysctl_perm(root, table, MAY_EXEC))
1528 return -EPERM; 1564 return -EPERM;
1529 name++; 1565 name++;
1530 nlen--; 1566 nlen--;
@@ -1599,7 +1635,7 @@ static int test_perm(int mode, int op)
1599 mode >>= 6; 1635 mode >>= 6;
1600 else if (in_egroup_p(0)) 1636 else if (in_egroup_p(0))
1601 mode >>= 3; 1637 mode >>= 3;
1602 if ((mode & op & 0007) == op) 1638 if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0)
1603 return 0; 1639 return 0;
1604 return -EACCES; 1640 return -EACCES;
1605} 1641}
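
do_sysctl_strategy(), parse_table() and test_perm() trade the bare octal constants for the VFS MAY_READ/MAY_WRITE/MAY_EXEC bits (4, 2 and 1 in linux/fs.h), so the numeric values are unchanged, and the rewritten mode test accepts exactly the same combinations as the old one. A small userspace check of that equivalence (constants mirrored by hand for the sketch):

#include <stdio.h>

#define MAY_EXEC        1       /* values as in linux/fs.h */
#define MAY_WRITE       2
#define MAY_READ        4

int main(void)
{
        int mode, op, mismatches = 0;

        for (mode = 0; mode < 8; mode++)
                for (op = 0; op < 8; op++) {
                        int old_ok = (mode & op & 0007) == op;
                        int new_ok = (op & ~mode &
                                      (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0;
                        if (old_ok != new_ok)
                                mismatches++;
                }
        printf("mismatches: %d\n", mismatches); /* prints 0 */
        return 0;
}
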
@@ -1609,7 +1645,7 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
1609 int error; 1645 int error;
1610 int mode; 1646 int mode;
1611 1647
1612 error = security_sysctl(table, op); 1648 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
1613 if (error) 1649 if (error)
1614 return error; 1650 return error;
1615 1651
@@ -1644,6 +1680,54 @@ static __init int sysctl_init(void)
1644 1680
1645core_initcall(sysctl_init); 1681core_initcall(sysctl_init);
1646 1682
1683static struct ctl_table *is_branch_in(struct ctl_table *branch,
1684 struct ctl_table *table)
1685{
1686 struct ctl_table *p;
1687 const char *s = branch->procname;
1688
1689 /* branch should have named subdirectory as its first element */
1690 if (!s || !branch->child)
1691 return NULL;
1692
1693 /* ... and nothing else */
1694 if (branch[1].procname || branch[1].ctl_name)
1695 return NULL;
1696
1697 /* table should contain subdirectory with the same name */
1698 for (p = table; p->procname || p->ctl_name; p++) {
1699 if (!p->child)
1700 continue;
1701 if (p->procname && strcmp(p->procname, s) == 0)
1702 return p;
1703 }
1704 return NULL;
1705}
1706
1707/* see if attaching q to p would be an improvement */
1708static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
1709{
1710 struct ctl_table *to = p->ctl_table, *by = q->ctl_table;
1711 struct ctl_table *next;
1712 int is_better = 0;
1713 int not_in_parent = !p->attached_by;
1714
1715 while ((next = is_branch_in(by, to)) != NULL) {
1716 if (by == q->attached_by)
1717 is_better = 1;
1718 if (to == p->attached_by)
1719 not_in_parent = 1;
1720 by = by->child;
1721 to = next->child;
1722 }
1723
1724 if (is_better && not_in_parent) {
1725 q->attached_by = by;
1726 q->attached_to = to;
1727 q->parent = p;
1728 }
1729}
1730
1647/** 1731/**
1648 * __register_sysctl_paths - register a sysctl hierarchy 1732 * __register_sysctl_paths - register a sysctl hierarchy
1649 * @root: List of sysctl headers to register on 1733 * @root: List of sysctl headers to register on
@@ -1720,10 +1804,10 @@ struct ctl_table_header *__register_sysctl_paths(
1720 struct nsproxy *namespaces, 1804 struct nsproxy *namespaces,
1721 const struct ctl_path *path, struct ctl_table *table) 1805 const struct ctl_path *path, struct ctl_table *table)
1722{ 1806{
1723 struct list_head *header_list;
1724 struct ctl_table_header *header; 1807 struct ctl_table_header *header;
1725 struct ctl_table *new, **prevp; 1808 struct ctl_table *new, **prevp;
1726 unsigned int n, npath; 1809 unsigned int n, npath;
1810 struct ctl_table_set *set;
1727 1811
1728 /* Count the path components */ 1812 /* Count the path components */
1729 for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath) 1813 for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath)
@@ -1765,6 +1849,7 @@ struct ctl_table_header *__register_sysctl_paths(
1765 header->unregistering = NULL; 1849 header->unregistering = NULL;
1766 header->root = root; 1850 header->root = root;
1767 sysctl_set_parent(NULL, header->ctl_table); 1851 sysctl_set_parent(NULL, header->ctl_table);
1852 header->count = 1;
1768#ifdef CONFIG_SYSCTL_SYSCALL_CHECK 1853#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
1769 if (sysctl_check_table(namespaces, header->ctl_table)) { 1854 if (sysctl_check_table(namespaces, header->ctl_table)) {
1770 kfree(header); 1855 kfree(header);
@@ -1772,8 +1857,20 @@ struct ctl_table_header *__register_sysctl_paths(
1772 } 1857 }
1773#endif 1858#endif
1774 spin_lock(&sysctl_lock); 1859 spin_lock(&sysctl_lock);
1775 header_list = lookup_header_list(root, namespaces); 1860 header->set = lookup_header_set(root, namespaces);
1776 list_add_tail(&header->ctl_entry, header_list); 1861 header->attached_by = header->ctl_table;
1862 header->attached_to = root_table;
1863 header->parent = &root_table_header;
1864 for (set = header->set; set; set = set->parent) {
1865 struct ctl_table_header *p;
1866 list_for_each_entry(p, &set->list, ctl_entry) {
1867 if (p->unregistering)
1868 continue;
1869 try_attach(p, header);
1870 }
1871 }
1872 header->parent->count++;
1873 list_add_tail(&header->ctl_entry, &header->set->list);
1777 spin_unlock(&sysctl_lock); 1874 spin_unlock(&sysctl_lock);
1778 1875
1779 return header; 1876 return header;
@@ -1828,8 +1925,37 @@ void unregister_sysctl_table(struct ctl_table_header * header)
1828 1925
1829 spin_lock(&sysctl_lock); 1926 spin_lock(&sysctl_lock);
1830 start_unregistering(header); 1927 start_unregistering(header);
1928 if (!--header->parent->count) {
1929 WARN_ON(1);
1930 kfree(header->parent);
1931 }
1932 if (!--header->count)
1933 kfree(header);
1831 spin_unlock(&sysctl_lock); 1934 spin_unlock(&sysctl_lock);
1832 kfree(header); 1935}
1936
1937int sysctl_is_seen(struct ctl_table_header *p)
1938{
1939 struct ctl_table_set *set = p->set;
1940 int res;
1941 spin_lock(&sysctl_lock);
1942 if (p->unregistering)
1943 res = 0;
1944 else if (!set->is_seen)
1945 res = 1;
1946 else
1947 res = set->is_seen(set);
1948 spin_unlock(&sysctl_lock);
1949 return res;
1950}
1951
1952void setup_sysctl_set(struct ctl_table_set *p,
1953 struct ctl_table_set *parent,
1954 int (*is_seen)(struct ctl_table_set *))
1955{
1956 INIT_LIST_HEAD(&p->list);
1957 p->parent = parent ? parent : &sysctl_table_root.default_set;
1958 p->is_seen = is_seen;
1833} 1959}
1834 1960
1835#else /* !CONFIG_SYSCTL */ 1961#else /* !CONFIG_SYSCTL */
@@ -1848,6 +1974,16 @@ void unregister_sysctl_table(struct ctl_table_header * table)
1848{ 1974{
1849} 1975}
1850 1976
1977void setup_sysctl_set(struct ctl_table_set *p,
1978 struct ctl_table_set *parent,
1979 int (*is_seen)(struct ctl_table_set *))
1980{
1981}
1982
1983void sysctl_head_put(struct ctl_table_header *head)
1984{
1985}
1986
1851#endif /* CONFIG_SYSCTL */ 1987#endif /* CONFIG_SYSCTL */
1852 1988
1853/* 1989/*
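
Taken together, the sysctl.c changes give headers a reference count (sysctl_head_get/put/grab), an attachment hierarchy (try_attach), and per-set registration lists, which is the groundwork for namespace-local sysctls. A heavily hedged sketch of how a root with its own set might plug into the new hooks; every name below is hypothetical and a real user would derive the set from the caller's namespace:

static struct ctl_table_set example_set;

static struct ctl_table_set *example_lookup(struct ctl_table_root *root,
                                            struct nsproxy *namespaces)
{
        /* a real user would pick a set out of @namespaces */
        return &example_set;
}

static struct ctl_table_root example_root = {
        .root_list = LIST_HEAD_INIT(example_root.root_list),
        .lookup    = example_lookup,
};

static int __init example_sysctl_init(void)
{
        /* NULL parent falls back to sysctl_table_root.default_set */
        setup_sysctl_set(&example_root.default_set, NULL, NULL);
        setup_sysctl_set(&example_set, NULL, NULL);
        register_sysctl_root(&example_root);
        return 0;
}
core_initcall(example_sysctl_init);
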
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 868e121c8e38..8f3fb3db61c3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1183,7 +1183,6 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
1183static void *s_next(struct seq_file *m, void *v, loff_t *pos) 1183static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1184{ 1184{
1185 struct trace_iterator *iter = m->private; 1185 struct trace_iterator *iter = m->private;
1186 void *last_ent = iter->ent;
1187 int i = (int)*pos; 1186 int i = (int)*pos;
1188 void *ent; 1187 void *ent;
1189 1188
@@ -1203,9 +1202,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1203 1202
1204 iter->pos = *pos; 1203 iter->pos = *pos;
1205 1204
1206 if (last_ent && !ent)
1207 seq_puts(m, "\n\nvim:ft=help\n");
1208
1209 return ent; 1205 return ent;
1210} 1206}
1211 1207
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 421d6fe3650e..ece6cfb649fa 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -253,12 +253,14 @@ void start_critical_timings(void)
253 if (preempt_trace() || irq_trace()) 253 if (preempt_trace() || irq_trace())
254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
255} 255}
256EXPORT_SYMBOL_GPL(start_critical_timings);
256 257
257void stop_critical_timings(void) 258void stop_critical_timings(void)
258{ 259{
259 if (preempt_trace() || irq_trace()) 260 if (preempt_trace() || irq_trace())
260 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 261 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
261} 262}
263EXPORT_SYMBOL_GPL(stop_critical_timings);
262 264
263#ifdef CONFIG_IRQSOFF_TRACER 265#ifdef CONFIG_IRQSOFF_TRACER
264#ifdef CONFIG_PROVE_LOCKING 266#ifdef CONFIG_PROVE_LOCKING
@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
337#ifdef CONFIG_PREEMPT_TRACER 339#ifdef CONFIG_PREEMPT_TRACER
338void trace_preempt_on(unsigned long a0, unsigned long a1) 340void trace_preempt_on(unsigned long a0, unsigned long a1)
339{ 341{
340 stop_critical_timing(a0, a1); 342 if (preempt_trace())
343 stop_critical_timing(a0, a1);
341} 344}
342 345
343void trace_preempt_off(unsigned long a0, unsigned long a1) 346void trace_preempt_off(unsigned long a0, unsigned long a1)
344{ 347{
345 start_critical_timing(a0, a1); 348 if (preempt_trace())
349 start_critical_timing(a0, a1);
346} 350}
347#endif /* CONFIG_PREEMPT_TRACER */ 351#endif /* CONFIG_PREEMPT_TRACER */
348 352
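
start_critical_timings() and stop_critical_timings() are now exported, and the preempt tracer hooks only act when preempt tracing is enabled. The exports presumably allow modular code (such as idle drivers) to bracket deliberately long interrupts-off stretches so they are not reported as latencies; a hedged sketch, with the caller and the header carrying the declarations assumed:

#include <linux/ftrace.h>

static void example_quiet_stretch(void)
{
        /* tell the irqsoff/preemptoff tracers to ignore what follows */
        stop_critical_timings();

        /* ... unavoidable busy-wait with interrupts disabled ... */

        start_critical_timings();
}
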
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3c8d61df4474..e303ccb62cdf 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
26static int wakeup_cpu; 26static int wakeup_cpu;
27static unsigned wakeup_prio = -1; 27static unsigned wakeup_prio = -1;
28 28
29static DEFINE_SPINLOCK(wakeup_lock); 29static raw_spinlock_t wakeup_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
30 31
31static void __wakeup_reset(struct trace_array *tr); 32static void __wakeup_reset(struct trace_array *tr);
32 33
@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
56 if (unlikely(disabled != 1)) 57 if (unlikely(disabled != 1))
57 goto out; 58 goto out;
58 59
59 spin_lock_irqsave(&wakeup_lock, flags); 60 local_irq_save(flags);
61 __raw_spin_lock(&wakeup_lock);
60 62
61 if (unlikely(!wakeup_task)) 63 if (unlikely(!wakeup_task))
62 goto unlock; 64 goto unlock;
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
71 trace_function(tr, data, ip, parent_ip, flags); 73 trace_function(tr, data, ip, parent_ip, flags);
72 74
73 unlock: 75 unlock:
74 spin_unlock_irqrestore(&wakeup_lock, flags); 76 __raw_spin_unlock(&wakeup_lock);
77 local_irq_restore(flags);
75 78
76 out: 79 out:
77 atomic_dec(&data->disabled); 80 atomic_dec(&data->disabled);
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
145 if (likely(disabled != 1)) 148 if (likely(disabled != 1))
146 goto out; 149 goto out;
147 150
148 spin_lock_irqsave(&wakeup_lock, flags); 151 local_irq_save(flags);
152 __raw_spin_lock(&wakeup_lock);
149 153
150 /* We could race with grabbing wakeup_lock */ 154 /* We could race with grabbing wakeup_lock */
151 if (unlikely(!tracer_enabled || next != wakeup_task)) 155 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
174 178
175out_unlock: 179out_unlock:
176 __wakeup_reset(tr); 180 __wakeup_reset(tr);
177 spin_unlock_irqrestore(&wakeup_lock, flags); 181 __raw_spin_unlock(&wakeup_lock);
182 local_irq_restore(flags);
178out: 183out:
179 atomic_dec(&tr->data[cpu]->disabled); 184 atomic_dec(&tr->data[cpu]->disabled);
180} 185}
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
209 struct trace_array_cpu *data; 214 struct trace_array_cpu *data;
210 int cpu; 215 int cpu;
211 216
212 assert_spin_locked(&wakeup_lock);
213
214 for_each_possible_cpu(cpu) { 217 for_each_possible_cpu(cpu) {
215 data = tr->data[cpu]; 218 data = tr->data[cpu];
216 tracing_reset(data); 219 tracing_reset(data);
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
229{ 232{
230 unsigned long flags; 233 unsigned long flags;
231 234
232 spin_lock_irqsave(&wakeup_lock, flags); 235 local_irq_save(flags);
236 __raw_spin_lock(&wakeup_lock);
233 __wakeup_reset(tr); 237 __wakeup_reset(tr);
234 spin_unlock_irqrestore(&wakeup_lock, flags); 238 __raw_spin_unlock(&wakeup_lock);
239 local_irq_restore(flags);
235} 240}
236 241
237static void 242static void
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
252 goto out; 257 goto out;
253 258
254 /* interrupts should be off from try_to_wake_up */ 259 /* interrupts should be off from try_to_wake_up */
255 spin_lock(&wakeup_lock); 260 __raw_spin_lock(&wakeup_lock);
256 261
257 /* check for races. */ 262 /* check for races. */
258 if (!tracer_enabled || p->prio >= wakeup_prio) 263 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
274 CALLER_ADDR1, CALLER_ADDR2, flags); 279 CALLER_ADDR1, CALLER_ADDR2, flags);
275 280
276out_locked: 281out_locked:
277 spin_unlock(&wakeup_lock); 282 __raw_spin_unlock(&wakeup_lock);
278out: 283out:
279 atomic_dec(&tr->data[cpu]->disabled); 284 atomic_dec(&tr->data[cpu]->disabled);
280} 285}
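
The wakeup tracer drops spin_lock_irqsave() in favour of a raw spinlock plus explicit interrupt control, keeping the lock out of lockdep and out of the function tracer's own call paths and thus avoiding recursion while tracing. The resulting idiom, as a minimal sketch with illustrative names:

static raw_spinlock_t example_lock =
        (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void example_traced_path(void)
{
        unsigned long flags;

        /* __raw_spin_lock() does not disable interrupts by itself */
        local_irq_save(flags);
        __raw_spin_lock(&example_lock);

        /* ... touch shared tracer state ... */

        __raw_spin_unlock(&example_lock);
        local_irq_restore(flags);
}
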
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 3da47ccdc5e5..8ebcd8532dfb 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -94,10 +94,10 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
94 stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; 94 stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
95 mmput(mm); 95 mmput(mm);
96 } 96 }
97 stats->read_char = p->rchar; 97 stats->read_char = p->ioac.rchar;
98 stats->write_char = p->wchar; 98 stats->write_char = p->ioac.wchar;
99 stats->read_syscalls = p->syscr; 99 stats->read_syscalls = p->ioac.syscr;
100 stats->write_syscalls = p->syscw; 100 stats->write_syscalls = p->ioac.syscw;
101#ifdef CONFIG_TASK_IO_ACCOUNTING 101#ifdef CONFIG_TASK_IO_ACCOUNTING
102 stats->read_bytes = p->ioac.read_bytes; 102 stats->read_bytes = p->ioac.read_bytes;
103 stats->write_bytes = p->ioac.write_bytes; 103 stats->write_bytes = p->ioac.write_bytes;