author    Ingo Molnar <mingo@elte.hu>  2008-08-11 06:57:01 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-08-11 06:57:01 -0400
commit    6de9c70882ecdee63a652d493bf2353963bd4c22 (patch)
tree      9d219e705492331c97f5f7dccce3b0b1a29251bf /kernel
parent    d406d21d90dce2e66c7eb4a44605aac947fe55fb (diff)
parent    796aadeb1b2db9b5d463946766c5bbfd7717158c (diff)
Merge branch 'linus' into x86/cleanups
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                    |   1
-rw-r--r--  kernel/audit.c                     |  13
-rw-r--r--  kernel/auditfilter.c               |  10
-rw-r--r--  kernel/auditsc.c                   |  11
-rw-r--r--  kernel/cgroup.c                    |  42
-rw-r--r--  kernel/cpu.c                       |  41
-rw-r--r--  kernel/cpuset.c                    |  71
-rw-r--r--  kernel/dma-coherent.c              | 153
-rw-r--r--  kernel/exec_domain.c               |   1
-rw-r--r--  kernel/exit.c                      |  85
-rw-r--r--  kernel/fork.c                      | 101
-rw-r--r--  kernel/irq/chip.c                  |  12
-rw-r--r--  kernel/irq/manage.c                |   6
-rw-r--r--  kernel/kexec.c                     | 104
-rw-r--r--  kernel/kgdb.c                      |  94
-rw-r--r--  kernel/kthread.c                   |   2
-rw-r--r--  kernel/marker.c                    |  12
-rw-r--r--  kernel/module.c                    |  33
-rw-r--r--  kernel/mutex.c                     |   1
-rw-r--r--  kernel/pm_qos_params.c             |  16
-rw-r--r--  kernel/power/main.c                |   7
-rw-r--r--  kernel/power/power.h               |   2
-rw-r--r--  kernel/printk.c                    |   8
-rw-r--r--  kernel/ptrace.c                    |   2
-rw-r--r--  kernel/rcuclassic.c                |   4
-rw-r--r--  kernel/relay.c                     | 182
-rw-r--r--  kernel/resource.c                  |   2
-rw-r--r--  kernel/sched.c                     |  78
-rw-r--r--  kernel/semaphore.c                 |   4
-rw-r--r--  kernel/signal.c                    |  99
-rw-r--r--  kernel/smp.c                       |   4
-rw-r--r--  kernel/softirq.c                   |   3
-rw-r--r--  kernel/softlockup.c                |  25
-rw-r--r--  kernel/stop_machine.c              | 288
-rw-r--r--  kernel/sys.c                       |  31
-rw-r--r--  kernel/sysctl.c                    | 168
-rw-r--r--  kernel/time/tick-common.c          |   8
-rw-r--r--  kernel/trace/ftrace.c              |   6
-rw-r--r--  kernel/trace/trace.c               |   4
-rw-r--r--  kernel/trace/trace_irqsoff.c       |   8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  |  27
-rw-r--r--  kernel/trace/trace_sysprof.c       |   4
-rw-r--r--  kernel/tsacct.c                    |   8
-rw-r--r--  kernel/workqueue.c                 |  13
44 files changed, 1195 insertions, 599 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 54f69837d35..4e1d7df7c3e 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FTRACE) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
diff --git a/kernel/audit.c b/kernel/audit.c
index e092f1c0ce3..4414e93d875 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -707,12 +707,14 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         if (status_get->mask & AUDIT_STATUS_ENABLED) {
                 err = audit_set_enabled(status_get->enabled,
                                         loginuid, sessionid, sid);
-                if (err < 0) return err;
+                if (err < 0)
+                        return err;
         }
         if (status_get->mask & AUDIT_STATUS_FAILURE) {
                 err = audit_set_failure(status_get->failure,
                                         loginuid, sessionid, sid);
-                if (err < 0) return err;
+                if (err < 0)
+                        return err;
         }
         if (status_get->mask & AUDIT_STATUS_PID) {
                 int new_pid = status_get->pid;
@@ -725,9 +727,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                         audit_pid = new_pid;
                         audit_nlk_pid = NETLINK_CB(skb).pid;
                 }
-                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
+                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
                         err = audit_set_rate_limit(status_get->rate_limit,
                                                    loginuid, sessionid, sid);
+                        if (err < 0)
+                                return err;
+                }
                 if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
                         err = audit_set_backlog_limit(status_get->backlog_limit,
                                                       loginuid, sessionid, sid);
@@ -1366,7 +1371,7 @@ int audit_string_contains_control(const char *string, size_t len)
 {
         const unsigned char *p;
         for (p = string; p < (const unsigned char *)string + len && *p; p++) {
-                if (*p == '"' || *p < 0x21 || *p > 0x7f)
+                if (*p == '"' || *p < 0x21 || *p > 0x7e)
                         return 1;
         }
         return 0;
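The audit.c hunk above tightens the printable-range test by one: printable ASCII runs from 0x21 ('!') through 0x7e ('~'), and 0x7f is DEL, a control character that the old bound let through. A standalone sketch of the off-by-one (illustrative, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned char del = 0x7f;
            /* old test: DEL passes as clean, because 0x7f > 0x7f is false */
            printf("old flags DEL: %d\n", del == '"' || del < 0x21 || del > 0x7f);
            /* new test: DEL is correctly reported as a control character */
            printf("new flags DEL: %d\n", del == '"' || del < 0x21 || del > 0x7e);
            return 0;
    }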
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 98c50cc671b..b7d354e2b0e 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1022,8 +1022,11 @@ static void audit_update_watch(struct audit_parent *parent,
                         struct audit_buffer *ab;
                         ab = audit_log_start(NULL, GFP_KERNEL,
                                 AUDIT_CONFIG_CHANGE);
+                        audit_log_format(ab, "auid=%u ses=%u",
+                                audit_get_loginuid(current),
+                                audit_get_sessionid(current));
                         audit_log_format(ab,
-                                "op=updated rules specifying path=");
+                                " op=updated rules specifying path=");
                         audit_log_untrustedstring(ab, owatch->path);
                         audit_log_format(ab, " with dev=%u ino=%lu\n",
                                  dev, ino);
@@ -1058,7 +1061,10 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
                         struct audit_buffer *ab;
                         ab = audit_log_start(NULL, GFP_KERNEL,
                                 AUDIT_CONFIG_CHANGE);
-                        audit_log_format(ab, "op=remove rule path=");
+                        audit_log_format(ab, "auid=%u ses=%u",
+                                audit_get_loginuid(current),
+                                audit_get_sessionid(current));
+                        audit_log_format(ab, " op=remove rule path=");
                         audit_log_untrustedstring(ab, w->path);
                         if (r->filterkey) {
                                 audit_log_format(ab, " key=");
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 4699950e65b..972f8e61d36 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -243,6 +243,9 @@ static inline int open_arg(int flags, int mask)
 
 static int audit_match_perm(struct audit_context *ctx, int mask)
 {
+        if (unlikely(!ctx))
+                return 0;
+
         unsigned n = ctx->major;
         switch (audit_classify_syscall(ctx->arch, n)) {
         case 0: /* native */
@@ -284,6 +287,10 @@ static int audit_match_filetype(struct audit_context *ctx, int which)
 {
         unsigned index = which & ~S_IFMT;
         mode_t mode = which & S_IFMT;
+
+        if (unlikely(!ctx))
+                return 0;
+
         if (index >= ctx->name_count)
                 return 0;
         if (ctx->names[index].ino == -1)
@@ -610,7 +617,7 @@ static int audit_filter_rules(struct task_struct *tsk,
                 if (!result)
                         return 0;
         }
-        if (rule->filterkey)
+        if (rule->filterkey && ctx)
                 ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
         switch (rule->action) {
         case AUDIT_NEVER: *state = AUDIT_DISABLED;        break;
@@ -2375,7 +2382,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
         struct audit_context *ctx = tsk->audit_context;
 
         if (audit_pid && t->tgid == audit_pid) {
-                if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) {
+                if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
                         audit_sig_pid = tsk->pid;
                         if (tsk->loginuid != -1)
                                 audit_sig_uid = tsk->loginuid;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 66ec9fd21e0..13932abde15 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -45,6 +45,7 @@
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
 #include <linux/hash.h>
+#include <linux/namei.h>
 
 #include <asm/atomic.h>
 
@@ -354,6 +355,17 @@ static struct css_set *find_existing_css_set(
         return NULL;
 }
 
+static void free_cg_links(struct list_head *tmp)
+{
+        struct cg_cgroup_link *link;
+        struct cg_cgroup_link *saved_link;
+
+        list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
+                list_del(&link->cgrp_link_list);
+                kfree(link);
+        }
+}
+
 /*
  * allocate_cg_links() allocates "count" cg_cgroup_link structures
  * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
@@ -362,17 +374,12 @@ static struct css_set *find_existing_css_set(
 static int allocate_cg_links(int count, struct list_head *tmp)
 {
         struct cg_cgroup_link *link;
-        struct cg_cgroup_link *saved_link;
         int i;
         INIT_LIST_HEAD(tmp);
         for (i = 0; i < count; i++) {
                 link = kmalloc(sizeof(*link), GFP_KERNEL);
                 if (!link) {
-                        list_for_each_entry_safe(link, saved_link, tmp,
-                                                 cgrp_link_list) {
-                                list_del(&link->cgrp_link_list);
-                                kfree(link);
-                        }
+                        free_cg_links(tmp);
                         return -ENOMEM;
                 }
                 list_add(&link->cgrp_link_list, tmp);
@@ -380,17 +387,6 @@ static int allocate_cg_links(int count, struct list_head *tmp)
         return 0;
 }
 
-static void free_cg_links(struct list_head *tmp)
-{
-        struct cg_cgroup_link *link;
-        struct cg_cgroup_link *saved_link;
-
-        list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
-                list_del(&link->cgrp_link_list);
-                kfree(link);
-        }
-}
-
 /*
  * find_css_set() takes an existing cgroup group and a
  * cgroup object, and returns a css_set object that's
@@ -955,7 +951,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
         struct super_block *sb;
         struct cgroupfs_root *root;
         struct list_head tmp_cg_links;
-        INIT_LIST_HEAD(&tmp_cg_links);
 
         /* First find the desired set of subsystems */
         ret = parse_cgroupfs_options(data, &opts);
@@ -1423,14 +1418,17 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
                 if (buffer == NULL)
                         return -ENOMEM;
         }
-        if (nbytes && copy_from_user(buffer, userbuf, nbytes))
-                return -EFAULT;
+        if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
+                retval = -EFAULT;
+                goto out;
+        }
 
         buffer[nbytes] = 0;     /* nul-terminate */
         strstrip(buffer);
         retval = cft->write_string(cgrp, cft, buffer);
         if (!retval)
                 retval = nbytes;
+out:
         if (buffer != local_buffer)
                 kfree(buffer);
         return retval;
@@ -1529,7 +1527,7 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
         return cft->read_seq_string(state->cgroup, cft, m);
 }
 
-int cgroup_seqfile_release(struct inode *inode, struct file *file)
+static int cgroup_seqfile_release(struct inode *inode, struct file *file)
 {
         struct seq_file *seq = file->private_data;
         kfree(seq->private);
@@ -2370,7 +2368,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
         return cgroup_create(c_parent, dentry, mode | S_IFDIR);
 }
 
-static inline int cgroup_has_css_refs(struct cgroup *cgrp)
+static int cgroup_has_css_refs(struct cgroup *cgrp)
 {
         /* Check the reference count on each subsystem. Since we
          * already established that there are no tasks in the
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 10ba5f1004a..e202a68d1cc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -216,7 +216,6 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
         int err, nr_calls = 0;
-        struct task_struct *p;
         cpumask_t old_allowed, tmp;
         void *hcpu = (void *)(long)cpu;
         unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -249,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         cpus_setall(tmp);
         cpu_clear(cpu, tmp);
         set_cpus_allowed_ptr(current, &tmp);
+        tmp = cpumask_of_cpu(cpu);
 
-        p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
-
-        if (IS_ERR(p) || cpu_online(cpu)) {
+        err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+        if (err) {
                 /* CPU didn't die: tell everyone.  Can't complain. */
                 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                             hcpu) == NOTIFY_BAD)
                         BUG();
 
-                if (IS_ERR(p)) {
-                        err = PTR_ERR(p);
-                        goto out_allowed;
-                }
-                goto out_thread;
+                goto out_allowed;
         }
+        BUG_ON(cpu_online(cpu));
 
         /* Wait for it to sleep (leaving idle task). */
         while (!idle_cpu(cpu))
@@ -279,8 +275,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
         check_for_tasks(cpu);
 
-out_thread:
-        err = kthread_stop(p);
 out_allowed:
         set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
@@ -461,3 +455,28 @@ out:
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 #endif /* CONFIG_SMP */
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents all NR_CPUS bits binary values of 1<<nr.
+ *
+ * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * mask value that has a single bit set only.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x)       [x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
+        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
+        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
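The cpu_bit_bitmap[] rows above overlap on purpose: only word 0 of row n+1 is non-zero (bit n set), so stepping a pointer back cpu/BITS_PER_LONG words from row 1 + cpu % BITS_PER_LONG yields a constant NR_CPUS-bit mask whose only set bit is cpu. A sketch of the consumer side, assuming the real lookup lives in linux/cpumask.h (the helper name here is illustrative):

    /* Illustrative reconstruction of how cpumask_of_cpu() can index
     * the table; every word read other than the chosen row's word 0
     * is a zero tail word of a neighbouring row. */
    static inline const unsigned long *cpu_mask_bits(unsigned int cpu)
    {
            const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
            return p - cpu / BITS_PER_LONG;
    }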
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 91cf85b36dd..d5ab79cf516 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -54,7 +54,6 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 #include <linux/mutex.h>
-#include <linux/kfifo.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
 
@@ -486,13 +485,38 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 static void
 update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 {
-        if (!dattr)
-                return;
         if (dattr->relax_domain_level < c->relax_domain_level)
                 dattr->relax_domain_level = c->relax_domain_level;
         return;
 }
 
+static void
+update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+        LIST_HEAD(q);
+
+        list_add(&c->stack_list, &q);
+        while (!list_empty(&q)) {
+                struct cpuset *cp;
+                struct cgroup *cont;
+                struct cpuset *child;
+
+                cp = list_first_entry(&q, struct cpuset, stack_list);
+                list_del(q.next);
+
+                if (cpus_empty(cp->cpus_allowed))
+                        continue;
+
+                if (is_sched_load_balance(cp))
+                        update_domain_attr(dattr, cp);
+
+                list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+                        child = cgroup_cs(cont);
+                        list_add_tail(&child->stack_list, &q);
+                }
+        }
+}
+
 /*
  * rebuild_sched_domains()
  *
@@ -532,7 +556,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
  * So the reverse nesting would risk an ABBA deadlock.
  *
  * The three key local variables below are:
- *    q  - a kfifo queue of cpuset pointers, used to implement a
+ *    q  - a linked-list queue of cpuset pointers, used to implement a
  *         top-down scan of all cpusets.  This scan loads a pointer
  *         to each cpuset marked is_sched_load_balance into the
  *         array 'csa'.  For our purposes, rebuilding the schedulers
@@ -567,7 +591,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 
 void rebuild_sched_domains(void)
 {
-        struct kfifo *q;        /* queue of cpusets to be scanned */
+        LIST_HEAD(q);           /* queue of cpusets to be scanned*/
         struct cpuset *cp;      /* scans q */
         struct cpuset **csa;    /* array of all cpuset ptrs */
         int csn;                /* how many cpuset ptrs in csa so far */
@@ -577,7 +601,6 @@ void rebuild_sched_domains(void)
         int ndoms;              /* number of sched domains in result */
         int nslot;              /* next empty doms[] cpumask_t slot */
 
-        q = NULL;
         csa = NULL;
         doms = NULL;
         dattr = NULL;
@@ -591,35 +614,42 @@ void rebuild_sched_domains(void)
                 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
                 if (dattr) {
                         *dattr = SD_ATTR_INIT;
-                        update_domain_attr(dattr, &top_cpuset);
+                        update_domain_attr_tree(dattr, &top_cpuset);
                 }
                 *doms = top_cpuset.cpus_allowed;
                 goto rebuild;
         }
 
-        q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
-        if (IS_ERR(q))
-                goto done;
         csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
         if (!csa)
                 goto done;
         csn = 0;
 
-        cp = &top_cpuset;
-        __kfifo_put(q, (void *)&cp, sizeof(cp));
-        while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
+        list_add(&top_cpuset.stack_list, &q);
+        while (!list_empty(&q)) {
                 struct cgroup *cont;
                 struct cpuset *child;   /* scans child cpusets of cp */
 
+                cp = list_first_entry(&q, struct cpuset, stack_list);
+                list_del(q.next);
+
                 if (cpus_empty(cp->cpus_allowed))
                         continue;
 
-                if (is_sched_load_balance(cp))
+                /*
+                 * All child cpusets contain a subset of the parent's cpus, so
+                 * just skip them, and then we call update_domain_attr_tree()
+                 * to calc relax_domain_level of the corresponding sched
+                 * domain.
+                 */
+                if (is_sched_load_balance(cp)) {
                         csa[csn++] = cp;
+                        continue;
+                }
 
                 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
                         child = cgroup_cs(cont);
-                        __kfifo_put(q, (void *)&child, sizeof(cp));
+                        list_add_tail(&child->stack_list, &q);
                 }
         }
 
@@ -686,7 +716,7 @@ restart:
                                 cpus_or(*dp, *dp, b->cpus_allowed);
                                 b->pn = -1;
                                 if (dattr)
-                                        update_domain_attr(dattr
+                                        update_domain_attr_tree(dattr
                                                            + nslot, b);
                         }
                 }
@@ -702,8 +732,6 @@ rebuild:
         put_online_cpus();
 
 done:
-        if (q && !IS_ERR(q))
-                kfifo_free(q);
         kfree(csa);
         /* Don't kfree(doms) -- partition_sched_domains() does that. */
         /* Don't kfree(dattr) -- partition_sched_domains() does that. */
@@ -1833,24 +1861,21 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
  */
 static void scan_for_empty_cpusets(const struct cpuset *root)
 {
+        LIST_HEAD(queue);
         struct cpuset *cp;      /* scans cpusets being updated */
         struct cpuset *child;   /* scans child cpusets of cp */
-        struct list_head queue;
         struct cgroup *cont;
         nodemask_t oldmems;
 
-        INIT_LIST_HEAD(&queue);
-
         list_add_tail((struct list_head *)&root->stack_list, &queue);
 
         while (!list_empty(&queue)) {
-                cp = container_of(queue.next, struct cpuset, stack_list);
+                cp = list_first_entry(&queue, struct cpuset, stack_list);
                 list_del(queue.next);
                 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
                         child = cgroup_cs(cont);
                         list_add_tail(&child->stack_list, &queue);
                 }
-                cont = cp->css.cgroup;
 
                 /* Continue past cpusets with all cpus, mems online */
                 if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
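The cpuset.c conversion above swaps a pre-sized kfifo for a plain list_head used as a FIFO, which is why the kfifo_alloc()/kfifo_free() failure handling could be deleted wholesale: each cpuset carries its own embedded stack_list node, so the queue itself never allocates. A minimal sketch of the idiom, with illustrative names:

    struct node {
            int val;
            struct list_head link;  /* embedded queue linkage, like stack_list */
    };

    static void walk_breadth_first(struct node *root)
    {
            LIST_HEAD(q);           /* on-stack queue head: no allocation, cannot fail */
            struct node *n;

            list_add(&root->link, &q);
            while (!list_empty(&q)) {
                    n = list_first_entry(&q, struct node, link);
                    list_del(q.next);
                    /* process n; list_add_tail() its children onto q */
            }
    }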
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
new file mode 100644
index 00000000000..91e96950cd5
--- /dev/null
+++ b/kernel/dma-coherent.c
@@ -0,0 +1,153 @@
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+        void            *virt_base;
+        u32             device_base;
+        int             size;
+        int             flags;
+        unsigned long   *bitmap;
+};
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                                dma_addr_t device_addr, size_t size, int flags)
+{
+        void __iomem *mem_base = NULL;
+        int pages = size >> PAGE_SHIFT;
+        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+                goto out;
+        if (!size)
+                goto out;
+        if (dev->dma_mem)
+                goto out;
+
+        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+        mem_base = ioremap(bus_addr, size);
+        if (!mem_base)
+                goto out;
+
+        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+        if (!dev->dma_mem)
+                goto out;
+        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+        if (!dev->dma_mem->bitmap)
+                goto free1_out;
+
+        dev->dma_mem->virt_base = mem_base;
+        dev->dma_mem->device_base = device_addr;
+        dev->dma_mem->size = pages;
+        dev->dma_mem->flags = flags;
+
+        if (flags & DMA_MEMORY_MAP)
+                return DMA_MEMORY_MAP;
+
+        return DMA_MEMORY_IO;
+
+ free1_out:
+        kfree(dev->dma_mem);
+ out:
+        if (mem_base)
+                iounmap(mem_base);
+        return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+        struct dma_coherent_mem *mem = dev->dma_mem;
+
+        if (!mem)
+                return;
+        dev->dma_mem = NULL;
+        iounmap(mem->virt_base);
+        kfree(mem->bitmap);
+        kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                        dma_addr_t device_addr, size_t size)
+{
+        struct dma_coherent_mem *mem = dev->dma_mem;
+        int pos, err;
+
+        size += device_addr & ~PAGE_MASK;
+
+        if (!mem)
+                return ERR_PTR(-EINVAL);
+
+        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+        if (err != 0)
+                return ERR_PTR(err);
+        return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+/**
+ * Try to allocate memory from the per-device coherent area.
+ *
+ * @dev:        device from which we allocate memory
+ * @size:       size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret:        This pointer will be filled with the virtual address
+ *              to allocated area.
+ *
+ * This function should be only called from per-arch %dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return %ret.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+                                       dma_addr_t *dma_handle, void **ret)
+{
+        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+        int order = get_order(size);
+
+        if (mem) {
+                int page = bitmap_find_free_region(mem->bitmap, mem->size,
+                                                     order);
+                if (page >= 0) {
+                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+                        *ret = mem->virt_base + (page << PAGE_SHIFT);
+                        memset(*ret, 0, size);
+                } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+                        *ret = NULL;
+        }
+        return (mem != NULL);
+}
+
+/**
+ * Try to free the memory allocated from per-device coherent memory pool.
+ * @dev:        device from which the memory was allocated
+ * @order:      the order of pages allocated
+ * @vaddr:      virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if
+ * %dma_release_coherent() should proceed with releasing memory from
+ * generic pools.
+ */
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+{
+        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+        if (mem && vaddr >= mem->virt_base && vaddr <
+                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+                bitmap_release_region(mem->bitmap, page, order);
+                return 1;
+        }
+        return 0;
+}
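Per the kerneldoc in the new file, dma_alloc_from_coherent() is meant to be the first stop inside an architecture's dma_alloc_coherent(). A sketch of that calling convention under those assumptions (the wrapper name and the fallback are illustrative, not part of this commit):

    /* Sketch: arch-side allocator consulting the per-device pool first. */
    static void *arch_dma_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *handle, gfp_t gfp)
    {
            void *ret;

            /* A non-zero return means the pool answered; ret may still be
             * NULL if the pool is DMA_MEMORY_EXCLUSIVE and exhausted. */
            if (dma_alloc_from_coherent(dev, size, handle, &ret))
                    return ret;

            /* otherwise fall through to the generic allocator */
            return NULL; /* arch-specific path elided in this sketch */
    }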
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index c1ef192aa65..0d407e88673 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -168,7 +168,6 @@ __set_personality(u_long personality)
         current->personality = personality;
         oep = current_thread_info()->exec_domain;
         current_thread_info()->exec_domain = ep;
-        set_fs_altroot();
 
         module_put(oep->module);
         return 0;
diff --git a/kernel/exit.c b/kernel/exit.c
index ad933bb29ec..38ec4063014 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
 #include <linux/resource.h>
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -120,18 +121,7 @@ static void __exit_signal(struct task_struct *tsk)
                 sig->nivcsw += tsk->nivcsw;
                 sig->inblock += task_io_get_inblock(tsk);
                 sig->oublock += task_io_get_oublock(tsk);
-#ifdef CONFIG_TASK_XACCT
-                sig->rchar += tsk->rchar;
-                sig->wchar += tsk->wchar;
-                sig->syscr += tsk->syscr;
-                sig->syscw += tsk->syscw;
-#endif /* CONFIG_TASK_XACCT */
-#ifdef CONFIG_TASK_IO_ACCOUNTING
-                sig->ioac.read_bytes += tsk->ioac.read_bytes;
-                sig->ioac.write_bytes += tsk->ioac.write_bytes;
-                sig->ioac.cancelled_write_bytes +=
-                                tsk->ioac.cancelled_write_bytes;
-#endif /* CONFIG_TASK_IO_ACCOUNTING */
+                task_io_accounting_add(&sig->ioac, &tsk->ioac);
                 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
                 sig = NULL; /* Marker for below. */
         }
@@ -162,27 +152,17 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
         put_task_struct(container_of(rhp, struct task_struct, rcu));
 }
 
-/*
- * Do final ptrace-related cleanup of a zombie being reaped.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_release_task(struct task_struct *p)
-{
-        BUG_ON(!list_empty(&p->ptraced));
-        ptrace_unlink(p);
-        BUG_ON(!list_empty(&p->ptrace_entry));
-}
-
 
 void release_task(struct task_struct * p)
 {
         struct task_struct *leader;
         int zap_leader;
 repeat:
+        tracehook_prepare_release_task(p);
         atomic_dec(&p->user->processes);
         proc_flush_task(p);
         write_lock_irq(&tasklist_lock);
-        ptrace_release_task(p);
+        tracehook_finish_release_task(p);
         __exit_signal(p);
 
@@ -204,6 +184,13 @@ repeat:
                  * that case.
                  */
                 zap_leader = task_detached(leader);
+
+                /*
+                 * This maintains the invariant that release_task()
+                 * only runs on a task in EXIT_DEAD, just for sanity.
+                 */
+                if (zap_leader)
+                        leader->exit_state = EXIT_DEAD;
         }
 
         write_unlock_irq(&tasklist_lock);
@@ -567,8 +554,6 @@ void put_fs_struct(struct fs_struct *fs)
         if (atomic_dec_and_test(&fs->count)) {
                 path_put(&fs->root);
                 path_put(&fs->pwd);
-                if (fs->altroot.dentry)
-                        path_put(&fs->altroot);
                 kmem_cache_free(fs_cachep, fs);
         }
 }
@@ -887,7 +872,8 @@ static void forget_original_parent(struct task_struct *father)
  */
 static void exit_notify(struct task_struct *tsk, int group_dead)
 {
-        int state;
+        int signal;
+        void *cookie;
 
         /*
          * This does two things:
@@ -924,22 +910,11 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
             !capable(CAP_KILL))
                 tsk->exit_signal = SIGCHLD;
 
-        /* If something other than our normal parent is ptracing us, then
-         * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
-         * only has special meaning to our real parent.
-         */
-        if (!task_detached(tsk) && thread_group_empty(tsk)) {
-                int signal = ptrace_reparented(tsk) ?
-                                SIGCHLD : tsk->exit_signal;
-                do_notify_parent(tsk, signal);
-        } else if (tsk->ptrace) {
-                do_notify_parent(tsk, SIGCHLD);
-        }
+        signal = tracehook_notify_death(tsk, &cookie, group_dead);
+        if (signal >= 0)
+                signal = do_notify_parent(tsk, signal);
 
-        state = EXIT_ZOMBIE;
-        if (task_detached(tsk) && likely(!tsk->ptrace))
-                state = EXIT_DEAD;
-        tsk->exit_state = state;
+        tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 
         /* mt-exec, de_thread() is waiting for us */
         if (thread_group_leader(tsk) &&
@@ -949,8 +924,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
         write_unlock_irq(&tasklist_lock);
 
+        tracehook_report_death(tsk, signal, cookie, group_dead);
+
         /* If the process is dead, release it - nobody will wait for it */
-        if (state == EXIT_DEAD)
+        if (signal == DEATH_REAP)
                 release_task(tsk);
 }
 
@@ -1029,10 +1006,7 @@ NORET_TYPE void do_exit(long code)
         if (unlikely(!tsk->pid))
                 panic("Attempted to kill the idle task!");
 
-        if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
-                current->ptrace_message = code;
-                ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
-        }
+        tracehook_report_exit(&code);
 
         /*
          * We're taking recursive faults here in do_exit.  Safest is to just
@@ -1378,21 +1352,8 @@ static int wait_task_zombie(struct task_struct *p, int options,
                         psig->coublock +=
                                 task_io_get_oublock(p) +
                                 sig->oublock + sig->coublock;
-#ifdef CONFIG_TASK_XACCT
-                        psig->rchar += p->rchar + sig->rchar;
-                        psig->wchar += p->wchar + sig->wchar;
-                        psig->syscr += p->syscr + sig->syscr;
-                        psig->syscw += p->syscw + sig->syscw;
-#endif /* CONFIG_TASK_XACCT */
-#ifdef CONFIG_TASK_IO_ACCOUNTING
-                        psig->ioac.read_bytes +=
-                                p->ioac.read_bytes + sig->ioac.read_bytes;
-                        psig->ioac.write_bytes +=
-                                p->ioac.write_bytes + sig->ioac.write_bytes;
-                        psig->ioac.cancelled_write_bytes +=
-                                p->ioac.cancelled_write_bytes +
-                                sig->ioac.cancelled_write_bytes;
-#endif /* CONFIG_TASK_IO_ACCOUNTING */
+                        task_io_accounting_add(&psig->ioac, &p->ioac);
+                        task_io_accounting_add(&psig->ioac, &sig->ioac);
                         spin_unlock_irq(&p->parent->sighand->siglock);
                 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index b99d73e971a..7ce2ebe8479 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -27,6 +27,7 @@
 #include <linux/key.h>
 #include <linux/binfmts.h>
 #include <linux/mman.h>
+#include <linux/mmu_notifier.h>
 #include <linux/fs.h>
 #include <linux/nsproxy.h>
 #include <linux/capability.h>
@@ -37,6 +38,7 @@
 #include <linux/swap.h>
 #include <linux/syscalls.h>
 #include <linux/jiffies.h>
+#include <linux/tracehook.h>
 #include <linux/futex.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/rcupdate.h>
@@ -413,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 
         if (likely(!mm_alloc_pgd(mm))) {
                 mm->def_flags = 0;
+                mmu_notifier_mm_init(mm);
                 return mm;
         }
 
@@ -445,6 +448,7 @@ void __mmdrop(struct mm_struct *mm)
         BUG_ON(mm == &init_mm);
         mm_free_pgd(mm);
         destroy_context(mm);
+        mmu_notifier_mm_destroy(mm);
         free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
@@ -656,13 +660,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
                 path_get(&old->root);
                 fs->pwd = old->pwd;
                 path_get(&old->pwd);
-                if (old->altroot.dentry) {
-                        fs->altroot = old->altroot;
-                        path_get(&old->altroot);
-                } else {
-                        fs->altroot.mnt = NULL;
-                        fs->altroot.dentry = NULL;
-                }
                 read_unlock(&old->lock);
         }
         return fs;
@@ -812,12 +809,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
         sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
         sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
         sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
-#ifdef CONFIG_TASK_XACCT
-        sig->rchar = sig->wchar = sig->syscr = sig->syscw = 0;
-#endif
-#ifdef CONFIG_TASK_IO_ACCOUNTING
-        memset(&sig->ioac, 0, sizeof(sig->ioac));
-#endif
+        task_io_accounting_init(&sig->ioac);
         sig->sum_sched_runtime = 0;
         INIT_LIST_HEAD(&sig->cpu_timers[0]);
         INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -865,8 +857,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
         new_flags &= ~PF_SUPERPRIV;
         new_flags |= PF_FORKNOEXEC;
-        if (!(clone_flags & CLONE_PTRACE))
-                p->ptrace = 0;
+        new_flags |= PF_STARTING;
         p->flags = new_flags;
         clear_freeze_flag(p);
 }
@@ -907,7 +898,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                                         struct pt_regs *regs,
                                         unsigned long stack_size,
                                         int __user *child_tidptr,
-                                        struct pid *pid)
+                                        struct pid *pid,
+                                        int trace)
 {
         int retval;
         struct task_struct *p;
@@ -1000,13 +992,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         p->last_switch_timestamp = 0;
 #endif
 
-#ifdef CONFIG_TASK_XACCT
-        p->rchar = 0;           /* I/O counter: bytes read */
-        p->wchar = 0;           /* I/O counter: bytes written */
-        p->syscr = 0;           /* I/O counter: read syscalls */
-        p->syscw = 0;           /* I/O counter: write syscalls */
-#endif
-        task_io_accounting_init(p);
+        task_io_accounting_init(&p->ioac);
         acct_clear_integrals(p);
 
         p->it_virt_expires = cputime_zero;
@@ -1163,8 +1149,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
          */
         p->group_leader = p;
         INIT_LIST_HEAD(&p->thread_group);
-        INIT_LIST_HEAD(&p->ptrace_entry);
-        INIT_LIST_HEAD(&p->ptraced);
 
         /* Now that the task is set up, run cgroup callbacks if
          * necessary. We need to run them before the task is visible
@@ -1195,7 +1179,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                 p->real_parent = current->real_parent;
         else
                 p->real_parent = current;
-        p->parent = p->real_parent;
 
         spin_lock(&current->sighand->siglock);
 
@@ -1237,8 +1220,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
         if (likely(p->pid)) {
                 list_add_tail(&p->sibling, &p->real_parent->children);
-                if (unlikely(p->ptrace & PT_PTRACED))
-                        __ptrace_link(p, current->parent);
+                tracehook_finish_clone(p, clone_flags, trace);
 
                 if (thread_group_leader(p)) {
                         if (clone_flags & CLONE_NEWPID)
@@ -1323,29 +1305,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
         struct pt_regs regs;
 
         task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
-                            &init_struct_pid);
+                            &init_struct_pid, 0);
         if (!IS_ERR(task))
                 init_idle(task, cpu);
 
         return task;
 }
 
-static int fork_traceflag(unsigned clone_flags)
-{
-        if (clone_flags & CLONE_UNTRACED)
-                return 0;
-        else if (clone_flags & CLONE_VFORK) {
-                if (current->ptrace & PT_TRACE_VFORK)
-                        return PTRACE_EVENT_VFORK;
-        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
-                if (current->ptrace & PT_TRACE_CLONE)
-                        return PTRACE_EVENT_CLONE;
-        } else if (current->ptrace & PT_TRACE_FORK)
-                return PTRACE_EVENT_FORK;
-
-        return 0;
-}
-
 /*
  * Ok, this is the main fork-routine.
  *
@@ -1380,14 +1346,14 @@ long do_fork(unsigned long clone_flags,
                 }
         }
 
-        if (unlikely(current->ptrace)) {
-                trace = fork_traceflag (clone_flags);
-                if (trace)
-                        clone_flags |= CLONE_PTRACE;
-        }
+        /*
+         * When called from kernel_thread, don't do user tracing stuff.
+         */
+        if (likely(user_mode(regs)))
+                trace = tracehook_prepare_clone(clone_flags);
 
         p = copy_process(clone_flags, stack_start, regs, stack_size,
-                         child_tidptr, NULL);
+                         child_tidptr, NULL, trace);
         /*
          * Do this prior waking up the new thread - the thread pointer
          * might get invalid after that point, if the thread exits quickly.
@@ -1405,32 +1371,35 @@ long do_fork(unsigned long clone_flags,
                         init_completion(&vfork);
                 }
 
-                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
+                tracehook_report_clone(trace, regs, clone_flags, nr, p);
+
+                /*
+                 * We set PF_STARTING at creation in case tracing wants to
+                 * use this to distinguish a fully live task from one that
+                 * hasn't gotten to tracehook_report_clone() yet.  Now we
+                 * clear it and set the child going.
+                 */
+                p->flags &= ~PF_STARTING;
+
+                if (unlikely(clone_flags & CLONE_STOPPED)) {
                         /*
                          * We'll start up with an immediate SIGSTOP.
                          */
                         sigaddset(&p->pending.signal, SIGSTOP);
                         set_tsk_thread_flag(p, TIF_SIGPENDING);
-                }
-
-                if (!(clone_flags & CLONE_STOPPED))
-                        wake_up_new_task(p, clone_flags);
-                else
                         __set_task_state(p, TASK_STOPPED);
-
-                if (unlikely (trace)) {
-                        current->ptrace_message = nr;
-                        ptrace_notify ((trace << 8) | SIGTRAP);
+                } else {
+                        wake_up_new_task(p, clone_flags);
                 }
 
+                tracehook_report_clone_complete(trace, regs,
+                                                clone_flags, nr, p);
+
                 if (clone_flags & CLONE_VFORK) {
                         freezer_do_not_count();
                         wait_for_completion(&vfork);
                         freezer_count();
-                        if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
-                                current->ptrace_message = nr;
-                                ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
-                        }
+                        tracehook_report_vfork_done(p, nr);
                 }
         } else {
                 nr = PTR_ERR(p);
@@ -1442,7 +1411,7 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
 
-static void sighand_ctor(struct kmem_cache *cachep, void *data)
+static void sighand_ctor(void *data)
 {
         struct sighand_struct *sighand = data;
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 964964baefa..3cd441ebf5d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -28,8 +28,7 @@ void dynamic_irq_init(unsigned int irq)
         unsigned long flags;
 
         if (irq >= NR_IRQS) {
-                printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
-                WARN_ON(1);
+                WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
                 return;
         }
 
@@ -62,8 +61,7 @@ void dynamic_irq_cleanup(unsigned int irq)
         unsigned long flags;
 
         if (irq >= NR_IRQS) {
-                printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
-                WARN_ON(1);
+                WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
                 return;
         }
 
@@ -71,9 +69,8 @@ void dynamic_irq_cleanup(unsigned int irq)
         spin_lock_irqsave(&desc->lock, flags);
         if (desc->action) {
                 spin_unlock_irqrestore(&desc->lock, flags);
-                printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
+                WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
                         irq);
-                WARN_ON(1);
                 return;
         }
         desc->msi_desc = NULL;
@@ -96,8 +93,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
         unsigned long flags;
 
         if (irq >= NR_IRQS) {
-                printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
-                WARN_ON(1);
+                WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
                 return -EINVAL;
         }
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f8914b92b66..0314074fa23 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -177,8 +177,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
 {
         switch (desc->depth) {
         case 0:
-                printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
-                WARN_ON(1);
+                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                 break;
         case 1: {
                 unsigned int status = desc->status & ~IRQ_DISABLED;
@@ -324,7 +323,8 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
         ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
 
         if (ret)
-                pr_err("setting flow type for irq %u failed (%pF)\n",
+                pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
+                        (int)(flags & IRQF_TRIGGER_MASK),
                         irq, chip->set_type);
 
         return ret;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 1c5fcacbcf3..c8a4370e2a3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -24,6 +24,12 @@
 #include <linux/utsrelease.h>
 #include <linux/utsname.h>
 #include <linux/numa.h>
+#include <linux/suspend.h>
+#include <linux/device.h>
+#include <linux/freezer.h>
+#include <linux/pm.h>
+#include <linux/cpu.h>
+#include <linux/console.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -242,6 +248,12 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                 goto out;
         }
 
+        image->swap_page = kimage_alloc_control_pages(image, 0);
+        if (!image->swap_page) {
+                printk(KERN_ERR "Could not allocate swap buffer\n");
+                goto out;
+        }
+
         result = 0;
 out:
         if (result == 0)
@@ -589,14 +601,12 @@ static void kimage_free_extra_pages(struct kimage *image)
         kimage_free_page_list(&image->unuseable_pages);
 
 }
-static int kimage_terminate(struct kimage *image)
+static void kimage_terminate(struct kimage *image)
 {
         if (*image->entry != 0)
                 image->entry++;
 
         *image->entry = IND_DONE;
-
-        return 0;
 }
 
 #define for_each_kimage_entry(image, ptr, entry) \
@@ -988,6 +998,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                 if (result)
                         goto out;
 
+                if (flags & KEXEC_PRESERVE_CONTEXT)
+                        image->preserve_context = 1;
                 result = machine_kexec_prepare(image);
                 if (result)
                         goto out;
@@ -997,9 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
                         if (result)
                                 goto out;
                 }
-                result = kimage_terminate(image);
-                if (result)
-                        goto out;
+                kimage_terminate(image);
         }
         /* Install the new kernel, and Uninstall the old */
         image = xchg(dest_image, image);
@@ -1415,3 +1425,85 @@ static int __init crash_save_vmcoreinfo_init(void)
 }
 
 module_init(crash_save_vmcoreinfo_init)
+
+/**
+ * kernel_kexec - reboot the system
+ *
+ * Move into place and start executing a preloaded standalone
+ * executable.  If nothing was preloaded return an error.
+ */
+int kernel_kexec(void)
+{
+        int error = 0;
+
+        if (xchg(&kexec_lock, 1))
+                return -EBUSY;
+        if (!kexec_image) {
+                error = -EINVAL;
+                goto Unlock;
+        }
+
+        if (kexec_image->preserve_context) {
+#ifdef CONFIG_KEXEC_JUMP
+                mutex_lock(&pm_mutex);
+                pm_prepare_console();
+                error = freeze_processes();
+                if (error) {
+                        error = -EBUSY;
+                        goto Restore_console;
+                }
+                suspend_console();
+                error = device_suspend(PMSG_FREEZE);
+                if (error)
+                        goto Resume_console;
+                error = disable_nonboot_cpus();
+                if (error)
+                        goto Resume_devices;
+                local_irq_disable();
+                /* At this point, device_suspend() has been called,
+                 * but *not* device_power_down(). We *must*
+                 * device_power_down() now.  Otherwise, drivers for
+                 * some devices (e.g. interrupt controllers) become
+                 * desynchronized with the actual state of the
+                 * hardware at resume time, and evil weirdness ensues.
+                 */
+                error = device_power_down(PMSG_FREEZE);
+                if (error)
+                        goto Enable_irqs;
+                save_processor_state();
+#endif
+        } else {
+                blocking_notifier_call_chain(&reboot_notifier_list,
+                                             SYS_RESTART, NULL);
+                system_state = SYSTEM_RESTART;
+                device_shutdown();
+                sysdev_shutdown();
+                printk(KERN_EMERG "Starting new kernel\n");
+                machine_shutdown();
+        }
+
+        machine_kexec(kexec_image);
+
+        if (kexec_image->preserve_context) {
+#ifdef CONFIG_KEXEC_JUMP
+                restore_processor_state();
+                device_power_up(PMSG_RESTORE);
+ Enable_irqs:
+                local_irq_enable();
+                enable_nonboot_cpus();
+ Resume_devices:
+                device_resume(PMSG_RESTORE);
+ Resume_console:
+                resume_console();
+                thaw_processes();
+ Restore_console:
+                pm_restore_console();
+                mutex_unlock(&pm_mutex);
+#endif
+        }
+
+ Unlock:
+        xchg(&kexec_lock, 0);
+
+        return error;
+}
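kernel_kexec() above serializes against concurrent kexec paths with a bare xchg() try-lock rather than a sleeping lock. The idiom, sketched with an illustrative variable name standing in for kexec_lock:

    static int some_lock;   /* illustrative stand-in for kexec_lock */

    static int enter_once(void)
    {
            /* xchg() atomically stores 1 and returns the previous value,
             * so a non-zero result means another path owns the section. */
            if (xchg(&some_lock, 1))
                    return -EBUSY;
            /* ... critical section ... */
            xchg(&some_lock, 0);    /* release */
            return 0;
    }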
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 3ec23c3ec97..eaa21fc9ad1 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -56,12 +56,14 @@
56 56
57static int kgdb_break_asap; 57static int kgdb_break_asap;
58 58
59#define KGDB_MAX_THREAD_QUERY 17
59struct kgdb_state { 60struct kgdb_state {
60 int ex_vector; 61 int ex_vector;
61 int signo; 62 int signo;
62 int err_code; 63 int err_code;
63 int cpu; 64 int cpu;
64 int pass_exception; 65 int pass_exception;
66 unsigned long thr_query;
65 unsigned long threadid; 67 unsigned long threadid;
66 long kgdb_usethreadid; 68 long kgdb_usethreadid;
67 struct pt_regs *linux_regs; 69 struct pt_regs *linux_regs;
@@ -166,13 +168,6 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
166 * Weak aliases for breakpoint management, 168 * Weak aliases for breakpoint management,
167 * can be overriden by architectures when needed: 169 * can be overriden by architectures when needed:
168 */ 170 */
169int __weak kgdb_validate_break_address(unsigned long addr)
170{
171 char tmp_variable[BREAK_INSTR_SIZE];
172
173 return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE);
174}
175
176int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) 171int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
177{ 172{
178 int err; 173 int err;
@@ -191,6 +186,25 @@ int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
191 (char *)bundle, BREAK_INSTR_SIZE); 186 (char *)bundle, BREAK_INSTR_SIZE);
192} 187}
193 188
189int __weak kgdb_validate_break_address(unsigned long addr)
190{
191 char tmp_variable[BREAK_INSTR_SIZE];
192 int err;
193 /* Validate setting the breakpoint and then removing it. If the
194 * remove fails, the kernel needs to emit a bad message because we
195 * are in deep trouble, not being able to put things back the way
196 * we found them.
197 */
198 err = kgdb_arch_set_breakpoint(addr, tmp_variable);
199 if (err)
200 return err;
201 err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
202 if (err)
203 printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
204 "memory destroyed at: %lx", addr);
205 return err;
206}
207
194unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) 208unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
195{ 209{
196 return instruction_pointer(regs); 210 return instruction_pointer(regs);
@@ -433,9 +447,14 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
433{ 447{
434 int hex_val; 448 int hex_val;
435 int num = 0; 449 int num = 0;
450 int negate = 0;
436 451
437 *long_val = 0; 452 *long_val = 0;
438 453
454 if (**ptr == '-') {
455 negate = 1;
456 (*ptr)++;
457 }
439 while (**ptr) { 458 while (**ptr) {
440 hex_val = hex(**ptr); 459 hex_val = hex(**ptr);
441 if (hex_val < 0) 460 if (hex_val < 0)
@@ -446,6 +465,9 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
446 (*ptr)++; 465 (*ptr)++;
447 } 466 }
448 467
468 if (negate)
469 *long_val = -*long_val;
470
449 return num; 471 return num;
450} 472}
451 473
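
With the sign handling added above, kgdb_hex2long() accepts the negative thread IDs that gdb now sends for CPU shadow threads. A hedged usage sketch (buffer contents illustrative):

	char buf[] = "-3";		/* e.g. the shadow thread of CPU 1 */
	char *ptr = buf;
	unsigned long val;

	if (kgdb_hex2long(&ptr, &val))
		/* val now holds (unsigned long)-3 */;
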
@@ -515,10 +537,16 @@ static void int_to_threadref(unsigned char *id, int value)
515static struct task_struct *getthread(struct pt_regs *regs, int tid) 537static struct task_struct *getthread(struct pt_regs *regs, int tid)
516{ 538{
517 /* 539 /*
518 * Non-positive TIDs are remapped idle tasks: 540 * Non-positive TIDs are remapped to the cpu shadow information
519 */ 541 */
520 if (tid <= 0) 542 if (tid == 0 || tid == -1)
521 return idle_task(-tid); 543 tid = -atomic_read(&kgdb_active) - 2;
544 if (tid < 0) {
545 if (kgdb_info[-tid - 2].task)
546 return kgdb_info[-tid - 2].task;
547 else
548 return idle_task(-tid - 2);
549 }
522 550
523 /* 551 /*
524 * find_task_by_pid_ns() does not take the tasklist lock anymore 552 * find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -725,14 +753,15 @@ setundefined:
725} 753}
726 754
727/* 755/*
728 * Remap normal tasks to their real PID, idle tasks to -1 ... -NR_CPUs: 756 * Remap normal tasks to their real PID,
757 * CPU shadow threads are mapped to -CPU - 2
729 */ 758 */
730static inline int shadow_pid(int realpid) 759static inline int shadow_pid(int realpid)
731{ 760{
732 if (realpid) 761 if (realpid)
733 return realpid; 762 return realpid;
734 763
735 return -1-raw_smp_processor_id(); 764 return -raw_smp_processor_id() - 2;
736} 765}
737 766
738static char gdbmsgbuf[BUFMAX + 1]; 767static char gdbmsgbuf[BUFMAX + 1];
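
The new convention reserves thread IDs -2 and below for per-CPU shadow threads, leaving 0 and -1 for gdb's current/all-threads wildcards. A sketch of the round trip implied by shadow_pid() and getthread() above (helper names are hypothetical):

	/* CPU n is presented to gdb as thread -(n + 2). */
	static inline int cpu_to_shadow_tid(int cpu)
	{
		return -cpu - 2;
	}

	static inline int shadow_tid_to_cpu(int tid)
	{
		return -tid - 2;	/* only meaningful for tid <= -2 */
	}
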
@@ -826,7 +855,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks)
826 local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo; 855 local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
827 } else { 856 } else {
828 local_debuggerinfo = NULL; 857 local_debuggerinfo = NULL;
829 for (i = 0; i < NR_CPUS; i++) { 858 for_each_online_cpu(i) {
830 /* 859 /*
831 * Try to find the task on some other 860 * Try to find the task on some other
832 * or possibly this node if we do not 861 * or possibly this node if we do not
@@ -960,10 +989,13 @@ static int gdb_cmd_reboot(struct kgdb_state *ks)
960/* Handle the 'q' query packets */ 989/* Handle the 'q' query packets */
961static void gdb_cmd_query(struct kgdb_state *ks) 990static void gdb_cmd_query(struct kgdb_state *ks)
962{ 991{
963 struct task_struct *thread; 992 struct task_struct *g;
993 struct task_struct *p;
964 unsigned char thref[8]; 994 unsigned char thref[8];
965 char *ptr; 995 char *ptr;
966 int i; 996 int i;
997 int cpu;
998 int finished = 0;
967 999
968 switch (remcom_in_buffer[1]) { 1000 switch (remcom_in_buffer[1]) {
969 case 's': 1001 case 's':
@@ -973,22 +1005,34 @@ static void gdb_cmd_query(struct kgdb_state *ks)
973 break; 1005 break;
974 } 1006 }
975 1007
976 if (remcom_in_buffer[1] == 'f') 1008 i = 0;
977 ks->threadid = 1;
978
979 remcom_out_buffer[0] = 'm'; 1009 remcom_out_buffer[0] = 'm';
980 ptr = remcom_out_buffer + 1; 1010 ptr = remcom_out_buffer + 1;
981 1011 if (remcom_in_buffer[1] == 'f') {
982 for (i = 0; i < 17; ks->threadid++) { 1012 /* Each cpu is a shadow thread */
983 thread = getthread(ks->linux_regs, ks->threadid); 1013 for_each_online_cpu(cpu) {
984 if (thread) { 1014 ks->thr_query = 0;
985 int_to_threadref(thref, ks->threadid); 1015 int_to_threadref(thref, -cpu - 2);
986 pack_threadid(ptr, thref); 1016 pack_threadid(ptr, thref);
987 ptr += BUF_THREAD_ID_SIZE; 1017 ptr += BUF_THREAD_ID_SIZE;
988 *(ptr++) = ','; 1018 *(ptr++) = ',';
989 i++; 1019 i++;
990 } 1020 }
991 } 1021 }
1022
1023 do_each_thread(g, p) {
1024 if (i >= ks->thr_query && !finished) {
1025 int_to_threadref(thref, p->pid);
1026 pack_threadid(ptr, thref);
1027 ptr += BUF_THREAD_ID_SIZE;
1028 *(ptr++) = ',';
1029 ks->thr_query++;
1030 if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
1031 finished = 1;
1032 }
1033 i++;
1034 } while_each_thread(g, p);
1035
992 *(--ptr) = '\0'; 1036 *(--ptr) = '\0';
993 break; 1037 break;
994 1038
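
The rewritten 'qf'/'qs' thread-info handler pages its reply: each 'm' packet carries at most KGDB_MAX_THREAD_QUERY (17) thread refs, and ks->thr_query records where the next 'qs' packet should resume. The general pagination idiom, sketched with hypothetical helpers:

	#define CHUNK 17
	/* Emit up to CHUNK items per reply, resuming at *cursor. */
	static int emit_page(int *cursor, int total)
	{
		int end = *cursor + CHUNK;

		while (*cursor < total && *cursor < end)
			emit_item((*cursor)++);	/* hypothetical emitter */
		return *cursor < total;		/* nonzero: more pages remain */
	}
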
@@ -1011,15 +1055,15 @@ static void gdb_cmd_query(struct kgdb_state *ks)
1011 error_packet(remcom_out_buffer, -EINVAL); 1055 error_packet(remcom_out_buffer, -EINVAL);
1012 break; 1056 break;
1013 } 1057 }
1014 if (ks->threadid > 0) { 1058 if ((int)ks->threadid > 0) {
1015 kgdb_mem2hex(getthread(ks->linux_regs, 1059 kgdb_mem2hex(getthread(ks->linux_regs,
1016 ks->threadid)->comm, 1060 ks->threadid)->comm,
1017 remcom_out_buffer, 16); 1061 remcom_out_buffer, 16);
1018 } else { 1062 } else {
1019 static char tmpstr[23 + BUF_THREAD_ID_SIZE]; 1063 static char tmpstr[23 + BUF_THREAD_ID_SIZE];
1020 1064
1021 sprintf(tmpstr, "Shadow task %d for pid 0", 1065 sprintf(tmpstr, "shadowCPU%d",
1022 (int)(-ks->threadid-1)); 1066 (int)(-ks->threadid - 2));
1023 kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr)); 1067 kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
1024 } 1068 }
1025 break; 1069 break;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 6111c27491b..96cff2f8710 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
176 return; 176 return;
177 } 177 }
178 /* Must have done schedule() in kthread() before we set_task_cpu */ 178 /* Must have done schedule() in kthread() before we set_task_cpu */
179 wait_task_inactive(k); 179 wait_task_inactive(k, 0);
180 set_task_cpu(k, cpu); 180 set_task_cpu(k, cpu);
181 k->cpus_allowed = cpumask_of_cpu(cpu); 181 k->cpus_allowed = cpumask_of_cpu(cpu);
182 k->rt.nr_cpus_allowed = 1; 182 k->rt.nr_cpus_allowed = 1;
diff --git a/kernel/marker.c b/kernel/marker.c
index 971da531790..7d1faecd7a5 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -126,6 +126,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
126 struct marker_probe_closure *multi; 126 struct marker_probe_closure *multi;
127 int i; 127 int i;
128 /* 128 /*
129 * Read mdata->ptype before mdata->multi.
130 */
131 smp_rmb();
132 multi = mdata->multi;
133 /*
129 * multi points to an array, therefore accessing the array 134 * multi points to an array, therefore accessing the array
130 * depends on reading multi. However, even in this case, 135 * depends on reading multi. However, even in this case,
131 * we must ensure that the pointer is read _before_ the array 136 * we must ensure that the pointer is read _before_ the array
@@ -133,7 +138,6 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
133 * in the fast path, so put the explicit barrier here. 138 * in the fast path, so put the explicit barrier here.
134 */ 139 */
135 smp_read_barrier_depends(); 140 smp_read_barrier_depends();
136 multi = mdata->multi;
137 for (i = 0; multi[i].func; i++) { 141 for (i = 0; multi[i].func; i++) {
138 va_start(args, call_private); 142 va_start(args, call_private);
139 multi[i].func(multi[i].probe_private, call_private, 143 multi[i].func(multi[i].probe_private, call_private,
@@ -175,6 +179,11 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
175 struct marker_probe_closure *multi; 179 struct marker_probe_closure *multi;
176 int i; 180 int i;
177 /* 181 /*
182 * Read mdata->ptype before mdata->multi.
183 */
184 smp_rmb();
185 multi = mdata->multi;
186 /*
178 * multi points to an array, therefore accessing the array 187 * multi points to an array, therefore accessing the array
179 * depends on reading multi. However, even in this case, 188 * depends on reading multi. However, even in this case,
180 * we must ensure that the pointer is read _before_ the array 189 * we must ensure that the pointer is read _before_ the array
@@ -182,7 +191,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
182 * in the fast path, so put the explicit barrier here. 191 * in the fast path, so put the explicit barrier here.
183 */ 192 */
184 smp_read_barrier_depends(); 193 smp_read_barrier_depends();
185 multi = mdata->multi;
186 for (i = 0; multi[i].func; i++) 194 for (i = 0; multi[i].func; i++)
187 multi[i].func(multi[i].probe_private, call_private, 195 multi[i].func(multi[i].probe_private, call_private,
188 mdata->format, &args); 196 mdata->format, &args);
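
Moving the read of mdata->multi behind an smp_rmb() matters because the probe array must be observed no earlier than the ptype flag that selects the multi-probe path; the read barrier pairs with a write barrier on the update side. A sketch of the publish/consume pairing (field names follow the marker code, the rest is illustrative):

	/* Updater: publish the array, then flip the type flag. */
	entry->multi = new_array;
	smp_wmb();			/* array visible before the flag */
	entry->ptype = 1;

	/* Reader: test the flag, then fetch the array. */
	if (entry->ptype) {
		smp_rmb();		/* pairs with the smp_wmb() above */
		arr = entry->multi;
	}
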
diff --git a/kernel/module.c b/kernel/module.c
index d8b5605132a..61d212120df 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -325,18 +325,6 @@ static unsigned long find_symbol(const char *name,
325 return -ENOENT; 325 return -ENOENT;
326} 326}
327 327
328/* lookup symbol in given range of kernel_symbols */
329static const struct kernel_symbol *lookup_symbol(const char *name,
330 const struct kernel_symbol *start,
331 const struct kernel_symbol *stop)
332{
333 const struct kernel_symbol *ks = start;
334 for (; ks < stop; ks++)
335 if (strcmp(ks->name, name) == 0)
336 return ks;
337 return NULL;
338}
339
340/* Search for module by name: must hold module_mutex. */ 328/* Search for module by name: must hold module_mutex. */
341static struct module *find_module(const char *name) 329static struct module *find_module(const char *name)
342{ 330{
@@ -690,7 +678,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
690 if (flags & O_NONBLOCK) { 678 if (flags & O_NONBLOCK) {
691 struct stopref sref = { mod, flags, forced }; 679 struct stopref sref = { mod, flags, forced };
692 680
693 return stop_machine_run(__try_stop_module, &sref, NR_CPUS); 681 return stop_machine(__try_stop_module, &sref, NULL);
694 } else { 682 } else {
695 /* We don't need to stop the machine for this. */ 683 /* We don't need to stop the machine for this. */
696 mod->state = MODULE_STATE_GOING; 684 mod->state = MODULE_STATE_GOING;
@@ -1428,7 +1416,7 @@ static int __unlink_module(void *_mod)
1428static void free_module(struct module *mod) 1416static void free_module(struct module *mod)
1429{ 1417{
1430 /* Delete from various lists */ 1418 /* Delete from various lists */
1431 stop_machine_run(__unlink_module, mod, NR_CPUS); 1419 stop_machine(__unlink_module, mod, NULL);
1432 remove_notes_attrs(mod); 1420 remove_notes_attrs(mod);
1433 remove_sect_attrs(mod); 1421 remove_sect_attrs(mod);
1434 mod_kobject_remove(mod); 1422 mod_kobject_remove(mod);
@@ -1703,6 +1691,19 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
1703} 1691}
1704 1692
1705#ifdef CONFIG_KALLSYMS 1693#ifdef CONFIG_KALLSYMS
1694
1695/* lookup symbol in given range of kernel_symbols */
1696static const struct kernel_symbol *lookup_symbol(const char *name,
1697 const struct kernel_symbol *start,
1698 const struct kernel_symbol *stop)
1699{
1700 const struct kernel_symbol *ks = start;
1701 for (; ks < stop; ks++)
1702 if (strcmp(ks->name, name) == 0)
1703 return ks;
1704 return NULL;
1705}
1706
1706static int is_exported(const char *name, const struct module *mod) 1707static int is_exported(const char *name, const struct module *mod)
1707{ 1708{
1708 if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) 1709 if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
@@ -2196,7 +2197,7 @@ static struct module *load_module(void __user *umod,
2196 /* Now sew it into the lists so we can get lockdep and oops 2197 /* Now sew it into the lists so we can get lockdep and oops
2197 * info during argument parsing. No one should access us, since 2198 * info during argument parsing. No one should access us, since
2198 * strong_try_module_get() will fail. */ 2199 * strong_try_module_get() will fail. */
2199 stop_machine_run(__link_module, mod, NR_CPUS); 2200 stop_machine(__link_module, mod, NULL);
2200 2201
2201 /* Size of section 0 is 0, so this works well if no params */ 2202 /* Size of section 0 is 0, so this works well if no params */
2202 err = parse_args(mod->name, mod->args, 2203 err = parse_args(mod->name, mod->args,
@@ -2230,7 +2231,7 @@ static struct module *load_module(void __user *umod,
2230 return mod; 2231 return mod;
2231 2232
2232 unlink: 2233 unlink:
2233 stop_machine_run(__unlink_module, mod, NR_CPUS); 2234 stop_machine(__unlink_module, mod, NULL);
2234 module_arch_cleanup(mod); 2235 module_arch_cleanup(mod);
2235 cleanup: 2236 cleanup:
2236 kobject_del(&mod->mkobj.kobj); 2237 kobject_del(&mod->mkobj.kobj);
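
The stop_machine_run() -> stop_machine() conversions above track an API change: the third argument is no longer a max-CPU count (NR_CPUS) but a cpumask of CPUs on which to run the function, with NULL meaning "any one CPU". A hedged usage sketch:

	static int do_quiesced_update(void *data)
	{
		/* runs with all other online CPUs spinning, irqs off */
		return 0;
	}

	err = stop_machine(do_quiesced_update, NULL, NULL);
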
diff --git a/kernel/mutex.c b/kernel/mutex.c
index bcdc9ac8ef6..12c779dc65d 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -34,6 +34,7 @@
34/*** 34/***
35 * mutex_init - initialize the mutex 35 * mutex_init - initialize the mutex
36 * @lock: the mutex to be initialized 36 * @lock: the mutex to be initialized
37 * @key: the lock_class_key for the class; used by mutex lock debugging
37 * 38 *
38 * Initialize the mutex to unlocked state. 39 * Initialize the mutex to unlocked state.
39 * 40 *
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 8cb75702638..da9c2dda6a4 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -24,7 +24,7 @@
24 * requirement that the application has is cleaned up when it closes the 24 * requirement that the application has is cleaned up when it closes the
25 * file pointer or exits; the pm_qos_object will get an opportunity to clean up. 25 * file pointer or exits; the pm_qos_object will get an opportunity to clean up.
26 * 26 *
27 * mark gross mgross@linux.intel.com 27 * Mark Gross <mgross@linux.intel.com>
28 */ 28 */
29 29
30#include <linux/pm_qos_params.h> 30#include <linux/pm_qos_params.h>
@@ -211,8 +211,8 @@ EXPORT_SYMBOL_GPL(pm_qos_requirement);
211 * @value: defines the qos request 211 * @value: defines the qos request
212 * 212 *
213 * This function inserts a new entry in the pm_qos_class list of requested qos 213 * This function inserts a new entry in the pm_qos_class list of requested qos
214 * performance charactoistics. It recomputes the agregate QoS expectations for 214 * performance characteristics. It recomputes the aggregate QoS expectations
215 * the pm_qos_class of parrameters. 215 * for the pm_qos_class of parameters.
216 */ 216 */
217int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value) 217int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
218{ 218{
@@ -250,10 +250,10 @@ EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
250 * @name: identifies the request 250 * @name: identifies the request
251 * @value: defines the qos request 251 * @value: defines the qos request
252 * 252 *
253 * Updates an existing qos requierement for the pm_qos_class of parameters along 253 * Updates an existing qos requirement for the pm_qos_class of parameters along
254 * with updating the target pm_qos_class value. 254 * with updating the target pm_qos_class value.
255 * 255 *
256 * If the named request isn't in the lest then no change is made. 256 * If the named request isn't in the list then no change is made.
257 */ 257 */
258int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value) 258int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
259{ 259{
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
287 * @pm_qos_class: identifies which list of qos request to use 287 * @pm_qos_class: identifies which list of qos request to use
288 * @name: identifies the request 288 * @name: identifies the request
289 * 289 *
290 * Will remove named qos request from pm_qos_class list of parrameters and 290 * Will remove named qos request from pm_qos_class list of parameters and
291 * recompute the current target value for the pm_qos_class. 291 * recompute the current target value for the pm_qos_class.
292 */ 292 */
293void pm_qos_remove_requirement(int pm_qos_class, char *name) 293void pm_qos_remove_requirement(int pm_qos_class, char *name)
@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
319 * @notifier: notifier block managed by caller. 319 * @notifier: notifier block managed by caller.
320 * 320 *
321 * will register the notifier into a notification chain that gets called 321 * will register the notifier into a notification chain that gets called
322 * uppon changes to the pm_qos_class target value. 322 * upon changes to the pm_qos_class target value.
323 */ 323 */
324 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier) 324 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
325{ 325{
@@ -338,7 +338,7 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
338 * @notifier: notifier block to be removed. 338 * @notifier: notifier block to be removed.
339 * 339 *
340 * will remove the notifier from the notification chain that gets called 340 * will remove the notifier from the notification chain that gets called
341 * uppon changes to the pm_qos_class target value. 341 * upon changes to the pm_qos_class target value.
342 */ 342 */
343int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier) 343int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
344{ 344{
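
Taken together, the kerneldoc cleaned up above describes a name-keyed requirement list per QoS class. A usage sketch for a hypothetical driver (the class constant is real, the name and values are illustrative):

	/* Cap acceptable CPU DMA latency at 50 usec while active. */
	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv", 50);
	/* Loosen it when the fast path is idle... */
	pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv", 100);
	/* ...and drop it on teardown. */
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv");
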
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 95bff23ecda..0b7476f5d2a 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -635,6 +635,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
635 } 635 }
636 if (status < 0) 636 if (status < 0)
637 printk(err_suspend, status); 637 printk(err_suspend, status);
638
639 /* Some platforms can't detect that the alarm triggered the
640 * wakeup, or (accordingly) disable it afterwards.
641 * It's supposed to give oneshot behavior; cope.
642 */
643 alm.enabled = false;
644 rtc_set_alarm(rtc, &alm);
638} 645}
639 646
640static int __init has_wakealarm(struct device *dev, void *name_ptr) 647static int __init has_wakealarm(struct device *dev, void *name_ptr)
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 700f44ec840..acc0c101dbd 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -53,8 +53,6 @@ extern int hibernation_platform_enter(void);
53 53
54extern int pfn_is_nosave(unsigned long); 54extern int pfn_is_nosave(unsigned long);
55 55
56extern struct mutex pm_mutex;
57
58#define power_attr(_name) \ 56#define power_attr(_name) \
59static struct kobj_attribute _name##_attr = { \ 57static struct kobj_attribute _name##_attr = { \
60 .attr = { \ 58 .attr = { \
diff --git a/kernel/printk.c b/kernel/printk.c
index a7f7559c5f6..b51b1567bb5 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1309,14 +1309,14 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1309 1309
1310#if defined CONFIG_PRINTK 1310#if defined CONFIG_PRINTK
1311 1311
1312DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
1313/* 1312/*
1314 * printk rate limiting, lifted from the networking subsystem. 1313 * printk rate limiting, lifted from the networking subsystem.
1315 * 1314 *
1316 * This enforces a rate limit: not more than one kernel message 1315 * This enforces a rate limit: not more than 10 kernel messages
1317 * every printk_ratelimit_jiffies to make a denial-of-service 1316 * every 5s to make a denial-of-service attack impossible.
1318 * attack impossible.
1319 */ 1317 */
1318DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
1319
1320int printk_ratelimit(void) 1320int printk_ratelimit(void)
1321{ 1321{
1322 return __ratelimit(&printk_ratelimit_state); 1322 return __ratelimit(&printk_ratelimit_state);
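
DEFINE_RATELIMIT_STATE(name, interval, burst) generalizes the old printk-only limiter: at most `burst' events are allowed per `interval' jiffies. A sketch of a subsystem declaring its own state (names illustrative):

	static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

	if (__ratelimit(&my_rs))	/* nonzero: not suppressed */
		printk(KERN_WARNING "noisy condition hit\n");
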
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8392a9da645..082b3fcb32a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
107 read_unlock(&tasklist_lock); 107 read_unlock(&tasklist_lock);
108 108
109 if (!ret && !kill) 109 if (!ret && !kill)
110 wait_task_inactive(child); 110 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
111 111
112 /* All systems go.. */ 112 /* All systems go.. */
113 return ret; 113 return ret;
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 6f8696c502f..aad93cdc9f6 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -91,8 +91,8 @@ static void force_quiescent_state(struct rcu_data *rdp,
91 * rdp->cpu is the current cpu. 91 * rdp->cpu is the current cpu.
92 * 92 *
93 * cpu_online_map is updated by the _cpu_down() 93 * cpu_online_map is updated by the _cpu_down()
94 * using stop_machine_run(). Since we're in irqs disabled 94 * using __stop_machine(). Since we're in irqs disabled
95 * section, stop_machine_run() is not executing, hence 95 * section, __stop_machine() is not executing, hence
96 * the cpu_online_map is stable. 96 * the cpu_online_map is stable.
97 * 97 *
98 * However, a cpu might have been offlined _just_ before 98 * However, a cpu might have been offlined _just_ before
diff --git a/kernel/relay.c b/kernel/relay.c
index 7de644cdec4..8d13a7855c0 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -407,6 +407,35 @@ void relay_reset(struct rchan *chan)
407} 407}
408EXPORT_SYMBOL_GPL(relay_reset); 408EXPORT_SYMBOL_GPL(relay_reset);
409 409
410static inline void relay_set_buf_dentry(struct rchan_buf *buf,
411 struct dentry *dentry)
412{
413 buf->dentry = dentry;
414 buf->dentry->d_inode->i_size = buf->early_bytes;
415}
416
417static struct dentry *relay_create_buf_file(struct rchan *chan,
418 struct rchan_buf *buf,
419 unsigned int cpu)
420{
421 struct dentry *dentry;
422 char *tmpname;
423
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 return NULL;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 /* Create file in fs */
430 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
431 S_IRUSR, buf,
432 &chan->is_global);
433
434 kfree(tmpname);
435
436 return dentry;
437}
438
410/* 439/*
411 * relay_open_buf - create a new relay channel buffer 440 * relay_open_buf - create a new relay channel buffer
412 * 441 *
@@ -416,45 +445,34 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
416{ 445{
417 struct rchan_buf *buf = NULL; 446 struct rchan_buf *buf = NULL;
418 struct dentry *dentry; 447 struct dentry *dentry;
419 char *tmpname;
420 448
421 if (chan->is_global) 449 if (chan->is_global)
422 return chan->buf[0]; 450 return chan->buf[0];
423 451
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 goto end;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 buf = relay_create_buf(chan); 452 buf = relay_create_buf(chan);
430 if (!buf) 453 if (!buf)
431 goto free_name; 454 return NULL;
455
456 if (chan->has_base_filename) {
457 dentry = relay_create_buf_file(chan, buf, cpu);
458 if (!dentry)
459 goto free_buf;
460 relay_set_buf_dentry(buf, dentry);
461 }
432 462
433 buf->cpu = cpu; 463 buf->cpu = cpu;
434 __relay_reset(buf, 1); 464 __relay_reset(buf, 1);
435 465
436 /* Create file in fs */
437 dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
438 buf, &chan->is_global);
439 if (!dentry)
440 goto free_buf;
441
442 buf->dentry = dentry;
443
444 if(chan->is_global) { 466 if(chan->is_global) {
445 chan->buf[0] = buf; 467 chan->buf[0] = buf;
446 buf->cpu = 0; 468 buf->cpu = 0;
447 } 469 }
448 470
449 goto free_name; 471 return buf;
450 472
451free_buf: 473free_buf:
452 relay_destroy_buf(buf); 474 relay_destroy_buf(buf);
453 buf = NULL; 475 return NULL;
454free_name:
455 kfree(tmpname);
456end:
457 return buf;
458} 476}
459 477
460/** 478/**
@@ -537,8 +555,8 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
537 555
538/** 556/**
539 * relay_open - create a new relay channel 557 * relay_open - create a new relay channel
540 * @base_filename: base name of files to create 558 * @base_filename: base name of files to create, %NULL for buffering only
541 * @parent: dentry of parent directory, %NULL for root directory 559 * @parent: dentry of parent directory, %NULL for root directory or buffer
542 * @subbuf_size: size of sub-buffers 560 * @subbuf_size: size of sub-buffers
543 * @n_subbufs: number of sub-buffers 561 * @n_subbufs: number of sub-buffers
544 * @cb: client callback functions 562 * @cb: client callback functions
@@ -560,8 +578,6 @@ struct rchan *relay_open(const char *base_filename,
560{ 578{
561 unsigned int i; 579 unsigned int i;
562 struct rchan *chan; 580 struct rchan *chan;
563 if (!base_filename)
564 return NULL;
565 581
566 if (!(subbuf_size && n_subbufs)) 582 if (!(subbuf_size && n_subbufs))
567 return NULL; 583 return NULL;
@@ -576,7 +592,10 @@ struct rchan *relay_open(const char *base_filename,
576 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); 592 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
577 chan->parent = parent; 593 chan->parent = parent;
578 chan->private_data = private_data; 594 chan->private_data = private_data;
579 strlcpy(chan->base_filename, base_filename, NAME_MAX); 595 if (base_filename) {
596 chan->has_base_filename = 1;
597 strlcpy(chan->base_filename, base_filename, NAME_MAX);
598 }
580 setup_callbacks(chan, cb); 599 setup_callbacks(chan, cb);
581 kref_init(&chan->kref); 600 kref_init(&chan->kref);
582 601
@@ -604,6 +623,94 @@ free_bufs:
604} 623}
605EXPORT_SYMBOL_GPL(relay_open); 624EXPORT_SYMBOL_GPL(relay_open);
606 625
626struct rchan_percpu_buf_dispatcher {
627 struct rchan_buf *buf;
628 struct dentry *dentry;
629};
630
631/* Called in atomic context. */
632static void __relay_set_buf_dentry(void *info)
633{
634 struct rchan_percpu_buf_dispatcher *p = info;
635
636 relay_set_buf_dentry(p->buf, p->dentry);
637}
638
639/**
640 * relay_late_setup_files - triggers file creation
641 * @chan: channel to operate on
642 * @base_filename: base name of files to create
643 * @parent: dentry of parent directory, %NULL for root directory
644 *
645 * Returns 0 if successful, non-zero otherwise.
646 *
647 * Use to setup files for a previously buffer-only channel.
648 * Useful to do early tracing in kernel, before VFS is up, for example.
649 */
650int relay_late_setup_files(struct rchan *chan,
651 const char *base_filename,
652 struct dentry *parent)
653{
654 int err = 0;
655 unsigned int i, curr_cpu;
656 unsigned long flags;
657 struct dentry *dentry;
658 struct rchan_percpu_buf_dispatcher disp;
659
660 if (!chan || !base_filename)
661 return -EINVAL;
662
663 strlcpy(chan->base_filename, base_filename, NAME_MAX);
664
665 mutex_lock(&relay_channels_mutex);
666 /* Is chan already set up? */
667 if (unlikely(chan->has_base_filename))
668 return -EEXIST;
669 chan->has_base_filename = 1;
670 chan->parent = parent;
671 curr_cpu = get_cpu();
672 /*
673 * The CPU hotplug notifier ran before us and created buffers with
674 * no files associated. So it's safe to call relay_create_buf_file()
675 * on all currently online CPUs.
676 */
677 for_each_online_cpu(i) {
678 if (unlikely(!chan->buf[i])) {
679 printk(KERN_ERR "relay_late_setup_files: CPU %u "
680 "has no buffer, it must have one!\n", i);
681 BUG();
682 err = -EINVAL;
683 break;
684 }
685
686 dentry = relay_create_buf_file(chan, chan->buf[i], i);
687 if (unlikely(!dentry)) {
688 err = -EINVAL;
689 break;
690 }
691
692 if (curr_cpu == i) {
693 local_irq_save(flags);
694 relay_set_buf_dentry(chan->buf[i], dentry);
695 local_irq_restore(flags);
696 } else {
697 disp.buf = chan->buf[i];
698 disp.dentry = dentry;
699 smp_mb();
700 /* relay_channels_mutex must be held, so wait. */
701 err = smp_call_function_single(i,
702 __relay_set_buf_dentry,
703 &disp, 1);
704 }
705 if (unlikely(err))
706 break;
707 }
708 put_cpu();
709 mutex_unlock(&relay_channels_mutex);
710
711 return err;
712}
713
607/** 714/**
608 * relay_switch_subbuf - switch to a new sub-buffer 715 * relay_switch_subbuf - switch to a new sub-buffer
609 * @buf: channel buffer 716 * @buf: channel buffer
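
relay_late_setup_files() enables the buffer-only mode introduced above: open the channel before any filesystem exists, attach files later. A hedged sketch (sizes, callbacks, and the debugfs dentry are assumed to be defined elsewhere):

	/* Early boot: no VFS yet, so buffer only. */
	chan = relay_open(NULL, NULL, SUBBUF_SIZE, N_SUBBUFS, &cb, NULL);

	/* Later, once debugfs is available: */
	err = relay_late_setup_files(chan, "trace", debugfs_dir);
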
@@ -627,8 +734,13 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
627 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; 734 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
628 buf->padding[old_subbuf] = buf->prev_padding; 735 buf->padding[old_subbuf] = buf->prev_padding;
629 buf->subbufs_produced++; 736 buf->subbufs_produced++;
630 buf->dentry->d_inode->i_size += buf->chan->subbuf_size - 737 if (buf->dentry)
631 buf->padding[old_subbuf]; 738 buf->dentry->d_inode->i_size +=
739 buf->chan->subbuf_size -
740 buf->padding[old_subbuf];
741 else
742 buf->early_bytes += buf->chan->subbuf_size -
743 buf->padding[old_subbuf];
632 smp_mb(); 744 smp_mb();
633 if (waitqueue_active(&buf->read_wait)) 745 if (waitqueue_active(&buf->read_wait))
634 /* 746 /*
@@ -832,6 +944,10 @@ static void relay_file_read_consume(struct rchan_buf *buf,
832 size_t n_subbufs = buf->chan->n_subbufs; 944 size_t n_subbufs = buf->chan->n_subbufs;
833 size_t read_subbuf; 945 size_t read_subbuf;
834 946
947 if (buf->subbufs_produced == buf->subbufs_consumed &&
948 buf->offset == buf->bytes_consumed)
949 return;
950
835 if (buf->bytes_consumed + bytes_consumed > subbuf_size) { 951 if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
836 relay_subbufs_consumed(buf->chan, buf->cpu, 1); 952 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
837 buf->bytes_consumed = 0; 953 buf->bytes_consumed = 0;
@@ -863,6 +979,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
863 979
864 relay_file_read_consume(buf, read_pos, 0); 980 relay_file_read_consume(buf, read_pos, 0);
865 981
982 consumed = buf->subbufs_consumed;
983
866 if (unlikely(buf->offset > subbuf_size)) { 984 if (unlikely(buf->offset > subbuf_size)) {
867 if (produced == consumed) 985 if (produced == consumed)
868 return 0; 986 return 0;
@@ -881,8 +999,12 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
881 if (consumed > produced) 999 if (consumed > produced)
882 produced += n_subbufs * subbuf_size; 1000 produced += n_subbufs * subbuf_size;
883 1001
884 if (consumed == produced) 1002 if (consumed == produced) {
1003 if (buf->offset == subbuf_size &&
1004 buf->subbufs_produced > buf->subbufs_consumed)
1005 return 1;
885 return 0; 1006 return 0;
1007 }
886 1008
887 return 1; 1009 return 1;
888} 1010}
@@ -1237,4 +1359,4 @@ static __init int relay_init(void)
1237 return 0; 1359 return 0;
1238} 1360}
1239 1361
1240module_init(relay_init); 1362early_initcall(relay_init);
diff --git a/kernel/resource.c b/kernel/resource.c
index 74af2d7cb5a..f5b518eabef 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -490,7 +490,7 @@ resource_size_t resource_alignment(struct resource *res)
490{ 490{
491 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { 491 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
492 case IORESOURCE_SIZEALIGN: 492 case IORESOURCE_SIZEALIGN:
493 return res->end - res->start + 1; 493 return resource_size(res);
494 case IORESOURCE_STARTALIGN: 494 case IORESOURCE_STARTALIGN:
495 return res->start; 495 return res->start;
496 default: 496 default:
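
resource_size() is the include/linux/ioport.h helper this hunk switches to; it encapsulates the off-by-one-prone open-coded form:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}
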
diff --git a/kernel/sched.c b/kernel/sched.c
index 0047bd9b96a..04160d277e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1867/* 1867/*
1868 * wait_task_inactive - wait for a thread to unschedule. 1868 * wait_task_inactive - wait for a thread to unschedule.
1869 * 1869 *
1870 * If @match_state is nonzero, it's the @p->state value just checked and
1871 * not expected to change. If it changes, i.e. @p might have woken up,
1872 * then return zero. When we succeed in waiting for @p to be off its CPU,
1873 * we return a positive number (its total switch count). If a second call
1874 * a short while later returns the same number, the caller can be sure that
1875 * @p has remained unscheduled the whole time.
1876 *
1870 * The caller must ensure that the task *will* unschedule sometime soon, 1877 * The caller must ensure that the task *will* unschedule sometime soon,
1871 * else this function might spin for a *long* time. This function can't 1878 * else this function might spin for a *long* time. This function can't
1872 * be called with interrupts off, or it may introduce deadlock with 1879 * be called with interrupts off, or it may introduce deadlock with
1873 * smp_call_function() if an IPI is sent by the same process we are 1880 * smp_call_function() if an IPI is sent by the same process we are
1874 * waiting to become inactive. 1881 * waiting to become inactive.
1875 */ 1882 */
1876void wait_task_inactive(struct task_struct *p) 1883unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1877{ 1884{
1878 unsigned long flags; 1885 unsigned long flags;
1879 int running, on_rq; 1886 int running, on_rq;
1887 unsigned long ncsw;
1880 struct rq *rq; 1888 struct rq *rq;
1881 1889
1882 for (;;) { 1890 for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
1899 * return false if the runqueue has changed and p 1907 * return false if the runqueue has changed and p
1900 * is actually now running somewhere else! 1908 * is actually now running somewhere else!
1901 */ 1909 */
1902 while (task_running(rq, p)) 1910 while (task_running(rq, p)) {
1911 if (match_state && unlikely(p->state != match_state))
1912 return 0;
1903 cpu_relax(); 1913 cpu_relax();
1914 }
1904 1915
1905 /* 1916 /*
1906 * Ok, time to look more closely! We need the rq 1917 * Ok, time to look more closely! We need the rq
@@ -1910,9 +1921,21 @@ void wait_task_inactive(struct task_struct *p)
1910 rq = task_rq_lock(p, &flags); 1921 rq = task_rq_lock(p, &flags);
1911 running = task_running(rq, p); 1922 running = task_running(rq, p);
1912 on_rq = p->se.on_rq; 1923 on_rq = p->se.on_rq;
1924 ncsw = 0;
1925 if (!match_state || p->state == match_state) {
1926 ncsw = p->nivcsw + p->nvcsw;
1927 if (unlikely(!ncsw))
1928 ncsw = 1;
1929 }
1913 task_rq_unlock(rq, &flags); 1930 task_rq_unlock(rq, &flags);
1914 1931
1915 /* 1932 /*
1933 * If it changed from the expected state, bail out now.
1934 */
1935 if (unlikely(!ncsw))
1936 break;
1937
1938 /*
1916 * Was it really running after all now that we 1939 * Was it really running after all now that we
1917 * checked with the proper locks actually held? 1940 * checked with the proper locks actually held?
1918 * 1941 *
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
1944 */ 1967 */
1945 break; 1968 break;
1946 } 1969 }
1970
1971 return ncsw;
1947} 1972}
1948 1973
1949/*** 1974/***
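
The two-call pattern the new comment describes, sketched for a hypothetical caller:

	unsigned long ncsw, ncsw2;

	ncsw = wait_task_inactive(p, TASK_TRACED);
	if (!ncsw)
		return -ESRCH;	/* state changed: p may have woken */
	/* ... a short while later ... */
	ncsw2 = wait_task_inactive(p, TASK_TRACED);
	if (ncsw2 == ncsw)
		/* p has stayed unscheduled the whole time */;
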
@@ -4979,19 +5004,21 @@ recheck:
4979 return -EPERM; 5004 return -EPERM;
4980 } 5005 }
4981 5006
5007 if (user) {
4982#ifdef CONFIG_RT_GROUP_SCHED 5008#ifdef CONFIG_RT_GROUP_SCHED
4983 /* 5009 /*
4984 * Do not allow realtime tasks into groups that have no runtime 5010 * Do not allow realtime tasks into groups that have no runtime
4985 * assigned. 5011 * assigned.
4986 */ 5012 */
4987 if (user 5013 if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
4988 && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) 5014 return -EPERM;
4989 return -EPERM;
4990#endif 5015#endif
4991 5016
4992 retval = security_task_setscheduler(p, policy, param); 5017 retval = security_task_setscheduler(p, policy, param);
4993 if (retval) 5018 if (retval)
4994 return retval; 5019 return retval;
5020 }
5021
4995 /* 5022 /*
4996 * make sure no PI-waiters arrive (or leave) while we are 5023 * make sure no PI-waiters arrive (or leave) while we are
4997 * changing the priority of the task: 5024 * changing the priority of the task:
@@ -6389,7 +6416,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
6389 .priority = 10 6416 .priority = 10
6390}; 6417};
6391 6418
6392void __init migration_init(void) 6419static int __init migration_init(void)
6393{ 6420{
6394 void *cpu = (void *)(long)smp_processor_id(); 6421 void *cpu = (void *)(long)smp_processor_id();
6395 int err; 6422 int err;
@@ -6399,7 +6426,10 @@ void __init migration_init(void)
6399 BUG_ON(err == NOTIFY_BAD); 6426 BUG_ON(err == NOTIFY_BAD);
6400 migration_call(&migration_notifier, CPU_ONLINE, cpu); 6427 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6401 register_cpu_notifier(&migration_notifier); 6428 register_cpu_notifier(&migration_notifier);
6429
6430 return err;
6402} 6431}
6432early_initcall(migration_init);
6403#endif 6433#endif
6404 6434
6405#ifdef CONFIG_SMP 6435#ifdef CONFIG_SMP
@@ -7643,34 +7673,34 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7643} 7673}
7644 7674
7645#ifdef CONFIG_SCHED_MC 7675#ifdef CONFIG_SCHED_MC
7646static ssize_t sched_mc_power_savings_show(struct sys_device *dev, 7676static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
7647 struct sysdev_attribute *attr, char *page) 7677 char *page)
7648{ 7678{
7649 return sprintf(page, "%u\n", sched_mc_power_savings); 7679 return sprintf(page, "%u\n", sched_mc_power_savings);
7650} 7680}
7651static ssize_t sched_mc_power_savings_store(struct sys_device *dev, 7681static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
7652 struct sysdev_attribute *attr,
7653 const char *buf, size_t count) 7682 const char *buf, size_t count)
7654{ 7683{
7655 return sched_power_savings_store(buf, count, 0); 7684 return sched_power_savings_store(buf, count, 0);
7656} 7685}
7657static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, 7686static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7658 sched_mc_power_savings_store); 7687 sched_mc_power_savings_show,
7688 sched_mc_power_savings_store);
7659#endif 7689#endif
7660 7690
7661#ifdef CONFIG_SCHED_SMT 7691#ifdef CONFIG_SCHED_SMT
7662static ssize_t sched_smt_power_savings_show(struct sys_device *dev, 7692static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
7663 struct sysdev_attribute *attr, char *page) 7693 char *page)
7664{ 7694{
7665 return sprintf(page, "%u\n", sched_smt_power_savings); 7695 return sprintf(page, "%u\n", sched_smt_power_savings);
7666} 7696}
7667static ssize_t sched_smt_power_savings_store(struct sys_device *dev, 7697static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
7668 struct sysdev_attribute *attr,
7669 const char *buf, size_t count) 7698 const char *buf, size_t count)
7670{ 7699{
7671 return sched_power_savings_store(buf, count, 1); 7700 return sched_power_savings_store(buf, count, 1);
7672} 7701}
7673static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, 7702static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7703 sched_smt_power_savings_show,
7674 sched_smt_power_savings_store); 7704 sched_smt_power_savings_store);
7675#endif 7705#endif
7676 7706
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index aaaeae8244e..94a62c0d4ad 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -212,9 +212,7 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
212 waiter.up = 0; 212 waiter.up = 0;
213 213
214 for (;;) { 214 for (;;) {
215 if (state == TASK_INTERRUPTIBLE && signal_pending(task)) 215 if (signal_pending_state(state, task))
216 goto interrupted;
217 if (state == TASK_KILLABLE && fatal_signal_pending(task))
218 goto interrupted; 216 goto interrupted;
219 if (timeout <= 0) 217 if (timeout <= 0)
220 goto timed_out; 218 goto timed_out;
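
signal_pending_state() folds the TASK_INTERRUPTIBLE and TASK_KILLABLE checks into one helper; roughly, per the include/linux/sched.h helper this hunk relies on:

	static inline int signal_pending_state(long state, struct task_struct *p)
	{
		if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
			return 0;
		if (!signal_pending(p))
			return 0;
		return (state & TASK_INTERRUPTIBLE) ||
			__fatal_signal_pending(p);
	}
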
diff --git a/kernel/signal.c b/kernel/signal.c
index 82c3545596c..954f77d7e3b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/signalfd.h> 24#include <linux/signalfd.h>
25#include <linux/tracehook.h>
25#include <linux/capability.h> 26#include <linux/capability.h>
26#include <linux/freezer.h> 27#include <linux/freezer.h>
27#include <linux/pid_namespace.h> 28#include <linux/pid_namespace.h>
@@ -39,24 +40,21 @@
39 40
40static struct kmem_cache *sigqueue_cachep; 41static struct kmem_cache *sigqueue_cachep;
41 42
42static int __sig_ignored(struct task_struct *t, int sig) 43static void __user *sig_handler(struct task_struct *t, int sig)
43{ 44{
44 void __user *handler; 45 return t->sighand->action[sig - 1].sa.sa_handler;
46}
45 47
48static int sig_handler_ignored(void __user *handler, int sig)
49{
46 /* Is it explicitly or implicitly ignored? */ 50 /* Is it explicitly or implicitly ignored? */
47
48 handler = t->sighand->action[sig - 1].sa.sa_handler;
49 return handler == SIG_IGN || 51 return handler == SIG_IGN ||
50 (handler == SIG_DFL && sig_kernel_ignore(sig)); 52 (handler == SIG_DFL && sig_kernel_ignore(sig));
51} 53}
52 54
53static int sig_ignored(struct task_struct *t, int sig) 55static int sig_ignored(struct task_struct *t, int sig)
54{ 56{
55 /* 57 void __user *handler;
56 * Tracers always want to know about signals..
57 */
58 if (t->ptrace & PT_PTRACED)
59 return 0;
60 58
61 /* 59 /*
62 * Blocked signals are never ignored, since the 60 * Blocked signals are never ignored, since the
@@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig)
66 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 64 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
67 return 0; 65 return 0;
68 66
69 return __sig_ignored(t, sig); 67 handler = sig_handler(t, sig);
68 if (!sig_handler_ignored(handler, sig))
69 return 0;
70
71 /*
72 * Tracers may want to know about even ignored signals.
73 */
74 return !tracehook_consider_ignored_signal(t, sig, handler);
70} 75}
71 76
72/* 77/*
@@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
129 134
130void recalc_sigpending(void) 135void recalc_sigpending(void)
131{ 136{
132 if (!recalc_sigpending_tsk(current) && !freezing(current)) 137 if (unlikely(tracehook_force_sigpending()))
138 set_thread_flag(TIF_SIGPENDING);
139 else if (!recalc_sigpending_tsk(current) && !freezing(current))
133 clear_thread_flag(TIF_SIGPENDING); 140 clear_thread_flag(TIF_SIGPENDING);
134 141
135} 142}
@@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default)
295 302
296int unhandled_signal(struct task_struct *tsk, int sig) 303int unhandled_signal(struct task_struct *tsk, int sig)
297{ 304{
305 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
298 if (is_global_init(tsk)) 306 if (is_global_init(tsk))
299 return 1; 307 return 1;
300 if (tsk->ptrace & PT_PTRACED) 308 if (handler != SIG_IGN && handler != SIG_DFL)
301 return 0; 309 return 0;
302 return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) || 310 return !tracehook_consider_fatal_signal(tsk, sig, handler);
303 (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
304} 311}
305 312
306 313
@@ -591,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info,
591 return security_task_kill(t, info, sig, 0); 598 return security_task_kill(t, info, sig, 0);
592} 599}
593 600
594/* forward decl */
595static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
596
597/* 601/*
598 * Handle magic process-wide effects of stop/continue signals. Unlike 602 * Handle magic process-wide effects of stop/continue signals. Unlike
599 * the signal actions, these happen immediately at signal-generation 603 * the signal actions, these happen immediately at signal-generation
@@ -756,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group)
756 if (sig_fatal(p, sig) && 760 if (sig_fatal(p, sig) &&
757 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 761 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
758 !sigismember(&t->real_blocked, sig) && 762 !sigismember(&t->real_blocked, sig) &&
759 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { 763 (sig == SIGKILL ||
764 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
760 /* 765 /*
761 * This signal will be fatal to the whole group. 766 * This signal will be fatal to the whole group.
762 */ 767 */
@@ -1323,9 +1328,11 @@ static inline void __wake_up_parent(struct task_struct *p,
1323/* 1328/*
1324 * Let a parent know about the death of a child. 1329 * Let a parent know about the death of a child.
1325 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1330 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1331 *
1332 * Returns -1 if our parent ignored us and so we've switched to
1333 * self-reaping, or else @sig.
1326 */ 1334 */
1327 1335int do_notify_parent(struct task_struct *tsk, int sig)
1328void do_notify_parent(struct task_struct *tsk, int sig)
1329{ 1336{
1330 struct siginfo info; 1337 struct siginfo info;
1331 unsigned long flags; 1338 unsigned long flags;
@@ -1396,12 +1403,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1396 */ 1403 */
1397 tsk->exit_signal = -1; 1404 tsk->exit_signal = -1;
1398 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1405 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1399 sig = 0; 1406 sig = -1;
1400 } 1407 }
1401 if (valid_signal(sig) && sig > 0) 1408 if (valid_signal(sig) && sig > 0)
1402 __group_send_sig_info(sig, &info, tsk->parent); 1409 __group_send_sig_info(sig, &info, tsk->parent);
1403 __wake_up_parent(tsk, tsk->parent); 1410 __wake_up_parent(tsk, tsk->parent);
1404 spin_unlock_irqrestore(&psig->siglock, flags); 1411 spin_unlock_irqrestore(&psig->siglock, flags);
1412
1413 return sig;
1405} 1414}
1406 1415
1407static void do_notify_parent_cldstop(struct task_struct *tsk, int why) 1416static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
@@ -1599,7 +1608,7 @@ finish_stop(int stop_count)
1599 * a group stop in progress and we are the last to stop, 1608 * a group stop in progress and we are the last to stop,
1600 * report to the parent. When ptraced, every thread reports itself. 1609 * report to the parent. When ptraced, every thread reports itself.
1601 */ 1610 */
1602 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { 1611 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1603 read_lock(&tasklist_lock); 1612 read_lock(&tasklist_lock);
1604 do_notify_parent_cldstop(current, CLD_STOPPED); 1613 do_notify_parent_cldstop(current, CLD_STOPPED);
1605 read_unlock(&tasklist_lock); 1614 read_unlock(&tasklist_lock);
@@ -1735,6 +1744,9 @@ relock:
1735 signal->flags &= ~SIGNAL_CLD_MASK; 1744 signal->flags &= ~SIGNAL_CLD_MASK;
1736 spin_unlock_irq(&sighand->siglock); 1745 spin_unlock_irq(&sighand->siglock);
1737 1746
1747 if (unlikely(!tracehook_notify_jctl(1, why)))
1748 goto relock;
1749
1738 read_lock(&tasklist_lock); 1750 read_lock(&tasklist_lock);
1739 do_notify_parent_cldstop(current->group_leader, why); 1751 do_notify_parent_cldstop(current->group_leader, why);
1740 read_unlock(&tasklist_lock); 1752 read_unlock(&tasklist_lock);
@@ -1748,17 +1760,33 @@ relock:
1748 do_signal_stop(0)) 1760 do_signal_stop(0))
1749 goto relock; 1761 goto relock;
1750 1762
1751 signr = dequeue_signal(current, &current->blocked, info); 1763 /*
1752 if (!signr) 1764 * Tracing can induce an artificial signal and choose sigaction.
1753 break; /* will return 0 */ 1765 * The return value in @signr determines the default action,
1766 * but @info->si_signo is the signal number we will report.
1767 */
1768 signr = tracehook_get_signal(current, regs, info, return_ka);
1769 if (unlikely(signr < 0))
1770 goto relock;
1771 if (unlikely(signr != 0))
1772 ka = return_ka;
1773 else {
1774 signr = dequeue_signal(current, &current->blocked,
1775 info);
1754 1776
1755 if (signr != SIGKILL) {
1756 signr = ptrace_signal(signr, info, regs, cookie);
1757 if (!signr) 1777 if (!signr)
1758 continue; 1778 break; /* will return 0 */
1779
1780 if (signr != SIGKILL) {
1781 signr = ptrace_signal(signr, info,
1782 regs, cookie);
1783 if (!signr)
1784 continue;
1785 }
1786
1787 ka = &sighand->action[signr-1];
1759 } 1788 }
1760 1789
1761 ka = &sighand->action[signr-1];
1762 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1790 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1763 continue; 1791 continue;
1764 if (ka->sa.sa_handler != SIG_DFL) { 1792 if (ka->sa.sa_handler != SIG_DFL) {
@@ -1806,7 +1834,7 @@ relock:
1806 spin_lock_irq(&sighand->siglock); 1834 spin_lock_irq(&sighand->siglock);
1807 } 1835 }
1808 1836
1809 if (likely(do_signal_stop(signr))) { 1837 if (likely(do_signal_stop(info->si_signo))) {
1810 /* It released the siglock. */ 1838 /* It released the siglock. */
1811 goto relock; 1839 goto relock;
1812 } 1840 }
@@ -1827,7 +1855,7 @@ relock:
1827 1855
1828 if (sig_kernel_coredump(signr)) { 1856 if (sig_kernel_coredump(signr)) {
1829 if (print_fatal_signals) 1857 if (print_fatal_signals)
1830 print_fatal_signal(regs, signr); 1858 print_fatal_signal(regs, info->si_signo);
1831 /* 1859 /*
1832 * If it was able to dump core, this kills all 1860 * If it was able to dump core, this kills all
1833 * other threads in the group and synchronizes with 1861 * other threads in the group and synchronizes with
@@ -1836,13 +1864,13 @@ relock:
1836 * first and our do_group_exit call below will use 1864 * first and our do_group_exit call below will use
1837 * that value and ignore the one we pass it. 1865 * that value and ignore the one we pass it.
1838 */ 1866 */
1839 do_coredump((long)signr, signr, regs); 1867 do_coredump(info->si_signo, info->si_signo, regs);
1840 } 1868 }
1841 1869
1842 /* 1870 /*
1843 * Death signals, no core dump. 1871 * Death signals, no core dump.
1844 */ 1872 */
1845 do_group_exit(signr); 1873 do_group_exit(info->si_signo);
1846 /* NOTREACHED */ 1874 /* NOTREACHED */
1847 } 1875 }
1848 spin_unlock_irq(&sighand->siglock); 1876 spin_unlock_irq(&sighand->siglock);
@@ -1884,7 +1912,7 @@ void exit_signals(struct task_struct *tsk)
1884out: 1912out:
1885 spin_unlock_irq(&tsk->sighand->siglock); 1913 spin_unlock_irq(&tsk->sighand->siglock);
1886 1914
1887 if (unlikely(group_stop)) { 1915 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1888 read_lock(&tasklist_lock); 1916 read_lock(&tasklist_lock);
1889 do_notify_parent_cldstop(tsk, CLD_STOPPED); 1917 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1890 read_unlock(&tasklist_lock); 1918 read_unlock(&tasklist_lock);
@@ -1895,7 +1923,6 @@ EXPORT_SYMBOL(recalc_sigpending);
1895EXPORT_SYMBOL_GPL(dequeue_signal); 1923EXPORT_SYMBOL_GPL(dequeue_signal);
1896EXPORT_SYMBOL(flush_signals); 1924EXPORT_SYMBOL(flush_signals);
1897EXPORT_SYMBOL(force_sig); 1925EXPORT_SYMBOL(force_sig);
1898EXPORT_SYMBOL(ptrace_notify);
1899EXPORT_SYMBOL(send_sig); 1926EXPORT_SYMBOL(send_sig);
1900EXPORT_SYMBOL(send_sig_info); 1927EXPORT_SYMBOL(send_sig_info);
1901EXPORT_SYMBOL(sigprocmask); 1928EXPORT_SYMBOL(sigprocmask);
@@ -2299,7 +2326,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2299 * (for example, SIGCHLD), shall cause the pending signal to 2326 * (for example, SIGCHLD), shall cause the pending signal to
2300 * be discarded, whether or not it is blocked" 2327 * be discarded, whether or not it is blocked"
2301 */ 2328 */
2302 if (__sig_ignored(t, sig)) { 2329 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2303 sigemptyset(&mask); 2330 sigemptyset(&mask);
2304 sigaddset(&mask, sig); 2331 sigaddset(&mask, sig);
2305 rm_from_queue_full(&mask, &t->signal->shared_pending); 2332 rm_from_queue_full(&mask, &t->signal->shared_pending);
diff --git a/kernel/smp.c b/kernel/smp.c
index 462c785ca1e..96fc7c0edc5 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,7 +33,7 @@ struct call_single_queue {
33 spinlock_t lock; 33 spinlock_t lock;
34}; 34};
35 35
36void __cpuinit init_call_single_data(void) 36static int __cpuinit init_call_single_data(void)
37{ 37{
38 int i; 38 int i;
39 39
@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void)
43 spin_lock_init(&q->lock); 43 spin_lock_init(&q->lock);
44 INIT_LIST_HEAD(&q->list); 44 INIT_LIST_HEAD(&q->list);
45 } 45 }
46 return 0;
46} 47}
48early_initcall(init_call_single_data);
47 49
48static void csd_flag_wait(struct call_single_data *data) 50static void csd_flag_wait(struct call_single_data *data)
49{ 51{
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f6b03d56c2b..c506f266a6b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -630,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
630 .notifier_call = cpu_callback 630 .notifier_call = cpu_callback
631}; 631};
632 632
633__init int spawn_ksoftirqd(void) 633static __init int spawn_ksoftirqd(void)
634{ 634{
635 void *cpu = (void *)(long)smp_processor_id(); 635 void *cpu = (void *)(long)smp_processor_id();
636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
@@ -640,6 +640,7 @@ __init int spawn_ksoftirqd(void)
640 register_cpu_notifier(&cpu_nfb); 640 register_cpu_notifier(&cpu_nfb);
641 return 0; 641 return 0;
642} 642}
643early_initcall(spawn_ksoftirqd);
643 644
644#ifdef CONFIG_SMP 645#ifdef CONFIG_SMP
645/* 646/*
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 7bd8d1aadd5..b75b492fbfc 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -338,14 +338,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
338 .notifier_call = cpu_callback 338 .notifier_call = cpu_callback
339}; 339};
340 340
341__init void spawn_softlockup_task(void) 341static int __initdata nosoftlockup;
342
343static int __init nosoftlockup_setup(char *str)
344{
345 nosoftlockup = 1;
346 return 1;
347}
348__setup("nosoftlockup", nosoftlockup_setup);
349
350static int __init spawn_softlockup_task(void)
342{ 351{
343 void *cpu = (void *)(long)smp_processor_id(); 352 void *cpu = (void *)(long)smp_processor_id();
344 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 353 int err;
345 354
346 BUG_ON(err == NOTIFY_BAD); 355 if (nosoftlockup)
356 return 0;
357
358 err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
359 if (err == NOTIFY_BAD) {
360 BUG();
361 return 1;
362 }
347 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); 363 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
348 register_cpu_notifier(&cpu_nfb); 364 register_cpu_notifier(&cpu_nfb);
349 365
350 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 366 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
367
368 return 0;
351} 369}
370early_initcall(spawn_softlockup_task);
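
This is the same conversion applied to migration_init(), init_call_single_data() and spawn_ksoftirqd() earlier in the merge: instead of being called explicitly from init/main.c, each pre-SMP setup hook now registers itself. The pattern, sketched with an illustrative name:

	static int __init my_early_setup(void)
	{
		/* runs from do_pre_smp_initcalls(), before secondary
		 * CPUs are brought up */
		return 0;
	}
	early_initcall(my_early_setup);
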
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 738b411ff2d..e446c7c7d6a 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,4 +1,4 @@
1/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation. 1/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
2 * GPL v2 and any later version. 2 * GPL v2 and any later version.
3 */ 3 */
4#include <linux/cpu.h> 4#include <linux/cpu.h>
@@ -13,204 +13,178 @@
13#include <asm/atomic.h> 13#include <asm/atomic.h>
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15 15
16/* Since we effect priority and affinity (both of which are visible 16/* This controls the threads on each CPU. */
17 * to, and settable by outside processes) we do indirection via a
18 * kthread. */
19
20/* Thread to stop each CPU in user context. */
21enum stopmachine_state { 17enum stopmachine_state {
22 STOPMACHINE_WAIT, 18 /* Dummy starting state for thread. */
19 STOPMACHINE_NONE,
20 /* Awaiting everyone to be scheduled. */
23 STOPMACHINE_PREPARE, 21 STOPMACHINE_PREPARE,
22 /* Disable interrupts. */
24 STOPMACHINE_DISABLE_IRQ, 23 STOPMACHINE_DISABLE_IRQ,
24 /* Run the function */
25 STOPMACHINE_RUN,
26 /* Exit */
25 STOPMACHINE_EXIT, 27 STOPMACHINE_EXIT,
26}; 28};
29static enum stopmachine_state state;
27 30
28static enum stopmachine_state stopmachine_state; 31struct stop_machine_data {
29static unsigned int stopmachine_num_threads; 32 int (*fn)(void *);
30static atomic_t stopmachine_thread_ack; 33 void *data;
31 34 int fnret;
32static int stopmachine(void *cpu) 35};
33{
34 int irqs_disabled = 0;
35 int prepared = 0;
36 cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
37
38 set_cpus_allowed_ptr(current, cpumask);
39
40 /* Ack: we are alive */
41 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
42 atomic_inc(&stopmachine_thread_ack);
43
44 /* Simple state machine */
45 while (stopmachine_state != STOPMACHINE_EXIT) {
46 if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
47 && !irqs_disabled) {
48 local_irq_disable();
49 hard_irq_disable();
50 irqs_disabled = 1;
51 /* Ack: irqs disabled. */
52 smp_mb(); /* Must read state first. */
53 atomic_inc(&stopmachine_thread_ack);
54 } else if (stopmachine_state == STOPMACHINE_PREPARE
55 && !prepared) {
56 /* Everyone is in place, hold CPU. */
57 preempt_disable();
58 prepared = 1;
59 smp_mb(); /* Must read state first. */
60 atomic_inc(&stopmachine_thread_ack);
61 }
62 /* Yield in first stage: migration threads need to
63 * help our sisters onto their CPUs. */
64 if (!prepared && !irqs_disabled)
65 yield();
66 cpu_relax();
67 }
68
69 /* Ack: we are exiting. */
70 smp_mb(); /* Must read state first. */
71 atomic_inc(&stopmachine_thread_ack);
72
73 if (irqs_disabled)
74 local_irq_enable();
75 if (prepared)
76 preempt_enable();
77 36
 78 return 0; 37/* Like num_online_cpus(), but CPU hotplug uses us, so we need our own count. */
79} 38static unsigned int num_threads;
39static atomic_t thread_ack;
40static struct completion finished;
41static DEFINE_MUTEX(lock);
80 42
81/* Change the thread state */ 43static void set_state(enum stopmachine_state newstate)
82static void stopmachine_set_state(enum stopmachine_state state)
83{ 44{
84 atomic_set(&stopmachine_thread_ack, 0); 45 /* Reset ack counter. */
46 atomic_set(&thread_ack, num_threads);
85 smp_wmb(); 47 smp_wmb();
86 stopmachine_state = state; 48 state = newstate;
87 while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
88 cpu_relax();
89} 49}
90 50
91static int stop_machine(void) 51/* Last one to ack a state moves to the next state. */
52static void ack_state(void)
92{ 53{
93 int i, ret = 0; 54 if (atomic_dec_and_test(&thread_ack)) {
94 55 /* If we're the last one to ack the EXIT, we're finished. */
95 atomic_set(&stopmachine_thread_ack, 0); 56 if (state == STOPMACHINE_EXIT)
96 stopmachine_num_threads = 0; 57 complete(&finished);
97 stopmachine_state = STOPMACHINE_WAIT; 58 else
98 59 set_state(state + 1);
99 for_each_online_cpu(i) {
100 if (i == raw_smp_processor_id())
101 continue;
102 ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
103 if (ret < 0)
104 break;
105 stopmachine_num_threads++;
106 }
107
108 /* Wait for them all to come to life. */
109 while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
110 yield();
111 cpu_relax();
112 } 60 }
61}
113 62
114 /* If some failed, kill them all. */ 63/* This is the actual thread which stops the CPU. It exits by itself rather
115 if (ret < 0) { 64 * than waiting for kthread_stop(), because that's easier for CPU hotplug. */
116 stopmachine_set_state(STOPMACHINE_EXIT); 65static int stop_cpu(struct stop_machine_data *smdata)
117 return ret; 66{
118 } 67 enum stopmachine_state curstate = STOPMACHINE_NONE;
68 int uninitialized_var(ret);
119 69
120 /* Now they are all started, make them hold the CPUs, ready. */ 70 /* Simple state machine */
121 preempt_disable(); 71 do {
122 stopmachine_set_state(STOPMACHINE_PREPARE); 72 /* Chill out and ensure we re-read state. */
73 cpu_relax();
74 if (state != curstate) {
75 curstate = state;
76 switch (curstate) {
77 case STOPMACHINE_DISABLE_IRQ:
78 local_irq_disable();
79 hard_irq_disable();
80 break;
81 case STOPMACHINE_RUN:
 82 /* |= allows error detection if functions run on
 83 * multiple CPUs. */
84 smdata->fnret |= smdata->fn(smdata->data);
85 break;
86 default:
87 break;
88 }
89 ack_state();
90 }
91 } while (curstate != STOPMACHINE_EXIT);
123 92
124 /* Make them disable irqs. */ 93 local_irq_enable();
125 local_irq_disable(); 94 do_exit(0);
126 hard_irq_disable(); 95}
127 stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
128 96
97/* Callback for CPUs which aren't supposed to do anything. */
98static int chill(void *unused)
99{
129 return 0; 100 return 0;
130} 101}
131 102
132static void restart_machine(void) 103int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
133{ 104{
134 stopmachine_set_state(STOPMACHINE_EXIT); 105 int i, err;
135 local_irq_enable(); 106 struct stop_machine_data active, idle;
136 preempt_enable_no_resched(); 107 struct task_struct **threads;
137} 108
109 active.fn = fn;
110 active.data = data;
111 active.fnret = 0;
112 idle.fn = chill;
113 idle.data = NULL;
114
115 /* This could be too big for stack on large machines. */
116 threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
117 if (!threads)
118 return -ENOMEM;
119
120 /* Set up initial state. */
121 mutex_lock(&lock);
122 init_completion(&finished);
123 num_threads = num_online_cpus();
124 set_state(STOPMACHINE_PREPARE);
138 125
139struct stop_machine_data { 126 for_each_online_cpu(i) {
140 int (*fn)(void *); 127 struct stop_machine_data *smdata = &idle;
141 void *data; 128 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
142 struct completion done;
143};
144 129
145static int do_stop(void *_smdata) 130 if (!cpus) {
146{ 131 if (i == first_cpu(cpu_online_map))
147 struct stop_machine_data *smdata = _smdata; 132 smdata = &active;
148 int ret; 133 } else {
134 if (cpu_isset(i, *cpus))
135 smdata = &active;
136 }
149 137
150 ret = stop_machine(); 138 threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
151 if (ret == 0) { 139 i);
152 ret = smdata->fn(smdata->data); 140 if (IS_ERR(threads[i])) {
153 restart_machine(); 141 err = PTR_ERR(threads[i]);
154 } 142 threads[i] = NULL;
143 goto kill_threads;
144 }
155 145
156 /* We're done: you can kthread_stop us now */ 146 /* Place it onto correct cpu. */
157 complete(&smdata->done); 147 kthread_bind(threads[i], i);
158 148
159 /* Wait for kthread_stop */ 149 /* Make it highest prio. */
160 set_current_state(TASK_INTERRUPTIBLE); 150 if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
161 while (!kthread_should_stop()) { 151 BUG();
162 schedule();
163 set_current_state(TASK_INTERRUPTIBLE);
164 } 152 }
165 __set_current_state(TASK_RUNNING);
166 return ret;
167}
168 153
169struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, 154 /* We've created all the threads. Wake them all: hold this CPU so its
170 unsigned int cpu) 155 * stopper thread doesn't run until we're ready. */
171{ 156 get_cpu();
172 static DEFINE_MUTEX(stopmachine_mutex); 157 for_each_online_cpu(i)
173 struct stop_machine_data smdata; 158 wake_up_process(threads[i]);
174 struct task_struct *p;
175 159
176 smdata.fn = fn; 160 /* This will release the thread on our CPU. */
177 smdata.data = data; 161 put_cpu();
178 init_completion(&smdata.done); 162 wait_for_completion(&finished);
163 mutex_unlock(&lock);
179 164
180 mutex_lock(&stopmachine_mutex); 165 kfree(threads);
181 166
182 /* If they don't care which CPU fn runs on, bind to any online one. */ 167 return active.fnret;
183 if (cpu == NR_CPUS)
184 cpu = raw_smp_processor_id();
185 168
186 p = kthread_create(do_stop, &smdata, "kstopmachine"); 169kill_threads:
187 if (!IS_ERR(p)) { 170 for_each_online_cpu(i)
188 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 171 if (threads[i])
172 kthread_stop(threads[i]);
173 mutex_unlock(&lock);
189 174
190 /* One high-prio thread per cpu. We'll do this one. */ 175 kfree(threads);
191 sched_setscheduler_nocheck(p, SCHED_FIFO, &param); 176 return err;
192 kthread_bind(p, cpu);
193 wake_up_process(p);
194 wait_for_completion(&smdata.done);
195 }
196 mutex_unlock(&stopmachine_mutex);
197 return p;
198} 177}
199 178
200int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) 179int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
201{ 180{
202 struct task_struct *p;
203 int ret; 181 int ret;
204 182
205 /* No CPUs can come up or down during this. */ 183 /* No CPUs can come up or down during this. */
206 get_online_cpus(); 184 get_online_cpus();
207 p = __stop_machine_run(fn, data, cpu); 185 ret = __stop_machine(fn, data, cpus);
208 if (!IS_ERR(p))
209 ret = kthread_stop(p);
210 else
211 ret = PTR_ERR(p);
212 put_online_cpus(); 186 put_online_cpus();
213 187
214 return ret; 188 return ret;
215} 189}
216EXPORT_SYMBOL_GPL(stop_machine_run); 190EXPORT_SYMBOL_GPL(stop_machine);
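
The rewrite replaces the two-step __stop_machine_run()/kthread_stop() dance with a single synchronous call, and replaces the single-CPU argument with an optional cpumask: fn runs on every CPU named in cpus, while the remaining online CPUs spin in chill() with interrupts disabled; a NULL mask means one CPU (the first online one), matching the old NR_CPUS behaviour. A hedged usage sketch (the callback and data below are illustrative):

    /* Runs with IRQs off while every other online CPU is held in
     * stop_cpu()'s state machine, so the update is machine-atomic. */
    static int update_foo(void *data)
    {
            *(int *)data = 1;
            return 0;
    }

    static int do_update(void)
    {
            int foo;

            /* NULL cpumask: run update_foo() on one CPU. */
            return stop_machine(update_foo, &foo, NULL);
    }
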
diff --git a/kernel/sys.c b/kernel/sys.c
index 0c9d3fa1f5f..c01858090a9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -301,26 +301,6 @@ void kernel_restart(char *cmd)
301} 301}
302EXPORT_SYMBOL_GPL(kernel_restart); 302EXPORT_SYMBOL_GPL(kernel_restart);
303 303
304/**
305 * kernel_kexec - reboot the system
306 *
307 * Move into place and start executing a preloaded standalone
308 * executable. If nothing was preloaded return an error.
309 */
310static void kernel_kexec(void)
311{
312#ifdef CONFIG_KEXEC
313 struct kimage *image;
314 image = xchg(&kexec_image, NULL);
315 if (!image)
316 return;
317 kernel_restart_prepare(NULL);
318 printk(KERN_EMERG "Starting new kernel\n");
319 machine_shutdown();
320 machine_kexec(image);
321#endif
322}
323
324static void kernel_shutdown_prepare(enum system_states state) 304static void kernel_shutdown_prepare(enum system_states state)
325{ 305{
326 blocking_notifier_call_chain(&reboot_notifier_list, 306 blocking_notifier_call_chain(&reboot_notifier_list,
@@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
425 kernel_restart(buffer); 405 kernel_restart(buffer);
426 break; 406 break;
427 407
408#ifdef CONFIG_KEXEC
428 case LINUX_REBOOT_CMD_KEXEC: 409 case LINUX_REBOOT_CMD_KEXEC:
429 kernel_kexec(); 410 {
430 unlock_kernel(); 411 int ret;
431 return -EINVAL; 412 ret = kernel_kexec();
413 unlock_kernel();
414 return ret;
415 }
416#endif
432 417
433#ifdef CONFIG_HIBERNATION 418#ifdef CONFIG_HIBERNATION
434 case LINUX_REBOOT_CMD_SW_SUSPEND: 419 case LINUX_REBOOT_CMD_SW_SUSPEND:
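
kernel_kexec() now lives in kernel/kexec.c (see the diffstat) and returns an int, so sys_reboot() propagates a real error instead of unconditionally returning -EINVAL after a failed kexec. From userspace the change is observable through the reboot syscall; a sketch, assuming the usual error convention (on success the call never returns):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/reboot.h>

    /* Returns only on failure; errno then carries kernel_kexec()'s
     * error, e.g. when no kexec image was preloaded. */
    static long try_kexec_reboot(void)
    {
            return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
                           LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_KEXEC, NULL);
    }
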
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 35a50db9b6c..fe471334727 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -160,12 +160,13 @@ static struct ctl_table root_table[];
160static struct ctl_table_root sysctl_table_root; 160static struct ctl_table_root sysctl_table_root;
161static struct ctl_table_header root_table_header = { 161static struct ctl_table_header root_table_header = {
162 .ctl_table = root_table, 162 .ctl_table = root_table,
163 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.header_list), 163 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
164 .root = &sysctl_table_root, 164 .root = &sysctl_table_root,
165 .set = &sysctl_table_root.default_set,
165}; 166};
166static struct ctl_table_root sysctl_table_root = { 167static struct ctl_table_root sysctl_table_root = {
167 .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), 168 .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list),
168 .header_list = LIST_HEAD_INIT(root_table_header.ctl_entry), 169 .default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry),
169}; 170};
170 171
171static struct ctl_table kern_table[]; 172static struct ctl_table kern_table[];
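
This hunk rewires the root header onto per-set lists: every ctl_table_root now carries a default_set, and headers are chained through a set rather than the old header_list. Reconstructing from how setup_sysctl_set() and lookup_header_set() use it later in this diff, the new structure looks roughly like the sketch below (field order and completeness are assumptions):

    struct ctl_table_set {
            struct list_head list;          /* headers registered in this set */
            struct ctl_table_set *parent;   /* fallback, e.g. the default set */
            int (*is_seen)(struct ctl_table_set *); /* visibility check */
    };
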
@@ -1386,6 +1387,9 @@ static void start_unregistering(struct ctl_table_header *p)
1386 spin_unlock(&sysctl_lock); 1387 spin_unlock(&sysctl_lock);
1387 wait_for_completion(&wait); 1388 wait_for_completion(&wait);
1388 spin_lock(&sysctl_lock); 1389 spin_lock(&sysctl_lock);
1390 } else {
1391 /* anything non-NULL; we'll never dereference it */
1392 p->unregistering = ERR_PTR(-EINVAL);
1389 } 1393 }
1390 /* 1394 /*
1391 * do not remove from the list until nobody holds it; walking the 1395 * do not remove from the list until nobody holds it; walking the
@@ -1394,6 +1398,32 @@ static void start_unregistering(struct ctl_table_header *p)
1394 list_del_init(&p->ctl_entry); 1398 list_del_init(&p->ctl_entry);
1395} 1399}
1396 1400
1401void sysctl_head_get(struct ctl_table_header *head)
1402{
1403 spin_lock(&sysctl_lock);
1404 head->count++;
1405 spin_unlock(&sysctl_lock);
1406}
1407
1408void sysctl_head_put(struct ctl_table_header *head)
1409{
1410 spin_lock(&sysctl_lock);
1411 if (!--head->count)
1412 kfree(head);
1413 spin_unlock(&sysctl_lock);
1414}
1415
1416struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head)
1417{
1418 if (!head)
1419 BUG();
1420 spin_lock(&sysctl_lock);
1421 if (!use_table(head))
1422 head = ERR_PTR(-ENOENT);
1423 spin_unlock(&sysctl_lock);
1424 return head;
1425}
1426
1397void sysctl_head_finish(struct ctl_table_header *head) 1427void sysctl_head_finish(struct ctl_table_header *head)
1398{ 1428{
1399 if (!head) 1429 if (!head)
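
sysctl_head_get() and sysctl_head_put() add plain reference counting under sysctl_lock, so a user such as a proc inode can keep a header alive after unregister_sysctl_table() has dropped its own reference. A hedged usage sketch with hypothetical helpers:

    /* Hypothetical caller: pin a header for an object that may outlive
     * the registration, then release the pin when done. */
    static struct ctl_table_header *pin_header(struct ctl_table_header *h)
    {
            sysctl_head_get(h);     /* count++ under sysctl_lock */
            return h;
    }

    static void unpin_header(struct ctl_table_header *h)
    {
            sysctl_head_put(h);     /* last put frees the header */
    }
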
@@ -1403,14 +1433,20 @@ void sysctl_head_finish(struct ctl_table_header *head)
1403 spin_unlock(&sysctl_lock); 1433 spin_unlock(&sysctl_lock);
1404} 1434}
1405 1435
1436static struct ctl_table_set *
1437lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces)
1438{
1439 struct ctl_table_set *set = &root->default_set;
1440 if (root->lookup)
1441 set = root->lookup(root, namespaces);
1442 return set;
1443}
1444
1406static struct list_head * 1445static struct list_head *
1407lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) 1446lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces)
1408{ 1447{
1409 struct list_head *header_list; 1448 struct ctl_table_set *set = lookup_header_set(root, namespaces);
1410 header_list = &root->header_list; 1449 return &set->list;
1411 if (root->lookup)
1412 header_list = root->lookup(root, namespaces);
1413 return header_list;
1414} 1450}
1415 1451
1416struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, 1452struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
@@ -1480,9 +1516,9 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
1480 int op = 0, rc; 1516 int op = 0, rc;
1481 1517
1482 if (oldval) 1518 if (oldval)
1483 op |= 004; 1519 op |= MAY_READ;
1484 if (newval) 1520 if (newval)
1485 op |= 002; 1521 op |= MAY_WRITE;
1486 if (sysctl_perm(root, table, op)) 1522 if (sysctl_perm(root, table, op))
1487 return -EPERM; 1523 return -EPERM;
1488 1524
@@ -1524,7 +1560,7 @@ repeat:
1524 if (n == table->ctl_name) { 1560 if (n == table->ctl_name) {
1525 int error; 1561 int error;
1526 if (table->child) { 1562 if (table->child) {
1527 if (sysctl_perm(root, table, 001)) 1563 if (sysctl_perm(root, table, MAY_EXEC))
1528 return -EPERM; 1564 return -EPERM;
1529 name++; 1565 name++;
1530 nlen--; 1566 nlen--;
@@ -1599,7 +1635,7 @@ static int test_perm(int mode, int op)
1599 mode >>= 6; 1635 mode >>= 6;
1600 else if (in_egroup_p(0)) 1636 else if (in_egroup_p(0))
1601 mode >>= 3; 1637 mode >>= 3;
1602 if ((mode & op & 0007) == op) 1638 if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0)
1603 return 0; 1639 return 0;
1604 return -EACCES; 1640 return -EACCES;
1605} 1641}
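
The MAY_* conversion is behaviour-preserving: with MAY_READ = 4, MAY_WRITE = 2 and MAY_EXEC = 1 matching the octal constants they replace, "every requested bit is granted" can be written either way. A standalone sketch that shows both forms agree:

    #define MAY_EXEC        1       /* values as in <linux/fs.h> */
    #define MAY_WRITE       2
    #define MAY_READ        4

    static int old_ok(int mode, int op)
    {
            return (mode & op & 0007) == op;
    }

    static int new_ok(int mode, int op)
    {
            return (op & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0;
    }

    /* Example: mode 06 grants read+write, mode 04 does not:
     *   old_ok(06, MAY_READ|MAY_WRITE) == new_ok(06, MAY_READ|MAY_WRITE) == 1
     *   old_ok(04, MAY_READ|MAY_WRITE) == new_ok(04, MAY_READ|MAY_WRITE) == 0 */
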
@@ -1609,7 +1645,7 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
1609 int error; 1645 int error;
1610 int mode; 1646 int mode;
1611 1647
1612 error = security_sysctl(table, op); 1648 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
1613 if (error) 1649 if (error)
1614 return error; 1650 return error;
1615 1651
@@ -1644,6 +1680,54 @@ static __init int sysctl_init(void)
1644 1680
1645core_initcall(sysctl_init); 1681core_initcall(sysctl_init);
1646 1682
1683static struct ctl_table *is_branch_in(struct ctl_table *branch,
1684 struct ctl_table *table)
1685{
1686 struct ctl_table *p;
1687 const char *s = branch->procname;
1688
1689 /* branch should have named subdirectory as its first element */
1690 if (!s || !branch->child)
1691 return NULL;
1692
1693 /* ... and nothing else */
1694 if (branch[1].procname || branch[1].ctl_name)
1695 return NULL;
1696
1697 /* table should contain subdirectory with the same name */
1698 for (p = table; p->procname || p->ctl_name; p++) {
1699 if (!p->child)
1700 continue;
1701 if (p->procname && strcmp(p->procname, s) == 0)
1702 return p;
1703 }
1704 return NULL;
1705}
1706
1707/* see if attaching q to p would be an improvement */
1708static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
1709{
1710 struct ctl_table *to = p->ctl_table, *by = q->ctl_table;
1711 struct ctl_table *next;
1712 int is_better = 0;
1713 int not_in_parent = !p->attached_by;
1714
1715 while ((next = is_branch_in(by, to)) != NULL) {
1716 if (by == q->attached_by)
1717 is_better = 1;
1718 if (to == p->attached_by)
1719 not_in_parent = 1;
1720 by = by->child;
1721 to = next->child;
1722 }
1723
1724 if (is_better && not_in_parent) {
1725 q->attached_by = by;
1726 q->attached_to = to;
1727 q->parent = p;
1728 }
1729}
1730
1647/** 1731/**
1648 * __register_sysctl_paths - register a sysctl hierarchy 1732 * __register_sysctl_paths - register a sysctl hierarchy
1649 * @root: List of sysctl headers to register on 1733 * @root: List of sysctl headers to register on
@@ -1720,10 +1804,10 @@ struct ctl_table_header *__register_sysctl_paths(
1720 struct nsproxy *namespaces, 1804 struct nsproxy *namespaces,
1721 const struct ctl_path *path, struct ctl_table *table) 1805 const struct ctl_path *path, struct ctl_table *table)
1722{ 1806{
1723 struct list_head *header_list;
1724 struct ctl_table_header *header; 1807 struct ctl_table_header *header;
1725 struct ctl_table *new, **prevp; 1808 struct ctl_table *new, **prevp;
1726 unsigned int n, npath; 1809 unsigned int n, npath;
1810 struct ctl_table_set *set;
1727 1811
1728 /* Count the path components */ 1812 /* Count the path components */
1729 for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath) 1813 for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath)
@@ -1765,6 +1849,7 @@ struct ctl_table_header *__register_sysctl_paths(
1765 header->unregistering = NULL; 1849 header->unregistering = NULL;
1766 header->root = root; 1850 header->root = root;
1767 sysctl_set_parent(NULL, header->ctl_table); 1851 sysctl_set_parent(NULL, header->ctl_table);
1852 header->count = 1;
1768#ifdef CONFIG_SYSCTL_SYSCALL_CHECK 1853#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
1769 if (sysctl_check_table(namespaces, header->ctl_table)) { 1854 if (sysctl_check_table(namespaces, header->ctl_table)) {
1770 kfree(header); 1855 kfree(header);
@@ -1772,8 +1857,20 @@ struct ctl_table_header *__register_sysctl_paths(
1772 } 1857 }
1773#endif 1858#endif
1774 spin_lock(&sysctl_lock); 1859 spin_lock(&sysctl_lock);
1775 header_list = lookup_header_list(root, namespaces); 1860 header->set = lookup_header_set(root, namespaces);
1776 list_add_tail(&header->ctl_entry, header_list); 1861 header->attached_by = header->ctl_table;
1862 header->attached_to = root_table;
1863 header->parent = &root_table_header;
1864 for (set = header->set; set; set = set->parent) {
1865 struct ctl_table_header *p;
1866 list_for_each_entry(p, &set->list, ctl_entry) {
1867 if (p->unregistering)
1868 continue;
1869 try_attach(p, header);
1870 }
1871 }
1872 header->parent->count++;
1873 list_add_tail(&header->ctl_entry, &header->set->list);
1777 spin_unlock(&sysctl_lock); 1874 spin_unlock(&sysctl_lock);
1778 1875
1779 return header; 1876 return header;
@@ -1828,8 +1925,37 @@ void unregister_sysctl_table(struct ctl_table_header * header)
1828 1925
1829 spin_lock(&sysctl_lock); 1926 spin_lock(&sysctl_lock);
1830 start_unregistering(header); 1927 start_unregistering(header);
1928 if (!--header->parent->count) {
1929 WARN_ON(1);
1930 kfree(header->parent);
1931 }
1932 if (!--header->count)
1933 kfree(header);
1831 spin_unlock(&sysctl_lock); 1934 spin_unlock(&sysctl_lock);
1832 kfree(header); 1935}
1936
1937int sysctl_is_seen(struct ctl_table_header *p)
1938{
1939 struct ctl_table_set *set = p->set;
1940 int res;
1941 spin_lock(&sysctl_lock);
1942 if (p->unregistering)
1943 res = 0;
1944 else if (!set->is_seen)
1945 res = 1;
1946 else
1947 res = set->is_seen(set);
1948 spin_unlock(&sysctl_lock);
1949 return res;
1950}
1951
1952void setup_sysctl_set(struct ctl_table_set *p,
1953 struct ctl_table_set *parent,
1954 int (*is_seen)(struct ctl_table_set *))
1955{
1956 INIT_LIST_HEAD(&p->list);
1957 p->parent = parent ? parent : &sysctl_table_root.default_set;
1958 p->is_seen = is_seen;
1833} 1959}
1834 1960
1835#else /* !CONFIG_SYSCTL */ 1961#else /* !CONFIG_SYSCTL */
@@ -1848,6 +1974,16 @@ void unregister_sysctl_table(struct ctl_table_header * table)
1848{ 1974{
1849} 1975}
1850 1976
1977void setup_sysctl_set(struct ctl_table_set *p,
1978 struct ctl_table_set *parent,
1979 int (*is_seen)(struct ctl_table_set *))
1980{
1981}
1982
1983void sysctl_head_put(struct ctl_table_header *head)
1984{
1985}
1986
1851#endif /* CONFIG_SYSCTL */ 1987#endif /* CONFIG_SYSCTL */
1852 1988
1853/* 1989/*
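
The attach machinery above exists so that headers registered in different sets (the per-namespace case this series is preparing for) can share parent directories. A consumer installs its set with setup_sysctl_set(); a hedged sketch with hypothetical names:

    /* Hypothetical per-namespace set; is_seen() decides whether this
     * set's entries are visible to the current task. */
    static int my_set_is_seen(struct ctl_table_set *set)
    {
            return 1;       /* a real callback would consult current->nsproxy */
    }

    static struct ctl_table_set my_set;

    static void my_ns_init(void)
    {
            /* NULL parent falls back to sysctl_table_root.default_set. */
            setup_sysctl_set(&my_set, NULL, my_set_is_seen);
    }
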
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index bf43284d685..80c4336f418 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev)
196 struct tick_device *td; 196 struct tick_device *td;
197 int cpu, ret = NOTIFY_OK; 197 int cpu, ret = NOTIFY_OK;
198 unsigned long flags; 198 unsigned long flags;
199 cpumask_of_cpu_ptr_declare(cpumask);
200 199
201 spin_lock_irqsave(&tick_device_lock, flags); 200 spin_lock_irqsave(&tick_device_lock, flags);
202 201
203 cpu = smp_processor_id(); 202 cpu = smp_processor_id();
204 cpumask_of_cpu_ptr_next(cpumask, cpu);
205 if (!cpu_isset(cpu, newdev->cpumask)) 203 if (!cpu_isset(cpu, newdev->cpumask))
206 goto out_bc; 204 goto out_bc;
207 205
@@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
209 curdev = td->evtdev; 207 curdev = td->evtdev;
210 208
211 /* cpu local device ? */ 209 /* cpu local device ? */
212 if (!cpus_equal(newdev->cpumask, *cpumask)) { 210 if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
213 211
214 /* 212 /*
215 * If the cpu affinity of the device interrupt can not 213 * If the cpu affinity of the device interrupt can not
@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
222 * If we have a cpu local device already, do not replace it 220 * If we have a cpu local device already, do not replace it
223 * by a non cpu local device 221 * by a non cpu local device
224 */ 222 */
225 if (curdev && cpus_equal(curdev->cpumask, *cpumask)) 223 if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
226 goto out_bc; 224 goto out_bc;
227 } 225 }
228 226
@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
254 curdev = NULL; 252 curdev = NULL;
255 } 253 }
256 clockevents_exchange_device(curdev, newdev); 254 clockevents_exchange_device(curdev, newdev);
257 tick_setup_device(td, newdev, cpu, cpumask); 255 tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 256 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
259 tick_oneshot_notify(); 257 tick_oneshot_notify();
260 258
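
The cpumask_of_cpu_ptr declare/next pair disappears in favour of plain cpumask_of_cpu(cpu), which yields a cpumask_t with only the given bit set and can be compared or have its address taken directly, as the hunks above do. The resulting idiom, sketched:

    /* Sketch of the post-cleanup idiom used in tick_check_new_device(). */
    static int device_is_local(struct clock_event_device *dev, int cpu)
    {
            return cpus_equal(dev->cpumask, cpumask_of_cpu(cpu));
    }
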
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4231a3dc224..f6e3af31b40 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -587,7 +587,7 @@ static int __ftrace_modify_code(void *data)
587 587
588static void ftrace_run_update_code(int command) 588static void ftrace_run_update_code(int command)
589{ 589{
590 stop_machine_run(__ftrace_modify_code, &command, NR_CPUS); 590 stop_machine(__ftrace_modify_code, &command, NULL);
591} 591}
592 592
593void ftrace_disable_daemon(void) 593void ftrace_disable_daemon(void)
@@ -787,7 +787,7 @@ static int ftrace_update_code(void)
787 !ftrace_enabled || !ftraced_trigger) 787 !ftrace_enabled || !ftraced_trigger)
788 return 0; 788 return 0;
789 789
790 stop_machine_run(__ftrace_update_code, NULL, NR_CPUS); 790 stop_machine(__ftrace_update_code, NULL, NULL);
791 791
792 return 1; 792 return 1;
793} 793}
@@ -1564,7 +1564,7 @@ static int __init ftrace_dynamic_init(void)
1564 1564
1565 addr = (unsigned long)ftrace_record_ip; 1565 addr = (unsigned long)ftrace_record_ip;
1566 1566
1567 stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS); 1567 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1568 1568
1569 /* ftrace_dyn_arch_init places the return code in addr */ 1569 /* ftrace_dyn_arch_init places the return code in addr */
1570 if (addr) { 1570 if (addr) {
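
All three ftrace call sites convert mechanically under the new stop_machine() signature: the old cpu argument of NR_CPUS ("any CPU") becomes a NULL cpumask with the same effective meaning. Schematically:

    /* Illustrative wrapper showing the converted call shape. */
    static int run_stopped(int (*fn)(void *), void *data)
    {
            /* NULL: fn runs on one CPU; the rest idle with IRQs off. */
            return stop_machine(fn, data, NULL);
    }
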
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 868e121c8e3..8f3fb3db61c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1183,7 +1183,6 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
1183static void *s_next(struct seq_file *m, void *v, loff_t *pos) 1183static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1184{ 1184{
1185 struct trace_iterator *iter = m->private; 1185 struct trace_iterator *iter = m->private;
1186 void *last_ent = iter->ent;
1187 int i = (int)*pos; 1186 int i = (int)*pos;
1188 void *ent; 1187 void *ent;
1189 1188
@@ -1203,9 +1202,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1203 1202
1204 iter->pos = *pos; 1203 iter->pos = *pos;
1205 1204
1206 if (last_ent && !ent)
1207 seq_puts(m, "\n\nvim:ft=help\n");
1208
1209 return ent; 1205 return ent;
1210} 1206}
1211 1207
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 421d6fe3650..ece6cfb649f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -253,12 +253,14 @@ void start_critical_timings(void)
253 if (preempt_trace() || irq_trace()) 253 if (preempt_trace() || irq_trace())
254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
255} 255}
256EXPORT_SYMBOL_GPL(start_critical_timings);
256 257
257void stop_critical_timings(void) 258void stop_critical_timings(void)
258{ 259{
259 if (preempt_trace() || irq_trace()) 260 if (preempt_trace() || irq_trace())
260 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 261 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
261} 262}
263EXPORT_SYMBOL_GPL(stop_critical_timings);
262 264
263#ifdef CONFIG_IRQSOFF_TRACER 265#ifdef CONFIG_IRQSOFF_TRACER
264#ifdef CONFIG_PROVE_LOCKING 266#ifdef CONFIG_PROVE_LOCKING
@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
337#ifdef CONFIG_PREEMPT_TRACER 339#ifdef CONFIG_PREEMPT_TRACER
338void trace_preempt_on(unsigned long a0, unsigned long a1) 340void trace_preempt_on(unsigned long a0, unsigned long a1)
339{ 341{
340 stop_critical_timing(a0, a1); 342 if (preempt_trace())
343 stop_critical_timing(a0, a1);
341} 344}
342 345
343void trace_preempt_off(unsigned long a0, unsigned long a1) 346void trace_preempt_off(unsigned long a0, unsigned long a1)
344{ 347{
345 start_critical_timing(a0, a1); 348 if (preempt_trace())
349 start_critical_timing(a0, a1);
346} 350}
347#endif /* CONFIG_PREEMPT_TRACER */ 351#endif /* CONFIG_PREEMPT_TRACER */
348 352
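
The new exports let modular code bracket intentionally long quiet periods so they don't pollute irqsoff measurements (the ACPI idle path is the usual consumer, though no caller appears in this hunk), and the preempt_trace() guards stop the preempt hooks from measuring when that tracer isn't active. A hypothetical modular user:

    /* Hypothetical idle hook: exclude a known-long wait from the
     * irqsoff/preemptoff tracers' measurements. */
    static void my_idle_wait(void)
    {
            stop_critical_timings();        /* pause latency timing */
            /* ... architecture idle instruction would sit here ... */
            start_critical_timings();       /* resume timing */
    }
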
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3c8d61df447..e303ccb62cd 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
26static int wakeup_cpu; 26static int wakeup_cpu;
27static unsigned wakeup_prio = -1; 27static unsigned wakeup_prio = -1;
28 28
29static DEFINE_SPINLOCK(wakeup_lock); 29static raw_spinlock_t wakeup_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
30 31
31static void __wakeup_reset(struct trace_array *tr); 32static void __wakeup_reset(struct trace_array *tr);
32 33
@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
56 if (unlikely(disabled != 1)) 57 if (unlikely(disabled != 1))
57 goto out; 58 goto out;
58 59
59 spin_lock_irqsave(&wakeup_lock, flags); 60 local_irq_save(flags);
61 __raw_spin_lock(&wakeup_lock);
60 62
61 if (unlikely(!wakeup_task)) 63 if (unlikely(!wakeup_task))
62 goto unlock; 64 goto unlock;
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
71 trace_function(tr, data, ip, parent_ip, flags); 73 trace_function(tr, data, ip, parent_ip, flags);
72 74
73 unlock: 75 unlock:
74 spin_unlock_irqrestore(&wakeup_lock, flags); 76 __raw_spin_unlock(&wakeup_lock);
77 local_irq_restore(flags);
75 78
76 out: 79 out:
77 atomic_dec(&data->disabled); 80 atomic_dec(&data->disabled);
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
145 if (likely(disabled != 1)) 148 if (likely(disabled != 1))
146 goto out; 149 goto out;
147 150
148 spin_lock_irqsave(&wakeup_lock, flags); 151 local_irq_save(flags);
152 __raw_spin_lock(&wakeup_lock);
149 153
150 /* We could race with grabbing wakeup_lock */ 154 /* We could race with grabbing wakeup_lock */
151 if (unlikely(!tracer_enabled || next != wakeup_task)) 155 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
174 178
175out_unlock: 179out_unlock:
176 __wakeup_reset(tr); 180 __wakeup_reset(tr);
177 spin_unlock_irqrestore(&wakeup_lock, flags); 181 __raw_spin_unlock(&wakeup_lock);
182 local_irq_restore(flags);
178out: 183out:
179 atomic_dec(&tr->data[cpu]->disabled); 184 atomic_dec(&tr->data[cpu]->disabled);
180} 185}
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
209 struct trace_array_cpu *data; 214 struct trace_array_cpu *data;
210 int cpu; 215 int cpu;
211 216
212 assert_spin_locked(&wakeup_lock);
213
214 for_each_possible_cpu(cpu) { 217 for_each_possible_cpu(cpu) {
215 data = tr->data[cpu]; 218 data = tr->data[cpu];
216 tracing_reset(data); 219 tracing_reset(data);
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
229{ 232{
230 unsigned long flags; 233 unsigned long flags;
231 234
232 spin_lock_irqsave(&wakeup_lock, flags); 235 local_irq_save(flags);
236 __raw_spin_lock(&wakeup_lock);
233 __wakeup_reset(tr); 237 __wakeup_reset(tr);
234 spin_unlock_irqrestore(&wakeup_lock, flags); 238 __raw_spin_unlock(&wakeup_lock);
239 local_irq_restore(flags);
235} 240}
236 241
237static void 242static void
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
252 goto out; 257 goto out;
253 258
254 /* interrupts should be off from try_to_wake_up */ 259 /* interrupts should be off from try_to_wake_up */
255 spin_lock(&wakeup_lock); 260 __raw_spin_lock(&wakeup_lock);
256 261
257 /* check for races. */ 262 /* check for races. */
258 if (!tracer_enabled || p->prio >= wakeup_prio) 263 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
274 CALLER_ADDR1, CALLER_ADDR2, flags); 279 CALLER_ADDR1, CALLER_ADDR2, flags);
275 280
276out_locked: 281out_locked:
277 spin_unlock(&wakeup_lock); 282 __raw_spin_unlock(&wakeup_lock);
278out: 283out:
279 atomic_dec(&tr->data[cpu]->disabled); 284 atomic_dec(&tr->data[cpu]->disabled);
280} 285}
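
The wakeup tracer's lock becomes a raw_spinlock_t so taking it generates no lockdep or tracing activity of its own; that is also why the assert_spin_locked() check is dropped, since it only works on the lockdep-aware type. Because the raw lock has no irqsave variant, each spin_lock_irqsave() is open-coded as local_irq_save() plus __raw_spin_lock(), as in this sketch of the pattern:

    static raw_spinlock_t my_lock =
            (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

    static void my_critical_section(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* what spin_lock_irqsave() did */
            __raw_spin_lock(&my_lock);      /* no lockdep, no trace events */

            /* ... tracer state updates ... */

            __raw_spin_unlock(&my_lock);
            local_irq_restore(flags);
    }
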
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index ce2d723c10e..bb948e52ce2 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -213,9 +213,7 @@ static void start_stack_timers(void)
213 int cpu; 213 int cpu;
214 214
215 for_each_online_cpu(cpu) { 215 for_each_online_cpu(cpu) {
216 cpumask_of_cpu_ptr(new_mask, cpu); 216 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
217
218 set_cpus_allowed_ptr(current, new_mask);
219 start_stack_timer(cpu); 217 start_stack_timer(cpu);
220 } 218 }
221 set_cpus_allowed_ptr(current, &saved_mask); 219 set_cpus_allowed_ptr(current, &saved_mask);
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 3da47ccdc5e..8ebcd8532df 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -94,10 +94,10 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
94 stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; 94 stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
95 mmput(mm); 95 mmput(mm);
96 } 96 }
97 stats->read_char = p->rchar; 97 stats->read_char = p->ioac.rchar;
98 stats->write_char = p->wchar; 98 stats->write_char = p->ioac.wchar;
99 stats->read_syscalls = p->syscr; 99 stats->read_syscalls = p->ioac.syscr;
100 stats->write_syscalls = p->syscw; 100 stats->write_syscalls = p->ioac.syscw;
101#ifdef CONFIG_TASK_IO_ACCOUNTING 101#ifdef CONFIG_TASK_IO_ACCOUNTING
102 stats->read_bytes = p->ioac.read_bytes; 102 stats->read_bytes = p->ioac.read_bytes;
103 stats->write_bytes = p->ioac.write_bytes; 103 stats->write_bytes = p->ioac.write_bytes;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ec7e4f62aaf..4a26a1382df 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -830,10 +830,21 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
830 start_workqueue_thread(cwq, -1); 830 start_workqueue_thread(cwq, -1);
831 } else { 831 } else {
832 cpu_maps_update_begin(); 832 cpu_maps_update_begin();
833 /*
834 * We must place this wq on the list even if the code below fails.
835 * cpu_down(cpu) can remove cpu from cpu_populated_map before
836 * destroy_workqueue() takes the lock; in that case we would leak
837 * cwq[cpu]->thread.
838 */
833 spin_lock(&workqueue_lock); 839 spin_lock(&workqueue_lock);
834 list_add(&wq->list, &workqueues); 840 list_add(&wq->list, &workqueues);
835 spin_unlock(&workqueue_lock); 841 spin_unlock(&workqueue_lock);
836 842 /*
843 * We must initialize cwqs for each possible cpu even if we
844 * end up calling destroy_workqueue(). Otherwise
845 * cpu_up() can hit the uninitialized cwq once we drop the
846 * lock.
847 */
837 for_each_possible_cpu(cpu) { 848 for_each_possible_cpu(cpu) {
838 cwq = init_cpu_workqueue(wq, cpu); 849 cwq = init_cpu_workqueue(wq, cpu);
839 if (err || !cpu_online(cpu)) 850 if (err || !cpu_online(cpu))