Diffstat (limited to 'kernel')
 kernel/Kconfig.preempt | 25
 kernel/Makefile | 6
 kernel/audit.h | 5
 kernel/audit_tree.c | 3
 kernel/auditfilter.c | 325
 kernel/auditsc.c | 739
 kernel/capability.c | 4
 kernel/cgroup.c | 7
 kernel/compat.c | 49
 kernel/cpu.c | 151
 kernel/cpuset.c | 4
 kernel/exit.c | 4
 kernel/extable.c | 16
 kernel/fork.c | 4
 kernel/futex.c | 351
 kernel/hrtimer.c | 332
 kernel/irq/Makefile | 1
 kernel/irq/chip.c | 18
 kernel/irq/handle.c | 205
 kernel/irq/internals.h | 5
 kernel/irq/manage.c | 58
 kernel/irq/migration.c | 14
 kernel/irq/numa_migrate.c | 119
 kernel/irq/proc.c | 63
 kernel/kexec.c | 2
 kernel/lockdep.c | 60
 kernel/lockdep_proc.c | 28
 kernel/module.c | 59
 kernel/mutex.c | 10
 kernel/notifier.c | 8
 kernel/panic.c | 32
 kernel/posix-cpu-timers.c | 10
 kernel/posix-timers.c | 40
 kernel/power/poweroff.c | 2
 kernel/printk.c | 2
 kernel/profile.c | 38
 kernel/rcuclassic.c | 36
 kernel/rcupreempt.c | 29
 kernel/rcupreempt_trace.c | 10
 kernel/rcutorture.c | 93
 kernel/rcutree.c | 1535
 kernel/rcutree_trace.c | 271
 kernel/resource.c | 9
 kernel/sched.c | 1106
 kernel/sched_clock.c | 5
 kernel/sched_cpupri.c | 39
 kernel/sched_cpupri.h | 5
 kernel/sched_fair.c | 62
 kernel/sched_rt.c | 74
 kernel/sched_stats.h | 3
 kernel/smp.c | 145
 kernel/softirq.c | 41
 kernel/softlockup.c | 12
 kernel/stacktrace.c | 11
 kernel/stop_machine.c | 63
 kernel/sys.c | 4
 kernel/sysctl.c | 14
 kernel/sysctl_check.c | 1
 kernel/taskstats.c | 41
 kernel/time/clockevents.c | 2
 kernel/time/clocksource.c | 9
 kernel/time/ntp.c | 4
 kernel/time/tick-broadcast.c | 113
 kernel/time/tick-common.c | 18
 kernel/time/tick-sched.c | 66
 kernel/time/timekeeping.c | 7
 kernel/timer.c | 15
 kernel/trace/ring_buffer.c | 78
 kernel/trace/trace.c | 73
 kernel/trace/trace.h | 2
 kernel/trace/trace_boot.c | 2
 kernel/trace/trace_functions_graph.c | 2
 kernel/trace/trace_hw_branches.c | 6
 kernel/trace/trace_power.c | 2
 kernel/trace/trace_sysprof.c | 14
 kernel/workqueue.c | 26
 76 files changed, 4525 insertions(+), 2322 deletions(-)
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index 9fdba03dc1fc..bf987b95b356 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -52,28 +52,3 @@ config PREEMPT
 
 endchoice
 
-config PREEMPT_RCU
-	bool "Preemptible RCU"
-	depends on PREEMPT
-	default n
-	help
-	  This option reduces the latency of the kernel by making certain
-	  RCU sections preemptible. Normally RCU code is non-preemptible, if
-	  this option is selected then read-only RCU sections become
-	  preemptible. This helps latency, but may expose bugs due to
-	  now-naive assumptions about each RCU read-side critical section
-	  remaining on a given CPU through its execution.
-
-	  Say N if you are unsure.
-
-config RCU_TRACE
-	bool "Enable tracing for RCU - currently stats in debugfs"
-	depends on PREEMPT_RCU
-	select DEBUG_FS
-	default y
-	help
-	  This option provides tracing in RCU which presents stats
-	  in debugfs for debugging RCU implementation.
-
-	  Say Y here if you want to enable RCU tracing
-	  Say N if you are unsure.
diff --git a/kernel/Makefile b/kernel/Makefile
index 027edda63511..e1c5bf3365c0 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -73,10 +73,10 @@ obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o
+obj-$(CONFIG_TREE_RCU) += rcutree.o
 obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o
-ifeq ($(CONFIG_PREEMPT_RCU),y)
-obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o
-endif
+obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
+obj-$(CONFIG_PREEMPT_RCU_TRACE) += rcupreempt_trace.o
 obj-$(CONFIG_RELAY) += relay.o
 obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
diff --git a/kernel/audit.h b/kernel/audit.h
index 9d6717412fec..16f18cac661b 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -159,11 +159,8 @@ static inline int audit_signal_info(int sig, struct task_struct *t)
 		return __audit_signal_info(sig, t);
 	return 0;
 }
-extern enum audit_state audit_filter_inodes(struct task_struct *,
-					    struct audit_context *);
-extern void audit_set_auditable(struct audit_context *);
+extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
 #else
 #define audit_signal_info(s,t) AUDIT_DISABLED
 #define audit_filter_inodes(t,c) AUDIT_DISABLED
-#define audit_set_auditable(c)
 #endif
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 8b509441f49a..8ad9545b8db9 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -450,6 +450,7 @@ static void kill_rules(struct audit_tree *tree)
 			audit_log_end(ab);
 			rule->tree = NULL;
 			list_del_rcu(&entry->list);
+			list_del(&entry->rule.list);
 			call_rcu(&entry->rcu, audit_free_rule_rcu);
 		}
 	}
@@ -617,7 +618,7 @@ int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
 
 	if (pathname[0] != '/' ||
 	    rule->listnr != AUDIT_FILTER_EXIT ||
-	    op & ~AUDIT_EQUAL ||
+	    op != Audit_equal ||
 	    rule->inode_f || rule->watch || rule->tree)
 		return -EINVAL;
 	rule->tree = alloc_tree(pathname);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 9fd85a4640a0..fbf24d121d97 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -86,6 +86,14 @@ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = {
 #error Fix audit_filter_list initialiser
 #endif
 };
+static struct list_head audit_rules_list[AUDIT_NR_FILTERS] = {
+	LIST_HEAD_INIT(audit_rules_list[0]),
+	LIST_HEAD_INIT(audit_rules_list[1]),
+	LIST_HEAD_INIT(audit_rules_list[2]),
+	LIST_HEAD_INIT(audit_rules_list[3]),
+	LIST_HEAD_INIT(audit_rules_list[4]),
+	LIST_HEAD_INIT(audit_rules_list[5]),
+};
 
 DEFINE_MUTEX(audit_filter_mutex);
 
@@ -244,7 +252,8 @@ static inline int audit_to_inode(struct audit_krule *krule,
 				 struct audit_field *f)
 {
 	if (krule->listnr != AUDIT_FILTER_EXIT ||
-	    krule->watch || krule->inode_f || krule->tree)
+	    krule->watch || krule->inode_f || krule->tree ||
+	    (f->op != Audit_equal && f->op != Audit_not_equal))
 		return -EINVAL;
 
 	krule->inode_f = f;
@@ -262,7 +271,7 @@ static int audit_to_watch(struct audit_krule *krule, char *path, int len,
 
 	if (path[0] != '/' || path[len-1] == '/' ||
 	    krule->listnr != AUDIT_FILTER_EXIT ||
-	    op & ~AUDIT_EQUAL ||
+	    op != Audit_equal ||
 	    krule->inode_f || krule->watch || krule->tree)
 		return -EINVAL;
 
@@ -412,12 +421,32 @@ exit_err:
 	return ERR_PTR(err);
 }
 
+static u32 audit_ops[] =
+{
+	[Audit_equal] = AUDIT_EQUAL,
+	[Audit_not_equal] = AUDIT_NOT_EQUAL,
+	[Audit_bitmask] = AUDIT_BIT_MASK,
+	[Audit_bittest] = AUDIT_BIT_TEST,
+	[Audit_lt] = AUDIT_LESS_THAN,
+	[Audit_gt] = AUDIT_GREATER_THAN,
+	[Audit_le] = AUDIT_LESS_THAN_OR_EQUAL,
+	[Audit_ge] = AUDIT_GREATER_THAN_OR_EQUAL,
+};
+
+static u32 audit_to_op(u32 op)
+{
+	u32 n;
+	for (n = Audit_equal; n < Audit_bad && audit_ops[n] != op; n++)
+		;
+	return n;
+}
+
+
 /* Translate struct audit_rule to kernel's rule respresentation.
  * Exists for backward compatibility with userspace. */
 static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 {
 	struct audit_entry *entry;
-	struct audit_field *ino_f;
 	int err = 0;
 	int i;
 
@@ -427,12 +456,28 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 
 	for (i = 0; i < rule->field_count; i++) {
 		struct audit_field *f = &entry->rule.fields[i];
+		u32 n;
+
+		n = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
+
+		/* Support for legacy operators where
+		 * AUDIT_NEGATE bit signifies != and otherwise assumes == */
+		if (n & AUDIT_NEGATE)
+			f->op = Audit_not_equal;
+		else if (!n)
+			f->op = Audit_equal;
+		else
+			f->op = audit_to_op(n);
+
+		entry->rule.vers_ops = (n & AUDIT_OPERATORS) ? 2 : 1;
 
-		f->op = rule->fields[i] & (AUDIT_NEGATE|AUDIT_OPERATORS);
 		f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS);
 		f->val = rule->values[i];
 
 		err = -EINVAL;
+		if (f->op == Audit_bad)
+			goto exit_free;
+
 		switch(f->type) {
 		default:
 			goto exit_free;
@@ -454,11 +499,8 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 		case AUDIT_EXIT:
 		case AUDIT_SUCCESS:
 			/* bit ops are only useful on syscall args */
-			if (f->op == AUDIT_BIT_MASK ||
-						f->op == AUDIT_BIT_TEST) {
-				err = -EINVAL;
+			if (f->op == Audit_bitmask || f->op == Audit_bittest)
 				goto exit_free;
-			}
 			break;
 		case AUDIT_ARG0:
 		case AUDIT_ARG1:
@@ -467,11 +509,8 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 			break;
 		/* arch is only allowed to be = or != */
 		case AUDIT_ARCH:
-			if ((f->op != AUDIT_NOT_EQUAL) && (f->op != AUDIT_EQUAL)
-					&& (f->op != AUDIT_NEGATE) && (f->op)) {
-				err = -EINVAL;
+			if (f->op != Audit_not_equal && f->op != Audit_equal)
 				goto exit_free;
-			}
 			entry->rule.arch_f = f;
 			break;
 		case AUDIT_PERM:
@@ -488,33 +527,10 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 				goto exit_free;
 			break;
 		}
-
-		entry->rule.vers_ops = (f->op & AUDIT_OPERATORS) ? 2 : 1;
-
-		/* Support for legacy operators where
-		 * AUDIT_NEGATE bit signifies != and otherwise assumes == */
-		if (f->op & AUDIT_NEGATE)
-			f->op = AUDIT_NOT_EQUAL;
-		else if (!f->op)
-			f->op = AUDIT_EQUAL;
-		else if (f->op == AUDIT_OPERATORS) {
-			err = -EINVAL;
-			goto exit_free;
-		}
 	}
 
-	ino_f = entry->rule.inode_f;
-	if (ino_f) {
-		switch(ino_f->op) {
-		case AUDIT_NOT_EQUAL:
-			entry->rule.inode_f = NULL;
-		case AUDIT_EQUAL:
-			break;
-		default:
-			err = -EINVAL;
-			goto exit_free;
-		}
-	}
+	if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)
+		entry->rule.inode_f = NULL;
 
 exit_nofree:
 	return entry;
@@ -530,7 +546,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 {
 	int err = 0;
 	struct audit_entry *entry;
-	struct audit_field *ino_f;
 	void *bufp;
 	size_t remain = datasz - sizeof(struct audit_rule_data);
 	int i;
@@ -546,11 +561,11 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 		struct audit_field *f = &entry->rule.fields[i];
 
 		err = -EINVAL;
-		if (!(data->fieldflags[i] & AUDIT_OPERATORS) ||
-		    data->fieldflags[i] & ~AUDIT_OPERATORS)
+
+		f->op = audit_to_op(data->fieldflags[i]);
+		if (f->op == Audit_bad)
 			goto exit_free;
 
-		f->op = data->fieldflags[i] & AUDIT_OPERATORS;
 		f->type = data->fields[i];
 		f->val = data->values[i];
 		f->lsm_str = NULL;
@@ -662,18 +677,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 		}
 	}
 
-	ino_f = entry->rule.inode_f;
-	if (ino_f) {
-		switch(ino_f->op) {
-		case AUDIT_NOT_EQUAL:
-			entry->rule.inode_f = NULL;
-		case AUDIT_EQUAL:
-			break;
-		default:
-			err = -EINVAL;
-			goto exit_free;
-		}
-	}
+	if (entry->rule.inode_f && entry->rule.inode_f->op == Audit_not_equal)
+		entry->rule.inode_f = NULL;
 
 exit_nofree:
 	return entry;
@@ -713,10 +718,10 @@ static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule)
 		rule->fields[i] = krule->fields[i].type;
 
 		if (krule->vers_ops == 1) {
-			if (krule->fields[i].op & AUDIT_NOT_EQUAL)
+			if (krule->fields[i].op == Audit_not_equal)
 				rule->fields[i] |= AUDIT_NEGATE;
 		} else {
-			rule->fields[i] |= krule->fields[i].op;
+			rule->fields[i] |= audit_ops[krule->fields[i].op];
 		}
 	}
 	for (i = 0; i < AUDIT_BITMASK_SIZE; i++) rule->mask[i] = krule->mask[i];
@@ -744,7 +749,7 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 		struct audit_field *f = &krule->fields[i];
 
 		data->fields[i] = f->type;
-		data->fieldflags[i] = f->op;
+		data->fieldflags[i] = audit_ops[f->op];
 		switch(f->type) {
 		case AUDIT_SUBJ_USER:
 		case AUDIT_SUBJ_ROLE:
@@ -919,6 +924,7 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old,
 	new->action = old->action;
 	for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
 		new->mask[i] = old->mask[i];
+	new->prio = old->prio;
 	new->buflen = old->buflen;
 	new->inode_f = old->inode_f;
 	new->watch = NULL;
@@ -987,9 +993,8 @@ static void audit_update_watch(struct audit_parent *parent,
 
 	/* If the update involves invalidating rules, do the inode-based
 	 * filtering now, so we don't omit records. */
-	if (invalidating && current->audit_context &&
-	    audit_filter_inodes(current, current->audit_context) == AUDIT_RECORD_CONTEXT)
-		audit_set_auditable(current->audit_context);
+	if (invalidating && current->audit_context)
+		audit_filter_inodes(current, current->audit_context);
 
 	nwatch = audit_dupe_watch(owatch);
 	if (IS_ERR(nwatch)) {
@@ -1007,12 +1012,15 @@ static void audit_update_watch(struct audit_parent *parent,
 		list_del_rcu(&oentry->list);
 
 		nentry = audit_dupe_rule(&oentry->rule, nwatch);
-		if (IS_ERR(nentry))
+		if (IS_ERR(nentry)) {
+			list_del(&oentry->rule.list);
 			audit_panic("error updating watch, removing");
-		else {
+		} else {
 			int h = audit_hash_ino((u32)ino);
 			list_add(&nentry->rule.rlist, &nwatch->rules);
 			list_add_rcu(&nentry->list, &audit_inode_hash[h]);
+			list_replace(&oentry->rule.list,
+				     &nentry->rule.list);
 		}
 
 		call_rcu(&oentry->rcu, audit_free_rule_rcu);
@@ -1077,6 +1085,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 			audit_log_end(ab);
 		}
 		list_del(&r->rlist);
+		list_del(&r->list);
 		list_del_rcu(&e->list);
 		call_rcu(&e->rcu, audit_free_rule_rcu);
 	}
@@ -1102,12 +1111,16 @@ static void audit_inotify_unregister(struct list_head *in_list)
 /* Find an existing audit rule.
  * Caller must hold audit_filter_mutex to prevent stale rule data. */
 static struct audit_entry *audit_find_rule(struct audit_entry *entry,
-					   struct list_head *list)
+					   struct list_head **p)
 {
 	struct audit_entry *e, *found = NULL;
+	struct list_head *list;
 	int h;
 
-	if (entry->rule.watch) {
+	if (entry->rule.inode_f) {
+		h = audit_hash_ino(entry->rule.inode_f->val);
+		*p = list = &audit_inode_hash[h];
+	} else if (entry->rule.watch) {
 		/* we don't know the inode number, so must walk entire hash */
 		for (h = 0; h < AUDIT_INODE_BUCKETS; h++) {
 			list = &audit_inode_hash[h];
@@ -1118,6 +1131,8 @@ static struct audit_entry *audit_find_rule(struct audit_entry *entry,
 			}
 		}
 		goto out;
+	} else {
+		*p = list = &audit_filter_list[entry->rule.listnr];
 	}
 
 	list_for_each_entry(e, list, list)
@@ -1258,15 +1273,17 @@ static int audit_add_watch(struct audit_krule *krule, struct nameidata *ndp,
 	return ret;
 }
 
+static u64 prio_low = ~0ULL/2;
+static u64 prio_high = ~0ULL/2 - 1;
+
 /* Add rule to given filterlist if not a duplicate. */
-static inline int audit_add_rule(struct audit_entry *entry,
-				 struct list_head *list)
+static inline int audit_add_rule(struct audit_entry *entry)
 {
 	struct audit_entry *e;
-	struct audit_field *inode_f = entry->rule.inode_f;
 	struct audit_watch *watch = entry->rule.watch;
 	struct audit_tree *tree = entry->rule.tree;
 	struct nameidata *ndp = NULL, *ndw = NULL;
+	struct list_head *list;
 	int h, err;
 #ifdef CONFIG_AUDITSYSCALL
 	int dont_count = 0;
@@ -1277,13 +1294,8 @@ static inline int audit_add_rule(struct audit_entry *entry,
 		dont_count = 1;
 #endif
 
-	if (inode_f) {
-		h = audit_hash_ino(inode_f->val);
-		list = &audit_inode_hash[h];
-	}
-
 	mutex_lock(&audit_filter_mutex);
-	e = audit_find_rule(entry, list);
+	e = audit_find_rule(entry, &list);
 	mutex_unlock(&audit_filter_mutex);
 	if (e) {
 		err = -EEXIST;
@@ -1319,10 +1331,22 @@ static inline int audit_add_rule(struct audit_entry *entry,
 		}
 	}
 
+	entry->rule.prio = ~0ULL;
+	if (entry->rule.listnr == AUDIT_FILTER_EXIT) {
+		if (entry->rule.flags & AUDIT_FILTER_PREPEND)
+			entry->rule.prio = ++prio_high;
+		else
+			entry->rule.prio = --prio_low;
+	}
+
 	if (entry->rule.flags & AUDIT_FILTER_PREPEND) {
+		list_add(&entry->rule.list,
+			 &audit_rules_list[entry->rule.listnr]);
 		list_add_rcu(&entry->list, list);
 		entry->rule.flags &= ~AUDIT_FILTER_PREPEND;
 	} else {
+		list_add_tail(&entry->rule.list,
+			      &audit_rules_list[entry->rule.listnr]);
 		list_add_tail_rcu(&entry->list, list);
 	}
 #ifdef CONFIG_AUDITSYSCALL
@@ -1345,15 +1369,14 @@ error:
 }
 
 /* Remove an existing rule from filterlist. */
-static inline int audit_del_rule(struct audit_entry *entry,
-				 struct list_head *list)
+static inline int audit_del_rule(struct audit_entry *entry)
 {
 	struct audit_entry *e;
-	struct audit_field *inode_f = entry->rule.inode_f;
 	struct audit_watch *watch, *tmp_watch = entry->rule.watch;
 	struct audit_tree *tree = entry->rule.tree;
+	struct list_head *list;
 	LIST_HEAD(inotify_list);
-	int h, ret = 0;
+	int ret = 0;
 #ifdef CONFIG_AUDITSYSCALL
 	int dont_count = 0;
 
@@ -1363,13 +1386,8 @@ static inline int audit_del_rule(struct audit_entry *entry,
 		dont_count = 1;
 #endif
 
-	if (inode_f) {
-		h = audit_hash_ino(inode_f->val);
-		list = &audit_inode_hash[h];
-	}
-
 	mutex_lock(&audit_filter_mutex);
-	e = audit_find_rule(entry, list);
+	e = audit_find_rule(entry, &list);
 	if (!e) {
 		mutex_unlock(&audit_filter_mutex);
 		ret = -ENOENT;
@@ -1404,6 +1422,7 @@ static inline int audit_del_rule(struct audit_entry *entry,
 		audit_remove_tree_rule(&e->rule);
 
 	list_del_rcu(&e->list);
+	list_del(&e->rule.list);
 	call_rcu(&e->rcu, audit_free_rule_rcu);
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -1432,30 +1451,16 @@ out:
 static void audit_list(int pid, int seq, struct sk_buff_head *q)
 {
 	struct sk_buff *skb;
-	struct audit_entry *entry;
+	struct audit_krule *r;
 	int i;
 
 	/* This is a blocking read, so use audit_filter_mutex instead of rcu
 	 * iterator to sync with list writers. */
 	for (i=0; i<AUDIT_NR_FILTERS; i++) {
-		list_for_each_entry(entry, &audit_filter_list[i], list) {
-			struct audit_rule *rule;
-
-			rule = audit_krule_to_rule(&entry->rule);
-			if (unlikely(!rule))
-				break;
-			skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1,
-					       rule, sizeof(*rule));
-			if (skb)
-				skb_queue_tail(q, skb);
-			kfree(rule);
-		}
-	}
-	for (i = 0; i < AUDIT_INODE_BUCKETS; i++) {
-		list_for_each_entry(entry, &audit_inode_hash[i], list) {
+		list_for_each_entry(r, &audit_rules_list[i], list) {
 			struct audit_rule *rule;
 
-			rule = audit_krule_to_rule(&entry->rule);
+			rule = audit_krule_to_rule(r);
 			if (unlikely(!rule))
 				break;
 			skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1,
@@ -1474,30 +1479,16 @@ static void audit_list(int pid, int seq, struct sk_buff_head *q)
 static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 {
 	struct sk_buff *skb;
-	struct audit_entry *e;
+	struct audit_krule *r;
 	int i;
 
 	/* This is a blocking read, so use audit_filter_mutex instead of rcu
 	 * iterator to sync with list writers. */
 	for (i=0; i<AUDIT_NR_FILTERS; i++) {
-		list_for_each_entry(e, &audit_filter_list[i], list) {
-			struct audit_rule_data *data;
-
-			data = audit_krule_to_data(&e->rule);
-			if (unlikely(!data))
-				break;
-			skb = audit_make_reply(pid, seq, AUDIT_LIST_RULES, 0, 1,
-					       data, sizeof(*data) + data->buflen);
-			if (skb)
-				skb_queue_tail(q, skb);
-			kfree(data);
-		}
-	}
-	for (i=0; i< AUDIT_INODE_BUCKETS; i++) {
-		list_for_each_entry(e, &audit_inode_hash[i], list) {
+		list_for_each_entry(r, &audit_rules_list[i], list) {
 			struct audit_rule_data *data;
 
-			data = audit_krule_to_data(&e->rule);
+			data = audit_krule_to_data(r);
 			if (unlikely(!data))
 				break;
 			skb = audit_make_reply(pid, seq, AUDIT_LIST_RULES, 0, 1,
@@ -1603,8 +1594,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 	if (IS_ERR(entry))
 		return PTR_ERR(entry);
 
-	err = audit_add_rule(entry,
-			     &audit_filter_list[entry->rule.listnr]);
+	err = audit_add_rule(entry);
 	audit_log_rule_change(loginuid, sessionid, sid, "add",
 			      &entry->rule, !err);
 
@@ -1620,8 +1610,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 	if (IS_ERR(entry))
 		return PTR_ERR(entry);
 
-	err = audit_del_rule(entry,
-			     &audit_filter_list[entry->rule.listnr]);
+	err = audit_del_rule(entry);
 	audit_log_rule_change(loginuid, sessionid, sid, "remove",
 			      &entry->rule, !err);
 
@@ -1634,28 +1623,29 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 	return err;
 }
 
-int audit_comparator(const u32 left, const u32 op, const u32 right)
+int audit_comparator(u32 left, u32 op, u32 right)
 {
 	switch (op) {
-	case AUDIT_EQUAL:
+	case Audit_equal:
 		return (left == right);
-	case AUDIT_NOT_EQUAL:
+	case Audit_not_equal:
 		return (left != right);
-	case AUDIT_LESS_THAN:
+	case Audit_lt:
 		return (left < right);
-	case AUDIT_LESS_THAN_OR_EQUAL:
+	case Audit_le:
 		return (left <= right);
-	case AUDIT_GREATER_THAN:
+	case Audit_gt:
 		return (left > right);
-	case AUDIT_GREATER_THAN_OR_EQUAL:
+	case Audit_ge:
 		return (left >= right);
-	case AUDIT_BIT_MASK:
+	case Audit_bitmask:
 		return (left & right);
-	case AUDIT_BIT_TEST:
+	case Audit_bittest:
 		return ((left & right) == right);
+	default:
+		BUG();
+		return 0;
 	}
-	BUG();
-	return 0;
 }
 
 /* Compare given dentry name with last component in given path,
@@ -1778,6 +1768,43 @@ unlock_and_return:
 	return result;
 }
 
+static int update_lsm_rule(struct audit_krule *r)
+{
+	struct audit_entry *entry = container_of(r, struct audit_entry, rule);
+	struct audit_entry *nentry;
+	struct audit_watch *watch;
+	struct audit_tree *tree;
+	int err = 0;
+
+	if (!security_audit_rule_known(r))
+		return 0;
+
+	watch = r->watch;
+	tree = r->tree;
+	nentry = audit_dupe_rule(r, watch);
+	if (IS_ERR(nentry)) {
+		/* save the first error encountered for the
+		 * return value */
+		err = PTR_ERR(nentry);
+		audit_panic("error updating LSM filters");
+		if (watch)
+			list_del(&r->rlist);
+		list_del_rcu(&entry->list);
+		list_del(&r->list);
+	} else {
+		if (watch) {
+			list_add(&nentry->rule.rlist, &watch->rules);
+			list_del(&r->rlist);
+		} else if (tree)
+			list_replace_init(&r->rlist, &nentry->rule.rlist);
+		list_replace_rcu(&entry->list, &nentry->list);
+		list_replace(&r->list, &nentry->rule.list);
+	}
+	call_rcu(&entry->rcu, audit_free_rule_rcu);
+
+	return err;
+}
+
 /* This function will re-initialize the lsm_rule field of all applicable rules.
  * It will traverse the filter lists serarching for rules that contain LSM
  * specific filter fields. When such a rule is found, it is copied, the
@@ -1785,45 +1812,19 @@ unlock_and_return:
  * updated rule. */
 int audit_update_lsm_rules(void)
 {
-	struct audit_entry *entry, *n, *nentry;
-	struct audit_watch *watch;
-	struct audit_tree *tree;
+	struct audit_krule *r, *n;
 	int i, err = 0;
 
 	/* audit_filter_mutex synchronizes the writers */
 	mutex_lock(&audit_filter_mutex);
 
 	for (i = 0; i < AUDIT_NR_FILTERS; i++) {
-		list_for_each_entry_safe(entry, n, &audit_filter_list[i], list) {
-			if (!security_audit_rule_known(&entry->rule))
-				continue;
-
-			watch = entry->rule.watch;
-			tree = entry->rule.tree;
-			nentry = audit_dupe_rule(&entry->rule, watch);
-			if (IS_ERR(nentry)) {
-				/* save the first error encountered for the
-				 * return value */
-				if (!err)
-					err = PTR_ERR(nentry);
-				audit_panic("error updating LSM filters");
-				if (watch)
-					list_del(&entry->rule.rlist);
-				list_del_rcu(&entry->list);
-			} else {
-				if (watch) {
-					list_add(&nentry->rule.rlist,
-						 &watch->rules);
-					list_del(&entry->rule.rlist);
-				} else if (tree)
-					list_replace_init(&entry->rule.rlist,
-						&nentry->rule.rlist);
-				list_replace_rcu(&entry->list, &nentry->list);
-			}
-			call_rcu(&entry->rcu, audit_free_rule_rcu);
+		list_for_each_entry_safe(r, n, &audit_rules_list[i], list) {
+			int res = update_lsm_rule(r);
+			if (!err)
+				err = res;
 		}
 	}
-
 	mutex_unlock(&audit_filter_mutex);
 
 	return err;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 4819f3711973..8cbddff6c283 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -124,43 +124,6 @@ struct audit_aux_data {
 /* Number of target pids per aux struct. */
 #define AUDIT_AUX_PIDS	16
 
-struct audit_aux_data_mq_open {
-	struct audit_aux_data	d;
-	int			oflag;
-	mode_t			mode;
-	struct mq_attr		attr;
-};
-
-struct audit_aux_data_mq_sendrecv {
-	struct audit_aux_data	d;
-	mqd_t			mqdes;
-	size_t			msg_len;
-	unsigned int		msg_prio;
-	struct timespec		abs_timeout;
-};
-
-struct audit_aux_data_mq_notify {
-	struct audit_aux_data	d;
-	mqd_t			mqdes;
-	struct sigevent		notification;
-};
-
-struct audit_aux_data_mq_getsetattr {
-	struct audit_aux_data	d;
-	mqd_t			mqdes;
-	struct mq_attr		mqstat;
-};
-
-struct audit_aux_data_ipcctl {
-	struct audit_aux_data	d;
-	struct ipc_perm		p;
-	unsigned long		qbytes;
-	uid_t			uid;
-	gid_t			gid;
-	mode_t			mode;
-	u32			osid;
-};
-
 struct audit_aux_data_execve {
 	struct audit_aux_data	d;
 	int argc;
@@ -168,23 +131,6 @@ struct audit_aux_data_execve {
 	struct mm_struct *mm;
 };
 
-struct audit_aux_data_socketcall {
-	struct audit_aux_data	d;
-	int			nargs;
-	unsigned long		args[0];
-};
-
-struct audit_aux_data_sockaddr {
-	struct audit_aux_data	d;
-	int			len;
-	char			a[0];
-};
-
-struct audit_aux_data_fd_pair {
-	struct audit_aux_data	d;
-	int			fd[2];
-};
-
 struct audit_aux_data_pids {
 	struct audit_aux_data	d;
 	pid_t			target_pid[AUDIT_AUX_PIDS];
@@ -219,14 +165,14 @@ struct audit_tree_refs {
 struct audit_context {
 	int		    dummy;	/* must be the first element */
 	int		    in_syscall;	/* 1 if task is in a syscall */
-	enum audit_state    state;
+	enum audit_state    state, current_state;
 	unsigned int	    serial;     /* serial number for record */
 	struct timespec	    ctime;      /* time of syscall entry */
 	int		    major;      /* syscall number */
 	unsigned long	    argv[4];    /* syscall arguments */
 	int		    return_valid; /* return code is valid */
 	long		    return_code;/* syscall return code */
-	int		    auditable;  /* 1 if record should be written */
+	u64		    prio;
 	int		    name_count;
 	struct audit_names  names[AUDIT_NAMES];
 	char *		    filterkey;	/* key for rule that triggered record */
@@ -234,7 +180,8 @@ struct audit_context {
 	struct audit_context *previous; /* For nested syscalls */
 	struct audit_aux_data *aux;
 	struct audit_aux_data *aux_pids;
-
+	struct sockaddr_storage *sockaddr;
+	size_t sockaddr_len;
 	/* Save things to print about task_struct */
 	pid_t		    pid, ppid;
 	uid_t		    uid, euid, suid, fsuid;
@@ -252,6 +199,49 @@ struct audit_context {
 	struct audit_tree_refs *trees, *first_trees;
 	int tree_count;
 
+	int type;
+	union {
+		struct {
+			int nargs;
+			long args[6];
+		} socketcall;
+		struct {
+			uid_t			uid;
+			gid_t			gid;
+			mode_t			mode;
+			u32			osid;
+			int			has_perm;
+			uid_t			perm_uid;
+			gid_t			perm_gid;
+			mode_t			perm_mode;
+			unsigned long		qbytes;
+		} ipc;
+		struct {
+			mqd_t			mqdes;
+			struct mq_attr		mqstat;
+		} mq_getsetattr;
+		struct {
+			mqd_t			mqdes;
+			int			sigev_signo;
+		} mq_notify;
+		struct {
+			mqd_t			mqdes;
+			size_t			msg_len;
+			unsigned int		msg_prio;
+			struct timespec		abs_timeout;
+		} mq_sendrecv;
+		struct {
+			int			oflag;
+			mode_t			mode;
+			struct mq_attr		attr;
+		} mq_open;
+		struct {
+			pid_t			pid;
+			struct audit_cap_data	cap;
+		} capset;
+	};
+	int fds[2];
+
 #if AUDIT_DEBUG
 	int		    put_count;
 	int		    ino_count;
@@ -608,19 +598,12 @@ static int audit_filter_rules(struct task_struct *tsk,
 				}
 			}
 			/* Find ipc objects that match */
-			if (ctx) {
-				struct audit_aux_data *aux;
-				for (aux = ctx->aux; aux;
-				     aux = aux->next) {
-					if (aux->type == AUDIT_IPC) {
-						struct audit_aux_data_ipcctl *axi = (void *)aux;
-						if (security_audit_rule_match(axi->osid, f->type, f->op, f->lsm_rule, ctx)) {
-							++result;
-							break;
-						}
-					}
-				}
-			}
+			if (!ctx || ctx->type != AUDIT_IPC)
+				break;
+			if (security_audit_rule_match(ctx->ipc.osid,
+						      f->type, f->op,
+						      f->lsm_rule, ctx))
+				++result;
 		}
 			break;
 		case AUDIT_ARG0:
@@ -647,8 +630,16 @@ static int audit_filter_rules(struct task_struct *tsk,
 			return 0;
 		}
 	}
-	if (rule->filterkey && ctx)
-		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
+
+	if (ctx) {
+		if (rule->prio <= ctx->prio)
+			return 0;
+		if (rule->filterkey) {
+			kfree(ctx->filterkey);
+			ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
+		}
+		ctx->prio = rule->prio;
+	}
 	switch (rule->action) {
 	case AUDIT_NEVER:    *state = AUDIT_DISABLED;	    break;
 	case AUDIT_ALWAYS:   *state = AUDIT_RECORD_CONTEXT; break;
@@ -661,7 +652,7 @@ static int audit_filter_rules(struct task_struct *tsk,
  * completely disabled for this task.  Since we only have the task
  * structure at this point, we can only check uid and gid.
  */
-static enum audit_state audit_filter_task(struct task_struct *tsk)
+static enum audit_state audit_filter_task(struct task_struct *tsk, char **key)
 {
 	struct audit_entry *e;
 	enum audit_state   state;
@@ -669,6 +660,8 @@ static enum audit_state audit_filter_task(struct task_struct *tsk)
 	rcu_read_lock();
 	list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) {
 		if (audit_filter_rules(tsk, &e->rule, NULL, NULL, &state)) {
+			if (state == AUDIT_RECORD_CONTEXT)
+				*key = kstrdup(e->rule.filterkey, GFP_ATOMIC);
 			rcu_read_unlock();
 			return state;
 		}
@@ -702,6 +695,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
 			    audit_filter_rules(tsk, &e->rule, ctx, NULL,
 					       &state)) {
 				rcu_read_unlock();
+				ctx->current_state = state;
 				return state;
 			}
 		}
@@ -715,15 +709,14 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
  * buckets applicable to the inode numbers in audit_names[].
 * Regarding audit_state, same rules apply as for audit_filter_syscall().
 */
-enum audit_state audit_filter_inodes(struct task_struct *tsk,
-				     struct audit_context *ctx)
+void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
 {
 	int i;
 	struct audit_entry *e;
 	enum audit_state state;
 
 	if (audit_pid && tsk->tgid == audit_pid)
-		return AUDIT_DISABLED;
+		return;
 
 	rcu_read_lock();
 	for (i = 0; i < ctx->name_count; i++) {
@@ -740,17 +733,20 @@ enum audit_state audit_filter_inodes(struct task_struct *tsk,
 			if ((e->rule.mask[word] & bit) == bit &&
 			    audit_filter_rules(tsk, &e->rule, ctx, n, &state)) {
 				rcu_read_unlock();
-				return state;
+				ctx->current_state = state;
+				return;
 			}
 		}
 	}
 	rcu_read_unlock();
-	return AUDIT_BUILD_CONTEXT;
 }
 
-void audit_set_auditable(struct audit_context *ctx)
+static void audit_set_auditable(struct audit_context *ctx)
 {
-	ctx->auditable = 1;
+	if (!ctx->prio) {
+		ctx->prio = 1;
+		ctx->current_state = AUDIT_RECORD_CONTEXT;
+	}
 }
 
 static inline struct audit_context *audit_get_context(struct task_struct *tsk,
@@ -781,23 +777,11 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
 	else
 		context->return_code  = return_code;
 
-	if (context->in_syscall && !context->dummy && !context->auditable) {
-		enum audit_state state;
-
-		state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]);
-		if (state == AUDIT_RECORD_CONTEXT) {
-			context->auditable = 1;
-			goto get_context;
-		}
-
-		state = audit_filter_inodes(tsk, context);
-		if (state == AUDIT_RECORD_CONTEXT)
-			context->auditable = 1;
-
+	if (context->in_syscall && !context->dummy) {
+		audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]);
+		audit_filter_inodes(tsk, context);
 	}
 
-get_context:
-
 	tsk->audit_context = NULL;
 	return context;
 }
@@ -807,8 +791,7 @@ static inline void audit_free_names(struct audit_context *context)
 	int i;
 
 #if AUDIT_DEBUG == 2
-	if (context->auditable
-	    ||context->put_count + context->ino_count != context->name_count) {
+	if (context->put_count + context->ino_count != context->name_count) {
 		printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
 		       " name_count=%d put_count=%d"
 		       " ino_count=%d [NOT freeing]\n",
@@ -859,6 +842,7 @@ static inline void audit_zero_context(struct audit_context *context,
 {
 	memset(context, 0, sizeof(*context));
 	context->state      = state;
+	context->prio = state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
 }
 
 static inline struct audit_context *audit_alloc_context(enum audit_state state)
@@ -884,18 +868,21 @@ int audit_alloc(struct task_struct *tsk)
 {
 	struct audit_context *context;
 	enum audit_state     state;
+	char *key = NULL;
 
 	if (likely(!audit_ever_enabled))
 		return 0; /* Return if not auditing. */
 
-	state = audit_filter_task(tsk);
+	state = audit_filter_task(tsk, &key);
 	if (likely(state == AUDIT_DISABLED))
 		return 0;
 
 	if (!(context = audit_alloc_context(state))) {
+		kfree(key);
 		audit_log_lost("out of memory in audit_alloc");
 		return -ENOMEM;
 	}
+	context->filterkey = key;
 
 	tsk->audit_context  = context;
 	set_tsk_thread_flag(tsk, TIF_SYSCALL_AUDIT);
@@ -921,6 +908,7 @@ static inline void audit_free_context(struct audit_context *context)
 		free_tree_refs(context);
 		audit_free_aux(context);
 		kfree(context->filterkey);
+		kfree(context->sockaddr);
 		kfree(context);
 		context  = previous;
 	} while (context);
@@ -1230,6 +1218,97 @@ static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
 	audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
 }
 
+static void show_special(struct audit_context *context, int *call_panic)
+{
+	struct audit_buffer *ab;
+	int i;
+
+	ab = audit_log_start(context, GFP_KERNEL, context->type);
+	if (!ab)
+		return;
+
+	switch (context->type) {
+	case AUDIT_SOCKETCALL: {
+		int nargs = context->socketcall.nargs;
+		audit_log_format(ab, "nargs=%d", nargs);
+		for (i = 0; i < nargs; i++)
+			audit_log_format(ab, " a%d=%lx", i,
+				context->socketcall.args[i]);
+		break; }
+	case AUDIT_IPC: {
+		u32 osid = context->ipc.osid;
+
+		audit_log_format(ab, "ouid=%u ogid=%u mode=%#o",
+			 context->ipc.uid, context->ipc.gid, context->ipc.mode);
+		if (osid) {
+			char *ctx = NULL;
+			u32 len;
+			if (security_secid_to_secctx(osid, &ctx, &len)) {
+				audit_log_format(ab, " osid=%u", osid);
+				*call_panic = 1;
+			} else {
+				audit_log_format(ab, " obj=%s", ctx);
+				security_release_secctx(ctx, len);
+			}
+		}
+		if (context->ipc.has_perm) {
+			audit_log_end(ab);
+			ab = audit_log_start(context, GFP_KERNEL,
+					     AUDIT_IPC_SET_PERM);
+			audit_log_format(ab,
+				"qbytes=%lx ouid=%u ogid=%u mode=%#o",
+				context->ipc.qbytes,
+				context->ipc.perm_uid,
+				context->ipc.perm_gid,
+				context->ipc.perm_mode);
+			if (!ab)
+				return;
+		}
+		break; }
+	case AUDIT_MQ_OPEN: {
+		audit_log_format(ab,
+			"oflag=0x%x mode=%#o mq_flags=0x%lx mq_maxmsg=%ld "
+			"mq_msgsize=%ld mq_curmsgs=%ld",
+			context->mq_open.oflag, context->mq_open.mode,
+			context->mq_open.attr.mq_flags,
+			context->mq_open.attr.mq_maxmsg,
+			context->mq_open.attr.mq_msgsize,
+			context->mq_open.attr.mq_curmsgs);
+		break; }
+	case AUDIT_MQ_SENDRECV: {
+		audit_log_format(ab,
+			"mqdes=%d msg_len=%zd msg_prio=%u "
+			"abs_timeout_sec=%ld abs_timeout_nsec=%ld",
+			context->mq_sendrecv.mqdes,
+			context->mq_sendrecv.msg_len,
+			context->mq_sendrecv.msg_prio,
+			context->mq_sendrecv.abs_timeout.tv_sec,
+			context->mq_sendrecv.abs_timeout.tv_nsec);
+		break; }
+	case AUDIT_MQ_NOTIFY: {
+		audit_log_format(ab, "mqdes=%d sigev_signo=%d",
+				context->mq_notify.mqdes,
+				context->mq_notify.sigev_signo);
+		break; }
+	case AUDIT_MQ_GETSETATTR: {
+		struct mq_attr *attr = &context->mq_getsetattr.mqstat;
+		audit_log_format(ab,
+			"mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld "
+			"mq_curmsgs=%ld ",
+			context->mq_getsetattr.mqdes,
+			attr->mq_flags, attr->mq_maxmsg,
+			attr->mq_msgsize, attr->mq_curmsgs);
+		break; }
+	case AUDIT_CAPSET: {
+		audit_log_format(ab, "pid=%d", context->capset.pid);
+		audit_log_cap(ab, "cap_pi", &context->capset.cap.inheritable);
+		audit_log_cap(ab, "cap_pp", &context->capset.cap.permitted);
+		audit_log_cap(ab, "cap_pe", &context->capset.cap.effective);
+		break; }
+	}
+	audit_log_end(ab);
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
 	const struct cred *cred;
@@ -1307,94 +1386,12 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 			continue; /* audit_panic has been called */
 
 		switch (aux->type) {
-		case AUDIT_MQ_OPEN: {
-			struct audit_aux_data_mq_open *axi = (void *)aux;
-			audit_log_format(ab,
-				"oflag=0x%x mode=%#o mq_flags=0x%lx mq_maxmsg=%ld "
-				"mq_msgsize=%ld mq_curmsgs=%ld",
-				axi->oflag, axi->mode, axi->attr.mq_flags,
-				axi->attr.mq_maxmsg, axi->attr.mq_msgsize,
-				axi->attr.mq_curmsgs);
-			break; }
-
-		case AUDIT_MQ_SENDRECV: {
-			struct audit_aux_data_mq_sendrecv *axi = (void *)aux;
-			audit_log_format(ab,
-				"mqdes=%d msg_len=%zd msg_prio=%u "
-				"abs_timeout_sec=%ld abs_timeout_nsec=%ld",
-				axi->mqdes, axi->msg_len, axi->msg_prio,
-				axi->abs_timeout.tv_sec, axi->abs_timeout.tv_nsec);
-			break; }
-
-		case AUDIT_MQ_NOTIFY: {
-			struct audit_aux_data_mq_notify *axi = (void *)aux;
-			audit_log_format(ab,
-				"mqdes=%d sigev_signo=%d",
-				axi->mqdes,
-				axi->notification.sigev_signo);
-			break; }
-
-		case AUDIT_MQ_GETSETATTR: {
-			struct audit_aux_data_mq_getsetattr *axi = (void *)aux;
-			audit_log_format(ab,
-				"mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld "
-				"mq_curmsgs=%ld ",
-				axi->mqdes,
-				axi->mqstat.mq_flags, axi->mqstat.mq_maxmsg,
-				axi->mqstat.mq_msgsize, axi->mqstat.mq_curmsgs);
-			break; }
-
-		case AUDIT_IPC: {
-			struct audit_aux_data_ipcctl *axi = (void *)aux;
-			audit_log_format(ab,
-				 "ouid=%u ogid=%u mode=%#o",
-				 axi->uid, axi->gid, axi->mode);
-			if (axi->osid != 0) {
-				char *ctx = NULL;
-				u32 len;
-				if (security_secid_to_secctx(
-						axi->osid, &ctx, &len)) {
-					audit_log_format(ab, " osid=%u",
-							axi->osid);
-					call_panic = 1;
-				} else {
-					audit_log_format(ab, " obj=%s", ctx);
-					security_release_secctx(ctx, len);
-				}
-			}
-			break; }
-
-		case AUDIT_IPC_SET_PERM: {
-			struct audit_aux_data_ipcctl *axi = (void *)aux;
-			audit_log_format(ab,
-				"qbytes=%lx ouid=%u ogid=%u mode=%#o",
-				axi->qbytes, axi->uid, axi->gid, axi->mode);
-			break; }
 
 		case AUDIT_EXECVE: {
 			struct audit_aux_data_execve *axi = (void *)aux;
 			audit_log_execve_info(context, &ab, axi);
 			break; }
 
-		case AUDIT_SOCKETCALL: {
-			struct audit_aux_data_socketcall *axs = (void *)aux;
-			audit_log_format(ab, "nargs=%d", axs->nargs);
-			for (i=0; i<axs->nargs; i++)
-				audit_log_format(ab, " a%d=%lx", i, axs->args[i]);
-			break; }
-
-		case AUDIT_SOCKADDR: {
-			struct audit_aux_data_sockaddr *axs = (void *)aux;
-
-			audit_log_format(ab, "saddr=");
-			audit_log_n_hex(ab, axs->a, axs->len);
-			break; }
-
-		case AUDIT_FD_PAIR: {
-			struct audit_aux_data_fd_pair *axs = (void *)aux;
-			audit_log_format(ab, "fd0=%d fd1=%d", axs->fd[0], axs->fd[1]);
-			break; }
-
 		case AUDIT_BPRM_FCAPS: {
 			struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
 			audit_log_format(ab, "fver=%x", axs->fcap_ver);
@@ -1409,18 +1406,32 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 			audit_log_cap(ab, "new_pe", &axs->new_pcap.effective);
 			break; }
 
-		case AUDIT_CAPSET: {
-			struct audit_aux_data_capset *axs = (void *)aux;
-			audit_log_format(ab, "pid=%d", axs->pid);
-			audit_log_cap(ab, "cap_pi", &axs->cap.inheritable);
-			audit_log_cap(ab, "cap_pp", &axs->cap.permitted);
-			audit_log_cap(ab, "cap_pe", &axs->cap.effective);
-			break; }
-
 		}
 		audit_log_end(ab);
 	}
 
+	if (context->type)
+		show_special(context, &call_panic);
+
+	if (context->fds[0] >= 0) {
+		ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR);
+		if (ab) {
+			audit_log_format(ab, "fd0=%d fd1=%d",
+					context->fds[0], context->fds[1]);
+			audit_log_end(ab);
+		}
+	}
+
+	if (context->sockaddr_len) {
+		ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR);
+		if (ab) {
+			audit_log_format(ab, "saddr=");
+			audit_log_n_hex(ab, (void *)context->sockaddr,
+					context->sockaddr_len);
+			audit_log_end(ab);
+		}
+	}
+
 	for (aux = context->aux_pids; aux; aux = aux->next) {
 		struct audit_aux_data_pids *axs = (void *)aux;
 
@@ -1536,7 +1547,7 @@ void audit_free(struct task_struct *tsk)
 	 * We use GFP_ATOMIC here because we might be doing this
 	 * in the context of the idle thread */
 	/* that can happen only if we are called from do_exit() */
-	if (context->in_syscall && context->auditable)
+	if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
 		audit_log_exit(context, tsk);
 
 	audit_free_context(context);
@@ -1620,15 +1631,17 @@ void audit_syscall_entry(int arch, int major,
 
 	state = context->state;
 	context->dummy = !audit_n_rules;
-	if (!context->dummy && (state == AUDIT_SETUP_CONTEXT || state == AUDIT_BUILD_CONTEXT))
+	if (!context->dummy && state == AUDIT_BUILD_CONTEXT) {
+		context->prio = 0;
 		state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]);
+	}
 	if (likely(state == AUDIT_DISABLED))
 		return;
 
 	context->serial     = 0;
 	context->ctime      = CURRENT_TIME;
 	context->in_syscall = 1;
-	context->auditable  = !!(state == AUDIT_RECORD_CONTEXT);
+	context->current_state  = state;
 	context->ppid       = 0;
 }
 
@@ -1636,17 +1649,20 @@ void audit_finish_fork(struct task_struct *child)
 {
 	struct audit_context *ctx = current->audit_context;
 	struct audit_context *p = child->audit_context;
-	if (!p || !ctx || !ctx->auditable)
+	if (!p || !ctx)
+		return;
+	if (!ctx->in_syscall || ctx->current_state != AUDIT_RECORD_CONTEXT)
 		return;
 	p->arch = ctx->arch;
 	p->major = ctx->major;
 	memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
 	p->ctime = ctx->ctime;
 	p->dummy = ctx->dummy;
-	p->auditable = ctx->auditable;
 	p->in_syscall = ctx->in_syscall;
 	p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
 	p->ppid = current->pid;
+	p->prio = ctx->prio;
+	p->current_state = ctx->current_state;
 }
 
 /**
@@ -1670,11 +1686,11 @@ void audit_syscall_exit(int valid, long return_code)
 	if (likely(!context))
 		return;
 
-	if (context->in_syscall && context->auditable)
+	if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
 		audit_log_exit(context, tsk);
 
 	context->in_syscall = 0;
-	context->auditable  = 0;
+	context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
 
 	if (context->previous) {
 		struct audit_context *new_context = context->previous;
@@ -1689,8 +1705,13 @@ void audit_syscall_exit(int valid, long return_code)
 		context->aux_pids = NULL;
 		context->target_pid = 0;
 		context->target_sid = 0;
-		kfree(context->filterkey);
-		context->filterkey = NULL;
+		context->sockaddr_len = 0;
+		context->type = 0;
+		context->fds[0] = -1;
+		if (context->state != AUDIT_RECORD_CONTEXT) {
+			kfree(context->filterkey);
+			context->filterkey = NULL;
+		}
 		tsk->audit_context = context;
 	}
 }
@@ -2081,7 +2102,10 @@ int auditsc_get_stamp(struct audit_context *ctx,
 	t->tv_sec  = ctx->ctime.tv_sec;
 	t->tv_nsec = ctx->ctime.tv_nsec;
 	*serial    = ctx->serial;
-	ctx->auditable = 1;
+	if (!ctx->prio) {
+		ctx->prio = 1;
+		ctx->current_state = AUDIT_RECORD_CONTEXT;
+	}
 	return 1;
 }
 
@@ -2127,132 +2151,46 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
 * @mode: mode bits
 * @u_attr: queue attributes
 *
- * Returns 0 for success or NULL context or < 0 on error.
 */
-int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr)
+void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr)
 {
-	struct audit_aux_data_mq_open *ax;
 	struct audit_context *context = current->audit_context;
 
-	if (!audit_enabled)
-		return 0;
-
-	if (likely(!context))
-		return 0;
-
-	ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
+	if (attr)
+		memcpy(&context->mq_open.attr, attr, sizeof(struct mq_attr));
+	else
+		memset(&context->mq_open.attr, 0, sizeof(struct mq_attr));
2144 if (!ax)
2145 return -ENOMEM;
2146
2147 if (u_attr != NULL) {
2148 if (copy_from_user(&ax->attr, u_attr, sizeof(ax->attr))) {
2149 kfree(ax);
2150 return -EFAULT;
2151 }
2152 } else
2153 memset(&ax->attr, 0, sizeof(ax->attr));
2154 2163
2155 ax->oflag = oflag; 2164 context->mq_open.oflag = oflag;
2156 ax->mode = mode; 2165 context->mq_open.mode = mode;
2157 2166
2158 ax->d.type = AUDIT_MQ_OPEN; 2167 context->type = AUDIT_MQ_OPEN;
2159 ax->d.next = context->aux;
2160 context->aux = (void *)ax;
2161 return 0;
2162} 2168}
2163 2169
2164/** 2170/**
2165 * __audit_mq_timedsend - record audit data for a POSIX MQ timed send 2171 * __audit_mq_sendrecv - record audit data for a POSIX MQ timed send/receive
2166 * @mqdes: MQ descriptor 2172 * @mqdes: MQ descriptor
2167 * @msg_len: Message length 2173 * @msg_len: Message length
2168 * @msg_prio: Message priority 2174 * @msg_prio: Message priority
2169 * @u_abs_timeout: Message timeout in absolute time 2175 * @abs_timeout: Message timeout in absolute time
2170 * 2176 *
2171 * Returns 0 for success or NULL context or < 0 on error.
2172 */ 2177 */
2173int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, 2178void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
2174 const struct timespec __user *u_abs_timeout) 2179 const struct timespec *abs_timeout)
2175{ 2180{
2176 struct audit_aux_data_mq_sendrecv *ax;
2177 struct audit_context *context = current->audit_context; 2181 struct audit_context *context = current->audit_context;
2182 struct timespec *p = &context->mq_sendrecv.abs_timeout;
2178 2183
2179 if (!audit_enabled) 2184 if (abs_timeout)
2180 return 0; 2185 memcpy(p, abs_timeout, sizeof(struct timespec));
2181 2186 else
2182 if (likely(!context)) 2187 memset(p, 0, sizeof(struct timespec));
2183 return 0;
2184
2185 ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
2186 if (!ax)
2187 return -ENOMEM;
2188
2189 if (u_abs_timeout != NULL) {
2190 if (copy_from_user(&ax->abs_timeout, u_abs_timeout, sizeof(ax->abs_timeout))) {
2191 kfree(ax);
2192 return -EFAULT;
2193 }
2194 } else
2195 memset(&ax->abs_timeout, 0, sizeof(ax->abs_timeout));
2196
2197 ax->mqdes = mqdes;
2198 ax->msg_len = msg_len;
2199 ax->msg_prio = msg_prio;
2200
2201 ax->d.type = AUDIT_MQ_SENDRECV;
2202 ax->d.next = context->aux;
2203 context->aux = (void *)ax;
2204 return 0;
2205}
2206
2207/**
2208 * __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive
2209 * @mqdes: MQ descriptor
2210 * @msg_len: Message length
2211 * @u_msg_prio: Message priority
2212 * @u_abs_timeout: Message timeout in absolute time
2213 *
2214 * Returns 0 for success or NULL context or < 0 on error.
2215 */
2216int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len,
2217 unsigned int __user *u_msg_prio,
2218 const struct timespec __user *u_abs_timeout)
2219{
2220 struct audit_aux_data_mq_sendrecv *ax;
2221 struct audit_context *context = current->audit_context;
2222
2223 if (!audit_enabled)
2224 return 0;
2225
2226 if (likely(!context))
2227 return 0;
2228
2229 ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
2230 if (!ax)
2231 return -ENOMEM;
2232
2233 if (u_msg_prio != NULL) {
2234 if (get_user(ax->msg_prio, u_msg_prio)) {
2235 kfree(ax);
2236 return -EFAULT;
2237 }
2238 } else
2239 ax->msg_prio = 0;
2240
2241 if (u_abs_timeout != NULL) {
2242 if (copy_from_user(&ax->abs_timeout, u_abs_timeout, sizeof(ax->abs_timeout))) {
2243 kfree(ax);
2244 return -EFAULT;
2245 }
2246 } else
2247 memset(&ax->abs_timeout, 0, sizeof(ax->abs_timeout));
2248 2188
2249 ax->mqdes = mqdes; 2189 context->mq_sendrecv.mqdes = mqdes;
2250 ax->msg_len = msg_len; 2190 context->mq_sendrecv.msg_len = msg_len;
2191 context->mq_sendrecv.msg_prio = msg_prio;
2251 2192
2252 ax->d.type = AUDIT_MQ_SENDRECV; 2193 context->type = AUDIT_MQ_SENDRECV;
2253 ax->d.next = context->aux;
2254 context->aux = (void *)ax;
2255 return 0;
2256} 2194}
2257 2195
2258/** 2196/**
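
The rewritten __audit_mq_open()/__audit_mq_sendrecv() hooks above no longer copy from userspace themselves; they only stash the already-validated oflag/mode/mq_attr/timespec values in the per-task audit_context. For orientation, a minimal userspace program that drives this path looks roughly like the sketch below; the queue name and attribute values are arbitrary illustration choices, not anything taken from the patch.

    /* sketch: open a POSIX message queue with explicit attributes and send one
     * message -- the oflag, mode, mq_attr and priority passed here are what the
     * AUDIT_MQ_OPEN / AUDIT_MQ_SENDRECV records describe. Build with -lrt. */
    #include <fcntl.h>
    #include <mqueue.h>
    #include <stdio.h>

    int main(void)
    {
        struct mq_attr attr = {
            .mq_maxmsg  = 8,     /* illustration values only */
            .mq_msgsize = 128,
        };
        mqd_t q = mq_open("/audit-demo", O_CREAT | O_RDWR, 0600, &attr);
        if (q == (mqd_t)-1) {
            perror("mq_open");
            return 1;
        }
        const char msg[] = "hello";
        if (mq_send(q, msg, sizeof(msg), 1) == -1)   /* msg_prio = 1 */
            perror("mq_send");
        mq_close(q);
        mq_unlink("/audit-demo");
        return 0;
    }
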
@@ -2260,38 +2198,19 @@ int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len,
2260 * @mqdes: MQ descriptor 2198 * @mqdes: MQ descriptor
2261 * @u_notification: Notification event 2199 * @u_notification: Notification event
2262 * 2200 *
2263 * Returns 0 for success or NULL context or < 0 on error.
2264 */ 2201 */
2265 2202
2266int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) 2203void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification)
2267{ 2204{
2268 struct audit_aux_data_mq_notify *ax;
2269 struct audit_context *context = current->audit_context; 2205 struct audit_context *context = current->audit_context;
2270 2206
2271 if (!audit_enabled) 2207 if (notification)
2272 return 0; 2208 context->mq_notify.sigev_signo = notification->sigev_signo;
2273 2209 else
2274 if (likely(!context)) 2210 context->mq_notify.sigev_signo = 0;
2275 return 0;
2276
2277 ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
2278 if (!ax)
2279 return -ENOMEM;
2280
2281 if (u_notification != NULL) {
2282 if (copy_from_user(&ax->notification, u_notification, sizeof(ax->notification))) {
2283 kfree(ax);
2284 return -EFAULT;
2285 }
2286 } else
2287 memset(&ax->notification, 0, sizeof(ax->notification));
2288
2289 ax->mqdes = mqdes;
2290 2211
2291 ax->d.type = AUDIT_MQ_NOTIFY; 2212 context->mq_notify.mqdes = mqdes;
2292 ax->d.next = context->aux; 2213 context->type = AUDIT_MQ_NOTIFY;
2293 context->aux = (void *)ax;
2294 return 0;
2295} 2214}
2296 2215
2297/** 2216/**
@@ -2299,55 +2218,29 @@ int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification)
2299 * @mqdes: MQ descriptor 2218 * @mqdes: MQ descriptor
2300 * @mqstat: MQ flags 2219 * @mqstat: MQ flags
2301 * 2220 *
2302 * Returns 0 for success or NULL context or < 0 on error.
2303 */ 2221 */
2304int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) 2222void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
2305{ 2223{
2306 struct audit_aux_data_mq_getsetattr *ax;
2307 struct audit_context *context = current->audit_context; 2224 struct audit_context *context = current->audit_context;
2308 2225 context->mq_getsetattr.mqdes = mqdes;
2309 if (!audit_enabled) 2226 context->mq_getsetattr.mqstat = *mqstat;
2310 return 0; 2227 context->type = AUDIT_MQ_GETSETATTR;
2311
2312 if (likely(!context))
2313 return 0;
2314
2315 ax = kmalloc(sizeof(*ax), GFP_ATOMIC);
2316 if (!ax)
2317 return -ENOMEM;
2318
2319 ax->mqdes = mqdes;
2320 ax->mqstat = *mqstat;
2321
2322 ax->d.type = AUDIT_MQ_GETSETATTR;
2323 ax->d.next = context->aux;
2324 context->aux = (void *)ax;
2325 return 0;
2326} 2228}
2327 2229
2328/** 2230/**
2329 * audit_ipc_obj - record audit data for ipc object 2231 * audit_ipc_obj - record audit data for ipc object
2330 * @ipcp: ipc permissions 2232 * @ipcp: ipc permissions
2331 * 2233 *
2332 * Returns 0 for success or NULL context or < 0 on error.
2333 */ 2234 */
2334int __audit_ipc_obj(struct kern_ipc_perm *ipcp) 2235void __audit_ipc_obj(struct kern_ipc_perm *ipcp)
2335{ 2236{
2336 struct audit_aux_data_ipcctl *ax;
2337 struct audit_context *context = current->audit_context; 2237 struct audit_context *context = current->audit_context;
2338 2238 context->ipc.uid = ipcp->uid;
2339 ax = kmalloc(sizeof(*ax), GFP_ATOMIC); 2239 context->ipc.gid = ipcp->gid;
2340 if (!ax) 2240 context->ipc.mode = ipcp->mode;
2341 return -ENOMEM; 2241 context->ipc.has_perm = 0;
2342 2242 security_ipc_getsecid(ipcp, &context->ipc.osid);
2343 ax->uid = ipcp->uid; 2243 context->type = AUDIT_IPC;
2344 ax->gid = ipcp->gid;
2345 ax->mode = ipcp->mode;
2346 security_ipc_getsecid(ipcp, &ax->osid);
2347 ax->d.type = AUDIT_IPC;
2348 ax->d.next = context->aux;
2349 context->aux = (void *)ax;
2350 return 0;
2351} 2244}
2352 2245
2353/** 2246/**
@@ -2357,26 +2250,17 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp)
2357 * @gid: msgq group id 2250 * @gid: msgq group id
2358 * @mode: msgq mode (permissions) 2251 * @mode: msgq mode (permissions)
2359 * 2252 *
2360 * Returns 0 for success or NULL context or < 0 on error. 2253 * Called only after audit_ipc_obj().
2361 */ 2254 */
2362int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) 2255void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode)
2363{ 2256{
2364 struct audit_aux_data_ipcctl *ax;
2365 struct audit_context *context = current->audit_context; 2257 struct audit_context *context = current->audit_context;
2366 2258
2367 ax = kmalloc(sizeof(*ax), GFP_ATOMIC); 2259 context->ipc.qbytes = qbytes;
2368 if (!ax) 2260 context->ipc.perm_uid = uid;
2369 return -ENOMEM; 2261 context->ipc.perm_gid = gid;
2370 2262 context->ipc.perm_mode = mode;
2371 ax->qbytes = qbytes; 2263 context->ipc.has_perm = 1;
2372 ax->uid = uid;
2373 ax->gid = gid;
2374 ax->mode = mode;
2375
2376 ax->d.type = AUDIT_IPC_SET_PERM;
2377 ax->d.next = context->aux;
2378 context->aux = (void *)ax;
2379 return 0;
2380} 2264}
2381 2265
2382int audit_bprm(struct linux_binprm *bprm) 2266int audit_bprm(struct linux_binprm *bprm)
@@ -2406,27 +2290,17 @@ int audit_bprm(struct linux_binprm *bprm)
2406 * @nargs: number of args 2290 * @nargs: number of args
2407 * @args: args array 2291 * @args: args array
2408 * 2292 *
2409 * Returns 0 for success or NULL context or < 0 on error.
2410 */ 2293 */
2411int audit_socketcall(int nargs, unsigned long *args) 2294void audit_socketcall(int nargs, unsigned long *args)
2412{ 2295{
2413 struct audit_aux_data_socketcall *ax;
2414 struct audit_context *context = current->audit_context; 2296 struct audit_context *context = current->audit_context;
2415 2297
2416 if (likely(!context || context->dummy)) 2298 if (likely(!context || context->dummy))
2417 return 0; 2299 return;
2418
2419 ax = kmalloc(sizeof(*ax) + nargs * sizeof(unsigned long), GFP_KERNEL);
2420 if (!ax)
2421 return -ENOMEM;
2422
2423 ax->nargs = nargs;
2424 memcpy(ax->args, args, nargs * sizeof(unsigned long));
2425 2300
2426 ax->d.type = AUDIT_SOCKETCALL; 2301 context->type = AUDIT_SOCKETCALL;
2427 ax->d.next = context->aux; 2302 context->socketcall.nargs = nargs;
2428 context->aux = (void *)ax; 2303 memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
2429 return 0;
2430} 2304}
2431 2305
2432/** 2306/**
@@ -2434,29 +2308,12 @@ int audit_socketcall(int nargs, unsigned long *args)
2434 * @fd1: the first file descriptor 2308 * @fd1: the first file descriptor
2435 * @fd2: the second file descriptor 2309 * @fd2: the second file descriptor
2436 * 2310 *
2437 * Returns 0 for success or NULL context or < 0 on error.
2438 */ 2311 */
2439int __audit_fd_pair(int fd1, int fd2) 2312void __audit_fd_pair(int fd1, int fd2)
2440{ 2313{
2441 struct audit_context *context = current->audit_context; 2314 struct audit_context *context = current->audit_context;
2442 struct audit_aux_data_fd_pair *ax; 2315 context->fds[0] = fd1;
2443 2316 context->fds[1] = fd2;
2444 if (likely(!context)) {
2445 return 0;
2446 }
2447
2448 ax = kmalloc(sizeof(*ax), GFP_KERNEL);
2449 if (!ax) {
2450 return -ENOMEM;
2451 }
2452
2453 ax->fd[0] = fd1;
2454 ax->fd[1] = fd2;
2455
2456 ax->d.type = AUDIT_FD_PAIR;
2457 ax->d.next = context->aux;
2458 context->aux = (void *)ax;
2459 return 0;
2460} 2317}
2461 2318
2462/** 2319/**
@@ -2468,22 +2325,20 @@ int __audit_fd_pair(int fd1, int fd2)
2468 */ 2325 */
2469int audit_sockaddr(int len, void *a) 2326int audit_sockaddr(int len, void *a)
2470{ 2327{
2471 struct audit_aux_data_sockaddr *ax;
2472 struct audit_context *context = current->audit_context; 2328 struct audit_context *context = current->audit_context;
2473 2329
2474 if (likely(!context || context->dummy)) 2330 if (likely(!context || context->dummy))
2475 return 0; 2331 return 0;
2476 2332
2477 ax = kmalloc(sizeof(*ax) + len, GFP_KERNEL); 2333 if (!context->sockaddr) {
2478 if (!ax) 2334 void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
2479 return -ENOMEM; 2335 if (!p)
2480 2336 return -ENOMEM;
2481 ax->len = len; 2337 context->sockaddr = p;
2482 memcpy(ax->a, a, len); 2338 }
2483 2339
2484 ax->d.type = AUDIT_SOCKADDR; 2340 context->sockaddr_len = len;
2485 ax->d.next = context->aux; 2341 memcpy(context->sockaddr, a, len);
2486 context->aux = (void *)ax;
2487 return 0; 2342 return 0;
2488} 2343}
2489 2344
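
With __audit_fd_pair() and audit_sockaddr() now recording into context->fds[] and context->sockaddr, the AUDIT_FD_PAIR and AUDIT_SOCKADDR records are emitted from audit_log_exit() as shown earlier. The kind of userspace calls whose arguments land in those two records is sketched below; the socket path is purely illustrative.

    /* sketch: a socketpair() (the fd0=/fd1= pair) and a bind() (the saddr=
     * hex dump) -- the two call shapes the records above describe. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    int main(void)
    {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {   /* fd0=, fd1= */
            perror("socketpair");
            return 1;
        }

        int s = socket(AF_UNIX, SOCK_DGRAM, 0);
        struct sockaddr_un sun;
        memset(&sun, 0, sizeof(sun));
        sun.sun_family = AF_UNIX;
        strncpy(sun.sun_path, "/tmp/audit-demo.sock", sizeof(sun.sun_path) - 1);
        if (bind(s, (struct sockaddr *)&sun, sizeof(sun)) == -1)   /* saddr= */
            perror("bind");

        close(sv[0]); close(sv[1]); close(s);
        unlink("/tmp/audit-demo.sock");
        return 0;
    }
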
@@ -2617,29 +2472,15 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
 2617 * Record the arguments userspace sent to sys_capset for later printing by the 2472 * Record the arguments userspace sent to sys_capset for later printing by the
2618 * audit system if applicable 2473 * audit system if applicable
2619 */ 2474 */
2620int __audit_log_capset(pid_t pid, 2475void __audit_log_capset(pid_t pid,
2621 const struct cred *new, const struct cred *old) 2476 const struct cred *new, const struct cred *old)
2622{ 2477{
2623 struct audit_aux_data_capset *ax;
2624 struct audit_context *context = current->audit_context; 2478 struct audit_context *context = current->audit_context;
2625 2479 context->capset.pid = pid;
2626 if (likely(!audit_enabled || !context || context->dummy)) 2480 context->capset.cap.effective = new->cap_effective;
2627 return 0; 2481 context->capset.cap.inheritable = new->cap_effective;
 2628 2482 context->capset.cap.inheritable = new->cap_inheritable;
2629 ax = kmalloc(sizeof(*ax), GFP_KERNEL); 2483 context->type = AUDIT_CAPSET;
2630 if (!ax)
2631 return -ENOMEM;
2632
2633 ax->d.type = AUDIT_CAPSET;
2634 ax->d.next = context->aux;
2635 context->aux = (void *)ax;
2636
2637 ax->pid = pid;
2638 ax->cap.effective = new->cap_effective;
2639 ax->cap.inheritable = new->cap_effective;
2640 ax->cap.permitted = new->cap_permitted;
2641
2642 return 0;
2643} 2484}
2644 2485
2645/** 2486/**
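
The common shape of all the auditsc.c hunks above: the kmalloc'd audit_aux_data_* nodes chained off context->aux are replaced by fields stored directly in the audit_context plus a single context->type tag, which show_special() consults at syscall exit. A stripped-down userspace sketch of that "one embedded slot plus type tag" pattern follows; every name in it (demo_context, DEMO_MQ_OPEN, and so on) is invented for illustration and is not the kernel's actual layout.

    /* sketch of the "embedded slot + type tag" pattern that replaces the
     * kmalloc'd aux-data chain; all names here are invented. */
    #include <stdio.h>
    #include <string.h>

    enum demo_type { DEMO_NONE = 0, DEMO_MQ_OPEN, DEMO_IPC };

    struct demo_context {
        enum demo_type type;            /* which union member is valid */
        union {
            struct { int oflag, mode; } mq_open;
            struct { unsigned uid, gid, mode; } ipc;
        };
    };

    /* record an event: no allocation, just fill the slot and set the tag */
    static void demo_record_mq_open(struct demo_context *ctx, int oflag, int mode)
    {
        ctx->mq_open.oflag = oflag;
        ctx->mq_open.mode = mode;
        ctx->type = DEMO_MQ_OPEN;
    }

    /* at "syscall exit", print whichever record the tag selects, then reset */
    static void demo_show_special(struct demo_context *ctx)
    {
        switch (ctx->type) {
        case DEMO_MQ_OPEN:
            printf("mq_open oflag=%#x mode=%#o\n",
                   ctx->mq_open.oflag, ctx->mq_open.mode);
            break;
        case DEMO_IPC:
            printf("ipc uid=%u gid=%u mode=%#o\n",
                   ctx->ipc.uid, ctx->ipc.gid, ctx->ipc.mode);
            break;
        default:
            break;
        }
        ctx->type = DEMO_NONE;
    }

    int main(void)
    {
        struct demo_context ctx;
        memset(&ctx, 0, sizeof(ctx));
        demo_record_mq_open(&ctx, 0x42, 0600);
        demo_show_special(&ctx);
        return 0;
    }
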
diff --git a/kernel/capability.c b/kernel/capability.c
index 36b4b4daebec..c598d9d5be4f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -280,9 +280,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
280 if (ret < 0) 280 if (ret < 0)
281 goto error; 281 goto error;
282 282
283 ret = audit_log_capset(pid, new, current_cred()); 283 audit_log_capset(pid, new, current_cred());
284 if (ret < 0)
285 return ret;
286 284
287 return commit_creds(new); 285 return commit_creds(new);
288 286
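
sys_capset() now calls the audit hook unconditionally and no longer has an error path to unwind, since __audit_log_capset() cannot fail. The pe/pi/pp sets the hook records are the same triple a process can inspect through libcap; a small example follows, assuming libcap's cap_get_proc()/cap_to_text() and linking with -lcap.

    /* sketch: print the current process's effective/inheritable/permitted
     * capability sets -- the cap_pe/cap_pi/cap_pp triple the AUDIT_CAPSET
     * record carries. Build with -lcap. */
    #include <stdio.h>
    #include <sys/capability.h>

    int main(void)
    {
        cap_t caps = cap_get_proc();
        if (!caps) {
            perror("cap_get_proc");
            return 1;
        }
        char *txt = cap_to_text(caps, NULL);
        printf("caps: %s\n", txt ? txt : "(none)");
        cap_free(txt);
        cap_free(caps);
        return 0;
    }
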
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 48348dde6d81..87bb0258fd27 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -573,7 +573,6 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
573 inode->i_mode = mode; 573 inode->i_mode = mode;
574 inode->i_uid = current_fsuid(); 574 inode->i_uid = current_fsuid();
575 inode->i_gid = current_fsgid(); 575 inode->i_gid = current_fsgid();
576 inode->i_blocks = 0;
577 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 576 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
578 inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; 577 inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
579 } 578 }
@@ -2945,7 +2944,11 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2945 parent = task_cgroup(tsk, subsys->subsys_id); 2944 parent = task_cgroup(tsk, subsys->subsys_id);
2946 2945
2947 /* Pin the hierarchy */ 2946 /* Pin the hierarchy */
2948 atomic_inc(&parent->root->sb->s_active); 2947 if (!atomic_inc_not_zero(&parent->root->sb->s_active)) {
2948 /* We race with the final deactivate_super() */
2949 mutex_unlock(&cgroup_mutex);
2950 return 0;
2951 }
2949 2952
2950 /* Keep the cgroup alive */ 2953 /* Keep the cgroup alive */
2951 get_css_set(cg); 2954 get_css_set(cg);
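
The cgroup_clone() change pins the superblock with atomic_inc_not_zero() so the clone path backs off instead of resurrecting a reference count that has already reached zero. The same "increment only if still nonzero" idiom can be written with C11 atomics as in the sketch below; this illustrates the idiom, it is not the kernel's atomic_inc_not_zero() implementation.

    /* sketch: "take a reference unless the count already dropped to zero",
     * the idiom behind atomic_inc_not_zero(), using C11 <stdatomic.h>. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool ref_get_not_zero(atomic_int *ref)
    {
        int old = atomic_load(ref);
        do {
            if (old == 0)
                return false;   /* object is already being torn down */
        } while (!atomic_compare_exchange_weak(ref, &old, old + 1));
        return true;
    }

    int main(void)
    {
        atomic_int live = 1, dead = 0;
        printf("live: %s\n", ref_get_not_zero(&live) ? "pinned" : "too late");
        printf("dead: %s\n", ref_get_not_zero(&dead) ? "pinned" : "too late");
        return 0;
    }
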
diff --git a/kernel/compat.c b/kernel/compat.c
index 8eafe3eb50d9..d52e2ec1deb5 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -454,16 +454,16 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
454} 454}
455 455
456static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr, 456static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
457 unsigned len, cpumask_t *new_mask) 457 unsigned len, struct cpumask *new_mask)
458{ 458{
459 unsigned long *k; 459 unsigned long *k;
460 460
461 if (len < sizeof(cpumask_t)) 461 if (len < cpumask_size())
462 memset(new_mask, 0, sizeof(cpumask_t)); 462 memset(new_mask, 0, cpumask_size());
463 else if (len > sizeof(cpumask_t)) 463 else if (len > cpumask_size())
464 len = sizeof(cpumask_t); 464 len = cpumask_size();
465 465
466 k = cpus_addr(*new_mask); 466 k = cpumask_bits(new_mask);
467 return compat_get_bitmap(k, user_mask_ptr, len * 8); 467 return compat_get_bitmap(k, user_mask_ptr, len * 8);
468} 468}
469 469
@@ -471,40 +471,51 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
471 unsigned int len, 471 unsigned int len,
472 compat_ulong_t __user *user_mask_ptr) 472 compat_ulong_t __user *user_mask_ptr)
473{ 473{
474 cpumask_t new_mask; 474 cpumask_var_t new_mask;
475 int retval; 475 int retval;
476 476
477 retval = compat_get_user_cpu_mask(user_mask_ptr, len, &new_mask); 477 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
478 return -ENOMEM;
479
480 retval = compat_get_user_cpu_mask(user_mask_ptr, len, new_mask);
478 if (retval) 481 if (retval)
479 return retval; 482 goto out;
480 483
481 return sched_setaffinity(pid, &new_mask); 484 retval = sched_setaffinity(pid, new_mask);
485out:
486 free_cpumask_var(new_mask);
487 return retval;
482} 488}
483 489
484asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, 490asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
485 compat_ulong_t __user *user_mask_ptr) 491 compat_ulong_t __user *user_mask_ptr)
486{ 492{
487 int ret; 493 int ret;
488 cpumask_t mask; 494 cpumask_var_t mask;
489 unsigned long *k; 495 unsigned long *k;
490 unsigned int min_length = sizeof(cpumask_t); 496 unsigned int min_length = cpumask_size();
491 497
492 if (NR_CPUS <= BITS_PER_COMPAT_LONG) 498 if (nr_cpu_ids <= BITS_PER_COMPAT_LONG)
493 min_length = sizeof(compat_ulong_t); 499 min_length = sizeof(compat_ulong_t);
494 500
495 if (len < min_length) 501 if (len < min_length)
496 return -EINVAL; 502 return -EINVAL;
497 503
498 ret = sched_getaffinity(pid, &mask); 504 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
505 return -ENOMEM;
506
507 ret = sched_getaffinity(pid, mask);
499 if (ret < 0) 508 if (ret < 0)
500 return ret; 509 goto out;
501 510
502 k = cpus_addr(mask); 511 k = cpumask_bits(mask);
503 ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); 512 ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8);
504 if (ret) 513 if (ret == 0)
505 return ret; 514 ret = min_length;
506 515
507 return min_length; 516out:
517 free_cpumask_var(mask);
518 return ret;
508} 519}
509 520
510int get_compat_itimerspec(struct itimerspec *dst, 521int get_compat_itimerspec(struct itimerspec *dst,
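
Both compat affinity paths now allocate their cpumask with alloc_cpumask_var() and size everything by cpumask_size()/nr_cpu_ids instead of putting a full NR_CPUS-bit cpumask_t on the stack. Userspace has the same pattern in glibc's dynamically sized CPU sets; the sketch below pins the caller to CPU 0 using CPU_ALLOC()/CPU_SET_S()/sched_setaffinity(), with the choice of CPU 0 being arbitrary.

    /* sketch: size the CPU set from the number of configured CPUs instead of
     * using a fixed-size cpu_set_t, mirroring the cpumask_var_t change. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long ncpus = sysconf(_SC_NPROCESSORS_CONF);
        if (ncpus < 1)
            ncpus = 1;
        cpu_set_t *set = CPU_ALLOC((int)ncpus);          /* heap, not stack */
        size_t setsize = CPU_ALLOC_SIZE((int)ncpus);
        if (!set)
            return 1;

        CPU_ZERO_S(setsize, set);
        CPU_SET_S(0, setsize, set);                      /* pin to CPU 0 */
        if (sched_setaffinity(0, setsize, set) == -1)
            perror("sched_setaffinity");
        else
            printf("now restricted to CPU 0\n");

        CPU_FREE(set);
        return 0;
    }
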
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8ea32e8d68b0..30e74dd6d01b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,29 +15,8 @@
15#include <linux/stop_machine.h> 15#include <linux/stop_machine.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17 17
18/* 18#ifdef CONFIG_SMP
19 * Represents all cpu's present in the system 19/* Serializes the updates to cpu_online_mask, cpu_present_mask */
20 * In systems capable of hotplug, this map could dynamically grow
21 * as new cpu's are detected in the system via any platform specific
22 * method, such as ACPI for e.g.
23 */
24cpumask_t cpu_present_map __read_mostly;
25EXPORT_SYMBOL(cpu_present_map);
26
27#ifndef CONFIG_SMP
28
29/*
30 * Represents all cpu's that are currently online.
31 */
32cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
33EXPORT_SYMBOL(cpu_online_map);
34
35cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
36EXPORT_SYMBOL(cpu_possible_map);
37
38#else /* CONFIG_SMP */
39
40/* Serializes the updates to cpu_online_map, cpu_present_map */
41static DEFINE_MUTEX(cpu_add_remove_lock); 20static DEFINE_MUTEX(cpu_add_remove_lock);
42 21
43static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); 22static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
@@ -64,8 +43,6 @@ void __init cpu_hotplug_init(void)
64 cpu_hotplug.refcount = 0; 43 cpu_hotplug.refcount = 0;
65} 44}
66 45
67cpumask_t cpu_active_map;
68
69#ifdef CONFIG_HOTPLUG_CPU 46#ifdef CONFIG_HOTPLUG_CPU
70 47
71void get_online_cpus(void) 48void get_online_cpus(void)
@@ -96,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
96 73
97/* 74/*
98 * The following two API's must be used when attempting 75 * The following two API's must be used when attempting
99 * to serialize the updates to cpu_online_map, cpu_present_map. 76 * to serialize the updates to cpu_online_mask, cpu_present_mask.
100 */ 77 */
101void cpu_maps_update_begin(void) 78void cpu_maps_update_begin(void)
102{ 79{
@@ -217,7 +194,7 @@ static int __ref take_cpu_down(void *_param)
217static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 194static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
218{ 195{
219 int err, nr_calls = 0; 196 int err, nr_calls = 0;
220 cpumask_t old_allowed, tmp; 197 cpumask_var_t old_allowed;
221 void *hcpu = (void *)(long)cpu; 198 void *hcpu = (void *)(long)cpu;
222 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 199 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
223 struct take_cpu_down_param tcd_param = { 200 struct take_cpu_down_param tcd_param = {
@@ -231,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
231 if (!cpu_online(cpu)) 208 if (!cpu_online(cpu))
232 return -EINVAL; 209 return -EINVAL;
233 210
211 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
212 return -ENOMEM;
213
234 cpu_hotplug_begin(); 214 cpu_hotplug_begin();
235 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 215 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
236 hcpu, -1, &nr_calls); 216 hcpu, -1, &nr_calls);
@@ -245,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
245 } 225 }
246 226
247 /* Ensure that we are not runnable on dying cpu */ 227 /* Ensure that we are not runnable on dying cpu */
248 old_allowed = current->cpus_allowed; 228 cpumask_copy(old_allowed, &current->cpus_allowed);
249 cpus_setall(tmp); 229 set_cpus_allowed_ptr(current,
250 cpu_clear(cpu, tmp); 230 cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
251 set_cpus_allowed_ptr(current, &tmp);
252 tmp = cpumask_of_cpu(cpu);
253 231
254 err = __stop_machine(take_cpu_down, &tcd_param, &tmp); 232 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
255 if (err) { 233 if (err) {
256 /* CPU didn't die: tell everyone. Can't complain. */ 234 /* CPU didn't die: tell everyone. Can't complain. */
257 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 235 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
@@ -277,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
277 check_for_tasks(cpu); 255 check_for_tasks(cpu);
278 256
279out_allowed: 257out_allowed:
280 set_cpus_allowed_ptr(current, &old_allowed); 258 set_cpus_allowed_ptr(current, old_allowed);
281out_release: 259out_release:
282 cpu_hotplug_done(); 260 cpu_hotplug_done();
283 if (!err) { 261 if (!err) {
@@ -285,13 +263,17 @@ out_release:
285 hcpu) == NOTIFY_BAD) 263 hcpu) == NOTIFY_BAD)
286 BUG(); 264 BUG();
287 } 265 }
266 free_cpumask_var(old_allowed);
288 return err; 267 return err;
289} 268}
290 269
291int __ref cpu_down(unsigned int cpu) 270int __ref cpu_down(unsigned int cpu)
292{ 271{
293 int err = 0; 272 int err;
294 273
274 err = stop_machine_create();
275 if (err)
276 return err;
295 cpu_maps_update_begin(); 277 cpu_maps_update_begin();
296 278
297 if (cpu_hotplug_disabled) { 279 if (cpu_hotplug_disabled) {
@@ -303,7 +285,7 @@ int __ref cpu_down(unsigned int cpu)
303 285
304 /* 286 /*
 305 * Make sure that all cpus did the reschedule and are not 287 * using stale version of the cpu_active_mask.
 306 * using stale version of the cpu_active_map. 288 * This is not strictly necessary because stop_machine()
 307 * This is not strictly necessary because stop_machine() 289 * that we run down the line already provides the required
308 * that we run down the line already provides the required 290 * that we run down the line already provides the required
309 * synchronization. But it's really a side effect and we do not 291 * synchronization. But it's really a side effect and we do not
@@ -318,6 +300,7 @@ int __ref cpu_down(unsigned int cpu)
318 300
319out: 301out:
320 cpu_maps_update_done(); 302 cpu_maps_update_done();
303 stop_machine_destroy();
321 return err; 304 return err;
322} 305}
323EXPORT_SYMBOL(cpu_down); 306EXPORT_SYMBOL(cpu_down);
@@ -367,7 +350,7 @@ out_notify:
367int __cpuinit cpu_up(unsigned int cpu) 350int __cpuinit cpu_up(unsigned int cpu)
368{ 351{
369 int err = 0; 352 int err = 0;
370 if (!cpu_isset(cpu, cpu_possible_map)) { 353 if (!cpu_possible(cpu)) {
371 printk(KERN_ERR "can't online cpu %d because it is not " 354 printk(KERN_ERR "can't online cpu %d because it is not "
372 "configured as may-hotadd at boot time\n", cpu); 355 "configured as may-hotadd at boot time\n", cpu);
373#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) 356#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
@@ -392,25 +375,25 @@ out:
392} 375}
393 376
394#ifdef CONFIG_PM_SLEEP_SMP 377#ifdef CONFIG_PM_SLEEP_SMP
395static cpumask_t frozen_cpus; 378static cpumask_var_t frozen_cpus;
396 379
397int disable_nonboot_cpus(void) 380int disable_nonboot_cpus(void)
398{ 381{
399 int cpu, first_cpu, error = 0; 382 int cpu, first_cpu, error = 0;
400 383
401 cpu_maps_update_begin(); 384 cpu_maps_update_begin();
402 first_cpu = first_cpu(cpu_online_map); 385 first_cpu = cpumask_first(cpu_online_mask);
403 /* We take down all of the non-boot CPUs in one shot to avoid races 386 /* We take down all of the non-boot CPUs in one shot to avoid races
404 * with the userspace trying to use the CPU hotplug at the same time 387 * with the userspace trying to use the CPU hotplug at the same time
405 */ 388 */
406 cpus_clear(frozen_cpus); 389 cpumask_clear(frozen_cpus);
407 printk("Disabling non-boot CPUs ...\n"); 390 printk("Disabling non-boot CPUs ...\n");
408 for_each_online_cpu(cpu) { 391 for_each_online_cpu(cpu) {
409 if (cpu == first_cpu) 392 if (cpu == first_cpu)
410 continue; 393 continue;
411 error = _cpu_down(cpu, 1); 394 error = _cpu_down(cpu, 1);
412 if (!error) { 395 if (!error) {
413 cpu_set(cpu, frozen_cpus); 396 cpumask_set_cpu(cpu, frozen_cpus);
414 printk("CPU%d is down\n", cpu); 397 printk("CPU%d is down\n", cpu);
415 } else { 398 } else {
416 printk(KERN_ERR "Error taking CPU%d down: %d\n", 399 printk(KERN_ERR "Error taking CPU%d down: %d\n",
@@ -436,11 +419,11 @@ void __ref enable_nonboot_cpus(void)
436 /* Allow everyone to use the CPU hotplug again */ 419 /* Allow everyone to use the CPU hotplug again */
437 cpu_maps_update_begin(); 420 cpu_maps_update_begin();
438 cpu_hotplug_disabled = 0; 421 cpu_hotplug_disabled = 0;
439 if (cpus_empty(frozen_cpus)) 422 if (cpumask_empty(frozen_cpus))
440 goto out; 423 goto out;
441 424
442 printk("Enabling non-boot CPUs ...\n"); 425 printk("Enabling non-boot CPUs ...\n");
443 for_each_cpu_mask_nr(cpu, frozen_cpus) { 426 for_each_cpu(cpu, frozen_cpus) {
444 error = _cpu_up(cpu, 1); 427 error = _cpu_up(cpu, 1);
445 if (!error) { 428 if (!error) {
446 printk("CPU%d is up\n", cpu); 429 printk("CPU%d is up\n", cpu);
@@ -448,10 +431,18 @@ void __ref enable_nonboot_cpus(void)
448 } 431 }
449 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 432 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
450 } 433 }
451 cpus_clear(frozen_cpus); 434 cpumask_clear(frozen_cpus);
452out: 435out:
453 cpu_maps_update_done(); 436 cpu_maps_update_done();
454} 437}
438
439static int alloc_frozen_cpus(void)
440{
441 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
442 return -ENOMEM;
443 return 0;
444}
445core_initcall(alloc_frozen_cpus);
455#endif /* CONFIG_PM_SLEEP_SMP */ 446#endif /* CONFIG_PM_SLEEP_SMP */
456 447
457/** 448/**
@@ -467,7 +458,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
467 unsigned long val = CPU_STARTING; 458 unsigned long val = CPU_STARTING;
468 459
469#ifdef CONFIG_PM_SLEEP_SMP 460#ifdef CONFIG_PM_SLEEP_SMP
470 if (cpu_isset(cpu, frozen_cpus)) 461 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
471 val = CPU_STARTING_FROZEN; 462 val = CPU_STARTING_FROZEN;
472#endif /* CONFIG_PM_SLEEP_SMP */ 463#endif /* CONFIG_PM_SLEEP_SMP */
473 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); 464 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
@@ -479,7 +470,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
479 * cpu_bit_bitmap[] is a special, "compressed" data structure that 470 * cpu_bit_bitmap[] is a special, "compressed" data structure that
480 * represents all NR_CPUS bits binary values of 1<<nr. 471 * represents all NR_CPUS bits binary values of 1<<nr.
481 * 472 *
482 * It is used by cpumask_of_cpu() to get a constant address to a CPU 473 * It is used by cpumask_of() to get a constant address to a CPU
483 * mask value that has a single bit set only. 474 * mask value that has a single bit set only.
484 */ 475 */
485 476
@@ -502,3 +493,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
502 493
503const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 494const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
504EXPORT_SYMBOL(cpu_all_bits); 495EXPORT_SYMBOL(cpu_all_bits);
496
497#ifdef CONFIG_INIT_ALL_POSSIBLE
498static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
499 = CPU_BITS_ALL;
500#else
501static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
502#endif
503const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
504EXPORT_SYMBOL(cpu_possible_mask);
505
506static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
507const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
508EXPORT_SYMBOL(cpu_online_mask);
509
510static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
511const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
512EXPORT_SYMBOL(cpu_present_mask);
513
514static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
515const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
516EXPORT_SYMBOL(cpu_active_mask);
517
518void set_cpu_possible(unsigned int cpu, bool possible)
519{
520 if (possible)
521 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
522 else
523 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
524}
525
526void set_cpu_present(unsigned int cpu, bool present)
527{
528 if (present)
529 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
530 else
531 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
532}
533
534void set_cpu_online(unsigned int cpu, bool online)
535{
536 if (online)
537 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
538 else
539 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
540}
541
542void set_cpu_active(unsigned int cpu, bool active)
543{
544 if (active)
545 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
546 else
547 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
548}
549
550void init_cpu_present(const struct cpumask *src)
551{
552 cpumask_copy(to_cpumask(cpu_present_bits), src);
553}
554
555void init_cpu_possible(const struct cpumask *src)
556{
557 cpumask_copy(to_cpumask(cpu_possible_bits), src);
558}
559
560void init_cpu_online(const struct cpumask *src)
561{
562 cpumask_copy(to_cpumask(cpu_online_bits), src);
563}
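
The new block at the end of cpu.c hides the possible/online/present/active bitmaps behind const pointers and routes every modification through set_cpu_*() and init_cpu_*(). A toy version of that encapsulation is sketched below; the function names mirror the kernel's style but the storage and bit arithmetic are a from-scratch illustration, not the kernel code.

    /* sketch: expose a bitmap read-only through a const pointer and funnel
     * all writes through small accessor functions, as cpu.c now does. */
    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_NR_CPUS  64
    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define WORDS ((DEMO_NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long cpu_online_bits[WORDS];                    /* private storage */
    const unsigned long *const cpu_online_mask = cpu_online_bits;   /* read-only view */

    static void set_cpu_online(unsigned int cpu, bool online)
    {
        if (online)
            cpu_online_bits[cpu / BITS_PER_LONG] |=  1UL << (cpu % BITS_PER_LONG);
        else
            cpu_online_bits[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
    }

    static bool cpu_online(unsigned int cpu)
    {
        return (cpu_online_mask[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        set_cpu_online(3, true);
        printf("cpu3 online: %d\n", cpu_online(3));
        set_cpu_online(3, false);
        printf("cpu3 online: %d\n", cpu_online(3));
        return 0;
    }
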
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 96c0ba13b8cd..39c1a4c1c5a9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -896,7 +896,7 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
896 if (!*buf) { 896 if (!*buf) {
897 cpus_clear(trialcs.cpus_allowed); 897 cpus_clear(trialcs.cpus_allowed);
898 } else { 898 } else {
899 retval = cpulist_parse(buf, trialcs.cpus_allowed); 899 retval = cpulist_parse(buf, &trialcs.cpus_allowed);
900 if (retval < 0) 900 if (retval < 0)
901 return retval; 901 return retval;
902 902
@@ -1482,7 +1482,7 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1482 mask = cs->cpus_allowed; 1482 mask = cs->cpus_allowed;
1483 mutex_unlock(&callback_mutex); 1483 mutex_unlock(&callback_mutex);
1484 1484
1485 return cpulist_scnprintf(page, PAGE_SIZE, mask); 1485 return cpulist_scnprintf(page, PAGE_SIZE, &mask);
1486} 1486}
1487 1487
1488static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) 1488static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
diff --git a/kernel/exit.c b/kernel/exit.c
index c7422ca92038..c9e5a1c14e08 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1037,8 +1037,6 @@ NORET_TYPE void do_exit(long code)
1037 * task into the wait for ever nirwana as well. 1037 * task into the wait for ever nirwana as well.
1038 */ 1038 */
1039 tsk->flags |= PF_EXITPIDONE; 1039 tsk->flags |= PF_EXITPIDONE;
1040 if (tsk->io_context)
1041 exit_io_context();
1042 set_current_state(TASK_UNINTERRUPTIBLE); 1040 set_current_state(TASK_UNINTERRUPTIBLE);
1043 schedule(); 1041 schedule();
1044 } 1042 }
@@ -1328,10 +1326,10 @@ static int wait_task_zombie(struct task_struct *p, int options,
1328 * group, which consolidates times for all threads in the 1326 * group, which consolidates times for all threads in the
1329 * group including the group leader. 1327 * group including the group leader.
1330 */ 1328 */
1329 thread_group_cputime(p, &cputime);
1331 spin_lock_irq(&p->parent->sighand->siglock); 1330 spin_lock_irq(&p->parent->sighand->siglock);
1332 psig = p->parent->signal; 1331 psig = p->parent->signal;
1333 sig = p->signal; 1332 sig = p->signal;
1334 thread_group_cputime(p, &cputime);
1335 psig->cutime = 1333 psig->cutime =
1336 cputime_add(psig->cutime, 1334 cputime_add(psig->cutime,
1337 cputime_add(cputime.utime, 1335 cputime_add(cputime.utime,
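
The wait_task_zombie() hunk moves the thread_group_cputime() accumulation in front of the parent's siglock, so the walk over the thread group no longer happens with the lock held. Whatever the precise motivation of the reordering, the resulting shape is the familiar "do the expensive summing first, publish under the lock" pattern, sketched here with pthreads and made-up totals.

    /* sketch: gather the expensive per-item sums before taking the lock,
     * then hold the lock only while folding them into the shared totals. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t totals_lock = PTHREAD_MUTEX_INITIALIZER;
    static long total_utime, total_stime;

    static void account(const long *utimes, const long *stimes, int n)
    {
        long u = 0, s = 0;
        for (int i = 0; i < n; i++) {   /* potentially long walk: no lock held */
            u += utimes[i];
            s += stimes[i];
        }

        pthread_mutex_lock(&totals_lock);   /* short critical section */
        total_utime += u;
        total_stime += s;
        pthread_mutex_unlock(&totals_lock);
    }

    int main(void)
    {
        long u[] = { 10, 20, 30 }, s[] = { 1, 2, 3 };
        account(u, s, 3);
        printf("utime=%ld stime=%ld\n", total_utime, total_stime);
        return 0;
    }
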
diff --git a/kernel/extable.c b/kernel/extable.c
index feb0317cf09a..e136ed8d82ba 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -67,3 +67,19 @@ int kernel_text_address(unsigned long addr)
67 return 1; 67 return 1;
68 return module_text_address(addr) != NULL; 68 return module_text_address(addr) != NULL;
69} 69}
70
71/*
72 * On some architectures (PPC64, IA64) function pointers
73 * are actually only tokens to some data that then holds the
74 * real function address. As a result, to find if a function
75 * pointer is part of the kernel text, we need to do some
76 * special dereferencing first.
77 */
78int func_ptr_is_kernel_text(void *ptr)
79{
80 unsigned long addr;
81 addr = (unsigned long) dereference_function_descriptor(ptr);
82 if (core_kernel_text(addr))
83 return 1;
84 return module_text_address(addr) != NULL;
85}
diff --git a/kernel/fork.c b/kernel/fork.c
index 6144b36cd897..43cbf30669e6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -415,8 +415,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
415 set_mm_counter(mm, file_rss, 0); 415 set_mm_counter(mm, file_rss, 0);
416 set_mm_counter(mm, anon_rss, 0); 416 set_mm_counter(mm, anon_rss, 0);
417 spin_lock_init(&mm->page_table_lock); 417 spin_lock_init(&mm->page_table_lock);
418 rwlock_init(&mm->ioctx_list_lock); 418 spin_lock_init(&mm->ioctx_lock);
419 mm->ioctx_list = NULL; 419 INIT_HLIST_HEAD(&mm->ioctx_list);
420 mm->free_area_cache = TASK_UNMAPPED_BASE; 420 mm->free_area_cache = TASK_UNMAPPED_BASE;
421 mm->cached_hole_size = ~0UL; 421 mm->cached_hole_size = ~0UL;
422 mm_init_owner(mm, p); 422 mm_init_owner(mm, p);
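
mm_init() now prepares an hlist head guarded by a spinlock for the AIO contexts rather than the old bare pointer list with an rwlock. The kernel's hlist is an intrusive singly linked list whose nodes carry a pprev back-pointer, so unlinking never needs to know the list head; the standalone toy below reimplements just that structure for illustration and is not the kernel's <linux/list.h>.

    /* sketch: a toy intrusive hlist (head + node with pprev back-pointer),
     * the structure that replaces mm->ioctx_list's hand-rolled pointer list. */
    #include <stddef.h>
    #include <stdio.h>

    struct hnode { struct hnode *next, **pprev; };
    struct hhead { struct hnode *first; };

    static void hlist_add_head(struct hnode *n, struct hhead *h)
    {
        n->next = h->first;
        if (h->first)
            h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
    }

    static void hlist_del(struct hnode *n)
    {
        *n->pprev = n->next;            /* works without knowing the head */
        if (n->next)
            n->next->pprev = n->pprev;
    }

    struct ioctx { int id; struct hnode link; };   /* node embedded in the object */

    int main(void)
    {
        struct hhead head = { NULL };
        struct ioctx a = { .id = 1 }, b = { .id = 2 };

        hlist_add_head(&a.link, &head);
        hlist_add_head(&b.link, &head);
        hlist_del(&a.link);

        for (struct hnode *n = head.first; n; n = n->next) {
            struct ioctx *c = (struct ioctx *)((char *)n - offsetof(struct ioctx, link));
            printf("ioctx %d\n", c->id);
        }
        return 0;
    }
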
diff --git a/kernel/futex.c b/kernel/futex.c
index 4fe790e89d0f..7c6cbabe52b3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -92,11 +92,12 @@ struct futex_pi_state {
92 * A futex_q has a woken state, just like tasks have TASK_RUNNING. 92 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
93 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. 93 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 94 * The order of wakeup is always to make the first condition true, then 94 * The order of wakeup is always to make the first condition true, then
95 * wake up q->waiters, then make the second condition true. 95 * wake up q->waiter, then make the second condition true.
96 */ 96 */
97struct futex_q { 97struct futex_q {
98 struct plist_node list; 98 struct plist_node list;
99 wait_queue_head_t waiters; 99 /* There can only be a single waiter */
100 wait_queue_head_t waiter;
100 101
101 /* Which hash list lock to use: */ 102 /* Which hash list lock to use: */
102 spinlock_t *lock_ptr; 103 spinlock_t *lock_ptr;
@@ -123,24 +124,6 @@ struct futex_hash_bucket {
123static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS]; 124static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
124 125
125/* 126/*
126 * Take mm->mmap_sem, when futex is shared
127 */
128static inline void futex_lock_mm(struct rw_semaphore *fshared)
129{
130 if (fshared)
131 down_read(fshared);
132}
133
134/*
135 * Release mm->mmap_sem, when the futex is shared
136 */
137static inline void futex_unlock_mm(struct rw_semaphore *fshared)
138{
139 if (fshared)
140 up_read(fshared);
141}
142
143/*
144 * We hash on the keys returned from get_futex_key (see below). 127 * We hash on the keys returned from get_futex_key (see below).
145 */ 128 */
146static struct futex_hash_bucket *hash_futex(union futex_key *key) 129static struct futex_hash_bucket *hash_futex(union futex_key *key)
@@ -161,6 +144,45 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
161 && key1->both.offset == key2->both.offset); 144 && key1->both.offset == key2->both.offset);
162} 145}
163 146
147/*
148 * Take a reference to the resource addressed by a key.
149 * Can be called while holding spinlocks.
150 *
151 */
152static void get_futex_key_refs(union futex_key *key)
153{
154 if (!key->both.ptr)
155 return;
156
157 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
158 case FUT_OFF_INODE:
159 atomic_inc(&key->shared.inode->i_count);
160 break;
161 case FUT_OFF_MMSHARED:
162 atomic_inc(&key->private.mm->mm_count);
163 break;
164 }
165}
166
167/*
168 * Drop a reference to the resource addressed by a key.
169 * The hash bucket spinlock must not be held.
170 */
171static void drop_futex_key_refs(union futex_key *key)
172{
173 if (!key->both.ptr)
174 return;
175
176 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
177 case FUT_OFF_INODE:
178 iput(key->shared.inode);
179 break;
180 case FUT_OFF_MMSHARED:
181 mmdrop(key->private.mm);
182 break;
183 }
184}
185
164/** 186/**
165 * get_futex_key - Get parameters which are the keys for a futex. 187 * get_futex_key - Get parameters which are the keys for a futex.
166 * @uaddr: virtual address of the futex 188 * @uaddr: virtual address of the futex
@@ -179,12 +201,10 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
179 * For other futexes, it points to &current->mm->mmap_sem and 201 * For other futexes, it points to &current->mm->mmap_sem and
180 * caller must have taken the reader lock. but NOT any spinlocks. 202 * caller must have taken the reader lock. but NOT any spinlocks.
181 */ 203 */
182static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared, 204static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
183 union futex_key *key)
184{ 205{
185 unsigned long address = (unsigned long)uaddr; 206 unsigned long address = (unsigned long)uaddr;
186 struct mm_struct *mm = current->mm; 207 struct mm_struct *mm = current->mm;
187 struct vm_area_struct *vma;
188 struct page *page; 208 struct page *page;
189 int err; 209 int err;
190 210
@@ -208,100 +228,50 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
208 return -EFAULT; 228 return -EFAULT;
209 key->private.mm = mm; 229 key->private.mm = mm;
210 key->private.address = address; 230 key->private.address = address;
231 get_futex_key_refs(key);
211 return 0; 232 return 0;
212 } 233 }
213 /*
214 * The futex is hashed differently depending on whether
215 * it's in a shared or private mapping. So check vma first.
216 */
217 vma = find_extend_vma(mm, address);
218 if (unlikely(!vma))
219 return -EFAULT;
220 234
221 /* 235again:
222 * Permissions. 236 err = get_user_pages_fast(address, 1, 0, &page);
223 */ 237 if (err < 0)
224 if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ)) 238 return err;
225 return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES; 239
240 lock_page(page);
241 if (!page->mapping) {
242 unlock_page(page);
243 put_page(page);
244 goto again;
245 }
226 246
227 /* 247 /*
228 * Private mappings are handled in a simple way. 248 * Private mappings are handled in a simple way.
229 * 249 *
230 * NOTE: When userspace waits on a MAP_SHARED mapping, even if 250 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
231 * it's a read-only handle, it's expected that futexes attach to 251 * it's a read-only handle, it's expected that futexes attach to
232 * the object not the particular process. Therefore we use 252 * the object not the particular process.
233 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
234 * mappings of _writable_ handles.
235 */ 253 */
236 if (likely(!(vma->vm_flags & VM_MAYSHARE))) { 254 if (PageAnon(page)) {
237 key->both.offset |= FUT_OFF_MMSHARED; /* reference taken on mm */ 255 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
238 key->private.mm = mm; 256 key->private.mm = mm;
239 key->private.address = address; 257 key->private.address = address;
240 return 0; 258 } else {
259 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
260 key->shared.inode = page->mapping->host;
261 key->shared.pgoff = page->index;
241 } 262 }
242 263
243 /* 264 get_futex_key_refs(key);
244 * Linear file mappings are also simple.
245 */
246 key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
247 key->both.offset |= FUT_OFF_INODE; /* inode-based key. */
248 if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
249 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
250 + vma->vm_pgoff);
251 return 0;
252 }
253 265
254 /* 266 unlock_page(page);
255 * We could walk the page table to read the non-linear 267 put_page(page);
256 * pte, and get the page index without fetching the page 268 return 0;
257 * from swap. But that's a lot of code to duplicate here
258 * for a rare case, so we simply fetch the page.
259 */
260 err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
261 if (err >= 0) {
262 key->shared.pgoff =
263 page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
264 put_page(page);
265 return 0;
266 }
267 return err;
268}
269
270/*
271 * Take a reference to the resource addressed by a key.
272 * Can be called while holding spinlocks.
273 *
274 */
275static void get_futex_key_refs(union futex_key *key)
276{
277 if (key->both.ptr == NULL)
278 return;
279 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
280 case FUT_OFF_INODE:
281 atomic_inc(&key->shared.inode->i_count);
282 break;
283 case FUT_OFF_MMSHARED:
284 atomic_inc(&key->private.mm->mm_count);
285 break;
286 }
287} 269}
288 270
289/* 271static inline
290 * Drop a reference to the resource addressed by a key. 272void put_futex_key(int fshared, union futex_key *key)
291 * The hash bucket spinlock must not be held.
292 */
293static void drop_futex_key_refs(union futex_key *key)
294{ 273{
295 if (!key->both.ptr) 274 drop_futex_key_refs(key);
296 return;
297 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
298 case FUT_OFF_INODE:
299 iput(key->shared.inode);
300 break;
301 case FUT_OFF_MMSHARED:
302 mmdrop(key->private.mm);
303 break;
304 }
305} 275}
306 276
307static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval) 277static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
@@ -328,10 +298,8 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
328 298
329/* 299/*
330 * Fault handling. 300 * Fault handling.
331 * if fshared is non NULL, current->mm->mmap_sem is already held
332 */ 301 */
333static int futex_handle_fault(unsigned long address, 302static int futex_handle_fault(unsigned long address, int attempt)
334 struct rw_semaphore *fshared, int attempt)
335{ 303{
336 struct vm_area_struct * vma; 304 struct vm_area_struct * vma;
337 struct mm_struct *mm = current->mm; 305 struct mm_struct *mm = current->mm;
@@ -340,8 +308,7 @@ static int futex_handle_fault(unsigned long address,
340 if (attempt > 2) 308 if (attempt > 2)
341 return ret; 309 return ret;
342 310
343 if (!fshared) 311 down_read(&mm->mmap_sem);
344 down_read(&mm->mmap_sem);
345 vma = find_vma(mm, address); 312 vma = find_vma(mm, address);
346 if (vma && address >= vma->vm_start && 313 if (vma && address >= vma->vm_start &&
347 (vma->vm_flags & VM_WRITE)) { 314 (vma->vm_flags & VM_WRITE)) {
@@ -361,8 +328,7 @@ static int futex_handle_fault(unsigned long address,
361 current->min_flt++; 328 current->min_flt++;
362 } 329 }
363 } 330 }
364 if (!fshared) 331 up_read(&mm->mmap_sem);
365 up_read(&mm->mmap_sem);
366 return ret; 332 return ret;
367} 333}
368 334
@@ -385,6 +351,7 @@ static int refill_pi_state_cache(void)
385 /* pi_mutex gets initialized later */ 351 /* pi_mutex gets initialized later */
386 pi_state->owner = NULL; 352 pi_state->owner = NULL;
387 atomic_set(&pi_state->refcount, 1); 353 atomic_set(&pi_state->refcount, 1);
354 pi_state->key = FUTEX_KEY_INIT;
388 355
389 current->pi_state_cache = pi_state; 356 current->pi_state_cache = pi_state;
390 357
@@ -469,7 +436,7 @@ void exit_pi_state_list(struct task_struct *curr)
469 struct list_head *next, *head = &curr->pi_state_list; 436 struct list_head *next, *head = &curr->pi_state_list;
470 struct futex_pi_state *pi_state; 437 struct futex_pi_state *pi_state;
471 struct futex_hash_bucket *hb; 438 struct futex_hash_bucket *hb;
472 union futex_key key; 439 union futex_key key = FUTEX_KEY_INIT;
473 440
474 if (!futex_cmpxchg_enabled) 441 if (!futex_cmpxchg_enabled)
475 return; 442 return;
@@ -614,7 +581,7 @@ static void wake_futex(struct futex_q *q)
614 * The lock in wake_up_all() is a crucial memory barrier after the 581 * The lock in wake_up_all() is a crucial memory barrier after the
615 * plist_del() and also before assigning to q->lock_ptr. 582 * plist_del() and also before assigning to q->lock_ptr.
616 */ 583 */
617 wake_up_all(&q->waiters); 584 wake_up(&q->waiter);
618 /* 585 /*
619 * The waiting task can free the futex_q as soon as this is written, 586 * The waiting task can free the futex_q as soon as this is written,
620 * without taking any locks. This must come last. 587 * without taking any locks. This must come last.
@@ -726,20 +693,17 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
726 * Wake up all waiters hashed on the physical page that is mapped 693 * Wake up all waiters hashed on the physical page that is mapped
727 * to this virtual address: 694 * to this virtual address:
728 */ 695 */
729static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared, 696static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
730 int nr_wake, u32 bitset)
731{ 697{
732 struct futex_hash_bucket *hb; 698 struct futex_hash_bucket *hb;
733 struct futex_q *this, *next; 699 struct futex_q *this, *next;
734 struct plist_head *head; 700 struct plist_head *head;
735 union futex_key key; 701 union futex_key key = FUTEX_KEY_INIT;
736 int ret; 702 int ret;
737 703
738 if (!bitset) 704 if (!bitset)
739 return -EINVAL; 705 return -EINVAL;
740 706
741 futex_lock_mm(fshared);
742
743 ret = get_futex_key(uaddr, fshared, &key); 707 ret = get_futex_key(uaddr, fshared, &key);
744 if (unlikely(ret != 0)) 708 if (unlikely(ret != 0))
745 goto out; 709 goto out;
@@ -767,7 +731,7 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
767 731
768 spin_unlock(&hb->lock); 732 spin_unlock(&hb->lock);
769out: 733out:
770 futex_unlock_mm(fshared); 734 put_futex_key(fshared, &key);
771 return ret; 735 return ret;
772} 736}
773 737
@@ -776,19 +740,16 @@ out:
776 * to this virtual address: 740 * to this virtual address:
777 */ 741 */
778static int 742static int
779futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared, 743futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
780 u32 __user *uaddr2,
781 int nr_wake, int nr_wake2, int op) 744 int nr_wake, int nr_wake2, int op)
782{ 745{
783 union futex_key key1, key2; 746 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
784 struct futex_hash_bucket *hb1, *hb2; 747 struct futex_hash_bucket *hb1, *hb2;
785 struct plist_head *head; 748 struct plist_head *head;
786 struct futex_q *this, *next; 749 struct futex_q *this, *next;
787 int ret, op_ret, attempt = 0; 750 int ret, op_ret, attempt = 0;
788 751
789retryfull: 752retryfull:
790 futex_lock_mm(fshared);
791
792 ret = get_futex_key(uaddr1, fshared, &key1); 753 ret = get_futex_key(uaddr1, fshared, &key1);
793 if (unlikely(ret != 0)) 754 if (unlikely(ret != 0))
794 goto out; 755 goto out;
@@ -833,18 +794,12 @@ retry:
833 */ 794 */
834 if (attempt++) { 795 if (attempt++) {
835 ret = futex_handle_fault((unsigned long)uaddr2, 796 ret = futex_handle_fault((unsigned long)uaddr2,
836 fshared, attempt); 797 attempt);
837 if (ret) 798 if (ret)
838 goto out; 799 goto out;
839 goto retry; 800 goto retry;
840 } 801 }
841 802
842 /*
843 * If we would have faulted, release mmap_sem,
844 * fault it in and start all over again.
845 */
846 futex_unlock_mm(fshared);
847
848 ret = get_user(dummy, uaddr2); 803 ret = get_user(dummy, uaddr2);
849 if (ret) 804 if (ret)
850 return ret; 805 return ret;
@@ -880,7 +835,8 @@ retry:
880 if (hb1 != hb2) 835 if (hb1 != hb2)
881 spin_unlock(&hb2->lock); 836 spin_unlock(&hb2->lock);
882out: 837out:
883 futex_unlock_mm(fshared); 838 put_futex_key(fshared, &key2);
839 put_futex_key(fshared, &key1);
884 840
885 return ret; 841 return ret;
886} 842}
@@ -889,19 +845,16 @@ out:
889 * Requeue all waiters hashed on one physical page to another 845 * Requeue all waiters hashed on one physical page to another
890 * physical page. 846 * physical page.
891 */ 847 */
892static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, 848static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
893 u32 __user *uaddr2,
894 int nr_wake, int nr_requeue, u32 *cmpval) 849 int nr_wake, int nr_requeue, u32 *cmpval)
895{ 850{
896 union futex_key key1, key2; 851 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
897 struct futex_hash_bucket *hb1, *hb2; 852 struct futex_hash_bucket *hb1, *hb2;
898 struct plist_head *head1; 853 struct plist_head *head1;
899 struct futex_q *this, *next; 854 struct futex_q *this, *next;
900 int ret, drop_count = 0; 855 int ret, drop_count = 0;
901 856
902 retry: 857 retry:
903 futex_lock_mm(fshared);
904
905 ret = get_futex_key(uaddr1, fshared, &key1); 858 ret = get_futex_key(uaddr1, fshared, &key1);
906 if (unlikely(ret != 0)) 859 if (unlikely(ret != 0))
907 goto out; 860 goto out;
@@ -924,12 +877,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
924 if (hb1 != hb2) 877 if (hb1 != hb2)
925 spin_unlock(&hb2->lock); 878 spin_unlock(&hb2->lock);
926 879
927 /*
928 * If we would have faulted, release mmap_sem, fault
929 * it in and start all over again.
930 */
931 futex_unlock_mm(fshared);
932
933 ret = get_user(curval, uaddr1); 880 ret = get_user(curval, uaddr1);
934 881
935 if (!ret) 882 if (!ret)
@@ -981,7 +928,8 @@ out_unlock:
981 drop_futex_key_refs(&key1); 928 drop_futex_key_refs(&key1);
982 929
983out: 930out:
984 futex_unlock_mm(fshared); 931 put_futex_key(fshared, &key2);
932 put_futex_key(fshared, &key1);
985 return ret; 933 return ret;
986} 934}
987 935
@@ -990,7 +938,7 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
990{ 938{
991 struct futex_hash_bucket *hb; 939 struct futex_hash_bucket *hb;
992 940
993 init_waitqueue_head(&q->waiters); 941 init_waitqueue_head(&q->waiter);
994 942
995 get_futex_key_refs(&q->key); 943 get_futex_key_refs(&q->key);
996 hb = hash_futex(&q->key); 944 hb = hash_futex(&q->key);
@@ -1103,8 +1051,7 @@ static void unqueue_me_pi(struct futex_q *q)
1103 * private futexes. 1051 * private futexes.
1104 */ 1052 */
1105static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, 1053static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1106 struct task_struct *newowner, 1054 struct task_struct *newowner, int fshared)
1107 struct rw_semaphore *fshared)
1108{ 1055{
1109 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; 1056 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1110 struct futex_pi_state *pi_state = q->pi_state; 1057 struct futex_pi_state *pi_state = q->pi_state;
@@ -1183,7 +1130,7 @@ retry:
1183handle_fault: 1130handle_fault:
1184 spin_unlock(q->lock_ptr); 1131 spin_unlock(q->lock_ptr);
1185 1132
1186 ret = futex_handle_fault((unsigned long)uaddr, fshared, attempt++); 1133 ret = futex_handle_fault((unsigned long)uaddr, attempt++);
1187 1134
1188 spin_lock(q->lock_ptr); 1135 spin_lock(q->lock_ptr);
1189 1136
@@ -1203,12 +1150,13 @@ handle_fault:
1203 * In case we must use restart_block to restart a futex_wait, 1150 * In case we must use restart_block to restart a futex_wait,
1204 * we encode in the 'flags' shared capability 1151 * we encode in the 'flags' shared capability
1205 */ 1152 */
1206#define FLAGS_SHARED 1 1153#define FLAGS_SHARED 0x01
1154#define FLAGS_CLOCKRT 0x02
1207 1155
1208static long futex_wait_restart(struct restart_block *restart); 1156static long futex_wait_restart(struct restart_block *restart);
1209 1157
1210static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, 1158static int futex_wait(u32 __user *uaddr, int fshared,
1211 u32 val, ktime_t *abs_time, u32 bitset) 1159 u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
1212{ 1160{
1213 struct task_struct *curr = current; 1161 struct task_struct *curr = current;
1214 DECLARE_WAITQUEUE(wait, curr); 1162 DECLARE_WAITQUEUE(wait, curr);
@@ -1225,8 +1173,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1225 q.pi_state = NULL; 1173 q.pi_state = NULL;
1226 q.bitset = bitset; 1174 q.bitset = bitset;
1227 retry: 1175 retry:
1228 futex_lock_mm(fshared); 1176 q.key = FUTEX_KEY_INIT;
1229
1230 ret = get_futex_key(uaddr, fshared, &q.key); 1177 ret = get_futex_key(uaddr, fshared, &q.key);
1231 if (unlikely(ret != 0)) 1178 if (unlikely(ret != 0))
1232 goto out_release_sem; 1179 goto out_release_sem;
@@ -1258,12 +1205,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1258 if (unlikely(ret)) { 1205 if (unlikely(ret)) {
1259 queue_unlock(&q, hb); 1206 queue_unlock(&q, hb);
1260 1207
1261 /*
1262 * If we would have faulted, release mmap_sem, fault it in and
1263 * start all over again.
1264 */
1265 futex_unlock_mm(fshared);
1266
1267 ret = get_user(uval, uaddr); 1208 ret = get_user(uval, uaddr);
1268 1209
1269 if (!ret) 1210 if (!ret)
@@ -1278,12 +1219,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1278 queue_me(&q, hb); 1219 queue_me(&q, hb);
1279 1220
1280 /* 1221 /*
1281 * Now the futex is queued and we have checked the data, we
1282 * don't want to hold mmap_sem while we sleep.
1283 */
1284 futex_unlock_mm(fshared);
1285
1286 /*
1287 * There might have been scheduling since the queue_me(), as we 1222 * There might have been scheduling since the queue_me(), as we
1288 * cannot hold a spinlock across the get_user() in case it 1223 * cannot hold a spinlock across the get_user() in case it
1289 * faults, and we cannot just set TASK_INTERRUPTIBLE state when 1224 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
@@ -1294,7 +1229,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1294 1229
1295 /* add_wait_queue is the barrier after __set_current_state. */ 1230 /* add_wait_queue is the barrier after __set_current_state. */
1296 __set_current_state(TASK_INTERRUPTIBLE); 1231 __set_current_state(TASK_INTERRUPTIBLE);
1297 add_wait_queue(&q.waiters, &wait); 1232 add_wait_queue(&q.waiter, &wait);
1298 /* 1233 /*
1299 * !plist_node_empty() is safe here without any lock. 1234 * !plist_node_empty() is safe here without any lock.
1300 * q.lock_ptr != 0 is not safe, because of ordering against wakeup. 1235 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
@@ -1307,8 +1242,10 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1307 slack = current->timer_slack_ns; 1242 slack = current->timer_slack_ns;
1308 if (rt_task(current)) 1243 if (rt_task(current))
1309 slack = 0; 1244 slack = 0;
1310 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, 1245 hrtimer_init_on_stack(&t.timer,
1311 HRTIMER_MODE_ABS); 1246 clockrt ? CLOCK_REALTIME :
1247 CLOCK_MONOTONIC,
1248 HRTIMER_MODE_ABS);
1312 hrtimer_init_sleeper(&t, current); 1249 hrtimer_init_sleeper(&t, current);
1313 hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack); 1250 hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack);
1314 1251
@@ -1363,6 +1300,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1363 1300
1364 if (fshared) 1301 if (fshared)
1365 restart->futex.flags |= FLAGS_SHARED; 1302 restart->futex.flags |= FLAGS_SHARED;
1303 if (clockrt)
1304 restart->futex.flags |= FLAGS_CLOCKRT;
1366 return -ERESTART_RESTARTBLOCK; 1305 return -ERESTART_RESTARTBLOCK;
1367 } 1306 }
1368 1307
@@ -1370,7 +1309,7 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1370 queue_unlock(&q, hb); 1309 queue_unlock(&q, hb);
1371 1310
1372 out_release_sem: 1311 out_release_sem:
1373 futex_unlock_mm(fshared); 1312 put_futex_key(fshared, &q.key);
1374 return ret; 1313 return ret;
1375} 1314}
1376 1315
@@ -1378,15 +1317,16 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
1378static long futex_wait_restart(struct restart_block *restart) 1317static long futex_wait_restart(struct restart_block *restart)
1379{ 1318{
1380 u32 __user *uaddr = (u32 __user *)restart->futex.uaddr; 1319 u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
1381 struct rw_semaphore *fshared = NULL; 1320 int fshared = 0;
1382 ktime_t t; 1321 ktime_t t;
1383 1322
1384 t.tv64 = restart->futex.time; 1323 t.tv64 = restart->futex.time;
1385 restart->fn = do_no_restart_syscall; 1324 restart->fn = do_no_restart_syscall;
1386 if (restart->futex.flags & FLAGS_SHARED) 1325 if (restart->futex.flags & FLAGS_SHARED)
1387 fshared = &current->mm->mmap_sem; 1326 fshared = 1;
1388 return (long)futex_wait(uaddr, fshared, restart->futex.val, &t, 1327 return (long)futex_wait(uaddr, fshared, restart->futex.val, &t,
1389 restart->futex.bitset); 1328 restart->futex.bitset,
1329 restart->futex.flags & FLAGS_CLOCKRT);
1390} 1330}
1391 1331
1392 1332
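
The hunks above replace the old rw_semaphore-based 'fshared' handling with a plain int and fold the wait's attributes into restart->futex.flags as bit flags: FLAGS_SHARED becomes 0x01 and the new FLAGS_CLOCKRT 0x02 records that the caller asked for a CLOCK_REALTIME timeout. A minimal userspace-style sketch of that encode/decode round trip follows; the struct and helper names are hypothetical stand-ins, not the kernel's struct restart_block.

#include <stdio.h>

#define FLAGS_SHARED  0x01	/* mirrors the kernel's FLAGS_SHARED */
#define FLAGS_CLOCKRT 0x02	/* mirrors the new FLAGS_CLOCKRT */

/* hypothetical stand-in for the futex part of struct restart_block */
struct futex_restart {
	unsigned int flags;
};

static void encode(struct futex_restart *r, int fshared, int clockrt)
{
	r->flags = 0;
	if (fshared)
		r->flags |= FLAGS_SHARED;	/* same test futex_wait() does */
	if (clockrt)
		r->flags |= FLAGS_CLOCKRT;
}

int main(void)
{
	struct futex_restart r;

	encode(&r, 1, 1);
	/* decode the same way futex_wait_restart() above does */
	printf("shared=%d clockrt=%d\n",
	       !!(r.flags & FLAGS_SHARED), !!(r.flags & FLAGS_CLOCKRT));
	return 0;
}
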
@@ -1396,7 +1336,7 @@ static long futex_wait_restart(struct restart_block *restart)
1396 * if there are waiters then it will block, it does PI, etc. (Due to 1336 * if there are waiters then it will block, it does PI, etc. (Due to
1397 * races the kernel might see a 0 value of the futex too.) 1337 * races the kernel might see a 0 value of the futex too.)
1398 */ 1338 */
1399static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, 1339static int futex_lock_pi(u32 __user *uaddr, int fshared,
1400 int detect, ktime_t *time, int trylock) 1340 int detect, ktime_t *time, int trylock)
1401{ 1341{
1402 struct hrtimer_sleeper timeout, *to = NULL; 1342 struct hrtimer_sleeper timeout, *to = NULL;
@@ -1419,8 +1359,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1419 1359
1420 q.pi_state = NULL; 1360 q.pi_state = NULL;
1421 retry: 1361 retry:
1422 futex_lock_mm(fshared); 1362 q.key = FUTEX_KEY_INIT;
1423
1424 ret = get_futex_key(uaddr, fshared, &q.key); 1363 ret = get_futex_key(uaddr, fshared, &q.key);
1425 if (unlikely(ret != 0)) 1364 if (unlikely(ret != 0))
1426 goto out_release_sem; 1365 goto out_release_sem;
@@ -1509,7 +1448,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1509 * exit to complete. 1448 * exit to complete.
1510 */ 1449 */
1511 queue_unlock(&q, hb); 1450 queue_unlock(&q, hb);
1512 futex_unlock_mm(fshared);
1513 cond_resched(); 1451 cond_resched();
1514 goto retry; 1452 goto retry;
1515 1453
@@ -1541,12 +1479,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1541 */ 1479 */
1542 queue_me(&q, hb); 1480 queue_me(&q, hb);
1543 1481
1544 /*
1545 * Now the futex is queued and we have checked the data, we
1546 * don't want to hold mmap_sem while we sleep.
1547 */
1548 futex_unlock_mm(fshared);
1549
1550 WARN_ON(!q.pi_state); 1482 WARN_ON(!q.pi_state);
1551 /* 1483 /*
1552 * Block on the PI mutex: 1484 * Block on the PI mutex:
@@ -1559,7 +1491,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1559 ret = ret ? 0 : -EWOULDBLOCK; 1491 ret = ret ? 0 : -EWOULDBLOCK;
1560 } 1492 }
1561 1493
1562 futex_lock_mm(fshared);
1563 spin_lock(q.lock_ptr); 1494 spin_lock(q.lock_ptr);
1564 1495
1565 if (!ret) { 1496 if (!ret) {
@@ -1625,7 +1556,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1625 1556
1626 /* Unqueue and drop the lock */ 1557 /* Unqueue and drop the lock */
1627 unqueue_me_pi(&q); 1558 unqueue_me_pi(&q);
1628 futex_unlock_mm(fshared);
1629 1559
1630 if (to) 1560 if (to)
1631 destroy_hrtimer_on_stack(&to->timer); 1561 destroy_hrtimer_on_stack(&to->timer);
@@ -1635,34 +1565,30 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1635 queue_unlock(&q, hb); 1565 queue_unlock(&q, hb);
1636 1566
1637 out_release_sem: 1567 out_release_sem:
1638 futex_unlock_mm(fshared); 1568 put_futex_key(fshared, &q.key);
1639 if (to) 1569 if (to)
1640 destroy_hrtimer_on_stack(&to->timer); 1570 destroy_hrtimer_on_stack(&to->timer);
1641 return ret; 1571 return ret;
1642 1572
1643 uaddr_faulted: 1573 uaddr_faulted:
1644 /* 1574 /*
1645 * We have to r/w *(int __user *)uaddr, but we can't modify it 1575 * We have to r/w *(int __user *)uaddr, and we have to modify it
1646 * non-atomically. Therefore, if get_user below is not 1576 * atomically. Therefore, if we continue to fault after get_user()
1647 * enough, we need to handle the fault ourselves, while 1577 * below, we need to handle the fault ourselves, while still holding
1648 * still holding the mmap_sem. 1578 * the mmap_sem. This can occur if the uaddr is under contention as
1649 * 1579 * we have to drop the mmap_sem in order to call get_user().
1650 * ... and hb->lock. :-) --ANK
1651 */ 1580 */
1652 queue_unlock(&q, hb); 1581 queue_unlock(&q, hb);
1653 1582
1654 if (attempt++) { 1583 if (attempt++) {
1655 ret = futex_handle_fault((unsigned long)uaddr, fshared, 1584 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1656 attempt);
1657 if (ret) 1585 if (ret)
1658 goto out_release_sem; 1586 goto out_release_sem;
1659 goto retry_unlocked; 1587 goto retry_unlocked;
1660 } 1588 }
1661 1589
1662 futex_unlock_mm(fshared);
1663
1664 ret = get_user(uval, uaddr); 1590 ret = get_user(uval, uaddr);
1665 if (!ret && (uval != -EFAULT)) 1591 if (!ret)
1666 goto retry; 1592 goto retry;
1667 1593
1668 if (to) 1594 if (to)
@@ -1675,13 +1601,13 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
1675 * This is the in-kernel slowpath: we look up the PI state (if any), 1601 * This is the in-kernel slowpath: we look up the PI state (if any),
1676 * and do the rt-mutex unlock. 1602 * and do the rt-mutex unlock.
1677 */ 1603 */
1678static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) 1604static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1679{ 1605{
1680 struct futex_hash_bucket *hb; 1606 struct futex_hash_bucket *hb;
1681 struct futex_q *this, *next; 1607 struct futex_q *this, *next;
1682 u32 uval; 1608 u32 uval;
1683 struct plist_head *head; 1609 struct plist_head *head;
1684 union futex_key key; 1610 union futex_key key = FUTEX_KEY_INIT;
1685 int ret, attempt = 0; 1611 int ret, attempt = 0;
1686 1612
1687retry: 1613retry:
@@ -1692,10 +1618,6 @@ retry:
1692 */ 1618 */
1693 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) 1619 if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1694 return -EPERM; 1620 return -EPERM;
1695 /*
1696 * First take all the futex related locks:
1697 */
1698 futex_lock_mm(fshared);
1699 1621
1700 ret = get_futex_key(uaddr, fshared, &key); 1622 ret = get_futex_key(uaddr, fshared, &key);
1701 if (unlikely(ret != 0)) 1623 if (unlikely(ret != 0))
@@ -1754,34 +1676,30 @@ retry_unlocked:
1754out_unlock: 1676out_unlock:
1755 spin_unlock(&hb->lock); 1677 spin_unlock(&hb->lock);
1756out: 1678out:
1757 futex_unlock_mm(fshared); 1679 put_futex_key(fshared, &key);
1758 1680
1759 return ret; 1681 return ret;
1760 1682
1761pi_faulted: 1683pi_faulted:
1762 /* 1684 /*
1763 * We have to r/w *(int __user *)uaddr, but we can't modify it 1685 * We have to r/w *(int __user *)uaddr, and we have to modify it
1764 * non-atomically. Therefore, if get_user below is not 1686 * atomically. Therefore, if we continue to fault after get_user()
1765 * enough, we need to handle the fault ourselves, while 1687 * below, we need to handle the fault ourselves, while still holding
1766 * still holding the mmap_sem. 1688 * the mmap_sem. This can occur if the uaddr is under contention as
1767 * 1689 * we have to drop the mmap_sem in order to call get_user().
1768 * ... and hb->lock. --ANK
1769 */ 1690 */
1770 spin_unlock(&hb->lock); 1691 spin_unlock(&hb->lock);
1771 1692
1772 if (attempt++) { 1693 if (attempt++) {
1773 ret = futex_handle_fault((unsigned long)uaddr, fshared, 1694 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1774 attempt);
1775 if (ret) 1695 if (ret)
1776 goto out; 1696 goto out;
1777 uval = 0; 1697 uval = 0;
1778 goto retry_unlocked; 1698 goto retry_unlocked;
1779 } 1699 }
1780 1700
1781 futex_unlock_mm(fshared);
1782
1783 ret = get_user(uval, uaddr); 1701 ret = get_user(uval, uaddr);
1784 if (!ret && (uval != -EFAULT)) 1702 if (!ret)
1785 goto retry; 1703 goto retry;
1786 1704
1787 return ret; 1705 return ret;
@@ -1908,8 +1826,7 @@ retry:
1908 * PI futexes happens in exit_pi_state(): 1826 * PI futexes happens in exit_pi_state():
1909 */ 1827 */
1910 if (!pi && (uval & FUTEX_WAITERS)) 1828 if (!pi && (uval & FUTEX_WAITERS))
1911 futex_wake(uaddr, &curr->mm->mmap_sem, 1, 1829 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
1912 FUTEX_BITSET_MATCH_ANY);
1913 } 1830 }
1914 return 0; 1831 return 0;
1915} 1832}
@@ -2003,18 +1920,22 @@ void exit_robust_list(struct task_struct *curr)
2003long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, 1920long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2004 u32 __user *uaddr2, u32 val2, u32 val3) 1921 u32 __user *uaddr2, u32 val2, u32 val3)
2005{ 1922{
2006 int ret = -ENOSYS; 1923 int clockrt, ret = -ENOSYS;
2007 int cmd = op & FUTEX_CMD_MASK; 1924 int cmd = op & FUTEX_CMD_MASK;
2008 struct rw_semaphore *fshared = NULL; 1925 int fshared = 0;
2009 1926
2010 if (!(op & FUTEX_PRIVATE_FLAG)) 1927 if (!(op & FUTEX_PRIVATE_FLAG))
2011 fshared = &current->mm->mmap_sem; 1928 fshared = 1;
1929
1930 clockrt = op & FUTEX_CLOCK_REALTIME;
1931 if (clockrt && cmd != FUTEX_WAIT_BITSET)
1932 return -ENOSYS;
2012 1933
2013 switch (cmd) { 1934 switch (cmd) {
2014 case FUTEX_WAIT: 1935 case FUTEX_WAIT:
2015 val3 = FUTEX_BITSET_MATCH_ANY; 1936 val3 = FUTEX_BITSET_MATCH_ANY;
2016 case FUTEX_WAIT_BITSET: 1937 case FUTEX_WAIT_BITSET:
2017 ret = futex_wait(uaddr, fshared, val, timeout, val3); 1938 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
2018 break; 1939 break;
2019 case FUTEX_WAKE: 1940 case FUTEX_WAKE:
2020 val3 = FUTEX_BITSET_MATCH_ANY; 1941 val3 = FUTEX_BITSET_MATCH_ANY;
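
do_futex() now accepts FUTEX_CLOCK_REALTIME, but only for FUTEX_WAIT_BITSET; every other command gets -ENOSYS. The sketch below is an illustrative userspace check of that behaviour, not part of the patch: it waits on a private futex with an absolute CLOCK_REALTIME deadline one second out and expects ETIMEDOUT. It assumes a kernel and <linux/futex.h> new enough to define FUTEX_CLOCK_REALTIME; on older kernels the call fails with ENOSYS, which is exactly the check added above.

#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static uint32_t futex_word;	/* stays 0, so the wait blocks until the deadline */

int main(void)
{
	struct timespec deadline;
	long ret;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;	/* FUTEX_WAIT_BITSET takes an absolute timeout */

	ret = syscall(SYS_futex, &futex_word,
		      FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME,
		      0 /* expected value */, &deadline,
		      NULL, FUTEX_BITSET_MATCH_ANY);

	if (ret == -1 && errno == ETIMEDOUT)
		printf("timed out on the CLOCK_REALTIME deadline, as expected\n");
	else
		printf("futex returned %ld (%s)\n", ret, strerror(errno));
	return 0;
}
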
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 47e63349d1b2..eb2bfefa6dcc 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -32,7 +32,6 @@
32 */ 32 */
33 33
34#include <linux/cpu.h> 34#include <linux/cpu.h>
35#include <linux/irq.h>
36#include <linux/module.h> 35#include <linux/module.h>
37#include <linux/percpu.h> 36#include <linux/percpu.h>
38#include <linux/hrtimer.h> 37#include <linux/hrtimer.h>
@@ -442,22 +441,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
442static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { } 441static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
443#endif 442#endif
444 443
445/*
446 * Check, whether the timer is on the callback pending list
447 */
448static inline int hrtimer_cb_pending(const struct hrtimer *timer)
449{
450 return timer->state & HRTIMER_STATE_PENDING;
451}
452
453/*
454 * Remove a timer from the callback pending list
455 */
456static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
457{
458 list_del_init(&timer->cb_entry);
459}
460
461/* High resolution timer related functions */ 444/* High resolution timer related functions */
462#ifdef CONFIG_HIGH_RES_TIMERS 445#ifdef CONFIG_HIGH_RES_TIMERS
463 446
@@ -651,6 +634,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
651{ 634{
652} 635}
653 636
637static void __run_hrtimer(struct hrtimer *timer);
638
654/* 639/*
655 * When High resolution timers are active, try to reprogram. Note, that in case 640 * When High resolution timers are active, try to reprogram. Note, that in case
656 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry 641 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,31 +646,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
661 struct hrtimer_clock_base *base) 646 struct hrtimer_clock_base *base)
662{ 647{
663 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { 648 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
664 649 /*
665 /* Timer is expired, act upon the callback mode */ 650 * XXX: recursion check?
666 switch(timer->cb_mode) { 651 * hrtimer_forward() should round up with timer granularity
667 case HRTIMER_CB_IRQSAFE_PERCPU: 652 * so that we never get into inf recursion here,
668 case HRTIMER_CB_IRQSAFE_UNLOCKED: 653 * it doesn't do that though
669 /* 654 */
670 * This is solely for the sched tick emulation with 655 __run_hrtimer(timer);
671 * dynamic tick support to ensure that we do not 656 return 1;
672 * restart the tick right on the edge and end up with
673 * the tick timer in the softirq ! The calling site
674 * takes care of this. Also used for hrtimer sleeper !
675 */
676 debug_hrtimer_deactivate(timer);
677 return 1;
678 case HRTIMER_CB_SOFTIRQ:
679 /*
680 * Move everything else into the softirq pending list !
681 */
682 list_add_tail(&timer->cb_entry,
683 &base->cpu_base->cb_pending);
684 timer->state = HRTIMER_STATE_PENDING;
685 return 1;
686 default:
687 BUG();
688 }
689 } 657 }
690 return 0; 658 return 0;
691} 659}
@@ -724,11 +692,6 @@ static int hrtimer_switch_to_hres(void)
724 return 1; 692 return 1;
725} 693}
726 694
727static inline void hrtimer_raise_softirq(void)
728{
729 raise_softirq(HRTIMER_SOFTIRQ);
730}
731
732#else 695#else
733 696
734static inline int hrtimer_hres_active(void) { return 0; } 697static inline int hrtimer_hres_active(void) { return 0; }
@@ -747,7 +710,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
747{ 710{
748 return 0; 711 return 0;
749} 712}
750static inline void hrtimer_raise_softirq(void) { }
751 713
752#endif /* CONFIG_HIGH_RES_TIMERS */ 714#endif /* CONFIG_HIGH_RES_TIMERS */
753 715
@@ -890,10 +852,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
890 struct hrtimer_clock_base *base, 852 struct hrtimer_clock_base *base,
891 unsigned long newstate, int reprogram) 853 unsigned long newstate, int reprogram)
892{ 854{
893 /* High res. callback list. NOP for !HIGHRES */ 855 if (timer->state & HRTIMER_STATE_ENQUEUED) {
894 if (hrtimer_cb_pending(timer))
895 hrtimer_remove_cb_pending(timer);
896 else {
897 /* 856 /*
898 * Remove the timer from the rbtree and replace the 857 * Remove the timer from the rbtree and replace the
899 * first entry pointer if necessary. 858 * first entry pointer if necessary.
@@ -953,7 +912,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
953{ 912{
954 struct hrtimer_clock_base *base, *new_base; 913 struct hrtimer_clock_base *base, *new_base;
955 unsigned long flags; 914 unsigned long flags;
956 int ret, raise; 915 int ret;
957 916
958 base = lock_hrtimer_base(timer, &flags); 917 base = lock_hrtimer_base(timer, &flags);
959 918
@@ -988,26 +947,8 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
988 enqueue_hrtimer(timer, new_base, 947 enqueue_hrtimer(timer, new_base,
989 new_base->cpu_base == &__get_cpu_var(hrtimer_bases)); 948 new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
990 949
991 /*
992 * The timer may be expired and moved to the cb_pending
993 * list. We can not raise the softirq with base lock held due
994 * to a possible deadlock with runqueue lock.
995 */
996 raise = timer->state == HRTIMER_STATE_PENDING;
997
998 /*
999 * We use preempt_disable to prevent this task from migrating after
1000 * setting up the softirq and raising it. Otherwise, if we migrate
1001 * we will raise the softirq on the wrong CPU.
1002 */
1003 preempt_disable();
1004
1005 unlock_hrtimer_base(timer, &flags); 950 unlock_hrtimer_base(timer, &flags);
1006 951
1007 if (raise)
1008 hrtimer_raise_softirq();
1009 preempt_enable();
1010
1011 return ret; 952 return ret;
1012} 953}
1013EXPORT_SYMBOL_GPL(hrtimer_start_range_ns); 954EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
@@ -1192,75 +1133,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
1192} 1133}
1193EXPORT_SYMBOL_GPL(hrtimer_get_res); 1134EXPORT_SYMBOL_GPL(hrtimer_get_res);
1194 1135
1195static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
1196{
1197 spin_lock_irq(&cpu_base->lock);
1198
1199 while (!list_empty(&cpu_base->cb_pending)) {
1200 enum hrtimer_restart (*fn)(struct hrtimer *);
1201 struct hrtimer *timer;
1202 int restart;
1203 int emulate_hardirq_ctx = 0;
1204
1205 timer = list_entry(cpu_base->cb_pending.next,
1206 struct hrtimer, cb_entry);
1207
1208 debug_hrtimer_deactivate(timer);
1209 timer_stats_account_hrtimer(timer);
1210
1211 fn = timer->function;
1212 /*
1213 * A timer might have been added to the cb_pending list
1214 * when it was migrated during a cpu-offline operation.
1215 * Emulate hardirq context for such timers.
1216 */
1217 if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
1218 timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED)
1219 emulate_hardirq_ctx = 1;
1220
1221 __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
1222 spin_unlock_irq(&cpu_base->lock);
1223
1224 if (unlikely(emulate_hardirq_ctx)) {
1225 local_irq_disable();
1226 restart = fn(timer);
1227 local_irq_enable();
1228 } else
1229 restart = fn(timer);
1230
1231 spin_lock_irq(&cpu_base->lock);
1232
1233 timer->state &= ~HRTIMER_STATE_CALLBACK;
1234 if (restart == HRTIMER_RESTART) {
1235 BUG_ON(hrtimer_active(timer));
1236 /*
1237 * Enqueue the timer, allow reprogramming of the event
1238 * device
1239 */
1240 enqueue_hrtimer(timer, timer->base, 1);
1241 } else if (hrtimer_active(timer)) {
1242 /*
1243 * If the timer was rearmed on another CPU, reprogram
1244 * the event device.
1245 */
1246 struct hrtimer_clock_base *base = timer->base;
1247
1248 if (base->first == &timer->node &&
1249 hrtimer_reprogram(timer, base)) {
1250 /*
1251 * Timer is expired. Thus move it from tree to
1252 * pending list again.
1253 */
1254 __remove_hrtimer(timer, base,
1255 HRTIMER_STATE_PENDING, 0);
1256 list_add_tail(&timer->cb_entry,
1257 &base->cpu_base->cb_pending);
1258 }
1259 }
1260 }
1261 spin_unlock_irq(&cpu_base->lock);
1262}
1263
1264static void __run_hrtimer(struct hrtimer *timer) 1136static void __run_hrtimer(struct hrtimer *timer)
1265{ 1137{
1266 struct hrtimer_clock_base *base = timer->base; 1138 struct hrtimer_clock_base *base = timer->base;
@@ -1268,25 +1140,21 @@ static void __run_hrtimer(struct hrtimer *timer)
1268 enum hrtimer_restart (*fn)(struct hrtimer *); 1140 enum hrtimer_restart (*fn)(struct hrtimer *);
1269 int restart; 1141 int restart;
1270 1142
1143 WARN_ON(!irqs_disabled());
1144
1271 debug_hrtimer_deactivate(timer); 1145 debug_hrtimer_deactivate(timer);
1272 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); 1146 __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
1273 timer_stats_account_hrtimer(timer); 1147 timer_stats_account_hrtimer(timer);
1274
1275 fn = timer->function; 1148 fn = timer->function;
1276 if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU || 1149
1277 timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) { 1150 /*
1278 /* 1151 * Because we run timers from hardirq context, there is no chance
1279 * Used for scheduler timers, avoid lock inversion with 1152 * they get migrated to another cpu, therefore it's safe to unlock
1280 * rq->lock and tasklist_lock. 1153 * the timer base.
1281 * 1154 */
1282 * These timers are required to deal with enqueue expiry 1155 spin_unlock(&cpu_base->lock);
1283 * themselves and are not allowed to migrate. 1156 restart = fn(timer);
1284 */ 1157 spin_lock(&cpu_base->lock);
1285 spin_unlock(&cpu_base->lock);
1286 restart = fn(timer);
1287 spin_lock(&cpu_base->lock);
1288 } else
1289 restart = fn(timer);
1290 1158
1291 /* 1159 /*
1292 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid 1160 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
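
With the cb_pending list gone, __run_hrtimer() above runs every callback from hardirq context and simply drops cpu_base->lock around the handler, re-taking it before the queue is touched again; the WARN_ON(!irqs_disabled()) documents the new invariant. A rough pthread sketch of that "unlock around the callback" shape follows; it is illustrative only (a mutex instead of the base spinlock, a hypothetical struct), not the kernel API. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>

struct timer {
	int (*fn)(struct timer *);	/* callback, may take other locks */
	int queued;
};

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

/* run one expired timer; caller holds base_lock, as in __run_hrtimer() */
static void run_timer(struct timer *t)
{
	int restart;

	t->queued = 0;				/* dequeue while still locked */

	pthread_mutex_unlock(&base_lock);	/* drop the lock around the callback */
	restart = t->fn(t);
	pthread_mutex_lock(&base_lock);		/* re-take before touching the queue */

	if (restart)
		t->queued = 1;			/* requeue under the lock */
}

static int say_hi(struct timer *t)
{
	(void)t;
	printf("callback ran without the base lock held\n");
	return 0;
}

int main(void)
{
	struct timer t = { .fn = say_hi, .queued = 1 };

	pthread_mutex_lock(&base_lock);
	run_timer(&t);
	pthread_mutex_unlock(&base_lock);
	return 0;
}
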
@@ -1311,7 +1179,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1311 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); 1179 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1312 struct hrtimer_clock_base *base; 1180 struct hrtimer_clock_base *base;
1313 ktime_t expires_next, now; 1181 ktime_t expires_next, now;
1314 int i, raise = 0; 1182 int i;
1315 1183
1316 BUG_ON(!cpu_base->hres_active); 1184 BUG_ON(!cpu_base->hres_active);
1317 cpu_base->nr_events++; 1185 cpu_base->nr_events++;
@@ -1360,16 +1228,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1360 break; 1228 break;
1361 } 1229 }
1362 1230
1363 /* Move softirq callbacks to the pending list */
1364 if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
1365 __remove_hrtimer(timer, base,
1366 HRTIMER_STATE_PENDING, 0);
1367 list_add_tail(&timer->cb_entry,
1368 &base->cpu_base->cb_pending);
1369 raise = 1;
1370 continue;
1371 }
1372
1373 __run_hrtimer(timer); 1231 __run_hrtimer(timer);
1374 } 1232 }
1375 spin_unlock(&cpu_base->lock); 1233 spin_unlock(&cpu_base->lock);
@@ -1383,10 +1241,6 @@ void hrtimer_interrupt(struct clock_event_device *dev)
1383 if (tick_program_event(expires_next, 0)) 1241 if (tick_program_event(expires_next, 0))
1384 goto retry; 1242 goto retry;
1385 } 1243 }
1386
1387 /* Raise softirq ? */
1388 if (raise)
1389 raise_softirq(HRTIMER_SOFTIRQ);
1390} 1244}
1391 1245
1392/** 1246/**
@@ -1413,11 +1267,6 @@ void hrtimer_peek_ahead_timers(void)
1413 local_irq_restore(flags); 1267 local_irq_restore(flags);
1414} 1268}
1415 1269
1416static void run_hrtimer_softirq(struct softirq_action *h)
1417{
1418 run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
1419}
1420
1421#endif /* CONFIG_HIGH_RES_TIMERS */ 1270#endif /* CONFIG_HIGH_RES_TIMERS */
1422 1271
1423/* 1272/*
@@ -1429,8 +1278,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
1429 */ 1278 */
1430void hrtimer_run_pending(void) 1279void hrtimer_run_pending(void)
1431{ 1280{
1432 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
1433
1434 if (hrtimer_hres_active()) 1281 if (hrtimer_hres_active())
1435 return; 1282 return;
1436 1283
@@ -1444,8 +1291,6 @@ void hrtimer_run_pending(void)
1444 */ 1291 */
1445 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) 1292 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
1446 hrtimer_switch_to_hres(); 1293 hrtimer_switch_to_hres();
1447
1448 run_hrtimer_pending(cpu_base);
1449} 1294}
1450 1295
1451/* 1296/*
@@ -1482,14 +1327,6 @@ void hrtimer_run_queues(void)
1482 hrtimer_get_expires_tv64(timer)) 1327 hrtimer_get_expires_tv64(timer))
1483 break; 1328 break;
1484 1329
1485 if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
1486 __remove_hrtimer(timer, base,
1487 HRTIMER_STATE_PENDING, 0);
1488 list_add_tail(&timer->cb_entry,
1489 &base->cpu_base->cb_pending);
1490 continue;
1491 }
1492
1493 __run_hrtimer(timer); 1330 __run_hrtimer(timer);
1494 } 1331 }
1495 spin_unlock(&cpu_base->lock); 1332 spin_unlock(&cpu_base->lock);
@@ -1516,9 +1353,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
1516{ 1353{
1517 sl->timer.function = hrtimer_wakeup; 1354 sl->timer.function = hrtimer_wakeup;
1518 sl->task = task; 1355 sl->task = task;
1519#ifdef CONFIG_HIGH_RES_TIMERS
1520 sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
1521#endif
1522} 1356}
1523 1357
1524static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) 1358static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1655,18 +1489,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
1655 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) 1489 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
1656 cpu_base->clock_base[i].cpu_base = cpu_base; 1490 cpu_base->clock_base[i].cpu_base = cpu_base;
1657 1491
1658 INIT_LIST_HEAD(&cpu_base->cb_pending);
1659 hrtimer_init_hres(cpu_base); 1492 hrtimer_init_hres(cpu_base);
1660} 1493}
1661 1494
1662#ifdef CONFIG_HOTPLUG_CPU 1495#ifdef CONFIG_HOTPLUG_CPU
1663 1496
1664static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base, 1497static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1665 struct hrtimer_clock_base *new_base, int dcpu) 1498 struct hrtimer_clock_base *new_base)
1666{ 1499{
1667 struct hrtimer *timer; 1500 struct hrtimer *timer;
1668 struct rb_node *node; 1501 struct rb_node *node;
1669 int raise = 0;
1670 1502
1671 while ((node = rb_first(&old_base->active))) { 1503 while ((node = rb_first(&old_base->active))) {
1672 timer = rb_entry(node, struct hrtimer, node); 1504 timer = rb_entry(node, struct hrtimer, node);
@@ -1674,18 +1506,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1674 debug_hrtimer_deactivate(timer); 1506 debug_hrtimer_deactivate(timer);
1675 1507
1676 /* 1508 /*
1677 * Should not happen. Per CPU timers should be
1678 * canceled _before_ the migration code is called
1679 */
1680 if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
1681 __remove_hrtimer(timer, old_base,
1682 HRTIMER_STATE_INACTIVE, 0);
1683 WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
1684 timer, timer->function, dcpu);
1685 continue;
1686 }
1687
1688 /*
1689 * Mark it as STATE_MIGRATE not INACTIVE otherwise the 1509 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
1690 * timer could be seen as !active and just vanish away 1510 * timer could be seen as !active and just vanish away
1691 * under us on another CPU 1511 * under us on another CPU
@@ -1693,69 +1513,34 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
1693 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); 1513 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
1694 timer->base = new_base; 1514 timer->base = new_base;
1695 /* 1515 /*
1696 * Enqueue the timer. Allow reprogramming of the event device 1516 * Enqueue the timers on the new cpu, but do not reprogram
1517 * the timer as that would enable a deadlock between
1518 * hrtimer_enqueue_reprogram() running the timer and us still
1519 * holding a nested base lock.
1520 *
1521 * Instead we tickle the hrtimer interrupt after the migration
1522 * is done, which will run all expired timers and re-program
1523 * the timer device.
1697 */ 1524 */
1698 enqueue_hrtimer(timer, new_base, 1); 1525 enqueue_hrtimer(timer, new_base, 0);
1699 1526
1700#ifdef CONFIG_HIGH_RES_TIMERS
1701 /*
1702 * Happens with high res enabled when the timer was
1703 * already expired and the callback mode is
1704 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
1705 * enqueue code does not move them to the soft irq
1706 * pending list for performance/latency reasons, but
1707 * in the migration state, we need to do that
1708 * otherwise we end up with a stale timer.
1709 */
1710 if (timer->state == HRTIMER_STATE_MIGRATE) {
1711 timer->state = HRTIMER_STATE_PENDING;
1712 list_add_tail(&timer->cb_entry,
1713 &new_base->cpu_base->cb_pending);
1714 raise = 1;
1715 }
1716#endif
1717 /* Clear the migration state bit */ 1527 /* Clear the migration state bit */
1718 timer->state &= ~HRTIMER_STATE_MIGRATE; 1528 timer->state &= ~HRTIMER_STATE_MIGRATE;
1719 } 1529 }
1720 return raise;
1721}
1722
1723#ifdef CONFIG_HIGH_RES_TIMERS
1724static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
1725 struct hrtimer_cpu_base *new_base)
1726{
1727 struct hrtimer *timer;
1728 int raise = 0;
1729
1730 while (!list_empty(&old_base->cb_pending)) {
1731 timer = list_entry(old_base->cb_pending.next,
1732 struct hrtimer, cb_entry);
1733
1734 __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
1735 timer->base = &new_base->clock_base[timer->base->index];
1736 list_add_tail(&timer->cb_entry, &new_base->cb_pending);
1737 raise = 1;
1738 }
1739 return raise;
1740}
1741#else
1742static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
1743 struct hrtimer_cpu_base *new_base)
1744{
1745 return 0;
1746} 1530}
1747#endif
1748 1531
1749static void migrate_hrtimers(int cpu) 1532static int migrate_hrtimers(int scpu)
1750{ 1533{
1751 struct hrtimer_cpu_base *old_base, *new_base; 1534 struct hrtimer_cpu_base *old_base, *new_base;
1752 int i, raise = 0; 1535 int dcpu, i;
1753 1536
1754 BUG_ON(cpu_online(cpu)); 1537 BUG_ON(cpu_online(scpu));
1755 old_base = &per_cpu(hrtimer_bases, cpu); 1538 old_base = &per_cpu(hrtimer_bases, scpu);
1756 new_base = &get_cpu_var(hrtimer_bases); 1539 new_base = &get_cpu_var(hrtimer_bases);
1757 1540
1758 tick_cancel_sched_timer(cpu); 1541 dcpu = smp_processor_id();
1542
1543 tick_cancel_sched_timer(scpu);
1759 /* 1544 /*
1760 * The caller is globally serialized and nobody else 1545 * The caller is globally serialized and nobody else
1761 * takes two locks at once, deadlock is not possible. 1546 * takes two locks at once, deadlock is not possible.
@@ -1764,41 +1549,47 @@ static void migrate_hrtimers(int cpu)
1764 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1549 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1765 1550
1766 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 1551 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1767 if (migrate_hrtimer_list(&old_base->clock_base[i], 1552 migrate_hrtimer_list(&old_base->clock_base[i],
1768 &new_base->clock_base[i], cpu)) 1553 &new_base->clock_base[i]);
1769 raise = 1;
1770 } 1554 }
1771 1555
1772 if (migrate_hrtimer_pending(old_base, new_base))
1773 raise = 1;
1774
1775 spin_unlock(&old_base->lock); 1556 spin_unlock(&old_base->lock);
1776 spin_unlock_irq(&new_base->lock); 1557 spin_unlock_irq(&new_base->lock);
1777 put_cpu_var(hrtimer_bases); 1558 put_cpu_var(hrtimer_bases);
1778 1559
1779 if (raise) 1560 return dcpu;
1780 hrtimer_raise_softirq(); 1561}
1562
1563static void tickle_timers(void *arg)
1564{
1565 hrtimer_peek_ahead_timers();
1781} 1566}
1567
1782#endif /* CONFIG_HOTPLUG_CPU */ 1568#endif /* CONFIG_HOTPLUG_CPU */
1783 1569
1784static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, 1570static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
1785 unsigned long action, void *hcpu) 1571 unsigned long action, void *hcpu)
1786{ 1572{
1787 unsigned int cpu = (long)hcpu; 1573 int scpu = (long)hcpu;
1788 1574
1789 switch (action) { 1575 switch (action) {
1790 1576
1791 case CPU_UP_PREPARE: 1577 case CPU_UP_PREPARE:
1792 case CPU_UP_PREPARE_FROZEN: 1578 case CPU_UP_PREPARE_FROZEN:
1793 init_hrtimers_cpu(cpu); 1579 init_hrtimers_cpu(scpu);
1794 break; 1580 break;
1795 1581
1796#ifdef CONFIG_HOTPLUG_CPU 1582#ifdef CONFIG_HOTPLUG_CPU
1797 case CPU_DEAD: 1583 case CPU_DEAD:
1798 case CPU_DEAD_FROZEN: 1584 case CPU_DEAD_FROZEN:
1799 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu); 1585 {
1800 migrate_hrtimers(cpu); 1586 int dcpu;
1587
1588 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
1589 dcpu = migrate_hrtimers(scpu);
1590 smp_call_function_single(dcpu, tickle_timers, NULL, 0);
1801 break; 1591 break;
1592 }
1802#endif 1593#endif
1803 1594
1804 default: 1595 default:
@@ -1817,9 +1608,6 @@ void __init hrtimers_init(void)
1817 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, 1608 hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
1818 (void *)(long)smp_processor_id()); 1609 (void *)(long)smp_processor_id());
1819 register_cpu_notifier(&hrtimers_nb); 1610 register_cpu_notifier(&hrtimers_nb);
1820#ifdef CONFIG_HIGH_RES_TIMERS
1821 open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
1822#endif
1823} 1611}
1824 1612
1825/** 1613/**
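
The hotplug path above now works in two steps: migrate_hrtimers() splices the dead CPU's timers into the current CPU's base under both base locks without reprogramming the event device, and the notifier then tickles the destination with smp_call_function_single() so hrtimer_peek_ahead_timers() runs anything that expired in the meantime. The pthread sketch below mirrors only that "splice under both locks, then poke the destination" shape with made-up names; it is not the kernel code. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	int pending;
};

static struct queue dead = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 3 };
static struct queue dest = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

static void *dest_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&dest.lock);
	while (!dest.pending)			/* sleeps until it is "tickled" */
		pthread_cond_wait(&dest.wake, &dest.lock);
	printf("destination picked up %d migrated timers\n", dest.pending);
	dest.pending = 0;
	pthread_mutex_unlock(&dest.lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, dest_worker, NULL);

	/* take both locks, nested like the two base locks, and splice the work */
	pthread_mutex_lock(&dest.lock);
	pthread_mutex_lock(&dead.lock);
	dest.pending += dead.pending;
	dead.pending = 0;
	pthread_mutex_unlock(&dead.lock);

	/* "tickle" the destination so it re-evaluates its queue */
	pthread_cond_signal(&dest.wake);
	pthread_mutex_unlock(&dest.lock);

	pthread_join(tid, NULL);
	return 0;
}
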
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 681c52dbfe22..4dd5b1edac98 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,3 +3,4 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o 3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4obj-$(CONFIG_PROC_FS) += proc.o 4obj-$(CONFIG_PROC_FS) += proc.o
5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
6obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 10b5092e9bfe..f63c706d25e1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -24,9 +24,10 @@
24 */ 24 */
25void dynamic_irq_init(unsigned int irq) 25void dynamic_irq_init(unsigned int irq)
26{ 26{
27 struct irq_desc *desc = irq_to_desc(irq); 27 struct irq_desc *desc;
28 unsigned long flags; 28 unsigned long flags;
29 29
30 desc = irq_to_desc(irq);
30 if (!desc) { 31 if (!desc) {
31 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); 32 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
32 return; 33 return;
@@ -45,7 +46,7 @@ void dynamic_irq_init(unsigned int irq)
45 desc->irq_count = 0; 46 desc->irq_count = 0;
46 desc->irqs_unhandled = 0; 47 desc->irqs_unhandled = 0;
47#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
48 cpus_setall(desc->affinity); 49 cpumask_setall(&desc->affinity);
49#endif 50#endif
50 spin_unlock_irqrestore(&desc->lock, flags); 51 spin_unlock_irqrestore(&desc->lock, flags);
51} 52}
@@ -124,6 +125,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
124 return -ENODEV; 125 return -ENODEV;
125 } 126 }
126 127
128 type &= IRQ_TYPE_SENSE_MASK;
127 if (type == IRQ_TYPE_NONE) 129 if (type == IRQ_TYPE_NONE)
128 return 0; 130 return 0;
129 131
@@ -352,6 +354,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
352 354
353 spin_lock(&desc->lock); 355 spin_lock(&desc->lock);
354 mask_ack_irq(desc, irq); 356 mask_ack_irq(desc, irq);
357 desc = irq_remap_to_desc(irq, desc);
355 358
356 if (unlikely(desc->status & IRQ_INPROGRESS)) 359 if (unlikely(desc->status & IRQ_INPROGRESS))
357 goto out_unlock; 360 goto out_unlock;
@@ -429,6 +432,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
429 desc->status &= ~IRQ_INPROGRESS; 432 desc->status &= ~IRQ_INPROGRESS;
430out: 433out:
431 desc->chip->eoi(irq); 434 desc->chip->eoi(irq);
435 desc = irq_remap_to_desc(irq, desc);
432 436
433 spin_unlock(&desc->lock); 437 spin_unlock(&desc->lock);
434} 438}
@@ -465,12 +469,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
465 !desc->action)) { 469 !desc->action)) {
466 desc->status |= (IRQ_PENDING | IRQ_MASKED); 470 desc->status |= (IRQ_PENDING | IRQ_MASKED);
467 mask_ack_irq(desc, irq); 471 mask_ack_irq(desc, irq);
472 desc = irq_remap_to_desc(irq, desc);
468 goto out_unlock; 473 goto out_unlock;
469 } 474 }
470 kstat_incr_irqs_this_cpu(irq, desc); 475 kstat_incr_irqs_this_cpu(irq, desc);
471 476
472 /* Start handling the irq */ 477 /* Start handling the irq */
473 desc->chip->ack(irq); 478 desc->chip->ack(irq);
479 desc = irq_remap_to_desc(irq, desc);
474 480
475 /* Mark the IRQ currently in progress.*/ 481 /* Mark the IRQ currently in progress.*/
476 desc->status |= IRQ_INPROGRESS; 482 desc->status |= IRQ_INPROGRESS;
@@ -531,8 +537,10 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
531 if (!noirqdebug) 537 if (!noirqdebug)
532 note_interrupt(irq, desc, action_ret); 538 note_interrupt(irq, desc, action_ret);
533 539
534 if (desc->chip->eoi) 540 if (desc->chip->eoi) {
535 desc->chip->eoi(irq); 541 desc->chip->eoi(irq);
542 desc = irq_remap_to_desc(irq, desc);
543 }
536} 544}
537 545
538void 546void
@@ -567,8 +575,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
567 575
568 /* Uninstall? */ 576 /* Uninstall? */
569 if (handle == handle_bad_irq) { 577 if (handle == handle_bad_irq) {
570 if (desc->chip != &no_irq_chip) 578 if (desc->chip != &no_irq_chip) {
571 mask_ack_irq(desc, irq); 579 mask_ack_irq(desc, irq);
580 desc = irq_remap_to_desc(irq, desc);
581 }
572 desc->status |= IRQ_DISABLED; 582 desc->status |= IRQ_DISABLED;
573 desc->depth = 1; 583 desc->depth = 1;
574 } 584 }
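
A recurring change in the flow handlers above is the re-read of the descriptor, desc = irq_remap_to_desc(irq, desc), right after ack/eoi/mask_ack: with sparse, migratable descriptors the ack path may have replaced the irq's irq_desc, so the old pointer must not be used afterwards. The userspace sketch below shows only that "re-fetch after an operation that may relocate the object" pattern, with hypothetical names and a plain array standing in for the descriptor registry.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc {
	int irq;
	int acks;
};

static struct desc *registry[4];

/* an operation that may replace the registry entry with a new allocation */
static void ack(int irq)
{
	struct desc *old = registry[irq];
	struct desc *copy = malloc(sizeof(*copy));

	if (!copy)
		abort();
	memcpy(copy, old, sizeof(*copy));
	copy->acks++;
	registry[irq] = copy;	/* the old pointer is stale from here on */
	free(old);
}

/* analogue of irq_remap_to_desc(): always hand back the current pointer */
static struct desc *remap(int irq, struct desc *desc)
{
	(void)desc;
	return registry[irq];
}

int main(void)
{
	struct desc *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	registry[0] = d;
	ack(0);
	d = remap(0, d);	/* re-fetch instead of touching the freed object */
	printf("irq %d acked %d time(s)\n", d->irq, d->acks);
	free(registry[0]);
	return 0;
}
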
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c815b42d0f5b..c20db0be9173 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -15,9 +15,16 @@
15#include <linux/random.h> 15#include <linux/random.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <linux/rculist.h>
19#include <linux/hash.h>
18 20
19#include "internals.h" 21#include "internals.h"
20 22
23/*
24 * lockdep: we want to handle all irq_desc locks as a single lock-class:
25 */
26struct lock_class_key irq_desc_lock_class;
27
21/** 28/**
22 * handle_bad_irq - handle spurious and unhandled irqs 29 * handle_bad_irq - handle spurious and unhandled irqs
23 * @irq: the interrupt number 30 * @irq: the interrupt number
@@ -49,6 +56,150 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
49int nr_irqs = NR_IRQS; 56int nr_irqs = NR_IRQS;
50EXPORT_SYMBOL_GPL(nr_irqs); 57EXPORT_SYMBOL_GPL(nr_irqs);
51 58
59#ifdef CONFIG_SPARSE_IRQ
60static struct irq_desc irq_desc_init = {
61 .irq = -1,
62 .status = IRQ_DISABLED,
63 .chip = &no_irq_chip,
64 .handle_irq = handle_bad_irq,
65 .depth = 1,
66 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
67#ifdef CONFIG_SMP
68 .affinity = CPU_MASK_ALL
69#endif
70};
71
72void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
73{
74 unsigned long bytes;
75 char *ptr;
76 int node;
77
78 /* Compute how many bytes we need per irq and allocate them */
79 bytes = nr * sizeof(unsigned int);
80
81 node = cpu_to_node(cpu);
82 ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
83 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
84
85 if (ptr)
86 desc->kstat_irqs = (unsigned int *)ptr;
87}
88
89static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
90{
91 memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
92
93 spin_lock_init(&desc->lock);
94 desc->irq = irq;
95#ifdef CONFIG_SMP
96 desc->cpu = cpu;
97#endif
98 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
99 init_kstat_irqs(desc, cpu, nr_cpu_ids);
100 if (!desc->kstat_irqs) {
101 printk(KERN_ERR "can not alloc kstat_irqs\n");
102 BUG_ON(1);
103 }
104 arch_init_chip_data(desc, cpu);
105}
106
107/*
108 * Protect the sparse_irqs:
109 */
110DEFINE_SPINLOCK(sparse_irq_lock);
111
112struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
113
114static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
115 [0 ... NR_IRQS_LEGACY-1] = {
116 .irq = -1,
117 .status = IRQ_DISABLED,
118 .chip = &no_irq_chip,
119 .handle_irq = handle_bad_irq,
120 .depth = 1,
121 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
122#ifdef CONFIG_SMP
123 .affinity = CPU_MASK_ALL
124#endif
125 }
126};
127
128/* FIXME: use bootmem alloc ...*/
129static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
130
131int __init early_irq_init(void)
132{
133 struct irq_desc *desc;
134 int legacy_count;
135 int i;
136
137 desc = irq_desc_legacy;
138 legacy_count = ARRAY_SIZE(irq_desc_legacy);
139
140 for (i = 0; i < legacy_count; i++) {
141 desc[i].irq = i;
142 desc[i].kstat_irqs = kstat_irqs_legacy[i];
143 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
144
145 irq_desc_ptrs[i] = desc + i;
146 }
147
148 for (i = legacy_count; i < NR_IRQS; i++)
149 irq_desc_ptrs[i] = NULL;
150
151 return arch_early_irq_init();
152}
153
154struct irq_desc *irq_to_desc(unsigned int irq)
155{
156 return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
157}
158
159struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
160{
161 struct irq_desc *desc;
162 unsigned long flags;
163 int node;
164
165 if (irq >= NR_IRQS) {
166 printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
167 irq, NR_IRQS);
168 WARN_ON(1);
169 return NULL;
170 }
171
172 desc = irq_desc_ptrs[irq];
173 if (desc)
174 return desc;
175
176 spin_lock_irqsave(&sparse_irq_lock, flags);
177
178 /* We have to check it to avoid races with another CPU */
179 desc = irq_desc_ptrs[irq];
180 if (desc)
181 goto out_unlock;
182
183 node = cpu_to_node(cpu);
184 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
185 printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
186 irq, cpu, node);
187 if (!desc) {
188 printk(KERN_ERR "can not alloc irq_desc\n");
189 BUG_ON(1);
190 }
191 init_one_irq_desc(irq, desc, cpu);
192
193 irq_desc_ptrs[irq] = desc;
194
195out_unlock:
196 spin_unlock_irqrestore(&sparse_irq_lock, flags);
197
198 return desc;
199}
200
201#else /* !CONFIG_SPARSE_IRQ */
202
52struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 203struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
53 [0 ... NR_IRQS-1] = { 204 [0 ... NR_IRQS-1] = {
54 .status = IRQ_DISABLED, 205 .status = IRQ_DISABLED,
@@ -62,6 +213,32 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
62 } 213 }
63}; 214};
64 215
216int __init early_irq_init(void)
217{
218 struct irq_desc *desc;
219 int count;
220 int i;
221
222 desc = irq_desc;
223 count = ARRAY_SIZE(irq_desc);
224
225 for (i = 0; i < count; i++)
226 desc[i].irq = i;
227
228 return arch_early_irq_init();
229}
230
231struct irq_desc *irq_to_desc(unsigned int irq)
232{
233 return (irq < NR_IRQS) ? irq_desc + irq : NULL;
234}
235
236struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
237{
238 return irq_to_desc(irq);
239}
240#endif /* !CONFIG_SPARSE_IRQ */
241
65/* 242/*
66 * What should we do if we get a hw irq event on an illegal vector? 243 * What should we do if we get a hw irq event on an illegal vector?
67 * Each architecture has to answer this themself. 244 * Each architecture has to answer this themself.
@@ -179,8 +356,11 @@ unsigned int __do_IRQ(unsigned int irq)
179 /* 356 /*
180 * No locking required for CPU-local interrupts: 357 * No locking required for CPU-local interrupts:
181 */ 358 */
182 if (desc->chip->ack) 359 if (desc->chip->ack) {
183 desc->chip->ack(irq); 360 desc->chip->ack(irq);
361 /* get new one */
362 desc = irq_remap_to_desc(irq, desc);
363 }
184 if (likely(!(desc->status & IRQ_DISABLED))) { 364 if (likely(!(desc->status & IRQ_DISABLED))) {
185 action_ret = handle_IRQ_event(irq, desc->action); 365 action_ret = handle_IRQ_event(irq, desc->action);
186 if (!noirqdebug) 366 if (!noirqdebug)
@@ -191,8 +371,10 @@ unsigned int __do_IRQ(unsigned int irq)
191 } 371 }
192 372
193 spin_lock(&desc->lock); 373 spin_lock(&desc->lock);
194 if (desc->chip->ack) 374 if (desc->chip->ack) {
195 desc->chip->ack(irq); 375 desc->chip->ack(irq);
376 desc = irq_remap_to_desc(irq, desc);
377 }
196 /* 378 /*
197 * REPLAY is when Linux resends an IRQ that was dropped earlier 379 * REPLAY is when Linux resends an IRQ that was dropped earlier
198 * WAITING is used by probe to mark irqs that are being tested 380 * WAITING is used by probe to mark irqs that are being tested
@@ -259,19 +441,22 @@ out:
259} 441}
260#endif 442#endif
261 443
262
263#ifdef CONFIG_TRACE_IRQFLAGS
264/*
265 * lockdep: we want to handle all irq_desc locks as a single lock-class:
266 */
267static struct lock_class_key irq_desc_lock_class;
268
269void early_init_irq_lock_class(void) 444void early_init_irq_lock_class(void)
270{ 445{
271 struct irq_desc *desc; 446 struct irq_desc *desc;
272 int i; 447 int i;
273 448
274 for_each_irq_desc(i, desc) 449 for_each_irq_desc(i, desc) {
275 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 450 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
451 }
452}
453
454#ifdef CONFIG_SPARSE_IRQ
455unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
456{
457 struct irq_desc *desc = irq_to_desc(irq);
458 return desc ? desc->kstat_irqs[cpu] : 0;
276} 459}
277#endif 460#endif
461EXPORT_SYMBOL(kstat_irqs_cpu);
462
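
irq_to_desc_alloc_cpu() above is a lazily-populated pointer array with the classic check/lock/re-check shape: a fast path that returns an already-installed descriptor, then a second look under sparse_irq_lock before allocating, so two CPUs racing on the same irq cannot both install one. A userspace sketch of the same shape follows, using a mutex in place of the spinlock and made-up names; it glosses over the memory-ordering details the kernel has to care about. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 16

struct desc {
	int id;
};

static struct desc *slots[NR_SLOTS];
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;

static struct desc *get_or_alloc(int i)
{
	struct desc *d = slots[i];	/* fast path: already populated */

	if (d)
		return d;

	pthread_mutex_lock(&slots_lock);
	d = slots[i];			/* re-check: another caller may have won */
	if (!d) {
		d = calloc(1, sizeof(*d));
		if (!d)
			abort();	/* the kernel BUGs on allocation failure too */
		d->id = i;
		slots[i] = d;
	}
	pthread_mutex_unlock(&slots_lock);
	return d;
}

int main(void)
{
	struct desc *a = get_or_alloc(3);
	struct desc *b = get_or_alloc(3);

	printf("slot 3 -> id %d, same object: %s\n", a->id, a == b ? "yes" : "no");
	return 0;
}
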
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 64c1c7253dae..e6d0a43cc125 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -13,6 +13,11 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
14 unsigned long flags); 14 unsigned long flags);
15 15
16extern struct lock_class_key irq_desc_lock_class;
17extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
18extern spinlock_t sparse_irq_lock;
19extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
20
16#ifdef CONFIG_PROC_FS 21#ifdef CONFIG_PROC_FS
17extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); 22extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
18extern void register_handler_proc(unsigned int irq, struct irqaction *action); 23extern void register_handler_proc(unsigned int irq, struct irqaction *action);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c43..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
16#include "internals.h" 16#include "internals.h"
17 17
18#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
19cpumask_var_t irq_default_affinity;
19 20
20cpumask_t irq_default_affinity = CPU_MASK_ALL; 21static int init_irq_default_affinity(void)
22{
23 alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
24 cpumask_setall(irq_default_affinity);
25 return 0;
26}
27core_initcall(init_irq_default_affinity);
21 28
22/** 29/**
23 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 30 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -79,7 +86,7 @@ int irq_can_set_affinity(unsigned int irq)
79 * @cpumask: cpumask 86 * @cpumask: cpumask
80 * 87 *
81 */ 88 */
82int irq_set_affinity(unsigned int irq, cpumask_t cpumask) 89int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
83{ 90{
84 struct irq_desc *desc = irq_to_desc(irq); 91 struct irq_desc *desc = irq_to_desc(irq);
85 unsigned long flags; 92 unsigned long flags;
@@ -91,14 +98,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
91 98
92#ifdef CONFIG_GENERIC_PENDING_IRQ 99#ifdef CONFIG_GENERIC_PENDING_IRQ
93 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { 100 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
94 desc->affinity = cpumask; 101 cpumask_copy(&desc->affinity, cpumask);
95 desc->chip->set_affinity(irq, cpumask); 102 desc->chip->set_affinity(irq, cpumask);
96 } else { 103 } else {
97 desc->status |= IRQ_MOVE_PENDING; 104 desc->status |= IRQ_MOVE_PENDING;
98 desc->pending_mask = cpumask; 105 cpumask_copy(&desc->pending_mask, cpumask);
99 } 106 }
100#else 107#else
101 desc->affinity = cpumask; 108 cpumask_copy(&desc->affinity, cpumask);
102 desc->chip->set_affinity(irq, cpumask); 109 desc->chip->set_affinity(irq, cpumask);
103#endif 110#endif
104 desc->status |= IRQ_AFFINITY_SET; 111 desc->status |= IRQ_AFFINITY_SET;
@@ -112,26 +119,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
112 */ 119 */
113int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) 120int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
114{ 121{
115 cpumask_t mask;
116
117 if (!irq_can_set_affinity(irq)) 122 if (!irq_can_set_affinity(irq))
118 return 0; 123 return 0;
119 124
120 cpus_and(mask, cpu_online_map, irq_default_affinity);
121
122 /* 125 /*
123 * Preserve an userspace affinity setup, but make sure that 126 * Preserve an userspace affinity setup, but make sure that
124 * one of the targets is online. 127 * one of the targets is online.
125 */ 128 */
126 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 129 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
127 if (cpus_intersects(desc->affinity, cpu_online_map)) 130 if (cpumask_any_and(&desc->affinity, cpu_online_mask)
128 mask = desc->affinity; 131 < nr_cpu_ids)
132 goto set_affinity;
129 else 133 else
130 desc->status &= ~IRQ_AFFINITY_SET; 134 desc->status &= ~IRQ_AFFINITY_SET;
131 } 135 }
132 136
133 desc->affinity = mask; 137 cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
134 desc->chip->set_affinity(irq, mask); 138set_affinity:
139 desc->chip->set_affinity(irq, &desc->affinity);
135 140
136 return 0; 141 return 0;
137} 142}
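
The rewritten do_irq_select_affinity() keeps a user-supplied affinity only while it still intersects the online mask, and otherwise falls back to irq_default_affinity restricted to online CPUs. The sketch below compresses that decision into plain 64-bit masks instead of struct cpumask; it is illustrative only and leaves out the IRQ_AFFINITY_SET bookkeeping.

#include <stdint.h>
#include <stdio.h>

/* pick the mask to program: keep the user's setting if it can still fire */
static uint64_t select_affinity(uint64_t user_set, uint64_t online, uint64_t def)
{
	if (user_set && (user_set & online))
		return user_set;	/* preserve the userspace affinity setup */
	return def & online;		/* fall back to default restricted to online */
}

int main(void)
{
	uint64_t online = 0x0f;		/* CPUs 0-3 online */
	uint64_t def = ~0ull;		/* default affinity: all CPUs */

	/* pinned to CPU 5, which went offline: falls back to 0x0f */
	printf("0x%llx\n", (unsigned long long)select_affinity(1ull << 5, online, def));
	/* pinned to CPU 2, still online: kept as 0x4 */
	printf("0x%llx\n", (unsigned long long)select_affinity(1ull << 2, online, def));
	return 0;
}
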
@@ -370,16 +375,18 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
370 return 0; 375 return 0;
371 } 376 }
372 377
373 ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK); 378 /* caller masked out all except trigger mode flags */
379 ret = chip->set_type(irq, flags);
374 380
375 if (ret) 381 if (ret)
376 pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 382 pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
377 (int)(flags & IRQF_TRIGGER_MASK), 383 (int)flags, irq, chip->set_type);
378 irq, chip->set_type);
379 else { 384 else {
385 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
386 flags |= IRQ_LEVEL;
380 /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ 387 /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
381 desc->status &= ~IRQ_TYPE_SENSE_MASK; 388 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
382 desc->status |= flags & IRQ_TYPE_SENSE_MASK; 389 desc->status |= flags;
383 } 390 }
384 391
385 return ret; 392 return ret;
@@ -459,7 +466,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
459 466
460 /* Setup the type (level, edge polarity) if configured: */ 467 /* Setup the type (level, edge polarity) if configured: */
461 if (new->flags & IRQF_TRIGGER_MASK) { 468 if (new->flags & IRQF_TRIGGER_MASK) {
462 ret = __irq_set_trigger(desc, irq, new->flags); 469 ret = __irq_set_trigger(desc, irq,
470 new->flags & IRQF_TRIGGER_MASK);
463 471
464 if (ret) { 472 if (ret) {
465 spin_unlock_irqrestore(&desc->lock, flags); 473 spin_unlock_irqrestore(&desc->lock, flags);
@@ -673,6 +681,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
673 struct irq_desc *desc; 681 struct irq_desc *desc;
674 int retval; 682 int retval;
675 683
684 /*
685 * handle_IRQ_event() always ignores IRQF_DISABLED except for
686 * the _first_ irqaction (sigh). That can cause oopsing, but
687 * the behavior is classified as "will not fix" so we need to
688 * start nudging drivers away from using that idiom.
689 */
690 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
691 == (IRQF_SHARED|IRQF_DISABLED))
692 pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
693 "guaranteed on shared IRQs\n",
694 irq, devname);
695
676#ifdef CONFIG_LOCKDEP 696#ifdef CONFIG_LOCKDEP
677 /* 697 /*
678 * Lockdep wants atomic interrupt handlers: 698 * Lockdep wants atomic interrupt handlers:
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 9db681d95814..bd72329e630c 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,7 +4,6 @@
4void move_masked_irq(int irq) 4void move_masked_irq(int irq)
5{ 5{
6 struct irq_desc *desc = irq_to_desc(irq); 6 struct irq_desc *desc = irq_to_desc(irq);
7 cpumask_t tmp;
8 7
9 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 8 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
10 return; 9 return;
@@ -19,7 +18,7 @@ void move_masked_irq(int irq)
19 18
20 desc->status &= ~IRQ_MOVE_PENDING; 19 desc->status &= ~IRQ_MOVE_PENDING;
21 20
22 if (unlikely(cpus_empty(desc->pending_mask))) 21 if (unlikely(cpumask_empty(&desc->pending_mask)))
23 return; 22 return;
24 23
25 if (!desc->chip->set_affinity) 24 if (!desc->chip->set_affinity)
@@ -27,8 +26,6 @@ void move_masked_irq(int irq)
27 26
28 assert_spin_locked(&desc->lock); 27 assert_spin_locked(&desc->lock);
29 28
30 cpus_and(tmp, desc->pending_mask, cpu_online_map);
31
32 /* 29 /*
33 * If there was a valid mask to work with, please 30 * If there was a valid mask to work with, please
34 * do the disable, re-program, enable sequence. 31 * do the disable, re-program, enable sequence.
@@ -41,10 +38,13 @@ void move_masked_irq(int irq)
41 * For correct operation this depends on the caller 38 * For correct operation this depends on the caller
42 * masking the irqs. 39 * masking the irqs.
43 */ 40 */
44 if (likely(!cpus_empty(tmp))) { 41 if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
45 desc->chip->set_affinity(irq,tmp); 42 < nr_cpu_ids)) {
43 cpumask_and(&desc->affinity,
44 &desc->pending_mask, cpu_online_mask);
45 desc->chip->set_affinity(irq, &desc->affinity);
46 } 46 }
47 cpus_clear(desc->pending_mask); 47 cpumask_clear(&desc->pending_mask);
48} 48}
49 49
50void move_native_irq(int irq) 50void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
new file mode 100644
index 000000000000..ecf765c6a77a
--- /dev/null
+++ b/kernel/irq/numa_migrate.c
@@ -0,0 +1,119 @@
1/*
2 * NUMA irq-desc migration code
3 *
4 * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
5 * the new "home node" of the IRQ.
6 */
7
8#include <linux/irq.h>
9#include <linux/module.h>
10#include <linux/random.h>
11#include <linux/interrupt.h>
12#include <linux/kernel_stat.h>
13
14#include "internals.h"
15
16static void init_copy_kstat_irqs(struct irq_desc *old_desc,
17 struct irq_desc *desc,
18 int cpu, int nr)
19{
20 unsigned long bytes;
21
22 init_kstat_irqs(desc, cpu, nr);
23
24 if (desc->kstat_irqs != old_desc->kstat_irqs) {
25 /* Compute how many bytes we need per irq and allocate them */
26 bytes = nr * sizeof(unsigned int);
27
28 memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
29 }
30}
31
32static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
33{
34 if (old_desc->kstat_irqs == desc->kstat_irqs)
35 return;
36
37 kfree(old_desc->kstat_irqs);
38 old_desc->kstat_irqs = NULL;
39}
40
41static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
42 struct irq_desc *desc, int cpu)
43{
44 memcpy(desc, old_desc, sizeof(struct irq_desc));
45 spin_lock_init(&desc->lock);
46 desc->cpu = cpu;
47 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
48 init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
49 arch_init_copy_chip_data(old_desc, desc, cpu);
50}
51
52static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
53{
54 free_kstat_irqs(old_desc, desc);
55 arch_free_chip_data(old_desc, desc);
56}
57
58static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
59 int cpu)
60{
61 struct irq_desc *desc;
62 unsigned int irq;
63 unsigned long flags;
64 int node;
65
66 irq = old_desc->irq;
67
68 spin_lock_irqsave(&sparse_irq_lock, flags);
69
70 /* We have to check it to avoid races with another CPU */
71 desc = irq_desc_ptrs[irq];
72
73 if (desc && old_desc != desc)
74 goto out_unlock;
75
76 node = cpu_to_node(cpu);
77 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
78 if (!desc) {
79 printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
80 /* still use old one */
81 desc = old_desc;
82 goto out_unlock;
83 }
84 init_copy_one_irq_desc(irq, old_desc, desc, cpu);
85
86 irq_desc_ptrs[irq] = desc;
87
88 /* free the old one */
89 free_one_irq_desc(old_desc, desc);
90 kfree(old_desc);
91
92out_unlock:
93 spin_unlock_irqrestore(&sparse_irq_lock, flags);
94
95 return desc;
96}
97
98struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
99{
100 int old_cpu;
101 int node, old_node;
102
103 /* those all static, do move them */
104 if (desc->irq < NR_IRQS_LEGACY)
105 return desc;
106
107 old_cpu = desc->cpu;
108 if (old_cpu != cpu) {
109 node = cpu_to_node(cpu);
110 old_node = cpu_to_node(old_cpu);
111 if (old_node != node)
112 desc = __real_move_irq_desc(desc, cpu);
113 else
114 desc->cpu = cpu;
115 }
116
117 return desc;
118}
119
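move_irq_desc() is intended to be called from architecture code when an interrupt's affinity moves to a CPU on a different node; the descriptor, its kstat_irqs array and the chip data are then reallocated node-locally, while legacy IRQs and allocation failures fall back to the old descriptor. A hedged sketch of what a caller could look like (the helper below is illustrative, not taken from an actual arch implementation):

#include <linux/irq.h>
#include <linux/cpumask.h>

static struct irq_desc *example_rehome_desc(struct irq_desc *desc,
                                            const struct cpumask *new_mask)
{
        unsigned int cpu = cpumask_first(new_mask);

        if (cpu < nr_cpu_ids)
                desc = move_irq_desc(desc, cpu);   /* may return the old desc */
        return desc;
}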
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d257e7d6a8a4..aae3f742bcec 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
20static int irq_affinity_proc_show(struct seq_file *m, void *v) 20static int irq_affinity_proc_show(struct seq_file *m, void *v)
21{ 21{
22 struct irq_desc *desc = irq_to_desc((long)m->private); 22 struct irq_desc *desc = irq_to_desc((long)m->private);
23 cpumask_t *mask = &desc->affinity; 23 const struct cpumask *mask = &desc->affinity;
24 24
25#ifdef CONFIG_GENERIC_PENDING_IRQ 25#ifdef CONFIG_GENERIC_PENDING_IRQ
26 if (desc->status & IRQ_MOVE_PENDING) 26 if (desc->status & IRQ_MOVE_PENDING)
@@ -40,33 +40,42 @@ static ssize_t irq_affinity_proc_write(struct file *file,
40 const char __user *buffer, size_t count, loff_t *pos) 40 const char __user *buffer, size_t count, loff_t *pos)
41{ 41{
42 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; 42 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
43 cpumask_t new_value; 43 cpumask_var_t new_value;
44 int err; 44 int err;
45 45
46 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || 46 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
47 irq_balancing_disabled(irq)) 47 irq_balancing_disabled(irq))
48 return -EIO; 48 return -EIO;
49 49
50 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
51 return -ENOMEM;
52
50 err = cpumask_parse_user(buffer, count, new_value); 53 err = cpumask_parse_user(buffer, count, new_value);
51 if (err) 54 if (err)
52 return err; 55 goto free_cpumask;
53 56
54 if (!is_affinity_mask_valid(new_value)) 57 if (!is_affinity_mask_valid(new_value)) {
55 return -EINVAL; 58 err = -EINVAL;
59 goto free_cpumask;
60 }
56 61
57 /* 62 /*
58 * Do not allow disabling IRQs completely - it's a too easy 63 * Do not allow disabling IRQs completely - it's a too easy
59 * way to make the system unusable accidentally :-) At least 64 * way to make the system unusable accidentally :-) At least
60 * one online CPU still has to be targeted. 65 * one online CPU still has to be targeted.
61 */ 66 */
62 if (!cpus_intersects(new_value, cpu_online_map)) 67 if (!cpumask_intersects(new_value, cpu_online_mask)) {
63 /* Special case for empty set - allow the architecture 68 /* Special case for empty set - allow the architecture
64 code to set default SMP affinity. */ 69 code to set default SMP affinity. */
65 return irq_select_affinity_usr(irq) ? -EINVAL : count; 70 err = irq_select_affinity_usr(irq) ? -EINVAL : count;
66 71 } else {
67 irq_set_affinity(irq, new_value); 72 irq_set_affinity(irq, new_value);
73 err = count;
74 }
68 75
69 return count; 76free_cpumask:
77 free_cpumask_var(new_value);
78 return err;
70} 79}
71 80
72static int irq_affinity_proc_open(struct inode *inode, struct file *file) 81static int irq_affinity_proc_open(struct inode *inode, struct file *file)
@@ -84,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
84 93
85static int default_affinity_show(struct seq_file *m, void *v) 94static int default_affinity_show(struct seq_file *m, void *v)
86{ 95{
87 seq_cpumask(m, &irq_default_affinity); 96 seq_cpumask(m, irq_default_affinity);
88 seq_putc(m, '\n'); 97 seq_putc(m, '\n');
89 return 0; 98 return 0;
90} 99}
@@ -92,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
92static ssize_t default_affinity_write(struct file *file, 101static ssize_t default_affinity_write(struct file *file,
93 const char __user *buffer, size_t count, loff_t *ppos) 102 const char __user *buffer, size_t count, loff_t *ppos)
94{ 103{
95 cpumask_t new_value; 104 cpumask_var_t new_value;
96 int err; 105 int err;
97 106
107 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
108 return -ENOMEM;
109
98 err = cpumask_parse_user(buffer, count, new_value); 110 err = cpumask_parse_user(buffer, count, new_value);
99 if (err) 111 if (err)
100 return err; 112 goto out;
101 113
102 if (!is_affinity_mask_valid(new_value)) 114 if (!is_affinity_mask_valid(new_value)) {
103 return -EINVAL; 115 err = -EINVAL;
116 goto out;
117 }
104 118
105 /* 119 /*
106 * Do not allow disabling IRQs completely - it's a too easy 120 * Do not allow disabling IRQs completely - it's a too easy
107 * way to make the system unusable accidentally :-) At least 121 * way to make the system unusable accidentally :-) At least
108 * one online CPU still has to be targeted. 122 * one online CPU still has to be targeted.
109 */ 123 */
110 if (!cpus_intersects(new_value, cpu_online_map)) 124 if (!cpumask_intersects(new_value, cpu_online_mask)) {
111 return -EINVAL; 125 err = -EINVAL;
126 goto out;
127 }
112 128
113 irq_default_affinity = new_value; 129 cpumask_copy(irq_default_affinity, new_value);
130 err = count;
114 131
115 return count; 132out:
133 free_cpumask_var(new_value);
134 return err;
116} 135}
117 136
118static int default_affinity_open(struct inode *inode, struct file *file) 137static int default_affinity_open(struct inode *inode, struct file *file)
@@ -243,7 +262,11 @@ void init_irq_proc(void)
243 /* 262 /*
244 * Create entries for all existing IRQs. 263 * Create entries for all existing IRQs.
245 */ 264 */
246 for_each_irq_desc(irq, desc) 265 for_each_irq_desc(irq, desc) {
266 if (!desc)
267 continue;
268
247 register_irq_proc(irq, desc); 269 register_irq_proc(irq, desc);
270 }
248} 271}
249 272
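Both proc write handlers above now follow the same cpumask_var_t discipline: allocate, parse from user space, validate, then free on every exit path. Condensed into a standalone helper (the name and the exact validation policy are illustrative):

#include <linux/cpumask.h>
#include <linux/slab.h>

static int parse_user_cpumask(const char __user *buf, size_t count,
                              struct cpumask *dst)
{
        cpumask_var_t tmp;
        int err;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_parse_user(buf, count, tmp);
        if (!err && !cpumask_intersects(tmp, cpu_online_mask))
                err = -EINVAL;          /* refuse masks with no online CPU */
        if (!err)
                cpumask_copy(dst, tmp);

        free_cpumask_var(tmp);
        return err;
}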
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ac0fde7b54d0..3fb855ad6aa0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1116,7 +1116,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
1116 struct elf_prstatus prstatus; 1116 struct elf_prstatus prstatus;
1117 u32 *buf; 1117 u32 *buf;
1118 1118
1119 if ((cpu < 0) || (cpu >= NR_CPUS)) 1119 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1120 return; 1120 return;
1121 1121
1122 /* Using ELF notes here is opportunistic. 1122 /* Using ELF notes here is opportunistic.
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 74b1878b8bb8..06b0c3568f0b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -137,16 +137,16 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
137#ifdef CONFIG_LOCK_STAT 137#ifdef CONFIG_LOCK_STAT
138static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); 138static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
139 139
140static int lock_contention_point(struct lock_class *class, unsigned long ip) 140static int lock_point(unsigned long points[], unsigned long ip)
141{ 141{
142 int i; 142 int i;
143 143
144 for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) { 144 for (i = 0; i < LOCKSTAT_POINTS; i++) {
145 if (class->contention_point[i] == 0) { 145 if (points[i] == 0) {
146 class->contention_point[i] = ip; 146 points[i] = ip;
147 break; 147 break;
148 } 148 }
149 if (class->contention_point[i] == ip) 149 if (points[i] == ip)
150 break; 150 break;
151 } 151 }
152 152
@@ -186,6 +186,9 @@ struct lock_class_stats lock_stats(struct lock_class *class)
186 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) 186 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
187 stats.contention_point[i] += pcs->contention_point[i]; 187 stats.contention_point[i] += pcs->contention_point[i];
188 188
189 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
190 stats.contending_point[i] += pcs->contending_point[i];
191
189 lock_time_add(&pcs->read_waittime, &stats.read_waittime); 192 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
190 lock_time_add(&pcs->write_waittime, &stats.write_waittime); 193 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
191 194
@@ -210,6 +213,7 @@ void clear_lock_stats(struct lock_class *class)
210 memset(cpu_stats, 0, sizeof(struct lock_class_stats)); 213 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
211 } 214 }
212 memset(class->contention_point, 0, sizeof(class->contention_point)); 215 memset(class->contention_point, 0, sizeof(class->contention_point));
216 memset(class->contending_point, 0, sizeof(class->contending_point));
213} 217}
214 218
215static struct lock_class_stats *get_lock_stats(struct lock_class *class) 219static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@ -288,14 +292,12 @@ void lockdep_off(void)
288{ 292{
289 current->lockdep_recursion++; 293 current->lockdep_recursion++;
290} 294}
291
292EXPORT_SYMBOL(lockdep_off); 295EXPORT_SYMBOL(lockdep_off);
293 296
294void lockdep_on(void) 297void lockdep_on(void)
295{ 298{
296 current->lockdep_recursion--; 299 current->lockdep_recursion--;
297} 300}
298
299EXPORT_SYMBOL(lockdep_on); 301EXPORT_SYMBOL(lockdep_on);
300 302
301/* 303/*
@@ -577,7 +579,8 @@ static void print_lock_class_header(struct lock_class *class, int depth)
577/* 579/*
578 * printk all lock dependencies starting at <entry>: 580 * printk all lock dependencies starting at <entry>:
579 */ 581 */
580static void print_lock_dependencies(struct lock_class *class, int depth) 582static void __used
583print_lock_dependencies(struct lock_class *class, int depth)
581{ 584{
582 struct lock_list *entry; 585 struct lock_list *entry;
583 586
@@ -2509,7 +2512,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
2509 if (subclass) 2512 if (subclass)
2510 register_lock_class(lock, subclass, 1); 2513 register_lock_class(lock, subclass, 1);
2511} 2514}
2512
2513EXPORT_SYMBOL_GPL(lockdep_init_map); 2515EXPORT_SYMBOL_GPL(lockdep_init_map);
2514 2516
2515/* 2517/*
@@ -2690,8 +2692,9 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2690} 2692}
2691 2693
2692static int 2694static int
2693__lock_set_subclass(struct lockdep_map *lock, 2695__lock_set_class(struct lockdep_map *lock, const char *name,
2694 unsigned int subclass, unsigned long ip) 2696 struct lock_class_key *key, unsigned int subclass,
2697 unsigned long ip)
2695{ 2698{
2696 struct task_struct *curr = current; 2699 struct task_struct *curr = current;
2697 struct held_lock *hlock, *prev_hlock; 2700 struct held_lock *hlock, *prev_hlock;
@@ -2718,6 +2721,7 @@ __lock_set_subclass(struct lockdep_map *lock,
2718 return print_unlock_inbalance_bug(curr, lock, ip); 2721 return print_unlock_inbalance_bug(curr, lock, ip);
2719 2722
2720found_it: 2723found_it:
2724 lockdep_init_map(lock, name, key, 0);
2721 class = register_lock_class(lock, subclass, 0); 2725 class = register_lock_class(lock, subclass, 0);
2722 hlock->class_idx = class - lock_classes + 1; 2726 hlock->class_idx = class - lock_classes + 1;
2723 2727
@@ -2902,9 +2906,9 @@ static void check_flags(unsigned long flags)
2902#endif 2906#endif
2903} 2907}
2904 2908
2905void 2909void lock_set_class(struct lockdep_map *lock, const char *name,
2906lock_set_subclass(struct lockdep_map *lock, 2910 struct lock_class_key *key, unsigned int subclass,
2907 unsigned int subclass, unsigned long ip) 2911 unsigned long ip)
2908{ 2912{
2909 unsigned long flags; 2913 unsigned long flags;
2910 2914
@@ -2914,13 +2918,12 @@ lock_set_subclass(struct lockdep_map *lock,
2914 raw_local_irq_save(flags); 2918 raw_local_irq_save(flags);
2915 current->lockdep_recursion = 1; 2919 current->lockdep_recursion = 1;
2916 check_flags(flags); 2920 check_flags(flags);
2917 if (__lock_set_subclass(lock, subclass, ip)) 2921 if (__lock_set_class(lock, name, key, subclass, ip))
2918 check_chain_key(current); 2922 check_chain_key(current);
2919 current->lockdep_recursion = 0; 2923 current->lockdep_recursion = 0;
2920 raw_local_irq_restore(flags); 2924 raw_local_irq_restore(flags);
2921} 2925}
2922 2926EXPORT_SYMBOL_GPL(lock_set_class);
2923EXPORT_SYMBOL_GPL(lock_set_subclass);
2924 2927
2925/* 2928/*
2926 * We are not always called with irqs disabled - do that here, 2929 * We are not always called with irqs disabled - do that here,
@@ -2944,7 +2947,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2944 current->lockdep_recursion = 0; 2947 current->lockdep_recursion = 0;
2945 raw_local_irq_restore(flags); 2948 raw_local_irq_restore(flags);
2946} 2949}
2947
2948EXPORT_SYMBOL_GPL(lock_acquire); 2950EXPORT_SYMBOL_GPL(lock_acquire);
2949 2951
2950void lock_release(struct lockdep_map *lock, int nested, 2952void lock_release(struct lockdep_map *lock, int nested,
@@ -2962,7 +2964,6 @@ void lock_release(struct lockdep_map *lock, int nested,
2962 current->lockdep_recursion = 0; 2964 current->lockdep_recursion = 0;
2963 raw_local_irq_restore(flags); 2965 raw_local_irq_restore(flags);
2964} 2966}
2965
2966EXPORT_SYMBOL_GPL(lock_release); 2967EXPORT_SYMBOL_GPL(lock_release);
2967 2968
2968#ifdef CONFIG_LOCK_STAT 2969#ifdef CONFIG_LOCK_STAT
@@ -3000,7 +3001,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
3000 struct held_lock *hlock, *prev_hlock; 3001 struct held_lock *hlock, *prev_hlock;
3001 struct lock_class_stats *stats; 3002 struct lock_class_stats *stats;
3002 unsigned int depth; 3003 unsigned int depth;
3003 int i, point; 3004 int i, contention_point, contending_point;
3004 3005
3005 depth = curr->lockdep_depth; 3006 depth = curr->lockdep_depth;
3006 if (DEBUG_LOCKS_WARN_ON(!depth)) 3007 if (DEBUG_LOCKS_WARN_ON(!depth))
@@ -3024,18 +3025,22 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
3024found_it: 3025found_it:
3025 hlock->waittime_stamp = sched_clock(); 3026 hlock->waittime_stamp = sched_clock();
3026 3027
3027 point = lock_contention_point(hlock_class(hlock), ip); 3028 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3029 contending_point = lock_point(hlock_class(hlock)->contending_point,
3030 lock->ip);
3028 3031
3029 stats = get_lock_stats(hlock_class(hlock)); 3032 stats = get_lock_stats(hlock_class(hlock));
3030 if (point < ARRAY_SIZE(stats->contention_point)) 3033 if (contention_point < LOCKSTAT_POINTS)
3031 stats->contention_point[point]++; 3034 stats->contention_point[contention_point]++;
3035 if (contending_point < LOCKSTAT_POINTS)
3036 stats->contending_point[contending_point]++;
3032 if (lock->cpu != smp_processor_id()) 3037 if (lock->cpu != smp_processor_id())
3033 stats->bounces[bounce_contended + !!hlock->read]++; 3038 stats->bounces[bounce_contended + !!hlock->read]++;
3034 put_lock_stats(stats); 3039 put_lock_stats(stats);
3035} 3040}
3036 3041
3037static void 3042static void
3038__lock_acquired(struct lockdep_map *lock) 3043__lock_acquired(struct lockdep_map *lock, unsigned long ip)
3039{ 3044{
3040 struct task_struct *curr = current; 3045 struct task_struct *curr = current;
3041 struct held_lock *hlock, *prev_hlock; 3046 struct held_lock *hlock, *prev_hlock;
@@ -3084,6 +3089,7 @@ found_it:
3084 put_lock_stats(stats); 3089 put_lock_stats(stats);
3085 3090
3086 lock->cpu = cpu; 3091 lock->cpu = cpu;
3092 lock->ip = ip;
3087} 3093}
3088 3094
3089void lock_contended(struct lockdep_map *lock, unsigned long ip) 3095void lock_contended(struct lockdep_map *lock, unsigned long ip)
@@ -3105,7 +3111,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
3105} 3111}
3106EXPORT_SYMBOL_GPL(lock_contended); 3112EXPORT_SYMBOL_GPL(lock_contended);
3107 3113
3108void lock_acquired(struct lockdep_map *lock) 3114void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3109{ 3115{
3110 unsigned long flags; 3116 unsigned long flags;
3111 3117
@@ -3118,7 +3124,7 @@ void lock_acquired(struct lockdep_map *lock)
3118 raw_local_irq_save(flags); 3124 raw_local_irq_save(flags);
3119 check_flags(flags); 3125 check_flags(flags);
3120 current->lockdep_recursion = 1; 3126 current->lockdep_recursion = 1;
3121 __lock_acquired(lock); 3127 __lock_acquired(lock, ip);
3122 current->lockdep_recursion = 0; 3128 current->lockdep_recursion = 0;
3123 raw_local_irq_restore(flags); 3129 raw_local_irq_restore(flags);
3124} 3130}
@@ -3442,7 +3448,6 @@ retry:
3442 if (unlock) 3448 if (unlock)
3443 read_unlock(&tasklist_lock); 3449 read_unlock(&tasklist_lock);
3444} 3450}
3445
3446EXPORT_SYMBOL_GPL(debug_show_all_locks); 3451EXPORT_SYMBOL_GPL(debug_show_all_locks);
3447 3452
3448/* 3453/*
@@ -3463,7 +3468,6 @@ void debug_show_held_locks(struct task_struct *task)
3463{ 3468{
3464 __debug_show_held_locks(task); 3469 __debug_show_held_locks(task);
3465} 3470}
3466
3467EXPORT_SYMBOL_GPL(debug_show_held_locks); 3471EXPORT_SYMBOL_GPL(debug_show_held_locks);
3468 3472
3469void lockdep_sys_exit(void) 3473void lockdep_sys_exit(void)
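With lock_set_subclass() folded into the more general lock_set_class(), existing callers can be kept working by a thin wrapper on the header side; the exact form is not part of this hunk, so treat the sketch below as illustrative:

#define lock_set_subclass(lock, subclass, ip)                          \
        lock_set_class(lock, (lock)->name, (lock)->key, subclass, ip)

Passing the lock's own name and key reproduces the old re-register-with-subclass behaviour, while new callers may switch a held lock to an entirely different class.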
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 20dbcbf9c7dd..13716b813896 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -470,11 +470,12 @@ static void seq_line(struct seq_file *m, char c, int offset, int length)
470 470
471static void snprint_time(char *buf, size_t bufsiz, s64 nr) 471static void snprint_time(char *buf, size_t bufsiz, s64 nr)
472{ 472{
473 unsigned long rem; 473 s64 div;
474 s32 rem;
474 475
475 nr += 5; /* for display rounding */ 476 nr += 5; /* for display rounding */
476 rem = do_div(nr, 1000); /* XXX: do_div_signed */ 477 div = div_s64_rem(nr, 1000, &rem);
477 snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10); 478 snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
478} 479}
479 480
480static void seq_time(struct seq_file *m, s64 time) 481static void seq_time(struct seq_file *m, s64 time)
@@ -556,7 +557,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
556 if (stats->read_holdtime.nr) 557 if (stats->read_holdtime.nr)
557 namelen += 2; 558 namelen += 2;
558 559
559 for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) { 560 for (i = 0; i < LOCKSTAT_POINTS; i++) {
560 char sym[KSYM_SYMBOL_LEN]; 561 char sym[KSYM_SYMBOL_LEN];
561 char ip[32]; 562 char ip[32];
562 563
@@ -573,6 +574,23 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
573 stats->contention_point[i], 574 stats->contention_point[i],
574 ip, sym); 575 ip, sym);
575 } 576 }
577 for (i = 0; i < LOCKSTAT_POINTS; i++) {
578 char sym[KSYM_SYMBOL_LEN];
579 char ip[32];
580
581 if (class->contending_point[i] == 0)
582 break;
583
584 if (!i)
585 seq_line(m, '-', 40-namelen, namelen);
586
587 sprint_symbol(sym, class->contending_point[i]);
588 snprintf(ip, sizeof(ip), "[<%p>]",
589 (void *)class->contending_point[i]);
590 seq_printf(m, "%40s %14lu %29s %s\n", name,
591 stats->contending_point[i],
592 ip, sym);
593 }
576 if (i) { 594 if (i) {
577 seq_puts(m, "\n"); 595 seq_puts(m, "\n");
578 seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1)); 596 seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
@@ -582,7 +600,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
582 600
583static void seq_header(struct seq_file *m) 601static void seq_header(struct seq_file *m)
584{ 602{
585 seq_printf(m, "lock_stat version 0.2\n"); 603 seq_printf(m, "lock_stat version 0.3\n");
586 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1)); 604 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
587 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s " 605 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
588 "%14s %14s\n", 606 "%14s %14s\n",
diff --git a/kernel/module.c b/kernel/module.c
index dd2a54155b54..f47cce910f25 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -757,8 +757,16 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
757 return -EFAULT; 757 return -EFAULT;
758 name[MODULE_NAME_LEN-1] = '\0'; 758 name[MODULE_NAME_LEN-1] = '\0';
759 759
760 if (mutex_lock_interruptible(&module_mutex) != 0) 760 /* Create stop_machine threads since free_module relies on
761 return -EINTR; 761 * a non-failing stop_machine call. */
762 ret = stop_machine_create();
763 if (ret)
764 return ret;
765
766 if (mutex_lock_interruptible(&module_mutex) != 0) {
767 ret = -EINTR;
768 goto out_stop;
769 }
762 770
763 mod = find_module(name); 771 mod = find_module(name);
764 if (!mod) { 772 if (!mod) {
@@ -817,10 +825,12 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
817 825
818 out: 826 out:
819 mutex_unlock(&module_mutex); 827 mutex_unlock(&module_mutex);
828out_stop:
829 stop_machine_destroy();
820 return ret; 830 return ret;
821} 831}
822 832
823static void print_unload_info(struct seq_file *m, struct module *mod) 833static inline void print_unload_info(struct seq_file *m, struct module *mod)
824{ 834{
825 struct module_use *use; 835 struct module_use *use;
826 int printed_something = 0; 836 int printed_something = 0;
@@ -893,7 +903,7 @@ void module_put(struct module *module)
893EXPORT_SYMBOL(module_put); 903EXPORT_SYMBOL(module_put);
894 904
895#else /* !CONFIG_MODULE_UNLOAD */ 905#else /* !CONFIG_MODULE_UNLOAD */
896static void print_unload_info(struct seq_file *m, struct module *mod) 906static inline void print_unload_info(struct seq_file *m, struct module *mod)
897{ 907{
898 /* We don't know the usage count, or what modules are using. */ 908 /* We don't know the usage count, or what modules are using. */
899 seq_printf(m, " - -"); 909 seq_printf(m, " - -");
@@ -1578,11 +1588,21 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
1578 return ret; 1588 return ret;
1579} 1589}
1580 1590
1591/* Additional bytes needed by arch in front of individual sections */
1592unsigned int __weak arch_mod_section_prepend(struct module *mod,
1593 unsigned int section)
1594{
1595 /* default implementation just returns zero */
1596 return 0;
1597}
1598
1581/* Update size with this section: return offset. */ 1599/* Update size with this section: return offset. */
1582static long get_offset(unsigned int *size, Elf_Shdr *sechdr) 1600static long get_offset(struct module *mod, unsigned int *size,
1601 Elf_Shdr *sechdr, unsigned int section)
1583{ 1602{
1584 long ret; 1603 long ret;
1585 1604
1605 *size += arch_mod_section_prepend(mod, section);
1586 ret = ALIGN(*size, sechdr->sh_addralign ?: 1); 1606 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
1587 *size = ret + sechdr->sh_size; 1607 *size = ret + sechdr->sh_size;
1588 return ret; 1608 return ret;
@@ -1622,7 +1642,7 @@ static void layout_sections(struct module *mod,
1622 || strncmp(secstrings + s->sh_name, 1642 || strncmp(secstrings + s->sh_name,
1623 ".init", 5) == 0) 1643 ".init", 5) == 0)
1624 continue; 1644 continue;
1625 s->sh_entsize = get_offset(&mod->core_size, s); 1645 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
1626 DEBUGP("\t%s\n", secstrings + s->sh_name); 1646 DEBUGP("\t%s\n", secstrings + s->sh_name);
1627 } 1647 }
1628 if (m == 0) 1648 if (m == 0)
@@ -1640,7 +1660,7 @@ static void layout_sections(struct module *mod,
1640 || strncmp(secstrings + s->sh_name, 1660 || strncmp(secstrings + s->sh_name,
1641 ".init", 5) != 0) 1661 ".init", 5) != 0)
1642 continue; 1662 continue;
1643 s->sh_entsize = (get_offset(&mod->init_size, s) 1663 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
1644 | INIT_OFFSET_MASK); 1664 | INIT_OFFSET_MASK);
1645 DEBUGP("\t%s\n", secstrings + s->sh_name); 1665 DEBUGP("\t%s\n", secstrings + s->sh_name);
1646 } 1666 }
@@ -1725,15 +1745,15 @@ static const struct kernel_symbol *lookup_symbol(const char *name,
1725 return NULL; 1745 return NULL;
1726} 1746}
1727 1747
1728static int is_exported(const char *name, const struct module *mod) 1748static int is_exported(const char *name, unsigned long value,
1749 const struct module *mod)
1729{ 1750{
1730 if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) 1751 const struct kernel_symbol *ks;
1731 return 1; 1752 if (!mod)
1753 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
1732 else 1754 else
1733 if (mod && lookup_symbol(name, mod->syms, mod->syms + mod->num_syms)) 1755 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
1734 return 1; 1756 return ks != NULL && ks->value == value;
1735 else
1736 return 0;
1737} 1757}
1738 1758
1739/* As per nm */ 1759/* As per nm */
@@ -1865,6 +1885,13 @@ static noinline struct module *load_module(void __user *umod,
1865 /* vmalloc barfs on "unusual" numbers. Check here */ 1885 /* vmalloc barfs on "unusual" numbers. Check here */
1866 if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL) 1886 if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
1867 return ERR_PTR(-ENOMEM); 1887 return ERR_PTR(-ENOMEM);
1888
1889 /* Create stop_machine threads since the error path relies on
1890 * a non-failing stop_machine call. */
1891 err = stop_machine_create();
1892 if (err)
1893 goto free_hdr;
1894
1868 if (copy_from_user(hdr, umod, len) != 0) { 1895 if (copy_from_user(hdr, umod, len) != 0) {
1869 err = -EFAULT; 1896 err = -EFAULT;
1870 goto free_hdr; 1897 goto free_hdr;
@@ -2248,6 +2275,7 @@ static noinline struct module *load_module(void __user *umod,
2248 /* Get rid of temporary copy */ 2275 /* Get rid of temporary copy */
2249 vfree(hdr); 2276 vfree(hdr);
2250 2277
2278 stop_machine_destroy();
2251 /* Done! */ 2279 /* Done! */
2252 return mod; 2280 return mod;
2253 2281
@@ -2270,6 +2298,7 @@ static noinline struct module *load_module(void __user *umod,
2270 kfree(args); 2298 kfree(args);
2271 free_hdr: 2299 free_hdr:
2272 vfree(hdr); 2300 vfree(hdr);
2301 stop_machine_destroy();
2273 return ERR_PTR(err); 2302 return ERR_PTR(err);
2274 2303
2275 truncated: 2304 truncated:
@@ -2504,7 +2533,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2504 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, 2533 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
2505 KSYM_NAME_LEN); 2534 KSYM_NAME_LEN);
2506 strlcpy(module_name, mod->name, MODULE_NAME_LEN); 2535 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
2507 *exported = is_exported(name, mod); 2536 *exported = is_exported(name, *value, mod);
2508 preempt_enable(); 2537 preempt_enable();
2509 return 0; 2538 return 0;
2510 } 2539 }
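Both sys_delete_module() and load_module() now create the stop_machine threads up front, because their teardown/error paths rely on a stop_machine() call that must not fail once module state has been touched. Reduced to a sketch (the wrapper below is illustrative, not kernel code):

#include <linux/stop_machine.h>

static int with_stop_machine(int (*fn)(void *), void *data)
{
        int err;

        err = stop_machine_create();    /* may sleep and may fail; do it early */
        if (err)
                return err;

        err = stop_machine(fn, data, NULL);     /* worker threads now exist */

        stop_machine_destroy();
        return err;
}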
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 12c779dc65d4..4f45d4b658ef 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
59 * We also put the fastpath first in the kernel image, to make sure the 59 * We also put the fastpath first in the kernel image, to make sure the
60 * branch is predicted by the CPU as default-untaken. 60 * branch is predicted by the CPU as default-untaken.
61 */ 61 */
62static void noinline __sched 62static __used noinline void __sched
63__mutex_lock_slowpath(atomic_t *lock_count); 63__mutex_lock_slowpath(atomic_t *lock_count);
64 64
65/*** 65/***
@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
96EXPORT_SYMBOL(mutex_lock); 96EXPORT_SYMBOL(mutex_lock);
97#endif 97#endif
98 98
99static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count); 99static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
100 100
101/*** 101/***
102 * mutex_unlock - release the mutex 102 * mutex_unlock - release the mutex
@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
184 } 184 }
185 185
186done: 186done:
187 lock_acquired(&lock->dep_map); 187 lock_acquired(&lock->dep_map, ip);
188 /* got the lock - rejoice! */ 188 /* got the lock - rejoice! */
189 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); 189 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
190 debug_mutex_set_owner(lock, task_thread_info(task)); 190 debug_mutex_set_owner(lock, task_thread_info(task));
@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
268/* 268/*
269 * Release the lock, slowpath: 269 * Release the lock, slowpath:
270 */ 270 */
271static noinline void 271static __used noinline void
272__mutex_unlock_slowpath(atomic_t *lock_count) 272__mutex_unlock_slowpath(atomic_t *lock_count)
273{ 273{
274 __mutex_unlock_common_slowpath(lock_count, 1); 274 __mutex_unlock_common_slowpath(lock_count, 1);
@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
313} 313}
314EXPORT_SYMBOL(mutex_lock_killable); 314EXPORT_SYMBOL(mutex_lock_killable);
315 315
316static noinline void __sched 316static __used noinline void __sched
317__mutex_lock_slowpath(atomic_t *lock_count) 317__mutex_lock_slowpath(atomic_t *lock_count)
318{ 318{
319 struct mutex *lock = container_of(lock_count, struct mutex, count); 319 struct mutex *lock = container_of(lock_count, struct mutex, count);
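The mutex slowpath now hands the caller's return address to lock_acquired(), matching the lockdep change above: /proc/lock_stat can then attribute both where a lock was contended and where it was finally taken. The calling convention as a sketch, with a made-up lock type:

#include <linux/lockdep.h>
#include <linux/kernel.h>

struct example_lock {
        struct lockdep_map dep_map;
        /* ... the lock word itself ... */
};

static void example_lock_slowpath(struct example_lock *l)
{
        lock_contended(&l->dep_map, _RET_IP_);  /* we are about to wait here */
        /* ... block until the lock becomes available ... */
        lock_acquired(&l->dep_map, _RET_IP_);   /* record the acquire site too */
}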
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4282c0a40a57..61d5aa5eced3 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -82,6 +82,14 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
82 82
83 while (nb && nr_to_call) { 83 while (nb && nr_to_call) {
84 next_nb = rcu_dereference(nb->next); 84 next_nb = rcu_dereference(nb->next);
85
86#ifdef CONFIG_DEBUG_NOTIFIERS
87 if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
88 WARN(1, "Invalid notifier called!");
89 nb = next_nb;
90 continue;
91 }
92#endif
85 ret = nb->notifier_call(nb, val, v); 93 ret = nb->notifier_call(nb, val, v);
86 94
87 if (nr_calls) 95 if (nr_calls)
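With CONFIG_DEBUG_NOTIFIERS enabled, the chain walker now refuses to jump through a callback pointer that does not point at kernel or module text, which catches notifiers left behind by buggy unregister paths. The same check as a reusable helper (the wrapper name is illustrative; func_ptr_is_kernel_text() itself is introduced elsewhere in this series):

#include <linux/notifier.h>
#include <linux/kernel.h>

static int call_notifier_checked(struct notifier_block *nb,
                                 unsigned long val, void *v)
{
        if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
                WARN(1, "Invalid notifier called!");
                return NOTIFY_DONE;     /* skip the bogus entry */
        }
        return nb->notifier_call(nb, val, v);
}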
diff --git a/kernel/panic.c b/kernel/panic.c
index 4d5088355bfe..13f06349a786 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -21,6 +21,7 @@
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/kallsyms.h> 23#include <linux/kallsyms.h>
24#include <linux/dmi.h>
24 25
25int panic_on_oops; 26int panic_on_oops;
26static unsigned long tainted_mask; 27static unsigned long tainted_mask;
@@ -321,36 +322,27 @@ void oops_exit(void)
321} 322}
322 323
323#ifdef WANT_WARN_ON_SLOWPATH 324#ifdef WANT_WARN_ON_SLOWPATH
324void warn_on_slowpath(const char *file, int line)
325{
326 char function[KSYM_SYMBOL_LEN];
327 unsigned long caller = (unsigned long) __builtin_return_address(0);
328 sprint_symbol(function, caller);
329
330 printk(KERN_WARNING "------------[ cut here ]------------\n");
331 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
332 line, function);
333 print_modules();
334 dump_stack();
335 print_oops_end_marker();
336 add_taint(TAINT_WARN);
337}
338EXPORT_SYMBOL(warn_on_slowpath);
339
340
341void warn_slowpath(const char *file, int line, const char *fmt, ...) 325void warn_slowpath(const char *file, int line, const char *fmt, ...)
342{ 326{
343 va_list args; 327 va_list args;
344 char function[KSYM_SYMBOL_LEN]; 328 char function[KSYM_SYMBOL_LEN];
345 unsigned long caller = (unsigned long)__builtin_return_address(0); 329 unsigned long caller = (unsigned long)__builtin_return_address(0);
330 const char *board;
331
346 sprint_symbol(function, caller); 332 sprint_symbol(function, caller);
347 333
348 printk(KERN_WARNING "------------[ cut here ]------------\n"); 334 printk(KERN_WARNING "------------[ cut here ]------------\n");
349 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, 335 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
350 line, function); 336 line, function);
351 va_start(args, fmt); 337 board = dmi_get_system_info(DMI_PRODUCT_NAME);
352 vprintk(fmt, args); 338 if (board)
353 va_end(args); 339 printk(KERN_WARNING "Hardware name: %s\n", board);
340
341 if (fmt) {
342 va_start(args, fmt);
343 vprintk(fmt, args);
344 va_end(args);
345 }
354 346
355 print_modules(); 347 print_modules();
356 dump_stack(); 348 dump_stack();
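The two WARN slowpaths are merged into one: the format string becomes optional and the DMI product name is printed so warning reports can be tied to a machine model. Callers reach warn_slowpath() roughly as sketched below, which is why the "if (fmt)" guard matters (the variable is made up; the header-side plumbing is not shown in this hunk):

#include <linux/kernel.h>

static void example_checks(int count)
{
        WARN_ON(count < 0);     /* arrives with fmt == NULL, no extra message */
        WARN(count < 0, "count went negative: %d\n", count);
}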
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 4e5288a831de..157de3a47832 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -58,21 +58,21 @@ void thread_group_cputime(
58 struct task_struct *tsk, 58 struct task_struct *tsk,
59 struct task_cputime *times) 59 struct task_cputime *times)
60{ 60{
61 struct signal_struct *sig; 61 struct task_cputime *totals, *tot;
62 int i; 62 int i;
63 struct task_cputime *tot;
64 63
65 sig = tsk->signal; 64 totals = tsk->signal->cputime.totals;
66 if (unlikely(!sig) || !sig->cputime.totals) { 65 if (!totals) {
67 times->utime = tsk->utime; 66 times->utime = tsk->utime;
68 times->stime = tsk->stime; 67 times->stime = tsk->stime;
69 times->sum_exec_runtime = tsk->se.sum_exec_runtime; 68 times->sum_exec_runtime = tsk->se.sum_exec_runtime;
70 return; 69 return;
71 } 70 }
71
72 times->stime = times->utime = cputime_zero; 72 times->stime = times->utime = cputime_zero;
73 times->sum_exec_runtime = 0; 73 times->sum_exec_runtime = 0;
74 for_each_possible_cpu(i) { 74 for_each_possible_cpu(i) {
75 tot = per_cpu_ptr(tsk->signal->cputime.totals, i); 75 tot = per_cpu_ptr(totals, i);
76 times->utime = cputime_add(times->utime, tot->utime); 76 times->utime = cputime_add(times->utime, tot->utime);
77 times->stime = cputime_add(times->stime, tot->stime); 77 times->stime = cputime_add(times->stime, tot->stime);
78 times->sum_exec_runtime += tot->sum_exec_runtime; 78 times->sum_exec_runtime += tot->sum_exec_runtime;
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index a140e44eebba..887c63787de6 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
116 * must supply functions here, even if the function just returns 116 * must supply functions here, even if the function just returns
117 * ENOSYS. The standard POSIX timer management code assumes the 117 * ENOSYS. The standard POSIX timer management code assumes the
118 * following: 1.) The k_itimer struct (sched.h) is used for the 118 * following: 1.) The k_itimer struct (sched.h) is used for the
119 * timer. 2.) The list, it_lock, it_clock, it_id and it_process 119 * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
120 * fields are not modified by timer code. 120 * fields are not modified by timer code.
121 * 121 *
122 * At this time all functions EXCEPT clock_nanosleep can be 122 * At this time all functions EXCEPT clock_nanosleep can be
@@ -319,7 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
319 319
320int posix_timer_event(struct k_itimer *timr, int si_private) 320int posix_timer_event(struct k_itimer *timr, int si_private)
321{ 321{
322 int shared, ret; 322 struct task_struct *task;
323 int shared, ret = -1;
323 /* 324 /*
324 * FIXME: if ->sigq is queued we can race with 325 * FIXME: if ->sigq is queued we can race with
325 * dequeue_signal()->do_schedule_next_timer(). 326 * dequeue_signal()->do_schedule_next_timer().
@@ -333,8 +334,13 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
333 */ 334 */
334 timr->sigq->info.si_sys_private = si_private; 335 timr->sigq->info.si_sys_private = si_private;
335 336
336 shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID); 337 rcu_read_lock();
337 ret = send_sigqueue(timr->sigq, timr->it_process, shared); 338 task = pid_task(timr->it_pid, PIDTYPE_PID);
339 if (task) {
340 shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
341 ret = send_sigqueue(timr->sigq, task, shared);
342 }
343 rcu_read_unlock();
338 /* If we failed to send the signal the timer stops. */ 344 /* If we failed to send the signal the timer stops. */
339 return ret > 0; 345 return ret > 0;
340} 346}
@@ -411,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
411 return ret; 417 return ret;
412} 418}
413 419
414static struct task_struct * good_sigevent(sigevent_t * event) 420static struct pid *good_sigevent(sigevent_t * event)
415{ 421{
416 struct task_struct *rtn = current->group_leader; 422 struct task_struct *rtn = current->group_leader;
417 423
@@ -425,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
425 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) 431 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
426 return NULL; 432 return NULL;
427 433
428 return rtn; 434 return task_pid(rtn);
429} 435}
430 436
431void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock) 437void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -464,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
464 idr_remove(&posix_timers_id, tmr->it_id); 470 idr_remove(&posix_timers_id, tmr->it_id);
465 spin_unlock_irqrestore(&idr_lock, flags); 471 spin_unlock_irqrestore(&idr_lock, flags);
466 } 472 }
473 put_pid(tmr->it_pid);
467 sigqueue_free(tmr->sigq); 474 sigqueue_free(tmr->sigq);
468 kmem_cache_free(posix_timers_cache, tmr); 475 kmem_cache_free(posix_timers_cache, tmr);
469} 476}
@@ -477,7 +484,6 @@ sys_timer_create(const clockid_t which_clock,
477{ 484{
478 struct k_itimer *new_timer; 485 struct k_itimer *new_timer;
479 int error, new_timer_id; 486 int error, new_timer_id;
480 struct task_struct *process;
481 sigevent_t event; 487 sigevent_t event;
482 int it_id_set = IT_ID_NOT_SET; 488 int it_id_set = IT_ID_NOT_SET;
483 489
@@ -531,11 +537,9 @@ sys_timer_create(const clockid_t which_clock,
531 goto out; 537 goto out;
532 } 538 }
533 rcu_read_lock(); 539 rcu_read_lock();
534 process = good_sigevent(&event); 540 new_timer->it_pid = get_pid(good_sigevent(&event));
535 if (process)
536 get_task_struct(process);
537 rcu_read_unlock(); 541 rcu_read_unlock();
538 if (!process) { 542 if (!new_timer->it_pid) {
539 error = -EINVAL; 543 error = -EINVAL;
540 goto out; 544 goto out;
541 } 545 }
@@ -543,8 +547,7 @@ sys_timer_create(const clockid_t which_clock,
543 event.sigev_notify = SIGEV_SIGNAL; 547 event.sigev_notify = SIGEV_SIGNAL;
544 event.sigev_signo = SIGALRM; 548 event.sigev_signo = SIGALRM;
545 event.sigev_value.sival_int = new_timer->it_id; 549 event.sigev_value.sival_int = new_timer->it_id;
546 process = current->group_leader; 550 new_timer->it_pid = get_pid(task_tgid(current));
547 get_task_struct(process);
548 } 551 }
549 552
550 new_timer->it_sigev_notify = event.sigev_notify; 553 new_timer->it_sigev_notify = event.sigev_notify;
@@ -554,7 +557,7 @@ sys_timer_create(const clockid_t which_clock,
554 new_timer->sigq->info.si_code = SI_TIMER; 557 new_timer->sigq->info.si_code = SI_TIMER;
555 558
556 spin_lock_irq(&current->sighand->siglock); 559 spin_lock_irq(&current->sighand->siglock);
557 new_timer->it_process = process; 560 new_timer->it_signal = current->signal;
558 list_add(&new_timer->list, &current->signal->posix_timers); 561 list_add(&new_timer->list, &current->signal->posix_timers);
559 spin_unlock_irq(&current->sighand->siglock); 562 spin_unlock_irq(&current->sighand->siglock);
560 563
@@ -589,8 +592,7 @@ static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
589 timr = idr_find(&posix_timers_id, (int)timer_id); 592 timr = idr_find(&posix_timers_id, (int)timer_id);
590 if (timr) { 593 if (timr) {
591 spin_lock(&timr->it_lock); 594 spin_lock(&timr->it_lock);
592 if (timr->it_process && 595 if (timr->it_signal == current->signal) {
593 same_thread_group(timr->it_process, current)) {
594 spin_unlock(&idr_lock); 596 spin_unlock(&idr_lock);
595 return timr; 597 return timr;
596 } 598 }
@@ -837,8 +839,7 @@ retry_delete:
837 * This keeps any tasks waiting on the spin lock from thinking 839 * This keeps any tasks waiting on the spin lock from thinking
838 * they got something (see the lock code above). 840 * they got something (see the lock code above).
839 */ 841 */
840 put_task_struct(timer->it_process); 842 timer->it_signal = NULL;
841 timer->it_process = NULL;
842 843
843 unlock_timer(timer, flags); 844 unlock_timer(timer, flags);
844 release_posix_timer(timer, IT_ID_SET); 845 release_posix_timer(timer, IT_ID_SET);
@@ -864,8 +865,7 @@ retry_delete:
864 * This keeps any tasks waiting on the spin lock from thinking 865 * This keeps any tasks waiting on the spin lock from thinking
865 * they got something (see the lock code above). 866 * they got something (see the lock code above).
866 */ 867 */
867 put_task_struct(timer->it_process); 868 timer->it_signal = NULL;
868 timer->it_process = NULL;
869 869
870 unlock_timer(timer, flags); 870 unlock_timer(timer, flags);
871 release_posix_timer(timer, IT_ID_SET); 871 release_posix_timer(timer, IT_ID_SET);
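The posix-timer rework stops pinning a task_struct for the timer's whole lifetime and holds a reference-counted struct pid instead; delivery resolves the pid under RCU, so a target that has exited simply yields NULL rather than a dangling pointer. The lookup pattern in isolation (names and the delivery call are illustrative):

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int signal_pid_sketch(struct pid *pid, struct siginfo *info, int sig)
{
        struct task_struct *task;
        int ret = -ESRCH;

        rcu_read_lock();
        task = pid_task(pid, PIDTYPE_PID);      /* NULL if the task is gone */
        if (task)
                ret = send_sig_info(sig, info, task);
        rcu_read_unlock();
        return ret;
}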
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 72016f051477..97890831e1b5 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -27,7 +27,7 @@ static DECLARE_WORK(poweroff_work, do_poweroff);
27static void handle_poweroff(int key, struct tty_struct *tty) 27static void handle_poweroff(int key, struct tty_struct *tty)
28{ 28{
29 /* run sysrq poweroff on boot cpu */ 29 /* run sysrq poweroff on boot cpu */
30 schedule_work_on(first_cpu(cpu_online_map), &poweroff_work); 30 schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
31} 31}
32 32
33static struct sysrq_key_op sysrq_poweroff_op = { 33static struct sysrq_key_op sysrq_poweroff_op = {
diff --git a/kernel/printk.c b/kernel/printk.c
index f492f1583d77..e651ab05655f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -662,7 +662,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
662 if (recursion_bug) { 662 if (recursion_bug) {
663 recursion_bug = 0; 663 recursion_bug = 0;
664 strcpy(printk_buf, recursion_bug_msg); 664 strcpy(printk_buf, recursion_bug_msg);
665 printed_len = sizeof(recursion_bug_msg); 665 printed_len = strlen(recursion_bug_msg);
666 } 666 }
667 /* Emit the output into the temporary buffer */ 667 /* Emit the output into the temporary buffer */
668 printed_len += vscnprintf(printk_buf + printed_len, 668 printed_len += vscnprintf(printk_buf + printed_len,
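The printk fix is the classic sizeof-vs-strlen slip: for a message kept in a char array, sizeof() also counts the terminating NUL, so using it as "bytes already emitted" pushes the following vscnprintf() past the string and embeds a stray '\0' in the log buffer. Illustratively (the array below is just an example):

static const char example_msg[] = "BUG: recent printk recursion!\n";
/* sizeof(example_msg) == strlen(example_msg) + 1 */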
diff --git a/kernel/profile.c b/kernel/profile.c
index 60adefb59b5e..d18e2d2654f2 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -45,7 +45,7 @@ static unsigned long prof_len, prof_shift;
45int prof_on __read_mostly; 45int prof_on __read_mostly;
46EXPORT_SYMBOL_GPL(prof_on); 46EXPORT_SYMBOL_GPL(prof_on);
47 47
48static cpumask_t prof_cpu_mask = CPU_MASK_ALL; 48static cpumask_var_t prof_cpu_mask;
49#ifdef CONFIG_SMP 49#ifdef CONFIG_SMP
50static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits); 50static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
51static DEFINE_PER_CPU(int, cpu_profile_flip); 51static DEFINE_PER_CPU(int, cpu_profile_flip);
@@ -113,9 +113,13 @@ int __ref profile_init(void)
113 buffer_bytes = prof_len*sizeof(atomic_t); 113 buffer_bytes = prof_len*sizeof(atomic_t);
114 if (!slab_is_available()) { 114 if (!slab_is_available()) {
115 prof_buffer = alloc_bootmem(buffer_bytes); 115 prof_buffer = alloc_bootmem(buffer_bytes);
116 alloc_bootmem_cpumask_var(&prof_cpu_mask);
116 return 0; 117 return 0;
117 } 118 }
118 119
120 if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
121 return -ENOMEM;
122
119 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); 123 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
120 if (prof_buffer) 124 if (prof_buffer)
121 return 0; 125 return 0;
@@ -128,6 +132,7 @@ int __ref profile_init(void)
128 if (prof_buffer) 132 if (prof_buffer)
129 return 0; 133 return 0;
130 134
135 free_cpumask_var(prof_cpu_mask);
131 return -ENOMEM; 136 return -ENOMEM;
132} 137}
133 138
@@ -386,13 +391,15 @@ out_free:
386 return NOTIFY_BAD; 391 return NOTIFY_BAD;
387 case CPU_ONLINE: 392 case CPU_ONLINE:
388 case CPU_ONLINE_FROZEN: 393 case CPU_ONLINE_FROZEN:
389 cpu_set(cpu, prof_cpu_mask); 394 if (prof_cpu_mask != NULL)
395 cpumask_set_cpu(cpu, prof_cpu_mask);
390 break; 396 break;
391 case CPU_UP_CANCELED: 397 case CPU_UP_CANCELED:
392 case CPU_UP_CANCELED_FROZEN: 398 case CPU_UP_CANCELED_FROZEN:
393 case CPU_DEAD: 399 case CPU_DEAD:
394 case CPU_DEAD_FROZEN: 400 case CPU_DEAD_FROZEN:
395 cpu_clear(cpu, prof_cpu_mask); 401 if (prof_cpu_mask != NULL)
402 cpumask_clear_cpu(cpu, prof_cpu_mask);
396 if (per_cpu(cpu_profile_hits, cpu)[0]) { 403 if (per_cpu(cpu_profile_hits, cpu)[0]) {
397 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); 404 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
398 per_cpu(cpu_profile_hits, cpu)[0] = NULL; 405 per_cpu(cpu_profile_hits, cpu)[0] = NULL;
@@ -430,7 +437,8 @@ void profile_tick(int type)
430 437
431 if (type == CPU_PROFILING && timer_hook) 438 if (type == CPU_PROFILING && timer_hook)
432 timer_hook(regs); 439 timer_hook(regs);
433 if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask)) 440 if (!user_mode(regs) && prof_cpu_mask != NULL &&
441 cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
434 profile_hit(type, (void *)profile_pc(regs)); 442 profile_hit(type, (void *)profile_pc(regs));
435} 443}
436 444
@@ -442,7 +450,7 @@ void profile_tick(int type)
442static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, 450static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
443 int count, int *eof, void *data) 451 int count, int *eof, void *data)
444{ 452{
445 int len = cpumask_scnprintf(page, count, *(cpumask_t *)data); 453 int len = cpumask_scnprintf(page, count, data);
446 if (count - len < 2) 454 if (count - len < 2)
447 return -EINVAL; 455 return -EINVAL;
448 len += sprintf(page + len, "\n"); 456 len += sprintf(page + len, "\n");
@@ -452,16 +460,20 @@ static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
452static int prof_cpu_mask_write_proc(struct file *file, 460static int prof_cpu_mask_write_proc(struct file *file,
453 const char __user *buffer, unsigned long count, void *data) 461 const char __user *buffer, unsigned long count, void *data)
454{ 462{
455 cpumask_t *mask = (cpumask_t *)data; 463 struct cpumask *mask = data;
456 unsigned long full_count = count, err; 464 unsigned long full_count = count, err;
457 cpumask_t new_value; 465 cpumask_var_t new_value;
458 466
459 err = cpumask_parse_user(buffer, count, new_value); 467 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
460 if (err) 468 return -ENOMEM;
461 return err;
462 469
463 *mask = new_value; 470 err = cpumask_parse_user(buffer, count, new_value);
464 return full_count; 471 if (!err) {
472 cpumask_copy(mask, new_value);
473 err = full_count;
474 }
475 free_cpumask_var(new_value);
476 return err;
465} 477}
466 478
467void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) 479void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
@@ -472,7 +484,7 @@ void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
472 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); 484 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
473 if (!entry) 485 if (!entry)
474 return; 486 return;
475 entry->data = (void *)&prof_cpu_mask; 487 entry->data = prof_cpu_mask;
476 entry->read_proc = prof_cpu_mask_read_proc; 488 entry->read_proc = prof_cpu_mask_read_proc;
477 entry->write_proc = prof_cpu_mask_write_proc; 489 entry->write_proc = prof_cpu_mask_write_proc;
478} 490}
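profile.c now keeps prof_cpu_mask in a cpumask_var_t and must handle being initialised before the slab allocator is available, hence the bootmem variant on the early path. The split as a sketch (names are illustrative):

#include <linux/cpumask.h>
#include <linux/slab.h>

static cpumask_var_t example_mask;

static int __init example_mask_init(bool slab_up)
{
        if (!slab_up) {
                alloc_bootmem_cpumask_var(&example_mask);       /* cannot fail */
                return 0;
        }
        return alloc_cpumask_var(&example_mask, GFP_KERNEL) ? 0 : -ENOMEM;
}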
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 37f72e551542..490934fc7ac3 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -63,14 +63,14 @@ static struct rcu_ctrlblk rcu_ctrlblk = {
63 .completed = -300, 63 .completed = -300,
64 .pending = -300, 64 .pending = -300,
65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
66 .cpumask = CPU_MASK_NONE, 66 .cpumask = CPU_BITS_NONE,
67}; 67};
68static struct rcu_ctrlblk rcu_bh_ctrlblk = { 68static struct rcu_ctrlblk rcu_bh_ctrlblk = {
69 .cur = -300, 69 .cur = -300,
70 .completed = -300, 70 .completed = -300,
71 .pending = -300, 71 .pending = -300,
72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
73 .cpumask = CPU_MASK_NONE, 73 .cpumask = CPU_BITS_NONE,
74}; 74};
75 75
76DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; 76DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
@@ -85,7 +85,6 @@ static void force_quiescent_state(struct rcu_data *rdp,
85 struct rcu_ctrlblk *rcp) 85 struct rcu_ctrlblk *rcp)
86{ 86{
87 int cpu; 87 int cpu;
88 cpumask_t cpumask;
89 unsigned long flags; 88 unsigned long flags;
90 89
91 set_need_resched(); 90 set_need_resched();
@@ -96,10 +95,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
96 * Don't send IPI to itself. With irqs disabled, 95 * Don't send IPI to itself. With irqs disabled,
97 * rdp->cpu is the current cpu. 96 * rdp->cpu is the current cpu.
98 * 97 *
99 * cpu_online_map is updated by the _cpu_down() 98 * cpu_online_mask is updated by the _cpu_down()
100 * using __stop_machine(). Since we're in irqs disabled 99 * using __stop_machine(). Since we're in irqs disabled
101 * section, __stop_machine() is not exectuting, hence 100 * section, __stop_machine() is not exectuting, hence
102 * the cpu_online_map is stable. 101 * the cpu_online_mask is stable.
103 * 102 *
104 * However, a cpu might have been offlined _just_ before 103 * However, a cpu might have been offlined _just_ before
105 * we disabled irqs while entering here. 104 * we disabled irqs while entering here.
@@ -107,13 +106,14 @@ static void force_quiescent_state(struct rcu_data *rdp,
107 * notification, leading to the offlined cpu's bit 106 * notification, leading to the offlined cpu's bit
108 * being set in the rcp->cpumask. 107 * being set in the rcp->cpumask.
109 * 108 *
110 * Hence cpumask = (rcp->cpumask & cpu_online_map) to prevent 109 * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
111 * sending smp_reschedule() to an offlined CPU. 110 * sending smp_reschedule() to an offlined CPU.
112 */ 111 */
113 cpus_and(cpumask, rcp->cpumask, cpu_online_map); 112 for_each_cpu_and(cpu,
114 cpu_clear(rdp->cpu, cpumask); 113 to_cpumask(rcp->cpumask), cpu_online_mask) {
115 for_each_cpu_mask_nr(cpu, cpumask) 114 if (cpu != rdp->cpu)
116 smp_send_reschedule(cpu); 115 smp_send_reschedule(cpu);
116 }
117 } 117 }
118 spin_unlock_irqrestore(&rcp->lock, flags); 118 spin_unlock_irqrestore(&rcp->lock, flags);
119} 119}
@@ -191,9 +191,9 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
191 191
192 /* OK, time to rat on our buddy... */ 192 /* OK, time to rat on our buddy... */
193 193
194 printk(KERN_ERR "RCU detected CPU stalls:"); 194 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
195 for_each_possible_cpu(cpu) { 195 for_each_possible_cpu(cpu) {
196 if (cpu_isset(cpu, rcp->cpumask)) 196 if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
197 printk(" %d", cpu); 197 printk(" %d", cpu);
198 } 198 }
199 printk(" (detected by %d, t=%ld jiffies)\n", 199 printk(" (detected by %d, t=%ld jiffies)\n",
@@ -204,7 +204,7 @@ static void print_cpu_stall(struct rcu_ctrlblk *rcp)
204{ 204{
205 unsigned long flags; 205 unsigned long flags;
206 206
207 printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n", 207 printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
208 smp_processor_id(), jiffies, 208 smp_processor_id(), jiffies,
209 jiffies - rcp->gp_start); 209 jiffies - rcp->gp_start);
210 dump_stack(); 210 dump_stack();
@@ -221,7 +221,8 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
221 long delta; 221 long delta;
222 222
223 delta = jiffies - rcp->jiffies_stall; 223 delta = jiffies - rcp->jiffies_stall;
224 if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) { 224 if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
225 delta >= 0) {
225 226
226 /* We haven't checked in, so go dump stack. */ 227 /* We haven't checked in, so go dump stack. */
227 print_cpu_stall(rcp); 228 print_cpu_stall(rcp);
@@ -393,7 +394,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
393 * unnecessarily. 394 * unnecessarily.
394 */ 395 */
395 smp_mb(); 396 smp_mb();
396 cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); 397 cpumask_andnot(to_cpumask(rcp->cpumask),
398 cpu_online_mask, nohz_cpu_mask);
397 399
398 rcp->signaled = 0; 400 rcp->signaled = 0;
399 } 401 }
@@ -406,8 +408,8 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
406 */ 408 */
407static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) 409static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
408{ 410{
409 cpu_clear(cpu, rcp->cpumask); 411 cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
410 if (cpus_empty(rcp->cpumask)) { 412 if (cpumask_empty(to_cpumask(rcp->cpumask))) {
411 /* batch completed ! */ 413 /* batch completed ! */
412 rcp->completed = rcp->cur; 414 rcp->completed = rcp->cur;
413 rcu_start_batch(rcp); 415 rcu_start_batch(rcp);
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 59236e8b9daa..f9dc8f3720f6 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -164,7 +164,8 @@ static char *rcu_try_flip_state_names[] =
164 { "idle", "waitack", "waitzero", "waitmb" }; 164 { "idle", "waitack", "waitzero", "waitmb" };
165#endif /* #ifdef CONFIG_RCU_TRACE */ 165#endif /* #ifdef CONFIG_RCU_TRACE */
166 166
167static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE; 167static DECLARE_BITMAP(rcu_cpu_online_map, NR_CPUS) __read_mostly
168 = CPU_BITS_NONE;
168 169
169/* 170/*
170 * Enum and per-CPU flag to determine when each CPU has seen 171 * Enum and per-CPU flag to determine when each CPU has seen
@@ -551,6 +552,16 @@ void rcu_irq_exit(void)
551 } 552 }
552} 553}
553 554
555void rcu_nmi_enter(void)
556{
557 rcu_irq_enter();
558}
559
560void rcu_nmi_exit(void)
561{
562 rcu_irq_exit();
563}
564
554static void dyntick_save_progress_counter(int cpu) 565static void dyntick_save_progress_counter(int cpu)
555{ 566{
556 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); 567 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
@@ -748,7 +759,7 @@ rcu_try_flip_idle(void)
748 759
749 /* Now ask each CPU for acknowledgement of the flip. */ 760 /* Now ask each CPU for acknowledgement of the flip. */
750 761
751 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 762 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
752 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 763 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
753 dyntick_save_progress_counter(cpu); 764 dyntick_save_progress_counter(cpu);
754 } 765 }
@@ -766,7 +777,7 @@ rcu_try_flip_waitack(void)
766 int cpu; 777 int cpu;
767 778
768 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 779 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
769 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 780 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
770 if (rcu_try_flip_waitack_needed(cpu) && 781 if (rcu_try_flip_waitack_needed(cpu) &&
771 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 782 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
772 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); 783 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -798,7 +809,7 @@ rcu_try_flip_waitzero(void)
798 /* Check to see if the sum of the "last" counters is zero. */ 809 /* Check to see if the sum of the "last" counters is zero. */
799 810
800 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 811 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
801 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 812 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
802 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 813 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
803 if (sum != 0) { 814 if (sum != 0) {
804 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); 815 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -813,7 +824,7 @@ rcu_try_flip_waitzero(void)
813 smp_mb(); /* ^^^^^^^^^^^^ */ 824 smp_mb(); /* ^^^^^^^^^^^^ */
814 825
815 /* Call for a memory barrier from each CPU. */ 826 /* Call for a memory barrier from each CPU. */
816 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { 827 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map)) {
817 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 828 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
818 dyntick_save_progress_counter(cpu); 829 dyntick_save_progress_counter(cpu);
819 } 830 }
@@ -833,7 +844,7 @@ rcu_try_flip_waitmb(void)
833 int cpu; 844 int cpu;
834 845
835 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 846 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
836 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) 847 for_each_cpu(cpu, to_cpumask(rcu_cpu_online_map))
837 if (rcu_try_flip_waitmb_needed(cpu) && 848 if (rcu_try_flip_waitmb_needed(cpu) &&
838 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 849 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
839 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); 850 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
@@ -1022,7 +1033,7 @@ void rcu_offline_cpu(int cpu)
1022 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; 1033 RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0;
1023 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; 1034 RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0;
1024 1035
1025 cpu_clear(cpu, rcu_cpu_online_map); 1036 cpumask_clear_cpu(cpu, to_cpumask(rcu_cpu_online_map));
1026 1037
1027 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1038 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
1028 1039
@@ -1062,7 +1073,7 @@ void __cpuinit rcu_online_cpu(int cpu)
1062 struct rcu_data *rdp; 1073 struct rcu_data *rdp;
1063 1074
1064 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); 1075 spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags);
1065 cpu_set(cpu, rcu_cpu_online_map); 1076 cpumask_set_cpu(cpu, to_cpumask(rcu_cpu_online_map));
1066 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); 1077 spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags);
1067 1078
1068 /* 1079 /*
@@ -1420,7 +1431,7 @@ void __init __rcu_init(void)
1420 * We don't need protection against CPU-Hotplug here 1431 * We don't need protection against CPU-Hotplug here
1421 * since 1432 * since
1422 * a) If a CPU comes online while we are iterating over the 1433 * a) If a CPU comes online while we are iterating over the
1423 * cpu_online_map below, we would only end up making a 1434 * cpu_online_mask below, we would only end up making a
1424 * duplicate call to rcu_online_cpu() which sets the corresponding 1435 * duplicate call to rcu_online_cpu() which sets the corresponding
1425 * CPU's mask in the rcu_cpu_online_map. 1436 * CPU's mask in the rcu_cpu_online_map.
1426 * 1437 *
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 35c2d3360ecf..7c2665cac172 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -149,12 +149,12 @@ static void rcupreempt_trace_sum(struct rcupreempt_trace *sp)
149 sp->done_length += cp->done_length; 149 sp->done_length += cp->done_length;
150 sp->done_add += cp->done_add; 150 sp->done_add += cp->done_add;
151 sp->done_remove += cp->done_remove; 151 sp->done_remove += cp->done_remove;
152 atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked)); 152 atomic_add(atomic_read(&cp->done_invoked), &sp->done_invoked);
153 sp->rcu_check_callbacks += cp->rcu_check_callbacks; 153 sp->rcu_check_callbacks += cp->rcu_check_callbacks;
154 atomic_set(&sp->rcu_try_flip_1, 154 atomic_add(atomic_read(&cp->rcu_try_flip_1),
155 atomic_read(&cp->rcu_try_flip_1)); 155 &sp->rcu_try_flip_1);
156 atomic_set(&sp->rcu_try_flip_e1, 156 atomic_add(atomic_read(&cp->rcu_try_flip_e1),
157 atomic_read(&cp->rcu_try_flip_e1)); 157 &sp->rcu_try_flip_e1);
158 sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1; 158 sp->rcu_try_flip_i1 += cp->rcu_try_flip_i1;
159 sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1; 159 sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1;
160 sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1; 160 sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1;
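The rcupreempt_trace change above matters because rcupreempt_trace_sum() accumulates one running total across all CPUs: atomic_set() would overwrite the total with whichever CPU happened to be summed last, while atomic_add() folds every CPU's contribution in. A minimal userspace sketch of the difference (plain ints stand in for atomic_t; the values are made up):

    #include <stdio.h>

    /* Model: summing a per-CPU counter into a global total. */
    #define NCPUS 4

    int main(void)
    {
        int per_cpu_invoked[NCPUS] = { 3, 5, 2, 7 };
        int sum_set = 0, sum_add = 0;

        for (int cpu = 0; cpu < NCPUS; cpu++) {
            sum_set = per_cpu_invoked[cpu];   /* atomic_set(): keeps only the last CPU */
            sum_add += per_cpu_invoked[cpu];  /* atomic_add(): true accumulation       */
        }
        printf("set-style result: %d (wrong)\n", sum_set);   /* 7  */
        printf("add-style result: %d (correct)\n", sum_add); /* 17 */
        return 0;
    }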
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 85cb90588a55..3245b40952c6 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -39,6 +39,7 @@
39#include <linux/moduleparam.h> 39#include <linux/moduleparam.h>
40#include <linux/percpu.h> 40#include <linux/percpu.h>
41#include <linux/notifier.h> 41#include <linux/notifier.h>
42#include <linux/reboot.h>
42#include <linux/freezer.h> 43#include <linux/freezer.h>
43#include <linux/cpu.h> 44#include <linux/cpu.h>
44#include <linux/delay.h> 45#include <linux/delay.h>
@@ -108,7 +109,6 @@ struct rcu_torture {
108 int rtort_mbtest; 109 int rtort_mbtest;
109}; 110};
110 111
111static int fullstop = 0; /* stop generating callbacks at test end. */
112static LIST_HEAD(rcu_torture_freelist); 112static LIST_HEAD(rcu_torture_freelist);
113static struct rcu_torture *rcu_torture_current = NULL; 113static struct rcu_torture *rcu_torture_current = NULL;
114static long rcu_torture_current_version = 0; 114static long rcu_torture_current_version = 0;
@@ -136,6 +136,30 @@ static int stutter_pause_test = 0;
136#endif 136#endif
137int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT; 137int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
138 138
139#define FULLSTOP_SIGNALED 1 /* Bail due to signal. */
140#define FULLSTOP_CLEANUP 2 /* Orderly shutdown. */
141static int fullstop; /* stop generating callbacks at test end. */
142DEFINE_MUTEX(fullstop_mutex); /* protect fullstop transitions and */
143 /* spawning of kthreads. */
144
145/*
146 * Detect and respond to a signal-based shutdown.
147 */
148static int
149rcutorture_shutdown_notify(struct notifier_block *unused1,
150 unsigned long unused2, void *unused3)
151{
152 if (fullstop)
153 return NOTIFY_DONE;
154 if (signal_pending(current)) {
155 mutex_lock(&fullstop_mutex);
156 if (!ACCESS_ONCE(fullstop))
157 fullstop = FULLSTOP_SIGNALED;
158 mutex_unlock(&fullstop_mutex);
159 }
160 return NOTIFY_DONE;
161}
162
139/* 163/*
140 * Allocate an element from the rcu_tortures pool. 164 * Allocate an element from the rcu_tortures pool.
141 */ 165 */
@@ -199,11 +223,12 @@ rcu_random(struct rcu_random_state *rrsp)
199static void 223static void
200rcu_stutter_wait(void) 224rcu_stutter_wait(void)
201{ 225{
202 while (stutter_pause_test || !rcutorture_runnable) 226 while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
203 if (rcutorture_runnable) 227 if (rcutorture_runnable)
204 schedule_timeout_interruptible(1); 228 schedule_timeout_interruptible(1);
205 else 229 else
206 schedule_timeout_interruptible(round_jiffies_relative(HZ)); 230 schedule_timeout_interruptible(round_jiffies_relative(HZ));
231 }
207} 232}
208 233
209/* 234/*
@@ -599,7 +624,7 @@ rcu_torture_writer(void *arg)
599 rcu_stutter_wait(); 624 rcu_stutter_wait();
600 } while (!kthread_should_stop() && !fullstop); 625 } while (!kthread_should_stop() && !fullstop);
601 VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); 626 VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
602 while (!kthread_should_stop()) 627 while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
603 schedule_timeout_uninterruptible(1); 628 schedule_timeout_uninterruptible(1);
604 return 0; 629 return 0;
605} 630}
@@ -624,7 +649,7 @@ rcu_torture_fakewriter(void *arg)
624 } while (!kthread_should_stop() && !fullstop); 649 } while (!kthread_should_stop() && !fullstop);
625 650
626 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); 651 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
627 while (!kthread_should_stop()) 652 while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
628 schedule_timeout_uninterruptible(1); 653 schedule_timeout_uninterruptible(1);
629 return 0; 654 return 0;
630} 655}
@@ -734,7 +759,7 @@ rcu_torture_reader(void *arg)
734 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); 759 VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
735 if (irqreader && cur_ops->irqcapable) 760 if (irqreader && cur_ops->irqcapable)
736 del_timer_sync(&t); 761 del_timer_sync(&t);
737 while (!kthread_should_stop()) 762 while (!kthread_should_stop() && fullstop != FULLSTOP_SIGNALED)
738 schedule_timeout_uninterruptible(1); 763 schedule_timeout_uninterruptible(1);
739 return 0; 764 return 0;
740} 765}
@@ -831,7 +856,7 @@ rcu_torture_stats(void *arg)
831 do { 856 do {
832 schedule_timeout_interruptible(stat_interval * HZ); 857 schedule_timeout_interruptible(stat_interval * HZ);
833 rcu_torture_stats_print(); 858 rcu_torture_stats_print();
834 } while (!kthread_should_stop()); 859 } while (!kthread_should_stop() && !fullstop);
835 VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); 860 VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
836 return 0; 861 return 0;
837} 862}
@@ -843,49 +868,52 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
843 */ 868 */
844static void rcu_torture_shuffle_tasks(void) 869static void rcu_torture_shuffle_tasks(void)
845{ 870{
846 cpumask_t tmp_mask; 871 cpumask_var_t tmp_mask;
847 int i; 872 int i;
848 873
849 cpus_setall(tmp_mask); 874 if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
875 BUG();
876
877 cpumask_setall(tmp_mask);
850 get_online_cpus(); 878 get_online_cpus();
851 879
852 /* No point in shuffling if there is only one online CPU (ex: UP) */ 880 /* No point in shuffling if there is only one online CPU (ex: UP) */
853 if (num_online_cpus() == 1) { 881 if (num_online_cpus() == 1)
854 put_online_cpus(); 882 goto out;
855 return;
856 }
857 883
858 if (rcu_idle_cpu != -1) 884 if (rcu_idle_cpu != -1)
859 cpu_clear(rcu_idle_cpu, tmp_mask); 885 cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
860 886
861 set_cpus_allowed_ptr(current, &tmp_mask); 887 set_cpus_allowed_ptr(current, tmp_mask);
862 888
863 if (reader_tasks) { 889 if (reader_tasks) {
864 for (i = 0; i < nrealreaders; i++) 890 for (i = 0; i < nrealreaders; i++)
865 if (reader_tasks[i]) 891 if (reader_tasks[i])
866 set_cpus_allowed_ptr(reader_tasks[i], 892 set_cpus_allowed_ptr(reader_tasks[i],
867 &tmp_mask); 893 tmp_mask);
868 } 894 }
869 895
870 if (fakewriter_tasks) { 896 if (fakewriter_tasks) {
871 for (i = 0; i < nfakewriters; i++) 897 for (i = 0; i < nfakewriters; i++)
872 if (fakewriter_tasks[i]) 898 if (fakewriter_tasks[i])
873 set_cpus_allowed_ptr(fakewriter_tasks[i], 899 set_cpus_allowed_ptr(fakewriter_tasks[i],
874 &tmp_mask); 900 tmp_mask);
875 } 901 }
876 902
877 if (writer_task) 903 if (writer_task)
878 set_cpus_allowed_ptr(writer_task, &tmp_mask); 904 set_cpus_allowed_ptr(writer_task, tmp_mask);
879 905
880 if (stats_task) 906 if (stats_task)
881 set_cpus_allowed_ptr(stats_task, &tmp_mask); 907 set_cpus_allowed_ptr(stats_task, tmp_mask);
882 908
883 if (rcu_idle_cpu == -1) 909 if (rcu_idle_cpu == -1)
884 rcu_idle_cpu = num_online_cpus() - 1; 910 rcu_idle_cpu = num_online_cpus() - 1;
885 else 911 else
886 rcu_idle_cpu--; 912 rcu_idle_cpu--;
887 913
914out:
888 put_online_cpus(); 915 put_online_cpus();
916 free_cpumask_var(tmp_mask);
889} 917}
890 918
891/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the 919/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -899,7 +927,7 @@ rcu_torture_shuffle(void *arg)
899 do { 927 do {
900 schedule_timeout_interruptible(shuffle_interval * HZ); 928 schedule_timeout_interruptible(shuffle_interval * HZ);
901 rcu_torture_shuffle_tasks(); 929 rcu_torture_shuffle_tasks();
902 } while (!kthread_should_stop()); 930 } while (!kthread_should_stop() && !fullstop);
903 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); 931 VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
904 return 0; 932 return 0;
905} 933}
@@ -914,10 +942,10 @@ rcu_torture_stutter(void *arg)
914 do { 942 do {
915 schedule_timeout_interruptible(stutter * HZ); 943 schedule_timeout_interruptible(stutter * HZ);
916 stutter_pause_test = 1; 944 stutter_pause_test = 1;
917 if (!kthread_should_stop()) 945 if (!kthread_should_stop() && !fullstop)
918 schedule_timeout_interruptible(stutter * HZ); 946 schedule_timeout_interruptible(stutter * HZ);
919 stutter_pause_test = 0; 947 stutter_pause_test = 0;
920 } while (!kthread_should_stop()); 948 } while (!kthread_should_stop() && !fullstop);
921 VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); 949 VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
922 return 0; 950 return 0;
923} 951}
@@ -934,12 +962,27 @@ rcu_torture_print_module_parms(char *tag)
934 stutter, irqreader); 962 stutter, irqreader);
935} 963}
936 964
965static struct notifier_block rcutorture_nb = {
966 .notifier_call = rcutorture_shutdown_notify,
967};
968
937static void 969static void
938rcu_torture_cleanup(void) 970rcu_torture_cleanup(void)
939{ 971{
940 int i; 972 int i;
941 973
942 fullstop = 1; 974 mutex_lock(&fullstop_mutex);
975 if (!fullstop) {
976 /* If being signaled, let it happen, then exit. */
977 mutex_unlock(&fullstop_mutex);
978 schedule_timeout_interruptible(10 * HZ);
979 if (cur_ops->cb_barrier != NULL)
980 cur_ops->cb_barrier();
981 return;
982 }
983 fullstop = FULLSTOP_CLEANUP;
984 mutex_unlock(&fullstop_mutex);
985 unregister_reboot_notifier(&rcutorture_nb);
943 if (stutter_task) { 986 if (stutter_task) {
944 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); 987 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
945 kthread_stop(stutter_task); 988 kthread_stop(stutter_task);
@@ -1015,6 +1058,8 @@ rcu_torture_init(void)
1015 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, 1058 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
1016 &srcu_ops, &sched_ops, &sched_ops_sync, }; 1059 &srcu_ops, &sched_ops, &sched_ops_sync, };
1017 1060
1061 mutex_lock(&fullstop_mutex);
1062
1018 /* Process args and tell the world that the torturer is on the job. */ 1063 /* Process args and tell the world that the torturer is on the job. */
1019 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { 1064 for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
1020 cur_ops = torture_ops[i]; 1065 cur_ops = torture_ops[i];
@@ -1024,6 +1069,7 @@ rcu_torture_init(void)
1024 if (i == ARRAY_SIZE(torture_ops)) { 1069 if (i == ARRAY_SIZE(torture_ops)) {
1025 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", 1070 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
1026 torture_type); 1071 torture_type);
1072 mutex_unlock(&fullstop_mutex);
1027 return (-EINVAL); 1073 return (-EINVAL);
1028 } 1074 }
1029 if (cur_ops->init) 1075 if (cur_ops->init)
@@ -1146,9 +1192,12 @@ rcu_torture_init(void)
1146 goto unwind; 1192 goto unwind;
1147 } 1193 }
1148 } 1194 }
1195 register_reboot_notifier(&rcutorture_nb);
1196 mutex_unlock(&fullstop_mutex);
1149 return 0; 1197 return 0;
1150 1198
1151unwind: 1199unwind:
1200 mutex_unlock(&fullstop_mutex);
1152 rcu_torture_cleanup(); 1201 rcu_torture_cleanup();
1153 return firsterr; 1202 return firsterr;
1154} 1203}
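The rcutorture changes above introduce a small shutdown state machine: fullstop moves from 0 to FULLSTOP_SIGNALED when the reboot notifier sees a pending signal, or to FULLSTOP_CLEANUP when the module is unloaded, and the torture kthreads key their exit behaviour off that state. A minimal single-threaded userspace sketch of the state transitions (the real code serializes them with fullstop_mutex; names mirror the patch but the model is illustrative):

    #include <stdio.h>

    /* Userspace model of the rcutorture shutdown handshake (illustrative only). */
    enum fullstop_state {
        FULLSTOP_RUNNING  = 0,  /* test still generating callbacks           */
        FULLSTOP_SIGNALED = 1,  /* reboot/signal path: don't wait for stop   */
        FULLSTOP_CLEANUP  = 2,  /* orderly module unload                     */
    };

    static enum fullstop_state fullstop = FULLSTOP_RUNNING;

    /* Reboot-notifier path: record a signal-based shutdown. */
    static void shutdown_notify_model(void)
    {
        if (fullstop == FULLSTOP_RUNNING)
            fullstop = FULLSTOP_SIGNALED;
    }

    /* Module-unload path: claim an orderly cleanup unless a signal got there first. */
    static int cleanup_model(void)
    {
        if (fullstop != FULLSTOP_RUNNING)
            return 0;               /* being rebooted: let the signal path win  */
        fullstop = FULLSTOP_CLEANUP;
        return 1;                   /* safe to kthread_stop() the torture tasks */
    }

    int main(void)
    {
        shutdown_notify_model();
        printf("cleanup allowed after reboot notice? %d\n", cleanup_model()); /* 0 */
        return 0;
    }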
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
new file mode 100644
index 000000000000..a342b032112c
--- /dev/null
+++ b/kernel/rcutree.c
@@ -0,0 +1,1535 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
21 * Manfred Spraul <manfred@colorfullife.com>
22 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
23 *
24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26 *
27 * For detailed explanation of Read-Copy Update mechanism see -
28 * Documentation/RCU
29 */
30#include <linux/types.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/spinlock.h>
34#include <linux/smp.h>
35#include <linux/rcupdate.h>
36#include <linux/interrupt.h>
37#include <linux/sched.h>
38#include <asm/atomic.h>
39#include <linux/bitops.h>
40#include <linux/module.h>
41#include <linux/completion.h>
42#include <linux/moduleparam.h>
43#include <linux/percpu.h>
44#include <linux/notifier.h>
45#include <linux/cpu.h>
46#include <linux/mutex.h>
47#include <linux/time.h>
48
49#ifdef CONFIG_DEBUG_LOCK_ALLOC
50static struct lock_class_key rcu_lock_key;
51struct lockdep_map rcu_lock_map =
52 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
53EXPORT_SYMBOL_GPL(rcu_lock_map);
54#endif
55
56/* Data structures. */
57
58#define RCU_STATE_INITIALIZER(name) { \
59 .level = { &name.node[0] }, \
60 .levelcnt = { \
61 NUM_RCU_LVL_0, /* root of hierarchy. */ \
62 NUM_RCU_LVL_1, \
63 NUM_RCU_LVL_2, \
64 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
65 }, \
66 .signaled = RCU_SIGNAL_INIT, \
67 .gpnum = -300, \
68 .completed = -300, \
69 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
70 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
71 .n_force_qs = 0, \
72 .n_force_qs_ngp = 0, \
73}
74
75struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state);
76DEFINE_PER_CPU(struct rcu_data, rcu_data);
77
78struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
79DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
80
81#ifdef CONFIG_NO_HZ
82DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks);
83#endif /* #ifdef CONFIG_NO_HZ */
84
85static int blimit = 10; /* Maximum callbacks per softirq. */
86static int qhimark = 10000; /* If this many pending, ignore blimit. */
87static int qlowmark = 100; /* Once only this many pending, use blimit. */
88
89static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
90
91/*
92 * Return the number of RCU batches processed thus far for debug & stats.
93 */
94long rcu_batches_completed(void)
95{
96 return rcu_state.completed;
97}
98EXPORT_SYMBOL_GPL(rcu_batches_completed);
99
100/*
101 * Return the number of RCU BH batches processed thus far for debug & stats.
102 */
103long rcu_batches_completed_bh(void)
104{
105 return rcu_bh_state.completed;
106}
107EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
108
109/*
110 * Does the CPU have callbacks ready to be invoked?
111 */
112static int
113cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
114{
115 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
116}
117
118/*
 119 * Does the current CPU require a not-yet-started grace period?
120 */
121static int
122cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
123{
124 /* ACCESS_ONCE() because we are accessing outside of lock. */
125 return *rdp->nxttail[RCU_DONE_TAIL] &&
126 ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
127}
128
129/*
130 * Return the root node of the specified rcu_state structure.
131 */
132static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
133{
134 return &rsp->node[0];
135}
136
137#ifdef CONFIG_SMP
138
139/*
140 * If the specified CPU is offline, tell the caller that it is in
141 * a quiescent state. Otherwise, whack it with a reschedule IPI.
142 * Grace periods can end up waiting on an offline CPU when that
143 * CPU is in the process of coming online -- it will be added to the
144 * rcu_node bitmasks before it actually makes it online. The same thing
 145 * can happen while a CPU is in the process of going offline. Because this
146 * race is quite rare, we check for it after detecting that the grace
147 * period has been delayed rather than checking each and every CPU
148 * each and every time we start a new grace period.
149 */
150static int rcu_implicit_offline_qs(struct rcu_data *rdp)
151{
152 /*
153 * If the CPU is offline, it is in a quiescent state. We can
154 * trust its state not to change because interrupts are disabled.
155 */
156 if (cpu_is_offline(rdp->cpu)) {
157 rdp->offline_fqs++;
158 return 1;
159 }
160
161 /* The CPU is online, so send it a reschedule IPI. */
162 if (rdp->cpu != smp_processor_id())
163 smp_send_reschedule(rdp->cpu);
164 else
165 set_need_resched();
166 rdp->resched_ipi++;
167 return 0;
168}
169
170#endif /* #ifdef CONFIG_SMP */
171
172#ifdef CONFIG_NO_HZ
173static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
174
175/**
176 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
177 *
178 * Enter nohz mode, in other words, -leave- the mode in which RCU
179 * read-side critical sections can occur. (Though RCU read-side
180 * critical sections can occur in irq handlers in nohz mode, a possibility
181 * handled by rcu_irq_enter() and rcu_irq_exit()).
182 */
183void rcu_enter_nohz(void)
184{
185 unsigned long flags;
186 struct rcu_dynticks *rdtp;
187
188 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
189 local_irq_save(flags);
190 rdtp = &__get_cpu_var(rcu_dynticks);
191 rdtp->dynticks++;
192 rdtp->dynticks_nesting--;
193 WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
194 local_irq_restore(flags);
195}
196
197/*
198 * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
199 *
200 * Exit nohz mode, in other words, -enter- the mode in which RCU
201 * read-side critical sections normally occur.
202 */
203void rcu_exit_nohz(void)
204{
205 unsigned long flags;
206 struct rcu_dynticks *rdtp;
207
208 local_irq_save(flags);
209 rdtp = &__get_cpu_var(rcu_dynticks);
210 rdtp->dynticks++;
211 rdtp->dynticks_nesting++;
212 WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
213 local_irq_restore(flags);
214 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
215}
216
217/**
218 * rcu_nmi_enter - inform RCU of entry to NMI context
219 *
220 * If the CPU was idle with dynamic ticks active, and there is no
221 * irq handler running, this updates rdtp->dynticks_nmi to let the
222 * RCU grace-period handling know that the CPU is active.
223 */
224void rcu_nmi_enter(void)
225{
226 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
227
228 if (rdtp->dynticks & 0x1)
229 return;
230 rdtp->dynticks_nmi++;
231 WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs);
232 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
233}
234
235/**
236 * rcu_nmi_exit - inform RCU of exit from NMI context
237 *
238 * If the CPU was idle with dynamic ticks active, and there is no
239 * irq handler running, this updates rdtp->dynticks_nmi to let the
240 * RCU grace-period handling know that the CPU is no longer active.
241 */
242void rcu_nmi_exit(void)
243{
244 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
245
246 if (rdtp->dynticks & 0x1)
247 return;
248 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
249 rdtp->dynticks_nmi++;
250 WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs);
251}
252
253/**
254 * rcu_irq_enter - inform RCU of entry to hard irq context
255 *
256 * If the CPU was idle with dynamic ticks active, this updates the
257 * rdtp->dynticks to let the RCU handling know that the CPU is active.
258 */
259void rcu_irq_enter(void)
260{
261 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
262
263 if (rdtp->dynticks_nesting++)
264 return;
265 rdtp->dynticks++;
266 WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs);
267 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
268}
269
270/**
271 * rcu_irq_exit - inform RCU of exit from hard irq context
272 *
273 * If the CPU was idle with dynamic ticks active, update the rdp->dynticks
 274 * to let the RCU handling be aware that the CPU is going back to idle
275 * with no ticks.
276 */
277void rcu_irq_exit(void)
278{
279 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
280
281 if (--rdtp->dynticks_nesting)
282 return;
283 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
284 rdtp->dynticks++;
285 WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs);
286
287 /* If the interrupt queued a callback, get out of dyntick mode. */
288 if (__get_cpu_var(rcu_data).nxtlist ||
289 __get_cpu_var(rcu_bh_data).nxtlist)
290 set_need_resched();
291}
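The rcu_irq_enter()/rcu_irq_exit() pair above maintains the dynticks convention that an odd counter value means the CPU is visible to RCU and an even value means it is dyntick-idle, with dynticks_nesting deciding when the counter is actually bumped. A minimal userspace model of just that parity protocol (struct and function names are illustrative, not the kernel's):

    #include <stdio.h>

    /* Userspace model of the dynticks counters used above. */
    struct dynticks_model {
        long dynticks;          /* odd = CPU active for RCU, even = dyntick-idle */
        long dynticks_nesting;  /* irq nesting depth while in nohz mode          */
    };

    static void irq_enter_model(struct dynticks_model *d)
    {
        if (d->dynticks_nesting++)
            return;             /* nested irq: counter already odd     */
        d->dynticks++;          /* even -> odd: CPU now visible to RCU */
    }

    static void irq_exit_model(struct dynticks_model *d)
    {
        if (--d->dynticks_nesting)
            return;             /* still inside an outer irq handler */
        d->dynticks++;          /* odd -> even: back to dyntick-idle */
    }

    int main(void)
    {
        struct dynticks_model d = { .dynticks = 0, .dynticks_nesting = 0 };

        irq_enter_model(&d);    /* outer irq  */
        irq_enter_model(&d);    /* nested irq */
        printf("in irq: dynticks=%ld (odd => active)\n", d.dynticks);
        irq_exit_model(&d);
        irq_exit_model(&d);
        printf("idle:   dynticks=%ld (even => quiescent)\n", d.dynticks);
        return 0;
    }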
292
293/*
294 * Record the specified "completed" value, which is later used to validate
295 * dynticks counter manipulations. Specify "rsp->completed - 1" to
296 * unconditionally invalidate any future dynticks manipulations (which is
297 * useful at the beginning of a grace period).
298 */
299static void dyntick_record_completed(struct rcu_state *rsp, long comp)
300{
301 rsp->dynticks_completed = comp;
302}
303
304#ifdef CONFIG_SMP
305
306/*
307 * Recall the previously recorded value of the completion for dynticks.
308 */
309static long dyntick_recall_completed(struct rcu_state *rsp)
310{
311 return rsp->dynticks_completed;
312}
313
314/*
315 * Snapshot the specified CPU's dynticks counter so that we can later
316 * credit them with an implicit quiescent state. Return 1 if this CPU
317 * is already in a quiescent state courtesy of dynticks idle mode.
318 */
319static int dyntick_save_progress_counter(struct rcu_data *rdp)
320{
321 int ret;
322 int snap;
323 int snap_nmi;
324
325 snap = rdp->dynticks->dynticks;
326 snap_nmi = rdp->dynticks->dynticks_nmi;
327 smp_mb(); /* Order sampling of snap with end of grace period. */
328 rdp->dynticks_snap = snap;
329 rdp->dynticks_nmi_snap = snap_nmi;
330 ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
331 if (ret)
332 rdp->dynticks_fqs++;
333 return ret;
334}
335
336/*
337 * Return true if the specified CPU has passed through a quiescent
 338 * state by virtue of being in or having passed through a dynticks
339 * idle state since the last call to dyntick_save_progress_counter()
340 * for this same CPU.
341 */
342static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
343{
344 long curr;
345 long curr_nmi;
346 long snap;
347 long snap_nmi;
348
349 curr = rdp->dynticks->dynticks;
350 snap = rdp->dynticks_snap;
351 curr_nmi = rdp->dynticks->dynticks_nmi;
352 snap_nmi = rdp->dynticks_nmi_snap;
353 smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
354
355 /*
356 * If the CPU passed through or entered a dynticks idle phase with
357 * no active irq/NMI handlers, then we can safely pretend that the CPU
358 * already acknowledged the request to pass through a quiescent
359 * state. Either way, that CPU cannot possibly be in an RCU
360 * read-side critical section that started before the beginning
361 * of the current RCU grace period.
362 */
363 if ((curr != snap || (curr & 0x1) == 0) &&
364 (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
365 rdp->dynticks_fqs++;
366 return 1;
367 }
368
369 /* Go check for the CPU being offline. */
370 return rcu_implicit_offline_qs(rdp);
371}
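The snapshot/compare logic above credits a CPU with a quiescent state if its dynticks counter either changed since dyntick_save_progress_counter() ran or was even (idle) at snapshot time; the real check applies the same test to both the irq and NMI counters. A one-counter userspace sketch of that test (illustrative only):

    #include <stdio.h>

    /* Userspace model of the snapshot/compare check in rcu_implicit_dynticks_qs(). */
    static int dynticks_passed_qs(long snap, long curr)
    {
        /* The counter moved, or it was even at snapshot time: the CPU was
         * (or went) dyntick-idle since the snapshot, so it cannot still hold
         * a read-side critical section from before the grace period. */
        return curr != snap || (curr & 0x1) == 0;
    }

    int main(void)
    {
        printf("%d\n", dynticks_passed_qs(4, 4));   /* 1: even => was idle        */
        printf("%d\n", dynticks_passed_qs(5, 5));   /* 0: still active, no change */
        printf("%d\n", dynticks_passed_qs(5, 7));   /* 1: passed through idle     */
        return 0;
    }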
372
373#endif /* #ifdef CONFIG_SMP */
374
375#else /* #ifdef CONFIG_NO_HZ */
376
377static void dyntick_record_completed(struct rcu_state *rsp, long comp)
378{
379}
380
381#ifdef CONFIG_SMP
382
383/*
384 * If there are no dynticks, then the only way that a CPU can passively
385 * be in a quiescent state is to be offline. Unlike dynticks idle, which
386 * is a point in time during the prior (already finished) grace period,
387 * an offline CPU is always in a quiescent state, and thus can be
388 * unconditionally applied. So just return the current value of completed.
389 */
390static long dyntick_recall_completed(struct rcu_state *rsp)
391{
392 return rsp->completed;
393}
394
395static int dyntick_save_progress_counter(struct rcu_data *rdp)
396{
397 return 0;
398}
399
400static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
401{
402 return rcu_implicit_offline_qs(rdp);
403}
404
405#endif /* #ifdef CONFIG_SMP */
406
407#endif /* #else #ifdef CONFIG_NO_HZ */
408
409#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
410
411static void record_gp_stall_check_time(struct rcu_state *rsp)
412{
413 rsp->gp_start = jiffies;
414 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
415}
416
417static void print_other_cpu_stall(struct rcu_state *rsp)
418{
419 int cpu;
420 long delta;
421 unsigned long flags;
422 struct rcu_node *rnp = rcu_get_root(rsp);
423 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
424 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
425
426 /* Only let one CPU complain about others per time interval. */
427
428 spin_lock_irqsave(&rnp->lock, flags);
429 delta = jiffies - rsp->jiffies_stall;
430 if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
431 spin_unlock_irqrestore(&rnp->lock, flags);
432 return;
433 }
434 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
435 spin_unlock_irqrestore(&rnp->lock, flags);
436
437 /* OK, time to rat on our buddy... */
438
439 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
440 for (; rnp_cur < rnp_end; rnp_cur++) {
441 if (rnp_cur->qsmask == 0)
442 continue;
443 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
444 if (rnp_cur->qsmask & (1UL << cpu))
445 printk(" %d", rnp_cur->grplo + cpu);
446 }
447 printk(" (detected by %d, t=%ld jiffies)\n",
448 smp_processor_id(), (long)(jiffies - rsp->gp_start));
449 force_quiescent_state(rsp, 0); /* Kick them all. */
450}
451
452static void print_cpu_stall(struct rcu_state *rsp)
453{
454 unsigned long flags;
455 struct rcu_node *rnp = rcu_get_root(rsp);
456
457 printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
458 smp_processor_id(), jiffies - rsp->gp_start);
459 dump_stack();
460 spin_lock_irqsave(&rnp->lock, flags);
461 if ((long)(jiffies - rsp->jiffies_stall) >= 0)
462 rsp->jiffies_stall =
463 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
464 spin_unlock_irqrestore(&rnp->lock, flags);
465 set_need_resched(); /* kick ourselves to get things going. */
466}
467
468static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
469{
470 long delta;
471 struct rcu_node *rnp;
472
473 delta = jiffies - rsp->jiffies_stall;
474 rnp = rdp->mynode;
475 if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
476
477 /* We haven't checked in, so go dump stack. */
478 print_cpu_stall(rsp);
479
480 } else if (rsp->gpnum != rsp->completed &&
481 delta >= RCU_STALL_RAT_DELAY) {
482
483 /* They had two time units to dump stack, so complain. */
484 print_other_cpu_stall(rsp);
485 }
486}
487
488#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
489
490static void record_gp_stall_check_time(struct rcu_state *rsp)
491{
492}
493
494static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
495{
496}
497
498#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
499
500/*
501 * Update CPU-local rcu_data state to record the newly noticed grace period.
502 * This is used both when we started the grace period and when we notice
503 * that someone else started the grace period.
504 */
505static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
506{
507 rdp->qs_pending = 1;
508 rdp->passed_quiesc = 0;
509 rdp->gpnum = rsp->gpnum;
510 rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
511 RCU_JIFFIES_TILL_FORCE_QS;
512}
513
514/*
 515 * Did someone else start a new RCU grace period since we last
516 * checked? Update local state appropriately if so. Must be called
517 * on the CPU corresponding to rdp.
518 */
519static int
520check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
521{
522 unsigned long flags;
523 int ret = 0;
524
525 local_irq_save(flags);
526 if (rdp->gpnum != rsp->gpnum) {
527 note_new_gpnum(rsp, rdp);
528 ret = 1;
529 }
530 local_irq_restore(flags);
531 return ret;
532}
533
534/*
535 * Start a new RCU grace period if warranted, re-initializing the hierarchy
536 * in preparation for detecting the next grace period. The caller must hold
537 * the root node's ->lock, which is released before return. Hard irqs must
538 * be disabled.
539 */
540static void
541rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
542 __releases(rcu_get_root(rsp)->lock)
543{
544 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
545 struct rcu_node *rnp = rcu_get_root(rsp);
546 struct rcu_node *rnp_cur;
547 struct rcu_node *rnp_end;
548
549 if (!cpu_needs_another_gp(rsp, rdp)) {
550 spin_unlock_irqrestore(&rnp->lock, flags);
551 return;
552 }
553
554 /* Advance to a new grace period and initialize state. */
555 rsp->gpnum++;
556 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
557 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
558 rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
559 RCU_JIFFIES_TILL_FORCE_QS;
560 record_gp_stall_check_time(rsp);
561 dyntick_record_completed(rsp, rsp->completed - 1);
562 note_new_gpnum(rsp, rdp);
563
564 /*
565 * Because we are first, we know that all our callbacks will
566 * be covered by this upcoming grace period, even the ones
567 * that were registered arbitrarily recently.
568 */
569 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
570 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
571
572 /* Special-case the common single-level case. */
573 if (NUM_RCU_NODES == 1) {
574 rnp->qsmask = rnp->qsmaskinit;
575 spin_unlock_irqrestore(&rnp->lock, flags);
576 return;
577 }
578
579 spin_unlock(&rnp->lock); /* leave irqs disabled. */
580
581
582 /* Exclude any concurrent CPU-hotplug operations. */
583 spin_lock(&rsp->onofflock); /* irqs already disabled. */
584
585 /*
586 * Set the quiescent-state-needed bits in all the non-leaf RCU
587 * nodes for all currently online CPUs. This operation relies
588 * on the layout of the hierarchy within the rsp->node[] array.
589 * Note that other CPUs will access only the leaves of the
590 * hierarchy, which still indicate that no grace period is in
591 * progress. In addition, we have excluded CPU-hotplug operations.
592 *
593 * We therefore do not need to hold any locks. Any required
594 * memory barriers will be supplied by the locks guarding the
595 * leaf rcu_nodes in the hierarchy.
596 */
597
598 rnp_end = rsp->level[NUM_RCU_LVLS - 1];
599 for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
600 rnp_cur->qsmask = rnp_cur->qsmaskinit;
601
602 /*
603 * Now set up the leaf nodes. Here we must be careful. First,
604 * we need to hold the lock in order to exclude other CPUs, which
605 * might be contending for the leaf nodes' locks. Second, as
606 * soon as we initialize a given leaf node, its CPUs might run
607 * up the rest of the hierarchy. We must therefore acquire locks
608 * for each node that we touch during this stage. (But we still
609 * are excluding CPU-hotplug operations.)
610 *
611 * Note that the grace period cannot complete until we finish
612 * the initialization process, as there will be at least one
613 * qsmask bit set in the root node until that time, namely the
614 * one corresponding to this CPU.
615 */
616 rnp_end = &rsp->node[NUM_RCU_NODES];
617 rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
618 for (; rnp_cur < rnp_end; rnp_cur++) {
619 spin_lock(&rnp_cur->lock); /* irqs already disabled. */
620 rnp_cur->qsmask = rnp_cur->qsmaskinit;
621 spin_unlock(&rnp_cur->lock); /* irqs already disabled. */
622 }
623
624 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
625 spin_unlock_irqrestore(&rsp->onofflock, flags);
626}
627
628/*
629 * Advance this CPU's callbacks, but only if the current grace period
630 * has ended. This may be called only from the CPU to whom the rdp
631 * belongs.
632 */
633static void
634rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
635{
636 long completed_snap;
637 unsigned long flags;
638
639 local_irq_save(flags);
640 completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
641
642 /* Did another grace period end? */
643 if (rdp->completed != completed_snap) {
644
645 /* Advance callbacks. No harm if list empty. */
646 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
647 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
648 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
649
650 /* Remember that we saw this grace-period completion. */
651 rdp->completed = completed_snap;
652 }
653 local_irq_restore(flags);
654}
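rcu_process_gp_end() above advances the four-segment callback list by sliding each tail pointer one segment toward RCU_DONE_TAIL once a grace period completes. A minimal userspace sketch of that advancement, using callback counts per segment instead of the kernel's linked-list tail pointers (the numbers are made up):

    #include <stdio.h>

    /* Userspace model of the segmented callback list advanced above. */
    enum { RCU_DONE_TAIL, RCU_WAIT_TAIL, RCU_NEXT_READY_TAIL, RCU_NEXT_TAIL, RCU_NEXT_SIZE };

    int main(void)
    {
        /* nxttail[i] = number of callbacks up to and including segment i:
         * 2 done, 3 waiting, 2 ready for next GP, 2 newly queued. */
        int nxttail[RCU_NEXT_SIZE] = { 2, 5, 7, 9 };

        /* Grace period ended: every segment slides one step toward DONE. */
        nxttail[RCU_DONE_TAIL]       = nxttail[RCU_WAIT_TAIL];
        nxttail[RCU_WAIT_TAIL]       = nxttail[RCU_NEXT_READY_TAIL];
        nxttail[RCU_NEXT_READY_TAIL] = nxttail[RCU_NEXT_TAIL];

        printf("callbacks now ready to invoke: %d\n", nxttail[RCU_DONE_TAIL]); /* 5 */
        return 0;
    }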
655
656/*
657 * Similar to cpu_quiet(), for which it is a helper function. Allows
658 * a group of CPUs to be quieted at one go, though all the CPUs in the
659 * group must be represented by the same leaf rcu_node structure.
660 * That structure's lock must be held upon entry, and it is released
661 * before return.
662 */
663static void
664cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
665 unsigned long flags)
666 __releases(rnp->lock)
667{
668 /* Walk up the rcu_node hierarchy. */
669 for (;;) {
670 if (!(rnp->qsmask & mask)) {
671
672 /* Our bit has already been cleared, so done. */
673 spin_unlock_irqrestore(&rnp->lock, flags);
674 return;
675 }
676 rnp->qsmask &= ~mask;
677 if (rnp->qsmask != 0) {
678
679 /* Other bits still set at this level, so done. */
680 spin_unlock_irqrestore(&rnp->lock, flags);
681 return;
682 }
683 mask = rnp->grpmask;
684 if (rnp->parent == NULL) {
685
686 /* No more levels. Exit loop holding root lock. */
687
688 break;
689 }
690 spin_unlock_irqrestore(&rnp->lock, flags);
691 rnp = rnp->parent;
692 spin_lock_irqsave(&rnp->lock, flags);
693 }
694
695 /*
696 * Get here if we are the last CPU to pass through a quiescent
697 * state for this grace period. Clean up and let rcu_start_gp()
698 * start up the next grace period if one is needed. Note that
699 * we still hold rnp->lock, as required by rcu_start_gp(), which
700 * will release it.
701 */
702 rsp->completed = rsp->gpnum;
703 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
704 rcu_start_gp(rsp, flags); /* releases rnp->lock. */
705}
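cpu_quiet_msk() above clears the reporting CPU's (or child group's) bit in its leaf rcu_node and, whenever a node's qsmask empties, propagates the clearing one level up, so the grace period ends only when the root's mask reaches zero. A minimal two-level userspace sketch of that walk, ignoring the locking that the real function carries up the tree (names are illustrative):

    #include <stdio.h>

    /* Userspace model of the qsmask walk in cpu_quiet_msk(). */
    struct node_model {
        unsigned long qsmask;        /* CPUs/children still owing a quiescent state */
        unsigned long grpmask;       /* this node's bit in its parent               */
        struct node_model *parent;
    };

    static void quiet_mask_model(struct node_model *rnp, unsigned long mask)
    {
        for (; rnp != NULL; rnp = rnp->parent) {
            rnp->qsmask &= ~mask;
            if (rnp->qsmask != 0)
                return;              /* other CPUs/children still pending        */
            mask = rnp->grpmask;     /* this node went quiet: clear bit upstairs */
        }
        printf("grace period complete\n");
    }

    int main(void)
    {
        struct node_model root = { .qsmask = 0x1, .grpmask = 0x0, .parent = NULL };
        struct node_model leaf = { .qsmask = 0x3, .grpmask = 0x1, .parent = &root };

        quiet_mask_model(&leaf, 0x1);   /* CPU 0: leaf still has CPU 1 pending   */
        quiet_mask_model(&leaf, 0x2);   /* CPU 1: leaf empties, root bit clears  */
        return 0;
    }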
706
707/*
708 * Record a quiescent state for the specified CPU, which must either be
709 * the current CPU or an offline CPU. The lastcomp argument is used to
710 * make sure we are still in the grace period of interest. We don't want
711 * to end the current grace period based on quiescent states detected in
712 * an earlier grace period!
713 */
714static void
715cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
716{
717 unsigned long flags;
718 unsigned long mask;
719 struct rcu_node *rnp;
720
721 rnp = rdp->mynode;
722 spin_lock_irqsave(&rnp->lock, flags);
723 if (lastcomp != ACCESS_ONCE(rsp->completed)) {
724
725 /*
726 * Someone beat us to it for this grace period, so leave.
727 * The race with GP start is resolved by the fact that we
728 * hold the leaf rcu_node lock, so that the per-CPU bits
729 * cannot yet be initialized -- so we would simply find our
730 * CPU's bit already cleared in cpu_quiet_msk() if this race
731 * occurred.
732 */
733 rdp->passed_quiesc = 0; /* try again later! */
734 spin_unlock_irqrestore(&rnp->lock, flags);
735 return;
736 }
737 mask = rdp->grpmask;
738 if ((rnp->qsmask & mask) == 0) {
739 spin_unlock_irqrestore(&rnp->lock, flags);
740 } else {
741 rdp->qs_pending = 0;
742
743 /*
744 * This GP can't end until cpu checks in, so all of our
745 * callbacks can be processed during the next GP.
746 */
747 rdp = rsp->rda[smp_processor_id()];
748 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
749
750 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
751 }
752}
753
754/*
755 * Check to see if there is a new grace period of which this CPU
756 * is not yet aware, and if so, set up local rcu_data state for it.
757 * Otherwise, see if this CPU has just passed through its first
758 * quiescent state for this grace period, and record that fact if so.
759 */
760static void
761rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
762{
763 /* If there is now a new grace period, record and return. */
764 if (check_for_new_grace_period(rsp, rdp))
765 return;
766
767 /*
768 * Does this CPU still need to do its part for current grace period?
769 * If no, return and let the other CPUs do their part as well.
770 */
771 if (!rdp->qs_pending)
772 return;
773
774 /*
775 * Was there a quiescent state since the beginning of the grace
776 * period? If no, then exit and wait for the next call.
777 */
778 if (!rdp->passed_quiesc)
779 return;
780
781 /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */
782 cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
783}
784
785#ifdef CONFIG_HOTPLUG_CPU
786
787/*
788 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
789 * and move all callbacks from the outgoing CPU to the current one.
790 */
791static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
792{
793 int i;
794 unsigned long flags;
795 long lastcomp;
796 unsigned long mask;
797 struct rcu_data *rdp = rsp->rda[cpu];
798 struct rcu_data *rdp_me;
799 struct rcu_node *rnp;
800
801 /* Exclude any attempts to start a new grace period. */
802 spin_lock_irqsave(&rsp->onofflock, flags);
803
804 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
805 rnp = rdp->mynode;
806 mask = rdp->grpmask; /* rnp->grplo is constant. */
807 do {
808 spin_lock(&rnp->lock); /* irqs already disabled. */
809 rnp->qsmaskinit &= ~mask;
810 if (rnp->qsmaskinit != 0) {
811 spin_unlock(&rnp->lock); /* irqs already disabled. */
812 break;
813 }
814 mask = rnp->grpmask;
815 spin_unlock(&rnp->lock); /* irqs already disabled. */
816 rnp = rnp->parent;
817 } while (rnp != NULL);
818 lastcomp = rsp->completed;
819
820 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
821
822 /* Being offline is a quiescent state, so go record it. */
823 cpu_quiet(cpu, rsp, rdp, lastcomp);
824
825 /*
826 * Move callbacks from the outgoing CPU to the running CPU.
 827 * Note that the outgoing CPU is now quiescent, so it is now 
 828 * (uncharacteristically) safe to access its rcu_data structure. 
829 * Note also that we must carefully retain the order of the
830 * outgoing CPU's callbacks in order for rcu_barrier() to work
831 * correctly. Finally, note that we start all the callbacks
832 * afresh, even those that have passed through a grace period
833 * and are therefore ready to invoke. The theory is that hotplug
834 * events are rare, and that if they are frequent enough to
835 * indefinitely delay callbacks, you have far worse things to
836 * be worrying about.
837 */
838 rdp_me = rsp->rda[smp_processor_id()];
839 if (rdp->nxtlist != NULL) {
840 *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
841 rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
842 rdp->nxtlist = NULL;
843 for (i = 0; i < RCU_NEXT_SIZE; i++)
844 rdp->nxttail[i] = &rdp->nxtlist;
845 rdp_me->qlen += rdp->qlen;
846 rdp->qlen = 0;
847 }
848 local_irq_restore(flags);
849}
850
851/*
852 * Remove the specified CPU from the RCU hierarchy and move any pending
853 * callbacks that it might have to the current CPU. This code assumes
854 * that at least one CPU in the system will remain running at all times.
855 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
856 */
857static void rcu_offline_cpu(int cpu)
858{
859 __rcu_offline_cpu(cpu, &rcu_state);
860 __rcu_offline_cpu(cpu, &rcu_bh_state);
861}
862
863#else /* #ifdef CONFIG_HOTPLUG_CPU */
864
865static void rcu_offline_cpu(int cpu)
866{
867}
868
869#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
870
871/*
872 * Invoke any RCU callbacks that have made it to the end of their grace
 873 * period. Throttle as specified by rdp->blimit. 
874 */
875static void rcu_do_batch(struct rcu_data *rdp)
876{
877 unsigned long flags;
878 struct rcu_head *next, *list, **tail;
879 int count;
880
 881 /* If no callbacks are ready, just return. */ 
882 if (!cpu_has_callbacks_ready_to_invoke(rdp))
883 return;
884
885 /*
886 * Extract the list of ready callbacks, disabling to prevent
887 * races with call_rcu() from interrupt handlers.
888 */
889 local_irq_save(flags);
890 list = rdp->nxtlist;
891 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
892 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
893 tail = rdp->nxttail[RCU_DONE_TAIL];
894 for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
895 if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
896 rdp->nxttail[count] = &rdp->nxtlist;
897 local_irq_restore(flags);
898
899 /* Invoke callbacks. */
900 count = 0;
901 while (list) {
902 next = list->next;
903 prefetch(next);
904 list->func(list);
905 list = next;
906 if (++count >= rdp->blimit)
907 break;
908 }
909
910 local_irq_save(flags);
911
912 /* Update count, and requeue any remaining callbacks. */
913 rdp->qlen -= count;
914 if (list != NULL) {
915 *tail = rdp->nxtlist;
916 rdp->nxtlist = list;
917 for (count = 0; count < RCU_NEXT_SIZE; count++)
918 if (&rdp->nxtlist == rdp->nxttail[count])
919 rdp->nxttail[count] = tail;
920 else
921 break;
922 }
923
924 /* Reinstate batch limit if we have worked down the excess. */
925 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
926 rdp->blimit = blimit;
927
928 local_irq_restore(flags);
929
930 /* Re-raise the RCU softirq if there are callbacks remaining. */
931 if (cpu_has_callbacks_ready_to_invoke(rdp))
932 raise_softirq(RCU_SOFTIRQ);
933}
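rcu_do_batch() above throttles callback invocation to blimit per softirq pass, lifts the limit entirely once the queue exceeds qhimark, and reinstates it after the backlog drops back below qlowmark. A minimal userspace sketch of just that hysteresis (the constants mirror the defaults declared earlier in the file; the flow itself is illustrative):

    #include <stdio.h>
    #include <limits.h>

    /* Userspace model of the blimit/qhimark/qlowmark throttling. */
    static int blimit = 10, qhimark = 10000, qlowmark = 100;

    int main(void)
    {
        long qlen, cur_blimit = blimit;

        qlen = 12000;                       /* a callback flood arrives          */
        if (qlen > qhimark)
            cur_blimit = LONG_MAX;          /* stop throttling, drain hard       */
        printf("flooded: blimit=%ld\n", cur_blimit);

        qlen = 80;                          /* backlog worked down past qlowmark */
        if (cur_blimit == LONG_MAX && qlen <= qlowmark)
            cur_blimit = blimit;            /* reinstate the per-softirq limit   */
        printf("drained: blimit=%ld\n", cur_blimit);
        return 0;
    }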
934
935/*
936 * Check to see if this CPU is in a non-context-switch quiescent state
937 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
938 * Also schedule the RCU softirq handler.
939 *
940 * This function must be called with hardirqs disabled. It is normally
941 * invoked from the scheduling-clock interrupt. If rcu_pending returns
942 * false, there is no point in invoking rcu_check_callbacks().
943 */
944void rcu_check_callbacks(int cpu, int user)
945{
946 if (user ||
947 (idle_cpu(cpu) && !in_softirq() &&
948 hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
949
950 /*
951 * Get here if this CPU took its interrupt from user
952 * mode or from the idle loop, and if this is not a
953 * nested interrupt. In this case, the CPU is in
954 * a quiescent state, so count it.
955 *
956 * No memory barrier is required here because both
957 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference
958 * only CPU-local variables that other CPUs neither
959 * access nor modify, at least not while the corresponding
960 * CPU is online.
961 */
962
963 rcu_qsctr_inc(cpu);
964 rcu_bh_qsctr_inc(cpu);
965
966 } else if (!in_softirq()) {
967
968 /*
969 * Get here if this CPU did not take its interrupt from
970 * softirq, in other words, if it is not interrupting
971 * a rcu_bh read-side critical section. This is an _bh
972 * critical section, so count it.
973 */
974
975 rcu_bh_qsctr_inc(cpu);
976 }
977 raise_softirq(RCU_SOFTIRQ);
978}
979
980#ifdef CONFIG_SMP
981
982/*
983 * Scan the leaf rcu_node structures, processing dyntick state for any that
984 * have not yet encountered a quiescent state, using the function specified.
985 * Returns 1 if the current grace period ends while scanning (possibly
986 * because we made it end).
987 */
988static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
989 int (*f)(struct rcu_data *))
990{
991 unsigned long bit;
992 int cpu;
993 unsigned long flags;
994 unsigned long mask;
995 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
996 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
997
998 for (; rnp_cur < rnp_end; rnp_cur++) {
999 mask = 0;
1000 spin_lock_irqsave(&rnp_cur->lock, flags);
1001 if (rsp->completed != lastcomp) {
1002 spin_unlock_irqrestore(&rnp_cur->lock, flags);
1003 return 1;
1004 }
1005 if (rnp_cur->qsmask == 0) {
1006 spin_unlock_irqrestore(&rnp_cur->lock, flags);
1007 continue;
1008 }
1009 cpu = rnp_cur->grplo;
1010 bit = 1;
1011 for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
1012 if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1013 mask |= bit;
1014 }
1015 if (mask != 0 && rsp->completed == lastcomp) {
1016
1017 /* cpu_quiet_msk() releases rnp_cur->lock. */
1018 cpu_quiet_msk(mask, rsp, rnp_cur, flags);
1019 continue;
1020 }
1021 spin_unlock_irqrestore(&rnp_cur->lock, flags);
1022 }
1023 return 0;
1024}
1025
1026/*
1027 * Force quiescent states on reluctant CPUs, and also detect which
1028 * CPUs are in dyntick-idle mode.
1029 */
1030static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1031{
1032 unsigned long flags;
1033 long lastcomp;
1034 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
1035 struct rcu_node *rnp = rcu_get_root(rsp);
1036 u8 signaled;
1037
1038 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
1039 return; /* No grace period in progress, nothing to force. */
1040 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
1041 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
1042 return; /* Someone else is already on the job. */
1043 }
1044 if (relaxed &&
1045 (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
1046 (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
1047 goto unlock_ret; /* no emergency and done recently. */
1048 rsp->n_force_qs++;
1049 spin_lock(&rnp->lock);
1050 lastcomp = rsp->completed;
1051 signaled = rsp->signaled;
1052 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1053 rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
1054 RCU_JIFFIES_TILL_FORCE_QS;
1055 if (lastcomp == rsp->gpnum) {
1056 rsp->n_force_qs_ngp++;
1057 spin_unlock(&rnp->lock);
1058 goto unlock_ret; /* no GP in progress, time updated. */
1059 }
1060 spin_unlock(&rnp->lock);
1061 switch (signaled) {
1062 case RCU_GP_INIT:
1063
1064 break; /* grace period still initializing, ignore. */
1065
1066 case RCU_SAVE_DYNTICK:
1067
1068 if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
1069 break; /* So gcc recognizes the dead code. */
1070
1071 /* Record dyntick-idle state. */
1072 if (rcu_process_dyntick(rsp, lastcomp,
1073 dyntick_save_progress_counter))
1074 goto unlock_ret;
1075
1076 /* Update state, record completion counter. */
1077 spin_lock(&rnp->lock);
1078 if (lastcomp == rsp->completed) {
1079 rsp->signaled = RCU_FORCE_QS;
1080 dyntick_record_completed(rsp, lastcomp);
1081 }
1082 spin_unlock(&rnp->lock);
1083 break;
1084
1085 case RCU_FORCE_QS:
1086
 1087 /* Check dyntick-idle state, send IPI to laggards. */ 
1088 if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp),
1089 rcu_implicit_dynticks_qs))
1090 goto unlock_ret;
1091
1092 /* Leave state in case more forcing is required. */
1093
1094 break;
1095 }
1096unlock_ret:
1097 spin_unlock_irqrestore(&rsp->fqslock, flags);
1098}
1099
1100#else /* #ifdef CONFIG_SMP */
1101
1102static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1103{
1104 set_need_resched();
1105}
1106
1107#endif /* #else #ifdef CONFIG_SMP */
1108
1109/*
1110 * This does the RCU processing work from softirq context for the
1111 * specified rcu_state and rcu_data structures. This may be called
1112 * only from the CPU to whom the rdp belongs.
1113 */
1114static void
1115__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1116{
1117 unsigned long flags;
1118
1119 /*
1120 * If an RCU GP has gone long enough, go check for dyntick
1121 * idle CPUs and, if needed, send resched IPIs.
1122 */
1123 if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
1124 (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
1125 force_quiescent_state(rsp, 1);
1126
1127 /*
1128 * Advance callbacks in response to end of earlier grace
1129 * period that some other CPU ended.
1130 */
1131 rcu_process_gp_end(rsp, rdp);
1132
1133 /* Update RCU state based on any recent quiescent states. */
1134 rcu_check_quiescent_state(rsp, rdp);
1135
1136 /* Does this CPU require a not-yet-started grace period? */
1137 if (cpu_needs_another_gp(rsp, rdp)) {
1138 spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
1139 rcu_start_gp(rsp, flags); /* releases above lock */
1140 }
1141
1142 /* If there are callbacks ready, invoke them. */
1143 rcu_do_batch(rdp);
1144}
1145
1146/*
1147 * Do softirq processing for the current CPU.
1148 */
1149static void rcu_process_callbacks(struct softirq_action *unused)
1150{
1151 /*
1152 * Memory references from any prior RCU read-side critical sections
1153 * executed by the interrupted code must be seen before any RCU
1154 * grace-period manipulations below.
1155 */
1156 smp_mb(); /* See above block comment. */
1157
1158 __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data));
1159 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1160
1161 /*
1162 * Memory references from any later RCU read-side critical sections
1163 * executed by the interrupted code must be seen after any RCU
1164 * grace-period manipulations above.
1165 */
1166 smp_mb(); /* See above block comment. */
1167}
1168
1169static void
1170__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1171 struct rcu_state *rsp)
1172{
1173 unsigned long flags;
1174 struct rcu_data *rdp;
1175
1176 head->func = func;
1177 head->next = NULL;
1178
1179 smp_mb(); /* Ensure RCU update seen before callback registry. */
1180
1181 /*
1182 * Opportunistically note grace-period endings and beginnings.
1183 * Note that we might see a beginning right after we see an
1184 * end, but never vice versa, since this CPU has to pass through
1185 * a quiescent state betweentimes.
1186 */
1187 local_irq_save(flags);
1188 rdp = rsp->rda[smp_processor_id()];
1189 rcu_process_gp_end(rsp, rdp);
1190 check_for_new_grace_period(rsp, rdp);
1191
1192 /* Add the callback to our list. */
1193 *rdp->nxttail[RCU_NEXT_TAIL] = head;
1194 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1195
1196 /* Start a new grace period if one not already started. */
1197 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
1198 unsigned long nestflag;
1199 struct rcu_node *rnp_root = rcu_get_root(rsp);
1200
1201 spin_lock_irqsave(&rnp_root->lock, nestflag);
1202 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
1203 }
1204
1205 /* Force the grace period if too many callbacks or too long waiting. */
1206 if (unlikely(++rdp->qlen > qhimark)) {
1207 rdp->blimit = LONG_MAX;
1208 force_quiescent_state(rsp, 0);
1209 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
1210 (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
1211 force_quiescent_state(rsp, 1);
1212 local_irq_restore(flags);
1213}
1214
1215/*
1216 * Queue an RCU callback for invocation after a grace period.
1217 */
1218void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1219{
1220 __call_rcu(head, func, &rcu_state);
1221}
1222EXPORT_SYMBOL_GPL(call_rcu);
1223
1224/*
 1224 * Queue an RCU callback for invocation after a quicker grace period. 
1226 */
1227void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1228{
1229 __call_rcu(head, func, &rcu_bh_state);
1230}
1231EXPORT_SYMBOL_GPL(call_rcu_bh);
1232
1233/*
1234 * Check to see if there is any immediate RCU-related work to be done
1235 * by the current CPU, for the specified type of RCU, returning 1 if so.
1236 * The checks are in order of increasing expense: checks that can be
1237 * carried out against CPU-local state are performed first. However,
1238 * we must check for CPU stalls first, else we might not get a chance.
1239 */
1240static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1241{
1242 rdp->n_rcu_pending++;
1243
1244 /* Check for CPU stalls, if enabled. */
1245 check_cpu_stall(rsp, rdp);
1246
1247 /* Is the RCU core waiting for a quiescent state from this CPU? */
1248 if (rdp->qs_pending)
1249 return 1;
1250
1251 /* Does this CPU have callbacks ready to invoke? */
1252 if (cpu_has_callbacks_ready_to_invoke(rdp))
1253 return 1;
1254
1255 /* Has RCU gone idle with this CPU needing another grace period? */
1256 if (cpu_needs_another_gp(rsp, rdp))
1257 return 1;
1258
1259 /* Has another RCU grace period completed? */
1260 if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */
1261 return 1;
1262
1263 /* Has a new RCU grace period started? */
1264 if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */
1265 return 1;
1266
1267 /* Has an RCU GP gone long enough to send resched IPIs &c? */
1268 if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
1269 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
1270 (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
1271 return 1;
1272
1273 /* nothing to do */
1274 return 0;
1275}
1276
1277/*
1278 * Check to see if there is any immediate RCU-related work to be done
1279 * by the current CPU, returning 1 if so. This function is part of the
1280 * RCU implementation; it is -not- an exported member of the RCU API.
1281 */
1282int rcu_pending(int cpu)
1283{
1284 return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) ||
1285 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu));
1286}
1287
1288/*
1289 * Check to see if any future RCU-related work will need to be done
1290 * by the current CPU, even if none need be done immediately, returning
1291 * 1 if so. This function is part of the RCU implementation; it is -not-
1292 * an exported member of the RCU API.
1293 */
1294int rcu_needs_cpu(int cpu)
1295{
1296 /* RCU callbacks either ready or pending? */
1297 return per_cpu(rcu_data, cpu).nxtlist ||
1298 per_cpu(rcu_bh_data, cpu).nxtlist;
1299}
1300
1301/*
1302 * Initialize a CPU's per-CPU RCU data. We take this "scorched earth"
1303 * approach so that we don't have to worry about how long the CPU has
1304 * been gone, or whether it ever was online previously. We do trust the
1305 * ->mynode field, as it is constant for a given struct rcu_data and
1306 * initialized during early boot.
1307 *
1308 * Note that only one online or offline event can be happening at a given
1309 * time. Note also that we can accept some slop in the rsp->completed
1310 * access due to the fact that this CPU cannot possibly have any RCU
1311 * callbacks in flight yet.
1312 */
1313static void
1314rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
1315{
1316 unsigned long flags;
1317 int i;
1318 long lastcomp;
1319 unsigned long mask;
1320 struct rcu_data *rdp = rsp->rda[cpu];
1321 struct rcu_node *rnp = rcu_get_root(rsp);
1322
1323 /* Set up local state, ensuring consistent view of global state. */
1324 spin_lock_irqsave(&rnp->lock, flags);
1325 lastcomp = rsp->completed;
1326 rdp->completed = lastcomp;
1327 rdp->gpnum = lastcomp;
1328 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1329 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1330 rdp->beenonline = 1; /* We have now been online. */
1331 rdp->passed_quiesc_completed = lastcomp - 1;
1332 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1333 rdp->nxtlist = NULL;
1334 for (i = 0; i < RCU_NEXT_SIZE; i++)
1335 rdp->nxttail[i] = &rdp->nxtlist;
1336 rdp->qlen = 0;
1337 rdp->blimit = blimit;
1338#ifdef CONFIG_NO_HZ
1339 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1340#endif /* #ifdef CONFIG_NO_HZ */
1341 rdp->cpu = cpu;
1342 spin_unlock(&rnp->lock); /* irqs remain disabled. */
1343
1344 /*
1345 * A new grace period might start here. If so, we won't be part
1346 * of it, but that is OK, as we are currently in a quiescent state.
1347 */
1348
1349 /* Exclude any attempts to start a new GP on large systems. */
1350 spin_lock(&rsp->onofflock); /* irqs already disabled. */
1351
1352 /* Add CPU to rcu_node bitmasks. */
1353 rnp = rdp->mynode;
1354 mask = rdp->grpmask;
1355 do {
1356 /* Exclude any attempts to start a new GP on small systems. */
1357 spin_lock(&rnp->lock); /* irqs already disabled. */
1358 rnp->qsmaskinit |= mask;
1359 mask = rnp->grpmask;
1360 spin_unlock(&rnp->lock); /* irqs already disabled. */
1361 rnp = rnp->parent;
1362 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
1363
1364 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
1365
1366 /*
1367 * A new grace period might start here. If so, we will be part of
1368 * it, and its gpnum will be greater than ours, so we will
1369 * participate. It is also possible for the gpnum to have been
1370 * incremented before this function was called, and the bitmasks
1371 * to not be filled out until now, in which case we will also
1372 * participate due to our gpnum being behind.
1373 */
1374
1375 /* Since it is coming online, the CPU is in a quiescent state. */
1376 cpu_quiet(cpu, rsp, rdp, lastcomp);
1377 local_irq_restore(flags);
1378}
1379
1380static void __cpuinit rcu_online_cpu(int cpu)
1381{
1382#ifdef CONFIG_NO_HZ
1383 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1384
1385 rdtp->dynticks_nesting = 1;
1386 rdtp->dynticks |= 1; /* need consecutive #s even for hotplug. */
1387 rdtp->dynticks_nmi = (rdtp->dynticks_nmi + 1) & ~0x1;
1388#endif /* #ifdef CONFIG_NO_HZ */
1389 rcu_init_percpu_data(cpu, &rcu_state);
1390 rcu_init_percpu_data(cpu, &rcu_bh_state);
1391 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1392}
1393
1394/*
1395 * Handle CPU online/offline notification events.
1396 */
1397static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1398 unsigned long action, void *hcpu)
1399{
1400 long cpu = (long)hcpu;
1401
1402 switch (action) {
1403 case CPU_UP_PREPARE:
1404 case CPU_UP_PREPARE_FROZEN:
1405 rcu_online_cpu(cpu);
1406 break;
1407 case CPU_DEAD:
1408 case CPU_DEAD_FROZEN:
1409 case CPU_UP_CANCELED:
1410 case CPU_UP_CANCELED_FROZEN:
1411 rcu_offline_cpu(cpu);
1412 break;
1413 default:
1414 break;
1415 }
1416 return NOTIFY_OK;
1417}
1418
1419/*
1420 * Compute the per-level fanout, either using the exact fanout specified
1421 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
1422 */
1423#ifdef CONFIG_RCU_FANOUT_EXACT
1424static void __init rcu_init_levelspread(struct rcu_state *rsp)
1425{
1426 int i;
1427
1428 for (i = NUM_RCU_LVLS - 1; i >= 0; i--)
1429 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
1430}
1431#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
1432static void __init rcu_init_levelspread(struct rcu_state *rsp)
1433{
1434 int ccur;
1435 int cprv;
1436 int i;
1437
1438 cprv = NR_CPUS;
1439 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
1440 ccur = rsp->levelcnt[i];
1441 rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
1442 cprv = ccur;
1443 }
1444}
1445#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
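
As a worked example (hypothetical numbers, not taken from this patch): on a 6-CPU build with a two-level tree whose levelcnt[] is {1, 2}, the balanced branch computes levelspread[1] = (6 + 2 - 1) / 2 = 3 and then levelspread[0] = (2 + 1 - 1) / 1 = 2, i.e. two leaf nodes of three CPUs each beneath a single root.
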
1446
1447/*
1448 * Helper function for rcu_init() that initializes one rcu_state structure.
1449 */
1450static void __init rcu_init_one(struct rcu_state *rsp)
1451{
1452 int cpustride = 1;
1453 int i;
1454 int j;
1455 struct rcu_node *rnp;
1456
1457 /* Initialize the level-tracking arrays. */
1458
1459 for (i = 1; i < NUM_RCU_LVLS; i++)
1460 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
1461 rcu_init_levelspread(rsp);
1462
1463 /* Initialize the elements themselves, starting from the leaves. */
1464
1465 for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
1466 cpustride *= rsp->levelspread[i];
1467 rnp = rsp->level[i];
1468 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1469 spin_lock_init(&rnp->lock);
1470 rnp->qsmask = 0;
1471 rnp->qsmaskinit = 0;
1472 rnp->grplo = j * cpustride;
1473 rnp->grphi = (j + 1) * cpustride - 1;
1474 if (rnp->grphi >= NR_CPUS)
1475 rnp->grphi = NR_CPUS - 1;
1476 if (i == 0) {
1477 rnp->grpnum = 0;
1478 rnp->grpmask = 0;
1479 rnp->parent = NULL;
1480 } else {
1481 rnp->grpnum = j % rsp->levelspread[i - 1];
1482 rnp->grpmask = 1UL << rnp->grpnum;
1483 rnp->parent = rsp->level[i - 1] +
1484 j / rsp->levelspread[i - 1];
1485 }
1486 rnp->level = i;
1487 }
1488 }
1489}
1490
1491/*
1492 * Helper macro for __rcu_init(). To be used nowhere else!
1493 * Assigns leaf node pointers into each CPU's rcu_data structure.
1494 */
1495#define RCU_DATA_PTR_INIT(rsp, rcu_data) \
1496do { \
1497 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
1498 j = 0; \
1499 for_each_possible_cpu(i) { \
1500 if (i > rnp[j].grphi) \
1501 j++; \
1502 per_cpu(rcu_data, i).mynode = &rnp[j]; \
1503 (rsp)->rda[i] = &per_cpu(rcu_data, i); \
1504 } \
1505} while (0)
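
Continuing the hypothetical 6-CPU layout above: rcu_init_one() gives the first leaf node grplo = 0, grphi = 2 and the second grplo = 3, grphi = 5, so this macro points ->mynode for CPUs 0-2 at the first leaf and for CPUs 3-5 at the second, filling in the matching rsp->rda[] slots as it goes.
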
1506
1507static struct notifier_block __cpuinitdata rcu_nb = {
1508 .notifier_call = rcu_cpu_notify,
1509};
1510
1511void __init __rcu_init(void)
1512{
1513 int i; /* All used by RCU_DATA_PTR_INIT(). */
1514 int j;
1515 struct rcu_node *rnp;
1516
1517 printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n");
1518#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1519 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
1520#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
1521 rcu_init_one(&rcu_state);
1522 RCU_DATA_PTR_INIT(&rcu_state, rcu_data);
1523 rcu_init_one(&rcu_bh_state);
1524 RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
1525
1526 for_each_online_cpu(i)
1527 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
1528 /* Register notifier for non-boot CPUs */
1529 register_cpu_notifier(&rcu_nb);
1530 printk(KERN_WARNING "Experimental hierarchical RCU init done.\n");
1531}
1532
1533module_param(blimit, int, 0);
1534module_param(qhimark, int, 0);
1535module_param(qlowmark, int, 0);
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
new file mode 100644
index 000000000000..d6db3e837826
--- /dev/null
+++ b/kernel/rcutree_trace.c
@@ -0,0 +1,271 @@
1/*
2 * Read-Copy Update tracing for hierarchical implementation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Papers: http://www.rdrop.com/users/paulmck/RCU
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU
24 *
25 */
26#include <linux/types.h>
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/spinlock.h>
30#include <linux/smp.h>
31#include <linux/rcupdate.h>
32#include <linux/interrupt.h>
33#include <linux/sched.h>
34#include <asm/atomic.h>
35#include <linux/bitops.h>
36#include <linux/module.h>
37#include <linux/completion.h>
38#include <linux/moduleparam.h>
39#include <linux/percpu.h>
40#include <linux/notifier.h>
41#include <linux/cpu.h>
42#include <linux/mutex.h>
43#include <linux/debugfs.h>
44#include <linux/seq_file.h>
45
46static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
47{
48 if (!rdp->beenonline)
49 return;
50 seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
51 rdp->cpu,
52 cpu_is_offline(rdp->cpu) ? '!' : ' ',
53 rdp->completed, rdp->gpnum,
54 rdp->passed_quiesc, rdp->passed_quiesc_completed,
55 rdp->qs_pending,
56 rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
57 (int)(rdp->n_rcu_pending & 0xffff));
58#ifdef CONFIG_NO_HZ
59 seq_printf(m, " dt=%d/%d dn=%d df=%lu",
60 rdp->dynticks->dynticks,
61 rdp->dynticks->dynticks_nesting,
62 rdp->dynticks->dynticks_nmi,
63 rdp->dynticks_fqs);
64#endif /* #ifdef CONFIG_NO_HZ */
65 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
66 seq_printf(m, " ql=%ld b=%ld\n", rdp->qlen, rdp->blimit);
67}
68
69#define PRINT_RCU_DATA(name, func, m) \
70 do { \
71 int _p_r_d_i; \
72 \
73 for_each_possible_cpu(_p_r_d_i) \
74 func(m, &per_cpu(name, _p_r_d_i)); \
75 } while (0)
76
77static int show_rcudata(struct seq_file *m, void *unused)
78{
79 seq_puts(m, "rcu:\n");
80 PRINT_RCU_DATA(rcu_data, print_one_rcu_data, m);
81 seq_puts(m, "rcu_bh:\n");
82 PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
83 return 0;
84}
85
86static int rcudata_open(struct inode *inode, struct file *file)
87{
88 return single_open(file, show_rcudata, NULL);
89}
90
91static struct file_operations rcudata_fops = {
92 .owner = THIS_MODULE,
93 .open = rcudata_open,
94 .read = seq_read,
95 .llseek = seq_lseek,
96 .release = single_release,
97};
98
99static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
100{
101 if (!rdp->beenonline)
102 return;
103 seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
104 rdp->cpu,
105 cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
106 rdp->completed, rdp->gpnum,
107 rdp->passed_quiesc, rdp->passed_quiesc_completed,
108 rdp->qs_pending,
109 rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
110 rdp->n_rcu_pending);
111#ifdef CONFIG_NO_HZ
112 seq_printf(m, ",%d,%d,%d,%lu",
113 rdp->dynticks->dynticks,
114 rdp->dynticks->dynticks_nesting,
115 rdp->dynticks->dynticks_nmi,
116 rdp->dynticks_fqs);
117#endif /* #ifdef CONFIG_NO_HZ */
118 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
119 seq_printf(m, ",%ld,%ld\n", rdp->qlen, rdp->blimit);
120}
121
122static int show_rcudata_csv(struct seq_file *m, void *unused)
123{
124	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"qp\",\"rpfq\",\"rp\",");
125#ifdef CONFIG_NO_HZ
126 seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
127#endif /* #ifdef CONFIG_NO_HZ */
128 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\"\n");
129 seq_puts(m, "\"rcu:\"\n");
130 PRINT_RCU_DATA(rcu_data, print_one_rcu_data_csv, m);
131 seq_puts(m, "\"rcu_bh:\"\n");
132 PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
133 return 0;
134}
135
136static int rcudata_csv_open(struct inode *inode, struct file *file)
137{
138 return single_open(file, show_rcudata_csv, NULL);
139}
140
141static struct file_operations rcudata_csv_fops = {
142 .owner = THIS_MODULE,
143 .open = rcudata_csv_open,
144 .read = seq_read,
145 .llseek = seq_lseek,
146 .release = single_release,
147};
148
149static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
150{
151 int level = 0;
152 struct rcu_node *rnp;
153
154 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
155 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n",
156 rsp->completed, rsp->gpnum, rsp->signaled,
157 (long)(rsp->jiffies_force_qs - jiffies),
158 (int)(jiffies & 0xffff),
159 rsp->n_force_qs, rsp->n_force_qs_ngp,
160 rsp->n_force_qs - rsp->n_force_qs_ngp,
161 rsp->n_force_qs_lh);
162 for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
163 if (rnp->level != level) {
164 seq_puts(m, "\n");
165 level = rnp->level;
166 }
167 seq_printf(m, "%lx/%lx %d:%d ^%d ",
168 rnp->qsmask, rnp->qsmaskinit,
169 rnp->grplo, rnp->grphi, rnp->grpnum);
170 }
171 seq_puts(m, "\n");
172}
173
174static int show_rcuhier(struct seq_file *m, void *unused)
175{
176 seq_puts(m, "rcu:\n");
177 print_one_rcu_state(m, &rcu_state);
178 seq_puts(m, "rcu_bh:\n");
179 print_one_rcu_state(m, &rcu_bh_state);
180 return 0;
181}
182
183static int rcuhier_open(struct inode *inode, struct file *file)
184{
185 return single_open(file, show_rcuhier, NULL);
186}
187
188static struct file_operations rcuhier_fops = {
189 .owner = THIS_MODULE,
190 .open = rcuhier_open,
191 .read = seq_read,
192 .llseek = seq_lseek,
193 .release = single_release,
194};
195
196static int show_rcugp(struct seq_file *m, void *unused)
197{
198 seq_printf(m, "rcu: completed=%ld gpnum=%ld\n",
199 rcu_state.completed, rcu_state.gpnum);
200 seq_printf(m, "rcu_bh: completed=%ld gpnum=%ld\n",
201 rcu_bh_state.completed, rcu_bh_state.gpnum);
202 return 0;
203}
204
205static int rcugp_open(struct inode *inode, struct file *file)
206{
207 return single_open(file, show_rcugp, NULL);
208}
209
210static struct file_operations rcugp_fops = {
211 .owner = THIS_MODULE,
212 .open = rcugp_open,
213 .read = seq_read,
214 .llseek = seq_lseek,
215 .release = single_release,
216};
217
218static struct dentry *rcudir, *datadir, *datadir_csv, *hierdir, *gpdir;
219static int __init rcuclassic_trace_init(void)
220{
221 rcudir = debugfs_create_dir("rcu", NULL);
222 if (!rcudir)
223 goto out;
224
225 datadir = debugfs_create_file("rcudata", 0444, rcudir,
226 NULL, &rcudata_fops);
227 if (!datadir)
228 goto free_out;
229
230 datadir_csv = debugfs_create_file("rcudata.csv", 0444, rcudir,
231 NULL, &rcudata_csv_fops);
232 if (!datadir_csv)
233 goto free_out;
234
235 gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops);
236 if (!gpdir)
237 goto free_out;
238
239 hierdir = debugfs_create_file("rcuhier", 0444, rcudir,
240 NULL, &rcuhier_fops);
241 if (!hierdir)
242 goto free_out;
243 return 0;
244free_out:
245 if (datadir)
246 debugfs_remove(datadir);
247 if (datadir_csv)
248 debugfs_remove(datadir_csv);
249 if (gpdir)
250 debugfs_remove(gpdir);
251 debugfs_remove(rcudir);
252out:
253 return 1;
254}
255
256static void __exit rcuclassic_trace_cleanup(void)
257{
258 debugfs_remove(datadir);
259 debugfs_remove(datadir_csv);
260 debugfs_remove(gpdir);
261 debugfs_remove(hierdir);
262 debugfs_remove(rcudir);
263}
264
265
266module_init(rcuclassic_trace_init);
267module_exit(rcuclassic_trace_cleanup);
268
269MODULE_AUTHOR("Paul E. McKenney");
270MODULE_DESCRIPTION("Read-Copy Update tracing for hierarchical implementation");
271MODULE_LICENSE("GPL");
diff --git a/kernel/resource.c b/kernel/resource.c
index 4337063663ef..e633106b12f6 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -853,6 +853,15 @@ int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
853 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) && 853 if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
854 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1)) 854 PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
855 continue; 855 continue;
856 /*
857 * if a resource is "BUSY", it's not a hardware resource
858 * but a driver mapping of such a resource; we don't want
859 * to warn for those; some drivers legitimately map only
860 * partial hardware resources. (example: vesafb)
861 */
862 if (p->flags & IORESOURCE_BUSY)
863 continue;
864
856 printk(KERN_WARNING "resource map sanity check conflict: " 865 printk(KERN_WARNING "resource map sanity check conflict: "
857 "0x%llx 0x%llx 0x%llx 0x%llx %s\n", 866 "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
858 (unsigned long long)addr, 867 (unsigned long long)addr,
diff --git a/kernel/sched.c b/kernel/sched.c
index 748ff924a290..c5019a5dcaa4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
209 hrtimer_init(&rt_b->rt_period_timer, 209 hrtimer_init(&rt_b->rt_period_timer,
210 CLOCK_MONOTONIC, HRTIMER_MODE_REL); 210 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
211 rt_b->rt_period_timer.function = sched_rt_period_timer; 211 rt_b->rt_period_timer.function = sched_rt_period_timer;
212 rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
213} 212}
214 213
215static inline int rt_bandwidth_enabled(void) 214static inline int rt_bandwidth_enabled(void)
@@ -499,18 +498,26 @@ struct rt_rq {
499 */ 498 */
500struct root_domain { 499struct root_domain {
501 atomic_t refcount; 500 atomic_t refcount;
502 cpumask_t span; 501 cpumask_var_t span;
503 cpumask_t online; 502 cpumask_var_t online;
504 503
505 /* 504 /*
506 * The "RT overload" flag: it gets set if a CPU has more than 505 * The "RT overload" flag: it gets set if a CPU has more than
507 * one runnable RT task. 506 * one runnable RT task.
508 */ 507 */
509 cpumask_t rto_mask; 508 cpumask_var_t rto_mask;
510 atomic_t rto_count; 509 atomic_t rto_count;
511#ifdef CONFIG_SMP 510#ifdef CONFIG_SMP
512 struct cpupri cpupri; 511 struct cpupri cpupri;
513#endif 512#endif
513#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
514 /*
515	 * Preferred wake-up cpu nominated by sched_mc balancing; it is
516	 * used when most cpus in the system are idle, indicating very low
517	 * overall system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP (2).
518 */
519 unsigned int sched_mc_preferred_wakeup_cpu;
520#endif
514}; 521};
515 522
516/* 523/*
@@ -1139,7 +1146,6 @@ static void init_rq_hrtick(struct rq *rq)
1139 1146
1140 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1147 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1141 rq->hrtick_timer.function = hrtick; 1148 rq->hrtick_timer.function = hrtick;
1142 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
1143} 1149}
1144#else /* CONFIG_SCHED_HRTICK */ 1150#else /* CONFIG_SCHED_HRTICK */
1145static inline void hrtick_clear(struct rq *rq) 1151static inline void hrtick_clear(struct rq *rq)
@@ -1516,7 +1522,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
1516 struct sched_domain *sd = data; 1522 struct sched_domain *sd = data;
1517 int i; 1523 int i;
1518 1524
1519 for_each_cpu_mask(i, sd->span) { 1525 for_each_cpu(i, sched_domain_span(sd)) {
1520 /* 1526 /*
1521 * If there are currently no tasks on the cpu pretend there 1527 * If there are currently no tasks on the cpu pretend there
1522 * is one of average load so that when a new task gets to 1528 * is one of average load so that when a new task gets to
@@ -1537,7 +1543,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
1537 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) 1543 if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
1538 shares = tg->shares; 1544 shares = tg->shares;
1539 1545
1540 for_each_cpu_mask(i, sd->span) 1546 for_each_cpu(i, sched_domain_span(sd))
1541 update_group_shares_cpu(tg, i, shares, rq_weight); 1547 update_group_shares_cpu(tg, i, shares, rq_weight);
1542 1548
1543 return 0; 1549 return 0;
@@ -2103,15 +2109,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2103 int i; 2109 int i;
2104 2110
2105 /* Skip over this group if it has no CPUs allowed */ 2111 /* Skip over this group if it has no CPUs allowed */
2106 if (!cpus_intersects(group->cpumask, p->cpus_allowed)) 2112 if (!cpumask_intersects(sched_group_cpus(group),
2113 &p->cpus_allowed))
2107 continue; 2114 continue;
2108 2115
2109 local_group = cpu_isset(this_cpu, group->cpumask); 2116 local_group = cpumask_test_cpu(this_cpu,
2117 sched_group_cpus(group));
2110 2118
2111 /* Tally up the load of all CPUs in the group */ 2119 /* Tally up the load of all CPUs in the group */
2112 avg_load = 0; 2120 avg_load = 0;
2113 2121
2114 for_each_cpu_mask_nr(i, group->cpumask) { 2122 for_each_cpu(i, sched_group_cpus(group)) {
2115 /* Bias balancing toward cpus of our domain */ 2123 /* Bias balancing toward cpus of our domain */
2116 if (local_group) 2124 if (local_group)
2117 load = source_load(i, load_idx); 2125 load = source_load(i, load_idx);
@@ -2143,17 +2151,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2143 * find_idlest_cpu - find the idlest cpu among the cpus in group. 2151 * find_idlest_cpu - find the idlest cpu among the cpus in group.
2144 */ 2152 */
2145static int 2153static int
2146find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, 2154find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
2147 cpumask_t *tmp)
2148{ 2155{
2149 unsigned long load, min_load = ULONG_MAX; 2156 unsigned long load, min_load = ULONG_MAX;
2150 int idlest = -1; 2157 int idlest = -1;
2151 int i; 2158 int i;
2152 2159
2153 /* Traverse only the allowed CPUs */ 2160 /* Traverse only the allowed CPUs */
2154 cpus_and(*tmp, group->cpumask, p->cpus_allowed); 2161 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
2155
2156 for_each_cpu_mask_nr(i, *tmp) {
2157 load = weighted_cpuload(i); 2162 load = weighted_cpuload(i);
2158 2163
2159 if (load < min_load || (load == min_load && i == this_cpu)) { 2164 if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2195,7 +2200,6 @@ static int sched_balance_self(int cpu, int flag)
2195 update_shares(sd); 2200 update_shares(sd);
2196 2201
2197 while (sd) { 2202 while (sd) {
2198 cpumask_t span, tmpmask;
2199 struct sched_group *group; 2203 struct sched_group *group;
2200 int new_cpu, weight; 2204 int new_cpu, weight;
2201 2205
@@ -2204,14 +2208,13 @@ static int sched_balance_self(int cpu, int flag)
2204 continue; 2208 continue;
2205 } 2209 }
2206 2210
2207 span = sd->span;
2208 group = find_idlest_group(sd, t, cpu); 2211 group = find_idlest_group(sd, t, cpu);
2209 if (!group) { 2212 if (!group) {
2210 sd = sd->child; 2213 sd = sd->child;
2211 continue; 2214 continue;
2212 } 2215 }
2213 2216
2214 new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); 2217 new_cpu = find_idlest_cpu(group, t, cpu);
2215 if (new_cpu == -1 || new_cpu == cpu) { 2218 if (new_cpu == -1 || new_cpu == cpu) {
2216 /* Now try balancing at a lower domain level of cpu */ 2219 /* Now try balancing at a lower domain level of cpu */
2217 sd = sd->child; 2220 sd = sd->child;
@@ -2220,10 +2223,10 @@ static int sched_balance_self(int cpu, int flag)
2220 2223
2221 /* Now try balancing at a lower domain level of new_cpu */ 2224 /* Now try balancing at a lower domain level of new_cpu */
2222 cpu = new_cpu; 2225 cpu = new_cpu;
2226 weight = cpumask_weight(sched_domain_span(sd));
2223 sd = NULL; 2227 sd = NULL;
2224 weight = cpus_weight(span);
2225 for_each_domain(cpu, tmp) { 2228 for_each_domain(cpu, tmp) {
2226 if (weight <= cpus_weight(tmp->span)) 2229 if (weight <= cpumask_weight(sched_domain_span(tmp)))
2227 break; 2230 break;
2228 if (tmp->flags & flag) 2231 if (tmp->flags & flag)
2229 sd = tmp; 2232 sd = tmp;
@@ -2268,7 +2271,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2268 cpu = task_cpu(p); 2271 cpu = task_cpu(p);
2269 2272
2270 for_each_domain(this_cpu, sd) { 2273 for_each_domain(this_cpu, sd) {
2271 if (cpu_isset(cpu, sd->span)) { 2274 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2272 update_shares(sd); 2275 update_shares(sd);
2273 break; 2276 break;
2274 } 2277 }
@@ -2317,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2317 else { 2320 else {
2318 struct sched_domain *sd; 2321 struct sched_domain *sd;
2319 for_each_domain(this_cpu, sd) { 2322 for_each_domain(this_cpu, sd) {
2320 if (cpu_isset(cpu, sd->span)) { 2323 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2321 schedstat_inc(sd, ttwu_wake_remote); 2324 schedstat_inc(sd, ttwu_wake_remote);
2322 break; 2325 break;
2323 } 2326 }
@@ -2848,7 +2851,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2848 struct rq *rq; 2851 struct rq *rq;
2849 2852
2850 rq = task_rq_lock(p, &flags); 2853 rq = task_rq_lock(p, &flags);
2851 if (!cpu_isset(dest_cpu, p->cpus_allowed) 2854 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
2852 || unlikely(!cpu_active(dest_cpu))) 2855 || unlikely(!cpu_active(dest_cpu)))
2853 goto out; 2856 goto out;
2854 2857
@@ -2913,7 +2916,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2913 * 2) cannot be migrated to this CPU due to cpus_allowed, or 2916 * 2) cannot be migrated to this CPU due to cpus_allowed, or
2914 * 3) are cache-hot on their current CPU. 2917 * 3) are cache-hot on their current CPU.
2915 */ 2918 */
2916 if (!cpu_isset(this_cpu, p->cpus_allowed)) { 2919 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
2917 schedstat_inc(p, se.nr_failed_migrations_affine); 2920 schedstat_inc(p, se.nr_failed_migrations_affine);
2918 return 0; 2921 return 0;
2919 } 2922 }
@@ -3088,7 +3091,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3088static struct sched_group * 3091static struct sched_group *
3089find_busiest_group(struct sched_domain *sd, int this_cpu, 3092find_busiest_group(struct sched_domain *sd, int this_cpu,
3090 unsigned long *imbalance, enum cpu_idle_type idle, 3093 unsigned long *imbalance, enum cpu_idle_type idle,
3091 int *sd_idle, const cpumask_t *cpus, int *balance) 3094 int *sd_idle, const struct cpumask *cpus, int *balance)
3092{ 3095{
3093 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 3096 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
3094 unsigned long max_load, avg_load, total_load, this_load, total_pwr; 3097 unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -3124,10 +3127,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3124 unsigned long sum_avg_load_per_task; 3127 unsigned long sum_avg_load_per_task;
3125 unsigned long avg_load_per_task; 3128 unsigned long avg_load_per_task;
3126 3129
3127 local_group = cpu_isset(this_cpu, group->cpumask); 3130 local_group = cpumask_test_cpu(this_cpu,
3131 sched_group_cpus(group));
3128 3132
3129 if (local_group) 3133 if (local_group)
3130 balance_cpu = first_cpu(group->cpumask); 3134 balance_cpu = cpumask_first(sched_group_cpus(group));
3131 3135
3132 /* Tally up the load of all CPUs in the group */ 3136 /* Tally up the load of all CPUs in the group */
3133 sum_weighted_load = sum_nr_running = avg_load = 0; 3137 sum_weighted_load = sum_nr_running = avg_load = 0;
@@ -3136,13 +3140,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3136 max_cpu_load = 0; 3140 max_cpu_load = 0;
3137 min_cpu_load = ~0UL; 3141 min_cpu_load = ~0UL;
3138 3142
3139 for_each_cpu_mask_nr(i, group->cpumask) { 3143 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3140 struct rq *rq; 3144 struct rq *rq = cpu_rq(i);
3141
3142 if (!cpu_isset(i, *cpus))
3143 continue;
3144
3145 rq = cpu_rq(i);
3146 3145
3147 if (*sd_idle && rq->nr_running) 3146 if (*sd_idle && rq->nr_running)
3148 *sd_idle = 0; 3147 *sd_idle = 0;
@@ -3253,8 +3252,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3253 */ 3252 */
3254 if ((sum_nr_running < min_nr_running) || 3253 if ((sum_nr_running < min_nr_running) ||
3255 (sum_nr_running == min_nr_running && 3254 (sum_nr_running == min_nr_running &&
3256 first_cpu(group->cpumask) < 3255 cpumask_first(sched_group_cpus(group)) >
3257 first_cpu(group_min->cpumask))) { 3256 cpumask_first(sched_group_cpus(group_min)))) {
3258 group_min = group; 3257 group_min = group;
3259 min_nr_running = sum_nr_running; 3258 min_nr_running = sum_nr_running;
3260 min_load_per_task = sum_weighted_load / 3259 min_load_per_task = sum_weighted_load /
@@ -3269,8 +3268,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3269 if (sum_nr_running <= group_capacity - 1) { 3268 if (sum_nr_running <= group_capacity - 1) {
3270 if (sum_nr_running > leader_nr_running || 3269 if (sum_nr_running > leader_nr_running ||
3271 (sum_nr_running == leader_nr_running && 3270 (sum_nr_running == leader_nr_running &&
3272 first_cpu(group->cpumask) > 3271 cpumask_first(sched_group_cpus(group)) <
3273 first_cpu(group_leader->cpumask))) { 3272 cpumask_first(sched_group_cpus(group_leader)))) {
3274 group_leader = group; 3273 group_leader = group;
3275 leader_nr_running = sum_nr_running; 3274 leader_nr_running = sum_nr_running;
3276 } 3275 }
@@ -3396,6 +3395,10 @@ out_balanced:
3396 3395
3397 if (this == group_leader && group_leader != group_min) { 3396 if (this == group_leader && group_leader != group_min) {
3398 *imbalance = min_load_per_task; 3397 *imbalance = min_load_per_task;
3398 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3399 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3400 cpumask_first(sched_group_cpus(group_leader));
3401 }
3399 return group_min; 3402 return group_min;
3400 } 3403 }
3401#endif 3404#endif
@@ -3409,16 +3412,16 @@ ret:
3409 */ 3412 */
3410static struct rq * 3413static struct rq *
3411find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, 3414find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3412 unsigned long imbalance, const cpumask_t *cpus) 3415 unsigned long imbalance, const struct cpumask *cpus)
3413{ 3416{
3414 struct rq *busiest = NULL, *rq; 3417 struct rq *busiest = NULL, *rq;
3415 unsigned long max_load = 0; 3418 unsigned long max_load = 0;
3416 int i; 3419 int i;
3417 3420
3418 for_each_cpu_mask_nr(i, group->cpumask) { 3421 for_each_cpu(i, sched_group_cpus(group)) {
3419 unsigned long wl; 3422 unsigned long wl;
3420 3423
3421 if (!cpu_isset(i, *cpus)) 3424 if (!cpumask_test_cpu(i, cpus))
3422 continue; 3425 continue;
3423 3426
3424 rq = cpu_rq(i); 3427 rq = cpu_rq(i);
@@ -3448,7 +3451,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3448 */ 3451 */
3449static int load_balance(int this_cpu, struct rq *this_rq, 3452static int load_balance(int this_cpu, struct rq *this_rq,
3450 struct sched_domain *sd, enum cpu_idle_type idle, 3453 struct sched_domain *sd, enum cpu_idle_type idle,
3451 int *balance, cpumask_t *cpus) 3454 int *balance, struct cpumask *cpus)
3452{ 3455{
3453 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; 3456 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
3454 struct sched_group *group; 3457 struct sched_group *group;
@@ -3456,7 +3459,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
3456 struct rq *busiest; 3459 struct rq *busiest;
3457 unsigned long flags; 3460 unsigned long flags;
3458 3461
3459 cpus_setall(*cpus); 3462 cpumask_setall(cpus);
3460 3463
3461 /* 3464 /*
3462 * When power savings policy is enabled for the parent domain, idle 3465 * When power savings policy is enabled for the parent domain, idle
@@ -3516,8 +3519,8 @@ redo:
3516 3519
3517 /* All tasks on this runqueue were pinned by CPU affinity */ 3520 /* All tasks on this runqueue were pinned by CPU affinity */
3518 if (unlikely(all_pinned)) { 3521 if (unlikely(all_pinned)) {
3519 cpu_clear(cpu_of(busiest), *cpus); 3522 cpumask_clear_cpu(cpu_of(busiest), cpus);
3520 if (!cpus_empty(*cpus)) 3523 if (!cpumask_empty(cpus))
3521 goto redo; 3524 goto redo;
3522 goto out_balanced; 3525 goto out_balanced;
3523 } 3526 }
@@ -3534,7 +3537,8 @@ redo:
3534 /* don't kick the migration_thread, if the curr 3537 /* don't kick the migration_thread, if the curr
3535 * task on busiest cpu can't be moved to this_cpu 3538 * task on busiest cpu can't be moved to this_cpu
3536 */ 3539 */
3537 if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { 3540 if (!cpumask_test_cpu(this_cpu,
3541 &busiest->curr->cpus_allowed)) {
3538 spin_unlock_irqrestore(&busiest->lock, flags); 3542 spin_unlock_irqrestore(&busiest->lock, flags);
3539 all_pinned = 1; 3543 all_pinned = 1;
3540 goto out_one_pinned; 3544 goto out_one_pinned;
@@ -3609,7 +3613,7 @@ out:
3609 */ 3613 */
3610static int 3614static int
3611load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, 3615load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
3612 cpumask_t *cpus) 3616 struct cpumask *cpus)
3613{ 3617{
3614 struct sched_group *group; 3618 struct sched_group *group;
3615 struct rq *busiest = NULL; 3619 struct rq *busiest = NULL;
@@ -3618,7 +3622,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
3618 int sd_idle = 0; 3622 int sd_idle = 0;
3619 int all_pinned = 0; 3623 int all_pinned = 0;
3620 3624
3621 cpus_setall(*cpus); 3625 cpumask_setall(cpus);
3622 3626
3623 /* 3627 /*
3624 * When power savings policy is enabled for the parent domain, idle 3628 * When power savings policy is enabled for the parent domain, idle
@@ -3662,17 +3666,71 @@ redo:
3662 double_unlock_balance(this_rq, busiest); 3666 double_unlock_balance(this_rq, busiest);
3663 3667
3664 if (unlikely(all_pinned)) { 3668 if (unlikely(all_pinned)) {
3665 cpu_clear(cpu_of(busiest), *cpus); 3669 cpumask_clear_cpu(cpu_of(busiest), cpus);
3666 if (!cpus_empty(*cpus)) 3670 if (!cpumask_empty(cpus))
3667 goto redo; 3671 goto redo;
3668 } 3672 }
3669 } 3673 }
3670 3674
3671 if (!ld_moved) { 3675 if (!ld_moved) {
3676 int active_balance = 0;
3677
3672 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); 3678 schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
3673 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && 3679 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3674 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) 3680 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3675 return -1; 3681 return -1;
3682
3683 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
3684 return -1;
3685
3686 if (sd->nr_balance_failed++ < 2)
3687 return -1;
3688
3689 /*
3690			 * The only task running on a non-idle cpu can be moved to this
3691			 * cpu in an attempt to completely free up the other CPU
3692			 * package. The same method used to move a task in load_balance()
3693			 * has been extended for load_balance_newidle() to speed up
3694			 * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2).
3695			 *
3696			 * The package power saving logic comes from
3697			 * find_busiest_group(). If there is no imbalance, then
3698			 * f_b_g() will return NULL. However, when sched_mc={1,2},
3699			 * f_b_g() will select a group from which a running task may be
3700			 * pulled to this cpu in order to make the other package idle.
3701			 * If there is no opportunity to make a package idle and if
3702			 * there is no imbalance, then f_b_g() will return NULL and no
3703 * action will be taken in load_balance_newidle().
3704 *
3705 * Under normal task pull operation due to imbalance, there
3706 * will be more than one task in the source run queue and
3707 * move_tasks() will succeed. ld_moved will be true and this
3708 * active balance code will not be triggered.
3709 */
3710
3711 /* Lock busiest in correct order while this_rq is held */
3712 double_lock_balance(this_rq, busiest);
3713
3714 /*
3715 * don't kick the migration_thread, if the curr
3716 * task on busiest cpu can't be moved to this_cpu
3717 */
3718 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
3719 double_unlock_balance(this_rq, busiest);
3720 all_pinned = 1;
3721 return ld_moved;
3722 }
3723
3724 if (!busiest->active_balance) {
3725 busiest->active_balance = 1;
3726 busiest->push_cpu = this_cpu;
3727 active_balance = 1;
3728 }
3729
3730 double_unlock_balance(this_rq, busiest);
3731 if (active_balance)
3732 wake_up_process(busiest->migration_thread);
3733
3676 } else 3734 } else
3677 sd->nr_balance_failed = 0; 3735 sd->nr_balance_failed = 0;
3678 3736
@@ -3698,7 +3756,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
3698 struct sched_domain *sd; 3756 struct sched_domain *sd;
3699 int pulled_task = 0; 3757 int pulled_task = 0;
3700 unsigned long next_balance = jiffies + HZ; 3758 unsigned long next_balance = jiffies + HZ;
3701 cpumask_t tmpmask; 3759 cpumask_var_t tmpmask;
3760
3761 if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
3762 return;
3702 3763
3703 for_each_domain(this_cpu, sd) { 3764 for_each_domain(this_cpu, sd) {
3704 unsigned long interval; 3765 unsigned long interval;
@@ -3709,7 +3770,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
3709 if (sd->flags & SD_BALANCE_NEWIDLE) 3770 if (sd->flags & SD_BALANCE_NEWIDLE)
3710 /* If we've pulled tasks over stop searching: */ 3771 /* If we've pulled tasks over stop searching: */
3711 pulled_task = load_balance_newidle(this_cpu, this_rq, 3772 pulled_task = load_balance_newidle(this_cpu, this_rq,
3712 sd, &tmpmask); 3773 sd, tmpmask);
3713 3774
3714 interval = msecs_to_jiffies(sd->balance_interval); 3775 interval = msecs_to_jiffies(sd->balance_interval);
3715 if (time_after(next_balance, sd->last_balance + interval)) 3776 if (time_after(next_balance, sd->last_balance + interval))
@@ -3724,6 +3785,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
3724 */ 3785 */
3725 this_rq->next_balance = next_balance; 3786 this_rq->next_balance = next_balance;
3726 } 3787 }
3788 free_cpumask_var(tmpmask);
3727} 3789}
3728 3790
3729/* 3791/*
@@ -3761,7 +3823,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3761 /* Search for an sd spanning us and the target CPU. */ 3823 /* Search for an sd spanning us and the target CPU. */
3762 for_each_domain(target_cpu, sd) { 3824 for_each_domain(target_cpu, sd) {
3763 if ((sd->flags & SD_LOAD_BALANCE) && 3825 if ((sd->flags & SD_LOAD_BALANCE) &&
3764 cpu_isset(busiest_cpu, sd->span)) 3826 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3765 break; 3827 break;
3766 } 3828 }
3767 3829
@@ -3780,10 +3842,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3780#ifdef CONFIG_NO_HZ 3842#ifdef CONFIG_NO_HZ
3781static struct { 3843static struct {
3782 atomic_t load_balancer; 3844 atomic_t load_balancer;
3783 cpumask_t cpu_mask; 3845 cpumask_var_t cpu_mask;
3784} nohz ____cacheline_aligned = { 3846} nohz ____cacheline_aligned = {
3785 .load_balancer = ATOMIC_INIT(-1), 3847 .load_balancer = ATOMIC_INIT(-1),
3786 .cpu_mask = CPU_MASK_NONE,
3787}; 3848};
3788 3849
3789/* 3850/*
@@ -3811,7 +3872,7 @@ int select_nohz_load_balancer(int stop_tick)
3811 int cpu = smp_processor_id(); 3872 int cpu = smp_processor_id();
3812 3873
3813 if (stop_tick) { 3874 if (stop_tick) {
3814 cpu_set(cpu, nohz.cpu_mask); 3875 cpumask_set_cpu(cpu, nohz.cpu_mask);
3815 cpu_rq(cpu)->in_nohz_recently = 1; 3876 cpu_rq(cpu)->in_nohz_recently = 1;
3816 3877
3817 /* 3878 /*
@@ -3825,7 +3886,7 @@ int select_nohz_load_balancer(int stop_tick)
3825 } 3886 }
3826 3887
3827 /* time for ilb owner also to sleep */ 3888 /* time for ilb owner also to sleep */
3828 if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { 3889 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
3829 if (atomic_read(&nohz.load_balancer) == cpu) 3890 if (atomic_read(&nohz.load_balancer) == cpu)
3830 atomic_set(&nohz.load_balancer, -1); 3891 atomic_set(&nohz.load_balancer, -1);
3831 return 0; 3892 return 0;
@@ -3838,10 +3899,10 @@ int select_nohz_load_balancer(int stop_tick)
3838 } else if (atomic_read(&nohz.load_balancer) == cpu) 3899 } else if (atomic_read(&nohz.load_balancer) == cpu)
3839 return 1; 3900 return 1;
3840 } else { 3901 } else {
3841 if (!cpu_isset(cpu, nohz.cpu_mask)) 3902 if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
3842 return 0; 3903 return 0;
3843 3904
3844 cpu_clear(cpu, nohz.cpu_mask); 3905 cpumask_clear_cpu(cpu, nohz.cpu_mask);
3845 3906
3846 if (atomic_read(&nohz.load_balancer) == cpu) 3907 if (atomic_read(&nohz.load_balancer) == cpu)
3847 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) 3908 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
@@ -3869,7 +3930,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3869 unsigned long next_balance = jiffies + 60*HZ; 3930 unsigned long next_balance = jiffies + 60*HZ;
3870 int update_next_balance = 0; 3931 int update_next_balance = 0;
3871 int need_serialize; 3932 int need_serialize;
3872 cpumask_t tmp; 3933 cpumask_var_t tmp;
3934
3935 /* Fails alloc? Rebalancing probably not a priority right now. */
3936 if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
3937 return;
3873 3938
3874 for_each_domain(cpu, sd) { 3939 for_each_domain(cpu, sd) {
3875 if (!(sd->flags & SD_LOAD_BALANCE)) 3940 if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3894,7 +3959,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3894 } 3959 }
3895 3960
3896 if (time_after_eq(jiffies, sd->last_balance + interval)) { 3961 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3897 if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { 3962 if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
3898 /* 3963 /*
3899 * We've pulled tasks over so either we're no 3964 * We've pulled tasks over so either we're no
3900 * longer idle, or one of our SMT siblings is 3965 * longer idle, or one of our SMT siblings is
@@ -3928,6 +3993,8 @@ out:
3928 */ 3993 */
3929 if (likely(update_next_balance)) 3994 if (likely(update_next_balance))
3930 rq->next_balance = next_balance; 3995 rq->next_balance = next_balance;
3996
3997 free_cpumask_var(tmp);
3931} 3998}
3932 3999
3933/* 4000/*
@@ -3952,12 +4019,13 @@ static void run_rebalance_domains(struct softirq_action *h)
3952 */ 4019 */
3953 if (this_rq->idle_at_tick && 4020 if (this_rq->idle_at_tick &&
3954 atomic_read(&nohz.load_balancer) == this_cpu) { 4021 atomic_read(&nohz.load_balancer) == this_cpu) {
3955 cpumask_t cpus = nohz.cpu_mask;
3956 struct rq *rq; 4022 struct rq *rq;
3957 int balance_cpu; 4023 int balance_cpu;
3958 4024
3959 cpu_clear(this_cpu, cpus); 4025 for_each_cpu(balance_cpu, nohz.cpu_mask) {
3960 for_each_cpu_mask_nr(balance_cpu, cpus) { 4026 if (balance_cpu == this_cpu)
4027 continue;
4028
3961 /* 4029 /*
3962 * If this cpu gets work to do, stop the load balancing 4030 * If this cpu gets work to do, stop the load balancing
3963 * work being done for other cpus. Next load 4031 * work being done for other cpus. Next load
@@ -3995,7 +4063,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
3995 rq->in_nohz_recently = 0; 4063 rq->in_nohz_recently = 0;
3996 4064
3997 if (atomic_read(&nohz.load_balancer) == cpu) { 4065 if (atomic_read(&nohz.load_balancer) == cpu) {
3998 cpu_clear(cpu, nohz.cpu_mask); 4066 cpumask_clear_cpu(cpu, nohz.cpu_mask);
3999 atomic_set(&nohz.load_balancer, -1); 4067 atomic_set(&nohz.load_balancer, -1);
4000 } 4068 }
4001 4069
@@ -4008,7 +4076,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
4008 * TBD: Traverse the sched domains and nominate 4076 * TBD: Traverse the sched domains and nominate
4009 * the nearest cpu in the nohz.cpu_mask. 4077 * the nearest cpu in the nohz.cpu_mask.
4010 */ 4078 */
4011 int ilb = first_cpu(nohz.cpu_mask); 4079 int ilb = cpumask_first(nohz.cpu_mask);
4012 4080
4013 if (ilb < nr_cpu_ids) 4081 if (ilb < nr_cpu_ids)
4014 resched_cpu(ilb); 4082 resched_cpu(ilb);
@@ -4020,7 +4088,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
4020 * cpus with ticks stopped, is it time for that to stop? 4088 * cpus with ticks stopped, is it time for that to stop?
4021 */ 4089 */
4022 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && 4090 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
4023 cpus_weight(nohz.cpu_mask) == num_online_cpus()) { 4091 cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4024 resched_cpu(cpu); 4092 resched_cpu(cpu);
4025 return; 4093 return;
4026 } 4094 }
@@ -4030,7 +4098,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
4030 * someone else, then no need raise the SCHED_SOFTIRQ 4098 * someone else, then no need raise the SCHED_SOFTIRQ
4031 */ 4099 */
4032 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && 4100 if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
4033 cpu_isset(cpu, nohz.cpu_mask)) 4101 cpumask_test_cpu(cpu, nohz.cpu_mask))
4034 return; 4102 return;
4035#endif 4103#endif
4036 if (time_after_eq(jiffies, rq->next_balance)) 4104 if (time_after_eq(jiffies, rq->next_balance))
@@ -4082,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p)
4082 * Account user cpu time to a process. 4150 * Account user cpu time to a process.
4083 * @p: the process that the cpu time gets accounted to 4151 * @p: the process that the cpu time gets accounted to
4084 * @cputime: the cpu time spent in user space since the last update 4152 * @cputime: the cpu time spent in user space since the last update
4153 * @cputime_scaled: cputime scaled by cpu frequency
4085 */ 4154 */
4086void account_user_time(struct task_struct *p, cputime_t cputime) 4155void account_user_time(struct task_struct *p, cputime_t cputime,
4156 cputime_t cputime_scaled)
4087{ 4157{
4088 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 4158 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4089 cputime64_t tmp; 4159 cputime64_t tmp;
4090 4160
4161 /* Add user time to process. */
4091 p->utime = cputime_add(p->utime, cputime); 4162 p->utime = cputime_add(p->utime, cputime);
4163 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
4092 account_group_user_time(p, cputime); 4164 account_group_user_time(p, cputime);
4093 4165
4094 /* Add user time to cpustat. */ 4166 /* Add user time to cpustat. */
@@ -4105,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
4105 * Account guest cpu time to a process. 4177 * Account guest cpu time to a process.
4106 * @p: the process that the cpu time gets accounted to 4178 * @p: the process that the cpu time gets accounted to
4107 * @cputime: the cpu time spent in virtual machine since the last update 4179 * @cputime: the cpu time spent in virtual machine since the last update
4180 * @cputime_scaled: cputime scaled by cpu frequency
4108 */ 4181 */
4109static void account_guest_time(struct task_struct *p, cputime_t cputime) 4182static void account_guest_time(struct task_struct *p, cputime_t cputime,
4183 cputime_t cputime_scaled)
4110{ 4184{
4111 cputime64_t tmp; 4185 cputime64_t tmp;
4112 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 4186 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4113 4187
4114 tmp = cputime_to_cputime64(cputime); 4188 tmp = cputime_to_cputime64(cputime);
4115 4189
4190 /* Add guest time to process. */
4116 p->utime = cputime_add(p->utime, cputime); 4191 p->utime = cputime_add(p->utime, cputime);
4192 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
4117 account_group_user_time(p, cputime); 4193 account_group_user_time(p, cputime);
4118 p->gtime = cputime_add(p->gtime, cputime); 4194 p->gtime = cputime_add(p->gtime, cputime);
4119 4195
4196 /* Add guest time to cpustat. */
4120 cpustat->user = cputime64_add(cpustat->user, tmp); 4197 cpustat->user = cputime64_add(cpustat->user, tmp);
4121 cpustat->guest = cputime64_add(cpustat->guest, tmp); 4198 cpustat->guest = cputime64_add(cpustat->guest, tmp);
4122} 4199}
4123 4200
4124/* 4201/*
4125 * Account scaled user cpu time to a process.
4126 * @p: the process that the cpu time gets accounted to
4127 * @cputime: the cpu time spent in user space since the last update
4128 */
4129void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
4130{
4131 p->utimescaled = cputime_add(p->utimescaled, cputime);
4132}
4133
4134/*
4135 * Account system cpu time to a process. 4202 * Account system cpu time to a process.
4136 * @p: the process that the cpu time gets accounted to 4203 * @p: the process that the cpu time gets accounted to
4137 * @hardirq_offset: the offset to subtract from hardirq_count() 4204 * @hardirq_offset: the offset to subtract from hardirq_count()
4138 * @cputime: the cpu time spent in kernel space since the last update 4205 * @cputime: the cpu time spent in kernel space since the last update
4206 * @cputime_scaled: cputime scaled by cpu frequency
4139 */ 4207 */
4140void account_system_time(struct task_struct *p, int hardirq_offset, 4208void account_system_time(struct task_struct *p, int hardirq_offset,
4141 cputime_t cputime) 4209 cputime_t cputime, cputime_t cputime_scaled)
4142{ 4210{
4143 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 4211 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4144 struct rq *rq = this_rq();
4145 cputime64_t tmp; 4212 cputime64_t tmp;
4146 4213
4147 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { 4214 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
4148 account_guest_time(p, cputime); 4215 account_guest_time(p, cputime, cputime_scaled);
4149 return; 4216 return;
4150 } 4217 }
4151 4218
4219 /* Add system time to process. */
4152 p->stime = cputime_add(p->stime, cputime); 4220 p->stime = cputime_add(p->stime, cputime);
4221 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
4153 account_group_system_time(p, cputime); 4222 account_group_system_time(p, cputime);
4154 4223
4155 /* Add system time to cpustat. */ 4224 /* Add system time to cpustat. */
@@ -4158,50 +4227,85 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
4158 cpustat->irq = cputime64_add(cpustat->irq, tmp); 4227 cpustat->irq = cputime64_add(cpustat->irq, tmp);
4159 else if (softirq_count()) 4228 else if (softirq_count())
4160 cpustat->softirq = cputime64_add(cpustat->softirq, tmp); 4229 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
4161 else if (p != rq->idle)
4162 cpustat->system = cputime64_add(cpustat->system, tmp);
4163 else if (atomic_read(&rq->nr_iowait) > 0)
4164 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
4165 else 4230 else
4166 cpustat->idle = cputime64_add(cpustat->idle, tmp); 4231 cpustat->system = cputime64_add(cpustat->system, tmp);
4232
4167 /* Account for system time used */ 4233 /* Account for system time used */
4168 acct_update_integrals(p); 4234 acct_update_integrals(p);
4169} 4235}
4170 4236
4171/* 4237/*
4172 * Account scaled system cpu time to a process. 4238 * Account for involuntary wait time.
4173 * @p: the process that the cpu time gets accounted to 4239 * @steal: the cpu time spent in involuntary wait
4174 * @hardirq_offset: the offset to subtract from hardirq_count()
4175 * @cputime: the cpu time spent in kernel space since the last update
4176 */ 4240 */
4177void account_system_time_scaled(struct task_struct *p, cputime_t cputime) 4241void account_steal_time(cputime_t cputime)
4178{ 4242{
4179 p->stimescaled = cputime_add(p->stimescaled, cputime); 4243 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4244 cputime64_t cputime64 = cputime_to_cputime64(cputime);
4245
4246 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
4180} 4247}
4181 4248
4182/* 4249/*
4183 * Account for involuntary wait time. 4250 * Account for idle time.
4184 * @p: the process from which the cpu time has been stolen 4251 * @cputime: the cpu time spent in idle wait
4185 * @steal: the cpu time spent in involuntary wait
4186 */ 4252 */
4187void account_steal_time(struct task_struct *p, cputime_t steal) 4253void account_idle_time(cputime_t cputime)
4188{ 4254{
4189 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 4255 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
4190 cputime64_t tmp = cputime_to_cputime64(steal); 4256 cputime64_t cputime64 = cputime_to_cputime64(cputime);
4191 struct rq *rq = this_rq(); 4257 struct rq *rq = this_rq();
4192 4258
4193 if (p == rq->idle) { 4259 if (atomic_read(&rq->nr_iowait) > 0)
4194 p->stime = cputime_add(p->stime, steal); 4260 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
4195 account_group_system_time(p, steal); 4261 else
4196 if (atomic_read(&rq->nr_iowait) > 0) 4262 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
4197 cpustat->iowait = cputime64_add(cpustat->iowait, tmp); 4263}
4198 else 4264
4199 cpustat->idle = cputime64_add(cpustat->idle, tmp); 4265#ifndef CONFIG_VIRT_CPU_ACCOUNTING
4200 } else 4266
4201 cpustat->steal = cputime64_add(cpustat->steal, tmp); 4267/*
4268 * Account a single tick of cpu time.
4269 * @p: the process that the cpu time gets accounted to
4270 * @user_tick: indicates if the tick is a user or a system tick
4271 */
4272void account_process_tick(struct task_struct *p, int user_tick)
4273{
4274 cputime_t one_jiffy = jiffies_to_cputime(1);
4275 cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
4276 struct rq *rq = this_rq();
4277
4278 if (user_tick)
4279 account_user_time(p, one_jiffy, one_jiffy_scaled);
4280 else if (p != rq->idle)
4281 account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
4282 one_jiffy_scaled);
4283 else
4284 account_idle_time(one_jiffy);
4285}
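
A hedged sketch of the kind of call site account_process_tick() is aimed at; in-tree the caller is update_process_times(), and the surrounding timer/scheduler work is omitted here (the function name below is illustrative):

	static void example_tick(int user_tick)
	{
		struct task_struct *p = current;

		/* Charge one jiffy to user, system or idle time as appropriate. */
		account_process_tick(p, user_tick);
	}
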
4286
4287/*
4288 * Account multiple ticks of steal time.
4289 *
4290 * @ticks: number of stolen ticks
4291 */
4292void account_steal_ticks(unsigned long ticks)
4293{
4294 account_steal_time(jiffies_to_cputime(ticks));
4202} 4295}
4203 4296
4204/* 4297/*
4298 * Account multiple ticks of idle time.
4299 * @ticks: number of idle ticks
4300 */
4301void account_idle_ticks(unsigned long ticks)
4302{
4303 account_idle_time(jiffies_to_cputime(ticks));
4304}
4305
4306#endif
4307
4308/*
4205 * Use precise platform statistics if available: 4309 * Use precise platform statistics if available:
4206 */ 4310 */
4207#ifdef CONFIG_VIRT_CPU_ACCOUNTING 4311#ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -4328,7 +4432,7 @@ void __kprobes sub_preempt_count(int val)
4328 /* 4432 /*
4329 * Underflow? 4433 * Underflow?
4330 */ 4434 */
4331 if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) 4435 if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
4332 return; 4436 return;
4333 /* 4437 /*
4334 * Is the spinlock portion underflowing? 4438 * Is the spinlock portion underflowing?
@@ -5404,10 +5508,9 @@ out_unlock:
5404 return retval; 5508 return retval;
5405} 5509}
5406 5510
5407long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) 5511long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
5408{ 5512{
5409 cpumask_t cpus_allowed; 5513 cpumask_var_t cpus_allowed, new_mask;
5410 cpumask_t new_mask = *in_mask;
5411 struct task_struct *p; 5514 struct task_struct *p;
5412 int retval; 5515 int retval;
5413 5516
@@ -5429,6 +5532,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
5429 get_task_struct(p); 5532 get_task_struct(p);
5430 read_unlock(&tasklist_lock); 5533 read_unlock(&tasklist_lock);
5431 5534
5535 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5536 retval = -ENOMEM;
5537 goto out_put_task;
5538 }
5539 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5540 retval = -ENOMEM;
5541 goto out_free_cpus_allowed;
5542 }
5432 retval = -EPERM; 5543 retval = -EPERM;
5433 if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) 5544 if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
5434 goto out_unlock; 5545 goto out_unlock;
@@ -5437,37 +5548,41 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
5437 if (retval) 5548 if (retval)
5438 goto out_unlock; 5549 goto out_unlock;
5439 5550
5440 cpuset_cpus_allowed(p, &cpus_allowed); 5551 cpuset_cpus_allowed(p, cpus_allowed);
5441 cpus_and(new_mask, new_mask, cpus_allowed); 5552 cpumask_and(new_mask, in_mask, cpus_allowed);
5442 again: 5553 again:
5443 retval = set_cpus_allowed_ptr(p, &new_mask); 5554 retval = set_cpus_allowed_ptr(p, new_mask);
5444 5555
5445 if (!retval) { 5556 if (!retval) {
5446 cpuset_cpus_allowed(p, &cpus_allowed); 5557 cpuset_cpus_allowed(p, cpus_allowed);
5447 if (!cpus_subset(new_mask, cpus_allowed)) { 5558 if (!cpumask_subset(new_mask, cpus_allowed)) {
5448 /* 5559 /*
5449 * We must have raced with a concurrent cpuset 5560 * We must have raced with a concurrent cpuset
5450 * update. Just reset the cpus_allowed to the 5561 * update. Just reset the cpus_allowed to the
5451 * cpuset's cpus_allowed 5562 * cpuset's cpus_allowed
5452 */ 5563 */
5453 new_mask = cpus_allowed; 5564 cpumask_copy(new_mask, cpus_allowed);
5454 goto again; 5565 goto again;
5455 } 5566 }
5456 } 5567 }
5457out_unlock: 5568out_unlock:
5569 free_cpumask_var(new_mask);
5570out_free_cpus_allowed:
5571 free_cpumask_var(cpus_allowed);
5572out_put_task:
5458 put_task_struct(p); 5573 put_task_struct(p);
5459 put_online_cpus(); 5574 put_online_cpus();
5460 return retval; 5575 return retval;
5461} 5576}
5462 5577
5463static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, 5578static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
5464 cpumask_t *new_mask) 5579 struct cpumask *new_mask)
5465{ 5580{
5466 if (len < sizeof(cpumask_t)) { 5581 if (len < cpumask_size())
5467 memset(new_mask, 0, sizeof(cpumask_t)); 5582 cpumask_clear(new_mask);
5468 } else if (len > sizeof(cpumask_t)) { 5583 else if (len > cpumask_size())
5469 len = sizeof(cpumask_t); 5584 len = cpumask_size();
5470 } 5585
5471 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; 5586 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5472} 5587}
5473 5588
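Aside: the sched_setaffinity() conversion above is the template for the rest of the file: every on-stack cpumask_t becomes a cpumask_var_t that must be allocated, and every early return becomes a goto into an unwind ladder that frees in reverse order. A stripped-down sketch of that shape, with demo_two_masks() as a hypothetical name:

#include <linux/cpumask.h>
#include <linux/slab.h>

static int demo_two_masks(const struct cpumask *in)
{
	cpumask_var_t allowed, scratch;
	int ret;

	/* alloc_cpumask_var() is a no-op when cpumasks live on the
	 * stack, and a kmalloc when CONFIG_CPUMASK_OFFSTACK=y */
	if (!alloc_cpumask_var(&allowed, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&scratch, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_allowed;
	}

	cpumask_and(scratch, in, cpu_online_mask);
	ret = cpumask_empty(scratch) ? -EINVAL : 0;

	free_cpumask_var(scratch);
free_allowed:
	free_cpumask_var(allowed);
	return ret;
}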
@@ -5480,17 +5595,20 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
5480asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, 5595asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
5481 unsigned long __user *user_mask_ptr) 5596 unsigned long __user *user_mask_ptr)
5482{ 5597{
5483 cpumask_t new_mask; 5598 cpumask_var_t new_mask;
5484 int retval; 5599 int retval;
5485 5600
5486 retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); 5601 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5487 if (retval) 5602 return -ENOMEM;
5488 return retval;
5489 5603
5490 return sched_setaffinity(pid, &new_mask); 5604 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5605 if (retval == 0)
5606 retval = sched_setaffinity(pid, new_mask);
5607 free_cpumask_var(new_mask);
5608 return retval;
5491} 5609}
5492 5610
5493long sched_getaffinity(pid_t pid, cpumask_t *mask) 5611long sched_getaffinity(pid_t pid, struct cpumask *mask)
5494{ 5612{
5495 struct task_struct *p; 5613 struct task_struct *p;
5496 int retval; 5614 int retval;
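Aside: the user-visible contract of sys_sched_setaffinity() is unchanged by the heap-allocated mask; a short userspace check using only the glibc wrapper (no kernel internals assumed):

/* Pin the calling process to CPU 0.
 * Build with: gcc -Wall -o setaff setaff.c */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);

	/* the kernel clamps or zero-extends the buffer to its own
	 * cpumask size, so passing sizeof(cpu_set_t) always works */
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("pid %d now restricted to CPU 0\n", getpid());
	return 0;
}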
@@ -5507,7 +5625,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
5507 if (retval) 5625 if (retval)
5508 goto out_unlock; 5626 goto out_unlock;
5509 5627
5510 cpus_and(*mask, p->cpus_allowed, cpu_online_map); 5628 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
5511 5629
5512out_unlock: 5630out_unlock:
5513 read_unlock(&tasklist_lock); 5631 read_unlock(&tasklist_lock);
@@ -5526,19 +5644,24 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
5526 unsigned long __user *user_mask_ptr) 5644 unsigned long __user *user_mask_ptr)
5527{ 5645{
5528 int ret; 5646 int ret;
5529 cpumask_t mask; 5647 cpumask_var_t mask;
5530 5648
5531 if (len < sizeof(cpumask_t)) 5649 if (len < cpumask_size())
5532 return -EINVAL; 5650 return -EINVAL;
5533 5651
5534 ret = sched_getaffinity(pid, &mask); 5652 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5535 if (ret < 0) 5653 return -ENOMEM;
5536 return ret;
5537 5654
5538 if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) 5655 ret = sched_getaffinity(pid, mask);
5539 return -EFAULT; 5656 if (ret == 0) {
5657 if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
5658 ret = -EFAULT;
5659 else
5660 ret = cpumask_size();
5661 }
5662 free_cpumask_var(mask);
5540 5663
5541 return sizeof(cpumask_t); 5664 return ret;
5542} 5665}
5543 5666
5544/** 5667/**
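Aside: with this patch sys_sched_getaffinity() returns cpumask_size() rather than sizeof(cpumask_t). At the raw syscall level that byte count is what userspace actually sees (the glibc wrapper hides it and returns 0); the exact value depends on the kernel's nr_cpu_ids/NR_CPUS configuration, so treat the number printed below as informational only.

/* Probe the raw sched_getaffinity return value and the allowed CPUs.
 * Build with: gcc -Wall -o getaff getaff.c */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;
	long ret;
	int cpu;

	CPU_ZERO(&set);
	ret = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);
	if (ret < 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("kernel cpumask is %ld bytes\n", ret);
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &set))
			printf("may run on CPU %d\n", cpu);
	return 0;
}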
@@ -5880,7 +6003,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
5880 idle->se.exec_start = sched_clock(); 6003 idle->se.exec_start = sched_clock();
5881 6004
5882 idle->prio = idle->normal_prio = MAX_PRIO; 6005 idle->prio = idle->normal_prio = MAX_PRIO;
5883 idle->cpus_allowed = cpumask_of_cpu(cpu); 6006 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
5884 __set_task_cpu(idle, cpu); 6007 __set_task_cpu(idle, cpu);
5885 6008
5886 rq->curr = rq->idle = idle; 6009 rq->curr = rq->idle = idle;
@@ -5907,9 +6030,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
5907 * indicates which cpus entered this state. This is used 6030 * indicates which cpus entered this state. This is used
5908 * in the rcu update to wait only for active cpus. For systems 6031 * in the rcu update to wait only for active cpus. For systems
5909 * which do not switch off the HZ timer nohz_cpu_mask should 6032 * which do not switch off the HZ timer nohz_cpu_mask should
5910 * always be CPU_MASK_NONE. 6033 * always be CPU_BITS_NONE.
5911 */ 6034 */
5912cpumask_t nohz_cpu_mask = CPU_MASK_NONE; 6035cpumask_var_t nohz_cpu_mask;
5913 6036
5914/* 6037/*
5915 * Increase the granularity value when there are more CPUs, 6038 * Increase the granularity value when there are more CPUs,
@@ -5964,7 +6087,7 @@ static inline void sched_init_granularity(void)
5964 * task must not exit() & deallocate itself prematurely. The 6087 * task must not exit() & deallocate itself prematurely. The
5965 * call is not atomic; no spinlocks may be held. 6088 * call is not atomic; no spinlocks may be held.
5966 */ 6089 */
5967int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) 6090int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5968{ 6091{
5969 struct migration_req req; 6092 struct migration_req req;
5970 unsigned long flags; 6093 unsigned long flags;
@@ -5972,13 +6095,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
5972 int ret = 0; 6095 int ret = 0;
5973 6096
5974 rq = task_rq_lock(p, &flags); 6097 rq = task_rq_lock(p, &flags);
5975 if (!cpus_intersects(*new_mask, cpu_online_map)) { 6098 if (!cpumask_intersects(new_mask, cpu_online_mask)) {
5976 ret = -EINVAL; 6099 ret = -EINVAL;
5977 goto out; 6100 goto out;
5978 } 6101 }
5979 6102
5980 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && 6103 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
5981 !cpus_equal(p->cpus_allowed, *new_mask))) { 6104 !cpumask_equal(&p->cpus_allowed, new_mask))) {
5982 ret = -EINVAL; 6105 ret = -EINVAL;
5983 goto out; 6106 goto out;
5984 } 6107 }
@@ -5986,15 +6109,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
5986 if (p->sched_class->set_cpus_allowed) 6109 if (p->sched_class->set_cpus_allowed)
5987 p->sched_class->set_cpus_allowed(p, new_mask); 6110 p->sched_class->set_cpus_allowed(p, new_mask);
5988 else { 6111 else {
5989 p->cpus_allowed = *new_mask; 6112 cpumask_copy(&p->cpus_allowed, new_mask);
5990 p->rt.nr_cpus_allowed = cpus_weight(*new_mask); 6113 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
5991 } 6114 }
5992 6115
5993 /* Can the task run on the task's current CPU? If so, we're done */ 6116 /* Can the task run on the task's current CPU? If so, we're done */
5994 if (cpu_isset(task_cpu(p), *new_mask)) 6117 if (cpumask_test_cpu(task_cpu(p), new_mask))
5995 goto out; 6118 goto out;
5996 6119
5997 if (migrate_task(p, any_online_cpu(*new_mask), &req)) { 6120 if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) {
5998 /* Need help from migration thread: drop lock and wait. */ 6121 /* Need help from migration thread: drop lock and wait. */
5999 task_rq_unlock(rq, &flags); 6122 task_rq_unlock(rq, &flags);
6000 wake_up_process(rq->migration_thread); 6123 wake_up_process(rq->migration_thread);
@@ -6036,7 +6159,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
6036 if (task_cpu(p) != src_cpu) 6159 if (task_cpu(p) != src_cpu)
6037 goto done; 6160 goto done;
6038 /* Affinity changed (again). */ 6161 /* Affinity changed (again). */
6039 if (!cpu_isset(dest_cpu, p->cpus_allowed)) 6162 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
6040 goto fail; 6163 goto fail;
6041 6164
6042 on_rq = p->se.on_rq; 6165 on_rq = p->se.on_rq;
@@ -6133,50 +6256,41 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
6133 */ 6256 */
6134static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 6257static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
6135{ 6258{
6136 unsigned long flags;
6137 cpumask_t mask;
6138 struct rq *rq;
6139 int dest_cpu; 6259 int dest_cpu;
6260 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
6140 6261
6141 do { 6262again:
6142 /* On same node? */ 6263 /* Look for allowed, online CPU in same node. */
6143 mask = node_to_cpumask(cpu_to_node(dead_cpu)); 6264 for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
6144 cpus_and(mask, mask, p->cpus_allowed); 6265 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
6145 dest_cpu = any_online_cpu(mask); 6266 goto move;
6146 6267
6147 /* On any allowed CPU? */ 6268 /* Any allowed, online CPU? */
6148 if (dest_cpu >= nr_cpu_ids) 6269 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
6149 dest_cpu = any_online_cpu(p->cpus_allowed); 6270 if (dest_cpu < nr_cpu_ids)
6271 goto move;
6150 6272
6151 /* No more Mr. Nice Guy. */ 6273 /* No more Mr. Nice Guy. */
6152 if (dest_cpu >= nr_cpu_ids) { 6274 if (dest_cpu >= nr_cpu_ids) {
6153 cpumask_t cpus_allowed; 6275 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
6276 dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
6154 6277
6155 cpuset_cpus_allowed_locked(p, &cpus_allowed); 6278 /*
6156 /* 6279 * Don't tell them about moving exiting tasks or
6157 * Try to stay on the same cpuset, where the 6280 * kernel threads (both mm NULL), since they never
6158 * current cpuset may be a subset of all cpus. 6281 * leave kernel.
6159 * The cpuset_cpus_allowed_locked() variant of 6282 */
6160 * cpuset_cpus_allowed() will not block. It must be 6283 if (p->mm && printk_ratelimit()) {
6161 * called within calls to cpuset_lock/cpuset_unlock. 6284 printk(KERN_INFO "process %d (%s) no "
6162 */ 6285 "longer affine to cpu%d\n",
6163 rq = task_rq_lock(p, &flags); 6286 task_pid_nr(p), p->comm, dead_cpu);
6164 p->cpus_allowed = cpus_allowed;
6165 dest_cpu = any_online_cpu(p->cpus_allowed);
6166 task_rq_unlock(rq, &flags);
6167
6168 /*
6169 * Don't tell them about moving exiting tasks or
6170 * kernel threads (both mm NULL), since they never
6171 * leave kernel.
6172 */
6173 if (p->mm && printk_ratelimit()) {
6174 printk(KERN_INFO "process %d (%s) no "
6175 "longer affine to cpu%d\n",
6176 task_pid_nr(p), p->comm, dead_cpu);
6177 }
6178 } 6287 }
6179 } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); 6288 }
6289
6290move:
6291 /* It can have affinity changed while we were choosing. */
6292 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
6293 goto again;
6180} 6294}
6181 6295
6182/* 6296/*
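Aside: the rewritten move_task_off_dead_cpu() above encodes a three-step fallback: an allowed CPU on the same node, any allowed online CPU, then widen cpus_allowed from the cpuset and retry. The same selection pulled out as a standalone sketch so the cpumask calls are easier to follow; demo_pick_dest_cpu() is an invented helper and the cpuset fallback is elided.

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/topology.h>

/* Pick where a task may be migrated when @dead_cpu goes away. */
static int demo_pick_dest_cpu(struct task_struct *p, int dead_cpu)
{
	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
	int dest_cpu;

	/* 1) prefer an allowed, online CPU on the same node */
	for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask)
		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
			return dest_cpu;

	/* 2) otherwise any allowed, online CPU */
	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
	if (dest_cpu < nr_cpu_ids)
		return dest_cpu;

	/* 3) caller widens p->cpus_allowed from the cpuset and retries */
	return -1;
}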
@@ -6188,7 +6302,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
6188 */ 6302 */
6189static void migrate_nr_uninterruptible(struct rq *rq_src) 6303static void migrate_nr_uninterruptible(struct rq *rq_src)
6190{ 6304{
6191 struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); 6305 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask));
6192 unsigned long flags; 6306 unsigned long flags;
6193 6307
6194 local_irq_save(flags); 6308 local_irq_save(flags);
@@ -6478,7 +6592,7 @@ static void set_rq_online(struct rq *rq)
6478 if (!rq->online) { 6592 if (!rq->online) {
6479 const struct sched_class *class; 6593 const struct sched_class *class;
6480 6594
6481 cpu_set(rq->cpu, rq->rd->online); 6595 cpumask_set_cpu(rq->cpu, rq->rd->online);
6482 rq->online = 1; 6596 rq->online = 1;
6483 6597
6484 for_each_class(class) { 6598 for_each_class(class) {
@@ -6498,7 +6612,7 @@ static void set_rq_offline(struct rq *rq)
6498 class->rq_offline(rq); 6612 class->rq_offline(rq);
6499 } 6613 }
6500 6614
6501 cpu_clear(rq->cpu, rq->rd->online); 6615 cpumask_clear_cpu(rq->cpu, rq->rd->online);
6502 rq->online = 0; 6616 rq->online = 0;
6503 } 6617 }
6504} 6618}
@@ -6539,7 +6653,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6539 rq = cpu_rq(cpu); 6653 rq = cpu_rq(cpu);
6540 spin_lock_irqsave(&rq->lock, flags); 6654 spin_lock_irqsave(&rq->lock, flags);
6541 if (rq->rd) { 6655 if (rq->rd) {
6542 BUG_ON(!cpu_isset(cpu, rq->rd->span)); 6656 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6543 6657
6544 set_rq_online(rq); 6658 set_rq_online(rq);
6545 } 6659 }
@@ -6553,7 +6667,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6553 break; 6667 break;
6554 /* Unbind it from offline cpu so it can run. Fall thru. */ 6668 /* Unbind it from offline cpu so it can run. Fall thru. */
6555 kthread_bind(cpu_rq(cpu)->migration_thread, 6669 kthread_bind(cpu_rq(cpu)->migration_thread,
6556 any_online_cpu(cpu_online_map)); 6670 cpumask_any(cpu_online_mask));
6557 kthread_stop(cpu_rq(cpu)->migration_thread); 6671 kthread_stop(cpu_rq(cpu)->migration_thread);
6558 cpu_rq(cpu)->migration_thread = NULL; 6672 cpu_rq(cpu)->migration_thread = NULL;
6559 break; 6673 break;
@@ -6603,7 +6717,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6603 rq = cpu_rq(cpu); 6717 rq = cpu_rq(cpu);
6604 spin_lock_irqsave(&rq->lock, flags); 6718 spin_lock_irqsave(&rq->lock, flags);
6605 if (rq->rd) { 6719 if (rq->rd) {
6606 BUG_ON(!cpu_isset(cpu, rq->rd->span)); 6720 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6607 set_rq_offline(rq); 6721 set_rq_offline(rq);
6608 } 6722 }
6609 spin_unlock_irqrestore(&rq->lock, flags); 6723 spin_unlock_irqrestore(&rq->lock, flags);
@@ -6642,13 +6756,13 @@ early_initcall(migration_init);
6642#ifdef CONFIG_SCHED_DEBUG 6756#ifdef CONFIG_SCHED_DEBUG
6643 6757
6644static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 6758static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6645 cpumask_t *groupmask) 6759 struct cpumask *groupmask)
6646{ 6760{
6647 struct sched_group *group = sd->groups; 6761 struct sched_group *group = sd->groups;
6648 char str[256]; 6762 char str[256];
6649 6763
6650 cpulist_scnprintf(str, sizeof(str), sd->span); 6764 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
6651 cpus_clear(*groupmask); 6765 cpumask_clear(groupmask);
6652 6766
6653 printk(KERN_DEBUG "%*s domain %d: ", level, "", level); 6767 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6654 6768
@@ -6662,11 +6776,11 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6662 6776
6663 printk(KERN_CONT "span %s level %s\n", str, sd->name); 6777 printk(KERN_CONT "span %s level %s\n", str, sd->name);
6664 6778
6665 if (!cpu_isset(cpu, sd->span)) { 6779 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6666 printk(KERN_ERR "ERROR: domain->span does not contain " 6780 printk(KERN_ERR "ERROR: domain->span does not contain "
6667 "CPU%d\n", cpu); 6781 "CPU%d\n", cpu);
6668 } 6782 }
6669 if (!cpu_isset(cpu, group->cpumask)) { 6783 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
6670 printk(KERN_ERR "ERROR: domain->groups does not contain" 6784 printk(KERN_ERR "ERROR: domain->groups does not contain"
6671 " CPU%d\n", cpu); 6785 " CPU%d\n", cpu);
6672 } 6786 }
@@ -6686,31 +6800,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6686 break; 6800 break;
6687 } 6801 }
6688 6802
6689 if (!cpus_weight(group->cpumask)) { 6803 if (!cpumask_weight(sched_group_cpus(group))) {
6690 printk(KERN_CONT "\n"); 6804 printk(KERN_CONT "\n");
6691 printk(KERN_ERR "ERROR: empty group\n"); 6805 printk(KERN_ERR "ERROR: empty group\n");
6692 break; 6806 break;
6693 } 6807 }
6694 6808
6695 if (cpus_intersects(*groupmask, group->cpumask)) { 6809 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
6696 printk(KERN_CONT "\n"); 6810 printk(KERN_CONT "\n");
6697 printk(KERN_ERR "ERROR: repeated CPUs\n"); 6811 printk(KERN_ERR "ERROR: repeated CPUs\n");
6698 break; 6812 break;
6699 } 6813 }
6700 6814
6701 cpus_or(*groupmask, *groupmask, group->cpumask); 6815 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
6702 6816
6703 cpulist_scnprintf(str, sizeof(str), group->cpumask); 6817 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
6704 printk(KERN_CONT " %s", str); 6818 printk(KERN_CONT " %s", str);
6705 6819
6706 group = group->next; 6820 group = group->next;
6707 } while (group != sd->groups); 6821 } while (group != sd->groups);
6708 printk(KERN_CONT "\n"); 6822 printk(KERN_CONT "\n");
6709 6823
6710 if (!cpus_equal(sd->span, *groupmask)) 6824 if (!cpumask_equal(sched_domain_span(sd), groupmask))
6711 printk(KERN_ERR "ERROR: groups don't span domain->span\n"); 6825 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
6712 6826
6713 if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) 6827 if (sd->parent &&
6828 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
6714 printk(KERN_ERR "ERROR: parent span is not a superset " 6829 printk(KERN_ERR "ERROR: parent span is not a superset "
6715 "of domain->span\n"); 6830 "of domain->span\n");
6716 return 0; 6831 return 0;
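Aside: sched_domain_debug_one() above is essentially a partition check written with the new cpumask API: every group must be non-empty, groups must not overlap, and their union must equal the domain span. A condensed sketch of just that check; demo_check_partition() is a made-up name and 'parts' stands in for the walked group list.

#include <linux/cpumask.h>

/*
 * Return 0 if the masks in parts[0..n-1] are pairwise disjoint and
 * together cover exactly @span, mirroring the debug checks above.
 */
static int demo_check_partition(const struct cpumask *span,
				const struct cpumask *const parts[], int n,
				struct cpumask *covered)
{
	int i;

	cpumask_clear(covered);
	for (i = 0; i < n; i++) {
		if (cpumask_empty(parts[i]))
			return -1;		/* empty group */
		if (cpumask_intersects(covered, parts[i]))
			return -1;		/* repeated CPUs */
		cpumask_or(covered, covered, parts[i]);
	}
	return cpumask_equal(covered, span) ? 0 : -1;
}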
@@ -6718,7 +6833,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6718 6833
6719static void sched_domain_debug(struct sched_domain *sd, int cpu) 6834static void sched_domain_debug(struct sched_domain *sd, int cpu)
6720{ 6835{
6721 cpumask_t *groupmask; 6836 cpumask_var_t groupmask;
6722 int level = 0; 6837 int level = 0;
6723 6838
6724 if (!sd) { 6839 if (!sd) {
@@ -6728,8 +6843,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
6728 6843
6729 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); 6844 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6730 6845
6731 groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); 6846 if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
6732 if (!groupmask) {
6733 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); 6847 printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6734 return; 6848 return;
6735 } 6849 }
@@ -6742,7 +6856,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
6742 if (!sd) 6856 if (!sd)
6743 break; 6857 break;
6744 } 6858 }
6745 kfree(groupmask); 6859 free_cpumask_var(groupmask);
6746} 6860}
6747#else /* !CONFIG_SCHED_DEBUG */ 6861#else /* !CONFIG_SCHED_DEBUG */
6748# define sched_domain_debug(sd, cpu) do { } while (0) 6862# define sched_domain_debug(sd, cpu) do { } while (0)
@@ -6750,7 +6864,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
6750 6864
6751static int sd_degenerate(struct sched_domain *sd) 6865static int sd_degenerate(struct sched_domain *sd)
6752{ 6866{
6753 if (cpus_weight(sd->span) == 1) 6867 if (cpumask_weight(sched_domain_span(sd)) == 1)
6754 return 1; 6868 return 1;
6755 6869
6756 /* Following flags need at least 2 groups */ 6870 /* Following flags need at least 2 groups */
@@ -6781,7 +6895,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
6781 if (sd_degenerate(parent)) 6895 if (sd_degenerate(parent))
6782 return 1; 6896 return 1;
6783 6897
6784 if (!cpus_equal(sd->span, parent->span)) 6898 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
6785 return 0; 6899 return 0;
6786 6900
6787 /* Does parent contain flags not in child? */ 6901 /* Does parent contain flags not in child? */
@@ -6805,6 +6919,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
6805 return 1; 6919 return 1;
6806} 6920}
6807 6921
6922static void free_rootdomain(struct root_domain *rd)
6923{
6924 cpupri_cleanup(&rd->cpupri);
6925
6926 free_cpumask_var(rd->rto_mask);
6927 free_cpumask_var(rd->online);
6928 free_cpumask_var(rd->span);
6929 kfree(rd);
6930}
6931
6808static void rq_attach_root(struct rq *rq, struct root_domain *rd) 6932static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6809{ 6933{
6810 unsigned long flags; 6934 unsigned long flags;
@@ -6814,38 +6938,63 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6814 if (rq->rd) { 6938 if (rq->rd) {
6815 struct root_domain *old_rd = rq->rd; 6939 struct root_domain *old_rd = rq->rd;
6816 6940
6817 if (cpu_isset(rq->cpu, old_rd->online)) 6941 if (cpumask_test_cpu(rq->cpu, old_rd->online))
6818 set_rq_offline(rq); 6942 set_rq_offline(rq);
6819 6943
6820 cpu_clear(rq->cpu, old_rd->span); 6944 cpumask_clear_cpu(rq->cpu, old_rd->span);
6821 6945
6822 if (atomic_dec_and_test(&old_rd->refcount)) 6946 if (atomic_dec_and_test(&old_rd->refcount))
6823 kfree(old_rd); 6947 free_rootdomain(old_rd);
6824 } 6948 }
6825 6949
6826 atomic_inc(&rd->refcount); 6950 atomic_inc(&rd->refcount);
6827 rq->rd = rd; 6951 rq->rd = rd;
6828 6952
6829 cpu_set(rq->cpu, rd->span); 6953 cpumask_set_cpu(rq->cpu, rd->span);
6830 if (cpu_isset(rq->cpu, cpu_online_map)) 6954 if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
6831 set_rq_online(rq); 6955 set_rq_online(rq);
6832 6956
6833 spin_unlock_irqrestore(&rq->lock, flags); 6957 spin_unlock_irqrestore(&rq->lock, flags);
6834} 6958}
6835 6959
6836static void init_rootdomain(struct root_domain *rd) 6960static int init_rootdomain(struct root_domain *rd, bool bootmem)
6837{ 6961{
6838 memset(rd, 0, sizeof(*rd)); 6962 memset(rd, 0, sizeof(*rd));
6839 6963
6840 cpus_clear(rd->span); 6964 if (bootmem) {
6841 cpus_clear(rd->online); 6965 alloc_bootmem_cpumask_var(&def_root_domain.span);
6966 alloc_bootmem_cpumask_var(&def_root_domain.online);
6967 alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
6968 cpupri_init(&rd->cpupri, true);
6969 return 0;
6970 }
6971
6972 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
6973 goto free_rd;
6974 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
6975 goto free_span;
6976 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
6977 goto free_online;
6978
6979 if (cpupri_init(&rd->cpupri, false) != 0)
6980 goto free_rto_mask;
6981 return 0;
6842 6982
6843 cpupri_init(&rd->cpupri); 6983free_rto_mask:
6984 free_cpumask_var(rd->rto_mask);
6985free_online:
6986 free_cpumask_var(rd->online);
6987free_span:
6988 free_cpumask_var(rd->span);
6989free_rd:
6990 kfree(rd);
6991 return -ENOMEM;
6844} 6992}
6845 6993
6846static void init_defrootdomain(void) 6994static void init_defrootdomain(void)
6847{ 6995{
6848 init_rootdomain(&def_root_domain); 6996 init_rootdomain(&def_root_domain, true);
6997
6849 atomic_set(&def_root_domain.refcount, 1); 6998 atomic_set(&def_root_domain.refcount, 1);
6850} 6999}
6851 7000
@@ -6857,7 +7006,10 @@ static struct root_domain *alloc_rootdomain(void)
6857 if (!rd) 7006 if (!rd)
6858 return NULL; 7007 return NULL;
6859 7008
6860 init_rootdomain(rd); 7009 if (init_rootdomain(rd, false) != 0) {
7010 kfree(rd);
7011 return NULL;
7012 }
6861 7013
6862 return rd; 7014 return rd;
6863} 7015}
@@ -6899,19 +7051,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6899} 7051}
6900 7052
6901/* cpus with isolated domains */ 7053/* cpus with isolated domains */
6902static cpumask_t cpu_isolated_map = CPU_MASK_NONE; 7054static cpumask_var_t cpu_isolated_map;
6903 7055
6904/* Setup the mask of cpus configured for isolated domains */ 7056/* Setup the mask of cpus configured for isolated domains */
6905static int __init isolated_cpu_setup(char *str) 7057static int __init isolated_cpu_setup(char *str)
6906{ 7058{
6907 static int __initdata ints[NR_CPUS]; 7059 cpulist_parse(str, cpu_isolated_map);
6908 int i;
6909
6910 str = get_options(str, ARRAY_SIZE(ints), ints);
6911 cpus_clear(cpu_isolated_map);
6912 for (i = 1; i <= ints[0]; i++)
6913 if (ints[i] < NR_CPUS)
6914 cpu_set(ints[i], cpu_isolated_map);
6915 return 1; 7060 return 1;
6916} 7061}
6917 7062
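Aside: isolated_cpu_setup() now hands the raw "isolcpus=" string to cpulist_parse() instead of open-coding get_options(). For the list syntax it accepts ("0,3,5-7" and the like), here is a self-contained userspace analog; it only illustrates the format and is not the kernel's parser.

/* Parse a cpu list such as "0,3,5-7" into a bitmask of up to 64 CPUs.
 * Build with: gcc -Wall -o cpulist cpulist.c && ./cpulist "0,3,5-7" */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	unsigned long long mask = 0;
	const char *s = argc > 1 ? argv[1] : "0";
	char *end;

	while (*s) {
		long a = strtol(s, &end, 10), b = a;

		if (end == s)			/* not a number: stop */
			break;
		if (*end == '-')		/* range "a-b" */
			b = strtol(end + 1, &end, 10);
		for (; a >= 0 && a <= b && a < 64; a++)
			mask |= 1ULL << a;
		s = (*end == ',') ? end + 1 : end;
	}
	printf("mask = 0x%llx\n", mask);
	return 0;
}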
@@ -6920,42 +7065,43 @@ __setup("isolcpus=", isolated_cpu_setup);
6920/* 7065/*
6921 * init_sched_build_groups takes the cpumask we wish to span, and a pointer 7066 * init_sched_build_groups takes the cpumask we wish to span, and a pointer
6922 * to a function which identifies what group(along with sched group) a CPU 7067 * to a function which identifies what group(along with sched group) a CPU
6923 * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS 7068 * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
6924 * (due to the fact that we keep track of groups covered with a cpumask_t). 7069 * (due to the fact that we keep track of groups covered with a struct cpumask).
6925 * 7070 *
6926 * init_sched_build_groups will build a circular linked list of the groups 7071 * init_sched_build_groups will build a circular linked list of the groups
6927 * covered by the given span, and will set each group's ->cpumask correctly, 7072 * covered by the given span, and will set each group's ->cpumask correctly,
6928 * and ->cpu_power to 0. 7073 * and ->cpu_power to 0.
6929 */ 7074 */
6930static void 7075static void
6931init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, 7076init_sched_build_groups(const struct cpumask *span,
6932 int (*group_fn)(int cpu, const cpumask_t *cpu_map, 7077 const struct cpumask *cpu_map,
7078 int (*group_fn)(int cpu, const struct cpumask *cpu_map,
6933 struct sched_group **sg, 7079 struct sched_group **sg,
6934 cpumask_t *tmpmask), 7080 struct cpumask *tmpmask),
6935 cpumask_t *covered, cpumask_t *tmpmask) 7081 struct cpumask *covered, struct cpumask *tmpmask)
6936{ 7082{
6937 struct sched_group *first = NULL, *last = NULL; 7083 struct sched_group *first = NULL, *last = NULL;
6938 int i; 7084 int i;
6939 7085
6940 cpus_clear(*covered); 7086 cpumask_clear(covered);
6941 7087
6942 for_each_cpu_mask_nr(i, *span) { 7088 for_each_cpu(i, span) {
6943 struct sched_group *sg; 7089 struct sched_group *sg;
6944 int group = group_fn(i, cpu_map, &sg, tmpmask); 7090 int group = group_fn(i, cpu_map, &sg, tmpmask);
6945 int j; 7091 int j;
6946 7092
6947 if (cpu_isset(i, *covered)) 7093 if (cpumask_test_cpu(i, covered))
6948 continue; 7094 continue;
6949 7095
6950 cpus_clear(sg->cpumask); 7096 cpumask_clear(sched_group_cpus(sg));
6951 sg->__cpu_power = 0; 7097 sg->__cpu_power = 0;
6952 7098
6953 for_each_cpu_mask_nr(j, *span) { 7099 for_each_cpu(j, span) {
6954 if (group_fn(j, cpu_map, NULL, tmpmask) != group) 7100 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
6955 continue; 7101 continue;
6956 7102
6957 cpu_set(j, *covered); 7103 cpumask_set_cpu(j, covered);
6958 cpu_set(j, sg->cpumask); 7104 cpumask_set_cpu(j, sched_group_cpus(sg));
6959 } 7105 }
6960 if (!first) 7106 if (!first)
6961 first = sg; 7107 first = sg;
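Aside: init_sched_build_groups() above uses a scratch "covered" mask plus group_fn() as a keying function so that each group is built exactly once per span. The same idea in miniature, grouping CPUs by an arbitrary key; all demo_* names are invented and the key function here is simply cpu % 2.

#include <linux/cpumask.h>

/* Toy keying function standing in for group_fn(): even vs odd CPUs. */
static int demo_key(int cpu)
{
	return cpu & 1;
}

/*
 * Build per-key masks over @span the way init_sched_build_groups()
 * does: skip CPUs already covered, then sweep the span once per new
 * key and mark every CPU that joined the group as covered.
 */
static void demo_build_groups(const struct cpumask *span,
			      struct cpumask *covered,
			      struct cpumask *bucket[2])
{
	int i, j;

	cpumask_clear(covered);
	for_each_cpu(i, span) {
		int key = demo_key(i);

		if (cpumask_test_cpu(i, covered))
			continue;
		cpumask_clear(bucket[key]);
		for_each_cpu(j, span) {
			if (demo_key(j) != key)
				continue;
			cpumask_set_cpu(j, covered);
			cpumask_set_cpu(j, bucket[key]);
		}
	}
}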
@@ -7019,23 +7165,21 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
7019 * should be one that prevents unnecessary balancing, but also spreads tasks 7165 * should be one that prevents unnecessary balancing, but also spreads tasks
7020 * out optimally. 7166 * out optimally.
7021 */ 7167 */
7022static void sched_domain_node_span(int node, cpumask_t *span) 7168static void sched_domain_node_span(int node, struct cpumask *span)
7023{ 7169{
7024 nodemask_t used_nodes; 7170 nodemask_t used_nodes;
7025 node_to_cpumask_ptr(nodemask, node);
7026 int i; 7171 int i;
7027 7172
7028 cpus_clear(*span); 7173 cpumask_clear(span);
7029 nodes_clear(used_nodes); 7174 nodes_clear(used_nodes);
7030 7175
7031 cpus_or(*span, *span, *nodemask); 7176 cpumask_or(span, span, cpumask_of_node(node));
7032 node_set(node, used_nodes); 7177 node_set(node, used_nodes);
7033 7178
7034 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { 7179 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
7035 int next_node = find_next_best_node(node, &used_nodes); 7180 int next_node = find_next_best_node(node, &used_nodes);
7036 7181
7037 node_to_cpumask_ptr_next(nodemask, next_node); 7182 cpumask_or(span, span, cpumask_of_node(next_node));
7038 cpus_or(*span, *span, *nodemask);
7039 } 7183 }
7040} 7184}
7041#endif /* CONFIG_NUMA */ 7185#endif /* CONFIG_NUMA */
@@ -7043,18 +7187,33 @@ static void sched_domain_node_span(int node, cpumask_t *span)
7043int sched_smt_power_savings = 0, sched_mc_power_savings = 0; 7187int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
7044 7188
7045/* 7189/*
7190 * The cpus mask in sched_group and sched_domain hangs off the end.
7191 * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
7192 * for nr_cpu_ids < CONFIG_NR_CPUS.
7193 */
7194struct static_sched_group {
7195 struct sched_group sg;
7196 DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
7197};
7198
7199struct static_sched_domain {
7200 struct sched_domain sd;
7201 DECLARE_BITMAP(span, CONFIG_NR_CPUS);
7202};
7203
7204/*
7046 * SMT sched-domains: 7205 * SMT sched-domains:
7047 */ 7206 */
7048#ifdef CONFIG_SCHED_SMT 7207#ifdef CONFIG_SCHED_SMT
7049static DEFINE_PER_CPU(struct sched_domain, cpu_domains); 7208static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
7050static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); 7209static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
7051 7210
7052static int 7211static int
7053cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7212cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
7054 cpumask_t *unused) 7213 struct sched_group **sg, struct cpumask *unused)
7055{ 7214{
7056 if (sg) 7215 if (sg)
7057 *sg = &per_cpu(sched_group_cpus, cpu); 7216 *sg = &per_cpu(sched_group_cpus, cpu).sg;
7058 return cpu; 7217 return cpu;
7059} 7218}
7060#endif /* CONFIG_SCHED_SMT */ 7219#endif /* CONFIG_SCHED_SMT */
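Aside: the static_sched_group/static_sched_domain wrappers above reserve CONFIG_NR_CPUS bits of storage directly behind the structure, so that sched_group_cpus()/sched_domain_span() can hand out the tail as a struct cpumask; the FIXME in the hunk notes this still wastes space when nr_cpu_ids < CONFIG_NR_CPUS. A minimal sketch of the layout trick with invented names (demo_obj, demo_obj_static, demo_obj_cpus):

#include <linux/cpumask.h>
#include <linux/types.h>

/* An object whose variable-size cpumask lives directly behind it. */
struct demo_obj {
	int id;
	/* must stay last: accessed through to_cpumask() below */
	unsigned long cpus[0];
};

/* Statically sized backing storage for the trailing cpumask. */
struct demo_obj_static {
	struct demo_obj obj;
	DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
};

static inline struct cpumask *demo_obj_cpus(struct demo_obj *o)
{
	return to_cpumask(o->cpus);
}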
@@ -7063,56 +7222,53 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
7063 * multi-core sched-domains: 7222 * multi-core sched-domains:
7064 */ 7223 */
7065#ifdef CONFIG_SCHED_MC 7224#ifdef CONFIG_SCHED_MC
7066static DEFINE_PER_CPU(struct sched_domain, core_domains); 7225static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
7067static DEFINE_PER_CPU(struct sched_group, sched_group_core); 7226static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
7068#endif /* CONFIG_SCHED_MC */ 7227#endif /* CONFIG_SCHED_MC */
7069 7228
7070#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) 7229#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
7071static int 7230static int
7072cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7231cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
7073 cpumask_t *mask) 7232 struct sched_group **sg, struct cpumask *mask)
7074{ 7233{
7075 int group; 7234 int group;
7076 7235
7077 *mask = per_cpu(cpu_sibling_map, cpu); 7236 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
7078 cpus_and(*mask, *mask, *cpu_map); 7237 group = cpumask_first(mask);
7079 group = first_cpu(*mask);
7080 if (sg) 7238 if (sg)
7081 *sg = &per_cpu(sched_group_core, group); 7239 *sg = &per_cpu(sched_group_core, group).sg;
7082 return group; 7240 return group;
7083} 7241}
7084#elif defined(CONFIG_SCHED_MC) 7242#elif defined(CONFIG_SCHED_MC)
7085static int 7243static int
7086cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7244cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
7087 cpumask_t *unused) 7245 struct sched_group **sg, struct cpumask *unused)
7088{ 7246{
7089 if (sg) 7247 if (sg)
7090 *sg = &per_cpu(sched_group_core, cpu); 7248 *sg = &per_cpu(sched_group_core, cpu).sg;
7091 return cpu; 7249 return cpu;
7092} 7250}
7093#endif 7251#endif
7094 7252
7095static DEFINE_PER_CPU(struct sched_domain, phys_domains); 7253static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
7096static DEFINE_PER_CPU(struct sched_group, sched_group_phys); 7254static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
7097 7255
7098static int 7256static int
7099cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, 7257cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
7100 cpumask_t *mask) 7258 struct sched_group **sg, struct cpumask *mask)
7101{ 7259{
7102 int group; 7260 int group;
7103#ifdef CONFIG_SCHED_MC 7261#ifdef CONFIG_SCHED_MC
7104 *mask = cpu_coregroup_map(cpu); 7262 cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
7105 cpus_and(*mask, *mask, *cpu_map); 7263 group = cpumask_first(mask);
7106 group = first_cpu(*mask);
7107#elif defined(CONFIG_SCHED_SMT) 7264#elif defined(CONFIG_SCHED_SMT)
7108 *mask = per_cpu(cpu_sibling_map, cpu); 7265 cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
7109 cpus_and(*mask, *mask, *cpu_map); 7266 group = cpumask_first(mask);
7110 group = first_cpu(*mask);
7111#else 7267#else
7112 group = cpu; 7268 group = cpu;
7113#endif 7269#endif
7114 if (sg) 7270 if (sg)
7115 *sg = &per_cpu(sched_group_phys, group); 7271 *sg = &per_cpu(sched_group_phys, group).sg;
7116 return group; 7272 return group;
7117} 7273}
7118 7274
@@ -7126,19 +7282,19 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains);
7126static struct sched_group ***sched_group_nodes_bycpu; 7282static struct sched_group ***sched_group_nodes_bycpu;
7127 7283
7128static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); 7284static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
7129static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); 7285static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
7130 7286
7131static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, 7287static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
7132 struct sched_group **sg, cpumask_t *nodemask) 7288 struct sched_group **sg,
7289 struct cpumask *nodemask)
7133{ 7290{
7134 int group; 7291 int group;
7135 7292
7136 *nodemask = node_to_cpumask(cpu_to_node(cpu)); 7293 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
7137 cpus_and(*nodemask, *nodemask, *cpu_map); 7294 group = cpumask_first(nodemask);
7138 group = first_cpu(*nodemask);
7139 7295
7140 if (sg) 7296 if (sg)
7141 *sg = &per_cpu(sched_group_allnodes, group); 7297 *sg = &per_cpu(sched_group_allnodes, group).sg;
7142 return group; 7298 return group;
7143} 7299}
7144 7300
@@ -7150,11 +7306,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
7150 if (!sg) 7306 if (!sg)
7151 return; 7307 return;
7152 do { 7308 do {
7153 for_each_cpu_mask_nr(j, sg->cpumask) { 7309 for_each_cpu(j, sched_group_cpus(sg)) {
7154 struct sched_domain *sd; 7310 struct sched_domain *sd;
7155 7311
7156 sd = &per_cpu(phys_domains, j); 7312 sd = &per_cpu(phys_domains, j).sd;
7157 if (j != first_cpu(sd->groups->cpumask)) { 7313 if (j != cpumask_first(sched_group_cpus(sd->groups))) {
7158 /* 7314 /*
7159 * Only add "power" once for each 7315 * Only add "power" once for each
7160 * physical package. 7316 * physical package.
@@ -7171,11 +7327,12 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
7171 7327
7172#ifdef CONFIG_NUMA 7328#ifdef CONFIG_NUMA
7173/* Free memory allocated for various sched_group structures */ 7329/* Free memory allocated for various sched_group structures */
7174static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) 7330static void free_sched_groups(const struct cpumask *cpu_map,
7331 struct cpumask *nodemask)
7175{ 7332{
7176 int cpu, i; 7333 int cpu, i;
7177 7334
7178 for_each_cpu_mask_nr(cpu, *cpu_map) { 7335 for_each_cpu(cpu, cpu_map) {
7179 struct sched_group **sched_group_nodes 7336 struct sched_group **sched_group_nodes
7180 = sched_group_nodes_bycpu[cpu]; 7337 = sched_group_nodes_bycpu[cpu];
7181 7338
@@ -7185,9 +7342,8 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
7185 for (i = 0; i < nr_node_ids; i++) { 7342 for (i = 0; i < nr_node_ids; i++) {
7186 struct sched_group *oldsg, *sg = sched_group_nodes[i]; 7343 struct sched_group *oldsg, *sg = sched_group_nodes[i];
7187 7344
7188 *nodemask = node_to_cpumask(i); 7345 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7189 cpus_and(*nodemask, *nodemask, *cpu_map); 7346 if (cpumask_empty(nodemask))
7190 if (cpus_empty(*nodemask))
7191 continue; 7347 continue;
7192 7348
7193 if (sg == NULL) 7349 if (sg == NULL)
@@ -7205,7 +7361,8 @@ next_sg:
7205 } 7361 }
7206} 7362}
7207#else /* !CONFIG_NUMA */ 7363#else /* !CONFIG_NUMA */
7208static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) 7364static void free_sched_groups(const struct cpumask *cpu_map,
7365 struct cpumask *nodemask)
7209{ 7366{
7210} 7367}
7211#endif /* CONFIG_NUMA */ 7368#endif /* CONFIG_NUMA */
@@ -7231,7 +7388,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7231 7388
7232 WARN_ON(!sd || !sd->groups); 7389 WARN_ON(!sd || !sd->groups);
7233 7390
7234 if (cpu != first_cpu(sd->groups->cpumask)) 7391 if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
7235 return; 7392 return;
7236 7393
7237 child = sd->child; 7394 child = sd->child;
@@ -7296,48 +7453,6 @@ SD_INIT_FUNC(CPU)
7296 SD_INIT_FUNC(MC) 7453 SD_INIT_FUNC(MC)
7297#endif 7454#endif
7298 7455
7299/*
7300 * To minimize stack usage kmalloc room for cpumasks and share the
7301 * space as the usage in build_sched_domains() dictates. Used only
7302 * if the amount of space is significant.
7303 */
7304struct allmasks {
7305 cpumask_t tmpmask; /* make this one first */
7306 union {
7307 cpumask_t nodemask;
7308 cpumask_t this_sibling_map;
7309 cpumask_t this_core_map;
7310 };
7311 cpumask_t send_covered;
7312
7313#ifdef CONFIG_NUMA
7314 cpumask_t domainspan;
7315 cpumask_t covered;
7316 cpumask_t notcovered;
7317#endif
7318};
7319
7320#if NR_CPUS > 128
7321#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
7322static inline void sched_cpumask_alloc(struct allmasks **masks)
7323{
7324 *masks = kmalloc(sizeof(**masks), GFP_KERNEL);
7325}
7326static inline void sched_cpumask_free(struct allmasks *masks)
7327{
7328 kfree(masks);
7329}
7330#else
7331#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
7332static inline void sched_cpumask_alloc(struct allmasks **masks)
7333{ }
7334static inline void sched_cpumask_free(struct allmasks *masks)
7335{ }
7336#endif
7337
7338#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
7339 ((unsigned long)(a) + offsetof(struct allmasks, v))
7340
7341static int default_relax_domain_level = -1; 7456static int default_relax_domain_level = -1;
7342 7457
7343static int __init setup_relax_domain_level(char *str) 7458static int __init setup_relax_domain_level(char *str)
@@ -7377,17 +7492,38 @@ static void set_domain_attribute(struct sched_domain *sd,
7377 * Build sched domains for a given set of cpus and attach the sched domains 7492 * Build sched domains for a given set of cpus and attach the sched domains
7378 * to the individual cpus 7493 * to the individual cpus
7379 */ 7494 */
7380static int __build_sched_domains(const cpumask_t *cpu_map, 7495static int __build_sched_domains(const struct cpumask *cpu_map,
7381 struct sched_domain_attr *attr) 7496 struct sched_domain_attr *attr)
7382{ 7497{
7383 int i; 7498 int i, err = -ENOMEM;
7384 struct root_domain *rd; 7499 struct root_domain *rd;
7385 SCHED_CPUMASK_DECLARE(allmasks); 7500 cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
7386 cpumask_t *tmpmask; 7501 tmpmask;
7387#ifdef CONFIG_NUMA 7502#ifdef CONFIG_NUMA
7503 cpumask_var_t domainspan, covered, notcovered;
7388 struct sched_group **sched_group_nodes = NULL; 7504 struct sched_group **sched_group_nodes = NULL;
7389 int sd_allnodes = 0; 7505 int sd_allnodes = 0;
7390 7506
7507 if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
7508 goto out;
7509 if (!alloc_cpumask_var(&covered, GFP_KERNEL))
7510 goto free_domainspan;
7511 if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
7512 goto free_covered;
7513#endif
7514
7515 if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
7516 goto free_notcovered;
7517 if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
7518 goto free_nodemask;
7519 if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
7520 goto free_this_sibling_map;
7521 if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
7522 goto free_this_core_map;
7523 if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
7524 goto free_send_covered;
7525
7526#ifdef CONFIG_NUMA
7391 /* 7527 /*
7392 * Allocate the per-node list of sched groups 7528 * Allocate the per-node list of sched groups
7393 */ 7529 */
@@ -7395,54 +7531,35 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7395 GFP_KERNEL); 7531 GFP_KERNEL);
7396 if (!sched_group_nodes) { 7532 if (!sched_group_nodes) {
7397 printk(KERN_WARNING "Can not alloc sched group node list\n"); 7533 printk(KERN_WARNING "Can not alloc sched group node list\n");
7398 return -ENOMEM; 7534 goto free_tmpmask;
7399 } 7535 }
7400#endif 7536#endif
7401 7537
7402 rd = alloc_rootdomain(); 7538 rd = alloc_rootdomain();
7403 if (!rd) { 7539 if (!rd) {
7404 printk(KERN_WARNING "Cannot alloc root domain\n"); 7540 printk(KERN_WARNING "Cannot alloc root domain\n");
7405#ifdef CONFIG_NUMA 7541 goto free_sched_groups;
7406 kfree(sched_group_nodes);
7407#endif
7408 return -ENOMEM;
7409 } 7542 }
7410 7543
7411 /* get space for all scratch cpumask variables */
7412 sched_cpumask_alloc(&allmasks);
7413 if (!allmasks) {
7414 printk(KERN_WARNING "Cannot alloc cpumask array\n");
7415 kfree(rd);
7416#ifdef CONFIG_NUMA 7544#ifdef CONFIG_NUMA
7417 kfree(sched_group_nodes); 7545 sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
7418#endif
7419 return -ENOMEM;
7420 }
7421
7422 tmpmask = (cpumask_t *)allmasks;
7423
7424
7425#ifdef CONFIG_NUMA
7426 sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
7427#endif 7546#endif
7428 7547
7429 /* 7548 /*
7430 * Set up domains for cpus specified by the cpu_map. 7549 * Set up domains for cpus specified by the cpu_map.
7431 */ 7550 */
7432 for_each_cpu_mask_nr(i, *cpu_map) { 7551 for_each_cpu(i, cpu_map) {
7433 struct sched_domain *sd = NULL, *p; 7552 struct sched_domain *sd = NULL, *p;
7434 SCHED_CPUMASK_VAR(nodemask, allmasks);
7435 7553
7436 *nodemask = node_to_cpumask(cpu_to_node(i)); 7554 cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
7437 cpus_and(*nodemask, *nodemask, *cpu_map);
7438 7555
7439#ifdef CONFIG_NUMA 7556#ifdef CONFIG_NUMA
7440 if (cpus_weight(*cpu_map) > 7557 if (cpumask_weight(cpu_map) >
7441 SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { 7558 SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
7442 sd = &per_cpu(allnodes_domains, i); 7559 sd = &per_cpu(allnodes_domains, i);
7443 SD_INIT(sd, ALLNODES); 7560 SD_INIT(sd, ALLNODES);
7444 set_domain_attribute(sd, attr); 7561 set_domain_attribute(sd, attr);
7445 sd->span = *cpu_map; 7562 cpumask_copy(sched_domain_span(sd), cpu_map);
7446 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); 7563 cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
7447 p = sd; 7564 p = sd;
7448 sd_allnodes = 1; 7565 sd_allnodes = 1;
@@ -7452,18 +7569,19 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7452 sd = &per_cpu(node_domains, i); 7569 sd = &per_cpu(node_domains, i);
7453 SD_INIT(sd, NODE); 7570 SD_INIT(sd, NODE);
7454 set_domain_attribute(sd, attr); 7571 set_domain_attribute(sd, attr);
7455 sched_domain_node_span(cpu_to_node(i), &sd->span); 7572 sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
7456 sd->parent = p; 7573 sd->parent = p;
7457 if (p) 7574 if (p)
7458 p->child = sd; 7575 p->child = sd;
7459 cpus_and(sd->span, sd->span, *cpu_map); 7576 cpumask_and(sched_domain_span(sd),
7577 sched_domain_span(sd), cpu_map);
7460#endif 7578#endif
7461 7579
7462 p = sd; 7580 p = sd;
7463 sd = &per_cpu(phys_domains, i); 7581 sd = &per_cpu(phys_domains, i).sd;
7464 SD_INIT(sd, CPU); 7582 SD_INIT(sd, CPU);
7465 set_domain_attribute(sd, attr); 7583 set_domain_attribute(sd, attr);
7466 sd->span = *nodemask; 7584 cpumask_copy(sched_domain_span(sd), nodemask);
7467 sd->parent = p; 7585 sd->parent = p;
7468 if (p) 7586 if (p)
7469 p->child = sd; 7587 p->child = sd;
@@ -7471,11 +7589,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7471 7589
7472#ifdef CONFIG_SCHED_MC 7590#ifdef CONFIG_SCHED_MC
7473 p = sd; 7591 p = sd;
7474 sd = &per_cpu(core_domains, i); 7592 sd = &per_cpu(core_domains, i).sd;
7475 SD_INIT(sd, MC); 7593 SD_INIT(sd, MC);
7476 set_domain_attribute(sd, attr); 7594 set_domain_attribute(sd, attr);
7477 sd->span = cpu_coregroup_map(i); 7595 cpumask_and(sched_domain_span(sd), cpu_map,
7478 cpus_and(sd->span, sd->span, *cpu_map); 7596 cpu_coregroup_mask(i));
7479 sd->parent = p; 7597 sd->parent = p;
7480 p->child = sd; 7598 p->child = sd;
7481 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); 7599 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7483,11 +7601,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7483 7601
7484#ifdef CONFIG_SCHED_SMT 7602#ifdef CONFIG_SCHED_SMT
7485 p = sd; 7603 p = sd;
7486 sd = &per_cpu(cpu_domains, i); 7604 sd = &per_cpu(cpu_domains, i).sd;
7487 SD_INIT(sd, SIBLING); 7605 SD_INIT(sd, SIBLING);
7488 set_domain_attribute(sd, attr); 7606 set_domain_attribute(sd, attr);
7489 sd->span = per_cpu(cpu_sibling_map, i); 7607 cpumask_and(sched_domain_span(sd),
7490 cpus_and(sd->span, sd->span, *cpu_map); 7608 &per_cpu(cpu_sibling_map, i), cpu_map);
7491 sd->parent = p; 7609 sd->parent = p;
7492 p->child = sd; 7610 p->child = sd;
7493 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); 7611 cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7496,13 +7614,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7496 7614
7497#ifdef CONFIG_SCHED_SMT 7615#ifdef CONFIG_SCHED_SMT
7498 /* Set up CPU (sibling) groups */ 7616 /* Set up CPU (sibling) groups */
7499 for_each_cpu_mask_nr(i, *cpu_map) { 7617 for_each_cpu(i, cpu_map) {
7500 SCHED_CPUMASK_VAR(this_sibling_map, allmasks); 7618 cpumask_and(this_sibling_map,
7501 SCHED_CPUMASK_VAR(send_covered, allmasks); 7619 &per_cpu(cpu_sibling_map, i), cpu_map);
7502 7620 if (i != cpumask_first(this_sibling_map))
7503 *this_sibling_map = per_cpu(cpu_sibling_map, i);
7504 cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
7505 if (i != first_cpu(*this_sibling_map))
7506 continue; 7621 continue;
7507 7622
7508 init_sched_build_groups(this_sibling_map, cpu_map, 7623 init_sched_build_groups(this_sibling_map, cpu_map,
@@ -7513,13 +7628,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7513 7628
7514#ifdef CONFIG_SCHED_MC 7629#ifdef CONFIG_SCHED_MC
7515 /* Set up multi-core groups */ 7630 /* Set up multi-core groups */
7516 for_each_cpu_mask_nr(i, *cpu_map) { 7631 for_each_cpu(i, cpu_map) {
7517 SCHED_CPUMASK_VAR(this_core_map, allmasks); 7632 cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
7518 SCHED_CPUMASK_VAR(send_covered, allmasks); 7633 if (i != cpumask_first(this_core_map))
7519
7520 *this_core_map = cpu_coregroup_map(i);
7521 cpus_and(*this_core_map, *this_core_map, *cpu_map);
7522 if (i != first_cpu(*this_core_map))
7523 continue; 7634 continue;
7524 7635
7525 init_sched_build_groups(this_core_map, cpu_map, 7636 init_sched_build_groups(this_core_map, cpu_map,
@@ -7530,12 +7641,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7530 7641
7531 /* Set up physical groups */ 7642 /* Set up physical groups */
7532 for (i = 0; i < nr_node_ids; i++) { 7643 for (i = 0; i < nr_node_ids; i++) {
7533 SCHED_CPUMASK_VAR(nodemask, allmasks); 7644 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7534 SCHED_CPUMASK_VAR(send_covered, allmasks); 7645 if (cpumask_empty(nodemask))
7535
7536 *nodemask = node_to_cpumask(i);
7537 cpus_and(*nodemask, *nodemask, *cpu_map);
7538 if (cpus_empty(*nodemask))
7539 continue; 7646 continue;
7540 7647
7541 init_sched_build_groups(nodemask, cpu_map, 7648 init_sched_build_groups(nodemask, cpu_map,
@@ -7546,8 +7653,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7546#ifdef CONFIG_NUMA 7653#ifdef CONFIG_NUMA
7547 /* Set up node groups */ 7654 /* Set up node groups */
7548 if (sd_allnodes) { 7655 if (sd_allnodes) {
7549 SCHED_CPUMASK_VAR(send_covered, allmasks);
7550
7551 init_sched_build_groups(cpu_map, cpu_map, 7656 init_sched_build_groups(cpu_map, cpu_map,
7552 &cpu_to_allnodes_group, 7657 &cpu_to_allnodes_group,
7553 send_covered, tmpmask); 7658 send_covered, tmpmask);
@@ -7556,58 +7661,53 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7556 for (i = 0; i < nr_node_ids; i++) { 7661 for (i = 0; i < nr_node_ids; i++) {
7557 /* Set up node groups */ 7662 /* Set up node groups */
7558 struct sched_group *sg, *prev; 7663 struct sched_group *sg, *prev;
7559 SCHED_CPUMASK_VAR(nodemask, allmasks);
7560 SCHED_CPUMASK_VAR(domainspan, allmasks);
7561 SCHED_CPUMASK_VAR(covered, allmasks);
7562 int j; 7664 int j;
7563 7665
7564 *nodemask = node_to_cpumask(i); 7666 cpumask_clear(covered);
7565 cpus_clear(*covered); 7667 cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
7566 7668 if (cpumask_empty(nodemask)) {
7567 cpus_and(*nodemask, *nodemask, *cpu_map);
7568 if (cpus_empty(*nodemask)) {
7569 sched_group_nodes[i] = NULL; 7669 sched_group_nodes[i] = NULL;
7570 continue; 7670 continue;
7571 } 7671 }
7572 7672
7573 sched_domain_node_span(i, domainspan); 7673 sched_domain_node_span(i, domainspan);
7574 cpus_and(*domainspan, *domainspan, *cpu_map); 7674 cpumask_and(domainspan, domainspan, cpu_map);
7575 7675
7576 sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); 7676 sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
7677 GFP_KERNEL, i);
7577 if (!sg) { 7678 if (!sg) {
7578 printk(KERN_WARNING "Can not alloc domain group for " 7679 printk(KERN_WARNING "Can not alloc domain group for "
7579 "node %d\n", i); 7680 "node %d\n", i);
7580 goto error; 7681 goto error;
7581 } 7682 }
7582 sched_group_nodes[i] = sg; 7683 sched_group_nodes[i] = sg;
7583 for_each_cpu_mask_nr(j, *nodemask) { 7684 for_each_cpu(j, nodemask) {
7584 struct sched_domain *sd; 7685 struct sched_domain *sd;
7585 7686
7586 sd = &per_cpu(node_domains, j); 7687 sd = &per_cpu(node_domains, j);
7587 sd->groups = sg; 7688 sd->groups = sg;
7588 } 7689 }
7589 sg->__cpu_power = 0; 7690 sg->__cpu_power = 0;
7590 sg->cpumask = *nodemask; 7691 cpumask_copy(sched_group_cpus(sg), nodemask);
7591 sg->next = sg; 7692 sg->next = sg;
7592 cpus_or(*covered, *covered, *nodemask); 7693 cpumask_or(covered, covered, nodemask);
7593 prev = sg; 7694 prev = sg;
7594 7695
7595 for (j = 0; j < nr_node_ids; j++) { 7696 for (j = 0; j < nr_node_ids; j++) {
7596 SCHED_CPUMASK_VAR(notcovered, allmasks);
7597 int n = (i + j) % nr_node_ids; 7697 int n = (i + j) % nr_node_ids;
7598 node_to_cpumask_ptr(pnodemask, n);
7599 7698
7600 cpus_complement(*notcovered, *covered); 7699 cpumask_complement(notcovered, covered);
7601 cpus_and(*tmpmask, *notcovered, *cpu_map); 7700 cpumask_and(tmpmask, notcovered, cpu_map);
7602 cpus_and(*tmpmask, *tmpmask, *domainspan); 7701 cpumask_and(tmpmask, tmpmask, domainspan);
7603 if (cpus_empty(*tmpmask)) 7702 if (cpumask_empty(tmpmask))
7604 break; 7703 break;
7605 7704
7606 cpus_and(*tmpmask, *tmpmask, *pnodemask); 7705 cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
7607 if (cpus_empty(*tmpmask)) 7706 if (cpumask_empty(tmpmask))
7608 continue; 7707 continue;
7609 7708
7610 sg = kmalloc_node(sizeof(struct sched_group), 7709 sg = kmalloc_node(sizeof(struct sched_group) +
7710 cpumask_size(),
7611 GFP_KERNEL, i); 7711 GFP_KERNEL, i);
7612 if (!sg) { 7712 if (!sg) {
7613 printk(KERN_WARNING 7713 printk(KERN_WARNING
@@ -7615,9 +7715,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7615 goto error; 7715 goto error;
7616 } 7716 }
7617 sg->__cpu_power = 0; 7717 sg->__cpu_power = 0;
7618 sg->cpumask = *tmpmask; 7718 cpumask_copy(sched_group_cpus(sg), tmpmask);
7619 sg->next = prev->next; 7719 sg->next = prev->next;
7620 cpus_or(*covered, *covered, *tmpmask); 7720 cpumask_or(covered, covered, tmpmask);
7621 prev->next = sg; 7721 prev->next = sg;
7622 prev = sg; 7722 prev = sg;
7623 } 7723 }
@@ -7626,22 +7726,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7626 7726
7627 /* Calculate CPU power for physical packages and nodes */ 7727 /* Calculate CPU power for physical packages and nodes */
7628#ifdef CONFIG_SCHED_SMT 7728#ifdef CONFIG_SCHED_SMT
7629 for_each_cpu_mask_nr(i, *cpu_map) { 7729 for_each_cpu(i, cpu_map) {
7630 struct sched_domain *sd = &per_cpu(cpu_domains, i); 7730 struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
7631 7731
7632 init_sched_groups_power(i, sd); 7732 init_sched_groups_power(i, sd);
7633 } 7733 }
7634#endif 7734#endif
7635#ifdef CONFIG_SCHED_MC 7735#ifdef CONFIG_SCHED_MC
7636 for_each_cpu_mask_nr(i, *cpu_map) { 7736 for_each_cpu(i, cpu_map) {
7637 struct sched_domain *sd = &per_cpu(core_domains, i); 7737 struct sched_domain *sd = &per_cpu(core_domains, i).sd;
7638 7738
7639 init_sched_groups_power(i, sd); 7739 init_sched_groups_power(i, sd);
7640 } 7740 }
7641#endif 7741#endif
7642 7742
7643 for_each_cpu_mask_nr(i, *cpu_map) { 7743 for_each_cpu(i, cpu_map) {
7644 struct sched_domain *sd = &per_cpu(phys_domains, i); 7744 struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
7645 7745
7646 init_sched_groups_power(i, sd); 7746 init_sched_groups_power(i, sd);
7647 } 7747 }
@@ -7653,53 +7753,78 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7653 if (sd_allnodes) { 7753 if (sd_allnodes) {
7654 struct sched_group *sg; 7754 struct sched_group *sg;
7655 7755
7656 cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, 7756 cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
7657 tmpmask); 7757 tmpmask);
7658 init_numa_sched_groups_power(sg); 7758 init_numa_sched_groups_power(sg);
7659 } 7759 }
7660#endif 7760#endif
7661 7761
7662 /* Attach the domains */ 7762 /* Attach the domains */
7663 for_each_cpu_mask_nr(i, *cpu_map) { 7763 for_each_cpu(i, cpu_map) {
7664 struct sched_domain *sd; 7764 struct sched_domain *sd;
7665#ifdef CONFIG_SCHED_SMT 7765#ifdef CONFIG_SCHED_SMT
7666 sd = &per_cpu(cpu_domains, i); 7766 sd = &per_cpu(cpu_domains, i).sd;
7667#elif defined(CONFIG_SCHED_MC) 7767#elif defined(CONFIG_SCHED_MC)
7668 sd = &per_cpu(core_domains, i); 7768 sd = &per_cpu(core_domains, i).sd;
7669#else 7769#else
7670 sd = &per_cpu(phys_domains, i); 7770 sd = &per_cpu(phys_domains, i).sd;
7671#endif 7771#endif
7672 cpu_attach_domain(sd, rd, i); 7772 cpu_attach_domain(sd, rd, i);
7673 } 7773 }
7674 7774
7675 sched_cpumask_free(allmasks); 7775 err = 0;
7676 return 0; 7776
7777free_tmpmask:
7778 free_cpumask_var(tmpmask);
7779free_send_covered:
7780 free_cpumask_var(send_covered);
7781free_this_core_map:
7782 free_cpumask_var(this_core_map);
7783free_this_sibling_map:
7784 free_cpumask_var(this_sibling_map);
7785free_nodemask:
7786 free_cpumask_var(nodemask);
7787free_notcovered:
7788#ifdef CONFIG_NUMA
7789 free_cpumask_var(notcovered);
7790free_covered:
7791 free_cpumask_var(covered);
7792free_domainspan:
7793 free_cpumask_var(domainspan);
7794out:
7795#endif
7796 return err;
7797
7798free_sched_groups:
7799#ifdef CONFIG_NUMA
7800 kfree(sched_group_nodes);
7801#endif
7802 goto free_tmpmask;
7677 7803
7678#ifdef CONFIG_NUMA 7804#ifdef CONFIG_NUMA
7679error: 7805error:
7680 free_sched_groups(cpu_map, tmpmask); 7806 free_sched_groups(cpu_map, tmpmask);
7681 sched_cpumask_free(allmasks); 7807 free_rootdomain(rd);
7682 kfree(rd); 7808 goto free_tmpmask;
7683 return -ENOMEM;
7684#endif 7809#endif
7685} 7810}
7686 7811
7687static int build_sched_domains(const cpumask_t *cpu_map) 7812static int build_sched_domains(const struct cpumask *cpu_map)
7688{ 7813{
7689 return __build_sched_domains(cpu_map, NULL); 7814 return __build_sched_domains(cpu_map, NULL);
7690} 7815}
7691 7816
7692static cpumask_t *doms_cur; /* current sched domains */ 7817static struct cpumask *doms_cur; /* current sched domains */
7693static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 7818static int ndoms_cur; /* number of sched domains in 'doms_cur' */
7694static struct sched_domain_attr *dattr_cur; 7819static struct sched_domain_attr *dattr_cur;
7695 /* attributes of custom domains in 'doms_cur' */ 7820 /* attributes of custom domains in 'doms_cur' */
7696 7821
7697/* 7822/*
7698 * Special case: If a kmalloc of a doms_cur partition (array of 7823 * Special case: If a kmalloc of a doms_cur partition (array of
7699 * cpumask_t) fails, then fallback to a single sched domain, 7824 * cpumask) fails, then fallback to a single sched domain,
7700 * as determined by the single cpumask_t fallback_doms. 7825 * as determined by the single cpumask fallback_doms.
7701 */ 7826 */
7702static cpumask_t fallback_doms; 7827static cpumask_var_t fallback_doms;
7703 7828
7704/* 7829/*
7705 * arch_update_cpu_topology lets virtualized architectures update the 7830 * arch_update_cpu_topology lets virtualized architectures update the
@@ -7716,16 +7841,16 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
7716 * For now this just excludes isolated cpus, but could be used to 7841 * For now this just excludes isolated cpus, but could be used to
7717 * exclude other special cases in the future. 7842 * exclude other special cases in the future.
7718 */ 7843 */
7719static int arch_init_sched_domains(const cpumask_t *cpu_map) 7844static int arch_init_sched_domains(const struct cpumask *cpu_map)
7720{ 7845{
7721 int err; 7846 int err;
7722 7847
7723 arch_update_cpu_topology(); 7848 arch_update_cpu_topology();
7724 ndoms_cur = 1; 7849 ndoms_cur = 1;
7725 doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); 7850 doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
7726 if (!doms_cur) 7851 if (!doms_cur)
7727 doms_cur = &fallback_doms; 7852 doms_cur = fallback_doms;
7728 cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); 7853 cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
7729 dattr_cur = NULL; 7854 dattr_cur = NULL;
7730 err = build_sched_domains(doms_cur); 7855 err = build_sched_domains(doms_cur);
7731 register_sched_domain_sysctl(); 7856 register_sched_domain_sysctl();
@@ -7733,8 +7858,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
7733 return err; 7858 return err;
7734} 7859}
7735 7860
7736static void arch_destroy_sched_domains(const cpumask_t *cpu_map, 7861static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
7737 cpumask_t *tmpmask) 7862 struct cpumask *tmpmask)
7738{ 7863{
7739 free_sched_groups(cpu_map, tmpmask); 7864 free_sched_groups(cpu_map, tmpmask);
7740} 7865}
@@ -7743,15 +7868,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
7743 * Detach sched domains from a group of cpus specified in cpu_map 7868 * Detach sched domains from a group of cpus specified in cpu_map
7744 * These cpus will now be attached to the NULL domain 7869 * These cpus will now be attached to the NULL domain
7745 */ 7870 */
7746static void detach_destroy_domains(const cpumask_t *cpu_map) 7871static void detach_destroy_domains(const struct cpumask *cpu_map)
7747{ 7872{
7748 cpumask_t tmpmask; 7873 /* Safe because hotplug lock held. */
7874 static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
7749 int i; 7875 int i;
7750 7876
7751 for_each_cpu_mask_nr(i, *cpu_map) 7877 for_each_cpu(i, cpu_map)
7752 cpu_attach_domain(NULL, &def_root_domain, i); 7878 cpu_attach_domain(NULL, &def_root_domain, i);
7753 synchronize_sched(); 7879 synchronize_sched();
7754 arch_destroy_sched_domains(cpu_map, &tmpmask); 7880 arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
7755} 7881}
7756 7882
7757/* handle null as "default" */ 7883/* handle null as "default" */
@@ -7776,7 +7902,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7776 * doms_new[] to the current sched domain partitioning, doms_cur[]. 7902 * doms_new[] to the current sched domain partitioning, doms_cur[].
7777 * It destroys each deleted domain and builds each new domain. 7903 * It destroys each deleted domain and builds each new domain.
7778 * 7904 *
7779 * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. 7905 * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
7780 * The masks don't intersect (don't overlap.) We should setup one 7906 * The masks don't intersect (don't overlap.) We should setup one
7781 * sched domain for each mask. CPUs not in any of the cpumasks will 7907 * sched domain for each mask. CPUs not in any of the cpumasks will
7782 * not be load balanced. If the same cpumask appears both in the 7908 * not be load balanced. If the same cpumask appears both in the
@@ -7790,13 +7916,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7790 * the single partition 'fallback_doms', it also forces the domains 7916 * the single partition 'fallback_doms', it also forces the domains
7791 * to be rebuilt. 7917 * to be rebuilt.
7792 * 7918 *
7793 * If doms_new == NULL it will be replaced with cpu_online_map. 7919 * If doms_new == NULL it will be replaced with cpu_online_mask.
7794 * ndoms_new == 0 is a special case for destroying existing domains, 7920 * ndoms_new == 0 is a special case for destroying existing domains,
7795 * and it will not create the default domain. 7921 * and it will not create the default domain.
7796 * 7922 *
7797 * Call with hotplug lock held 7923 * Call with hotplug lock held
7798 */ 7924 */
7799void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, 7925/* FIXME: Change to struct cpumask *doms_new[] */
7926void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
7800 struct sched_domain_attr *dattr_new) 7927 struct sched_domain_attr *dattr_new)
7801{ 7928{
7802 int i, j, n; 7929 int i, j, n;
@@ -7815,7 +7942,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
7815 /* Destroy deleted domains */ 7942 /* Destroy deleted domains */
7816 for (i = 0; i < ndoms_cur; i++) { 7943 for (i = 0; i < ndoms_cur; i++) {
7817 for (j = 0; j < n && !new_topology; j++) { 7944 for (j = 0; j < n && !new_topology; j++) {
7818 if (cpus_equal(doms_cur[i], doms_new[j]) 7945 if (cpumask_equal(&doms_cur[i], &doms_new[j])
7819 && dattrs_equal(dattr_cur, i, dattr_new, j)) 7946 && dattrs_equal(dattr_cur, i, dattr_new, j))
7820 goto match1; 7947 goto match1;
7821 } 7948 }
@@ -7827,15 +7954,15 @@ match1:
7827 7954
7828 if (doms_new == NULL) { 7955 if (doms_new == NULL) {
7829 ndoms_cur = 0; 7956 ndoms_cur = 0;
7830 doms_new = &fallback_doms; 7957 doms_new = fallback_doms;
7831 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); 7958 cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
7832 WARN_ON_ONCE(dattr_new); 7959 WARN_ON_ONCE(dattr_new);
7833 } 7960 }
7834 7961
7835 /* Build new domains */ 7962 /* Build new domains */
7836 for (i = 0; i < ndoms_new; i++) { 7963 for (i = 0; i < ndoms_new; i++) {
7837 for (j = 0; j < ndoms_cur && !new_topology; j++) { 7964 for (j = 0; j < ndoms_cur && !new_topology; j++) {
7838 if (cpus_equal(doms_new[i], doms_cur[j]) 7965 if (cpumask_equal(&doms_new[i], &doms_cur[j])
7839 && dattrs_equal(dattr_new, i, dattr_cur, j)) 7966 && dattrs_equal(dattr_new, i, dattr_cur, j))
7840 goto match2; 7967 goto match2;
7841 } 7968 }
@@ -7847,7 +7974,7 @@ match2:
7847 } 7974 }
7848 7975
7849 /* Remember the new sched domains */ 7976 /* Remember the new sched domains */
7850 if (doms_cur != &fallback_doms) 7977 if (doms_cur != fallback_doms)
7851 kfree(doms_cur); 7978 kfree(doms_cur);
7852 kfree(dattr_cur); /* kfree(NULL) is safe */ 7979 kfree(dattr_cur); /* kfree(NULL) is safe */
7853 doms_cur = doms_new; 7980 doms_cur = doms_new;
@@ -7860,7 +7987,7 @@ match2:
7860} 7987}
7861 7988
7862#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 7989#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
7863int arch_reinit_sched_domains(void) 7990static void arch_reinit_sched_domains(void)
7864{ 7991{
7865 get_online_cpus(); 7992 get_online_cpus();
7866 7993
@@ -7869,25 +7996,33 @@ int arch_reinit_sched_domains(void)
7869 7996
7870 rebuild_sched_domains(); 7997 rebuild_sched_domains();
7871 put_online_cpus(); 7998 put_online_cpus();
7872
7873 return 0;
7874} 7999}
7875 8000
7876static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) 8001static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7877{ 8002{
7878 int ret; 8003 unsigned int level = 0;
8004
8005 if (sscanf(buf, "%u", &level) != 1)
8006 return -EINVAL;
8007
8008 /*
 8009 * level is always positive, so don't check for
 8010 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
 8011 * What happens on a 0 or 1 byte write -
 8012 * do we need to check count as well?
8013 */
7879 8014
7880 if (buf[0] != '0' && buf[0] != '1') 8015 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
7881 return -EINVAL; 8016 return -EINVAL;
7882 8017
7883 if (smt) 8018 if (smt)
7884 sched_smt_power_savings = (buf[0] == '1'); 8019 sched_smt_power_savings = level;
7885 else 8020 else
7886 sched_mc_power_savings = (buf[0] == '1'); 8021 sched_mc_power_savings = level;
7887 8022
7888 ret = arch_reinit_sched_domains(); 8023 arch_reinit_sched_domains();
7889 8024
7890 return ret ? ret : count; 8025 return count;
7891} 8026}
7892 8027
7893#ifdef CONFIG_SCHED_MC 8028#ifdef CONFIG_SCHED_MC
@@ -7922,7 +8057,7 @@ static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7922 sched_smt_power_savings_store); 8057 sched_smt_power_savings_store);
7923#endif 8058#endif
7924 8059
7925int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) 8060int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
7926{ 8061{
7927 int err = 0; 8062 int err = 0;
7928 8063
@@ -7987,7 +8122,9 @@ static int update_runtime(struct notifier_block *nfb,
7987 8122
7988void __init sched_init_smp(void) 8123void __init sched_init_smp(void)
7989{ 8124{
7990 cpumask_t non_isolated_cpus; 8125 cpumask_var_t non_isolated_cpus;
8126
8127 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
7991 8128
7992#if defined(CONFIG_NUMA) 8129#if defined(CONFIG_NUMA)
7993 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), 8130 sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -7996,10 +8133,10 @@ void __init sched_init_smp(void)
7996#endif 8133#endif
7997 get_online_cpus(); 8134 get_online_cpus();
7998 mutex_lock(&sched_domains_mutex); 8135 mutex_lock(&sched_domains_mutex);
7999 arch_init_sched_domains(&cpu_online_map); 8136 arch_init_sched_domains(cpu_online_mask);
8000 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); 8137 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
8001 if (cpus_empty(non_isolated_cpus)) 8138 if (cpumask_empty(non_isolated_cpus))
8002 cpu_set(smp_processor_id(), non_isolated_cpus); 8139 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
8003 mutex_unlock(&sched_domains_mutex); 8140 mutex_unlock(&sched_domains_mutex);
8004 put_online_cpus(); 8141 put_online_cpus();
8005 8142
@@ -8014,9 +8151,13 @@ void __init sched_init_smp(void)
8014 init_hrtick(); 8151 init_hrtick();
8015 8152
8016 /* Move init over to a non-isolated CPU */ 8153 /* Move init over to a non-isolated CPU */
8017 if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) 8154 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
8018 BUG(); 8155 BUG();
8019 sched_init_granularity(); 8156 sched_init_granularity();
8157 free_cpumask_var(non_isolated_cpus);
8158
8159 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
8160 init_sched_rt_class();
8020} 8161}
8021#else 8162#else
8022void __init sched_init_smp(void) 8163void __init sched_init_smp(void)
@@ -8331,6 +8472,15 @@ void __init sched_init(void)
8331 */ 8472 */
8332 current->sched_class = &fair_sched_class; 8473 current->sched_class = &fair_sched_class;
8333 8474
8475 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
8476 alloc_bootmem_cpumask_var(&nohz_cpu_mask);
8477#ifdef CONFIG_SMP
8478#ifdef CONFIG_NO_HZ
8479 alloc_bootmem_cpumask_var(&nohz.cpu_mask);
8480#endif
8481 alloc_bootmem_cpumask_var(&cpu_isolated_map);
8482#endif /* SMP */
8483
8334 scheduler_running = 1; 8484 scheduler_running = 1;
8335} 8485}
8336 8486
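The sched.c hunks above replace on-stack cpumask_t variables with cpumask_var_t, which becomes a heap pointer when CONFIG_CPUMASK_OFFSTACK=y. A minimal sketch of the allocate/use/free pattern they introduce (kernel context assumed; the helper name and the mask it computes are hypothetical):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Mirrors the sched_init_smp() pattern above: allocate the variable mask,
 * bail out if that fails, and free it once the result has been consumed. */
static int example_first_offline_cpu(void)
{
	cpumask_var_t offline;
	int cpu;

	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;		/* never touch an unallocated cpumask_var_t */

	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	cpu = cpumask_empty(offline) ? -1 : cpumask_first(offline);

	free_cpumask_var(offline);
	return cpu;
}

With CONFIG_CPUMASK_OFFSTACK=n the alloc/free calls compile away and the mask lives on the stack, so the pattern costs nothing on small nr_cpu_ids configurations.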
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index e8ab096ddfe3..a0b0852414cc 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -124,7 +124,7 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
124 124
125 clock = scd->tick_gtod + delta; 125 clock = scd->tick_gtod + delta;
126 min_clock = wrap_max(scd->tick_gtod, scd->clock); 126 min_clock = wrap_max(scd->tick_gtod, scd->clock);
127 max_clock = scd->tick_gtod + TICK_NSEC; 127 max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
128 128
129 clock = wrap_max(clock, min_clock); 129 clock = wrap_max(clock, min_clock);
130 clock = wrap_min(clock, max_clock); 130 clock = wrap_min(clock, max_clock);
@@ -227,6 +227,9 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
227 */ 227 */
228void sched_clock_idle_wakeup_event(u64 delta_ns) 228void sched_clock_idle_wakeup_event(u64 delta_ns)
229{ 229{
230 if (timekeeping_suspended)
231 return;
232
230 sched_clock_tick(); 233 sched_clock_tick();
231 touch_softlockup_watchdog(); 234 touch_softlockup_watchdog();
232} 235}
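The sched_clock.c hunk widens the clamp window so a per-CPU clock that already ran past tick_gtod + TICK_NSEC is never pushed backwards, and skips the idle-wakeup event while timekeeping is suspended. A standalone sketch of that clamp, assuming 64-bit nanosecond values; sketch_wrap_max()/sketch_wrap_min() mimic the static wrap_max()/wrap_min() helpers in sched_clock.c:

#include <linux/types.h>

/* Signed comparison of the difference keeps the helpers correct across wrap. */
static inline u64 sketch_wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}

static inline u64 sketch_wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

/* The clock may advance at most tick_nsec beyond tick_gtod, but must never
 * fall behind the value handed out previously (old_clock). */
static u64 sketch_clamp_clock(u64 clock, u64 tick_gtod, u64 old_clock, u64 tick_nsec)
{
	u64 min_clock = sketch_wrap_max(tick_gtod, old_clock);
	u64 max_clock = sketch_wrap_max(old_clock, tick_gtod + tick_nsec);

	clock = sketch_wrap_max(clock, min_clock);
	return sketch_wrap_min(clock, max_clock);
}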
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 52154fefab7e..018b7be1db2e 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -67,24 +67,21 @@ static int convert_prio(int prio)
67 * Returns: (int)bool - CPUs were found 67 * Returns: (int)bool - CPUs were found
68 */ 68 */
69int cpupri_find(struct cpupri *cp, struct task_struct *p, 69int cpupri_find(struct cpupri *cp, struct task_struct *p,
70 cpumask_t *lowest_mask) 70 struct cpumask *lowest_mask)
71{ 71{
72 int idx = 0; 72 int idx = 0;
73 int task_pri = convert_prio(p->prio); 73 int task_pri = convert_prio(p->prio);
74 74
75 for_each_cpupri_active(cp->pri_active, idx) { 75 for_each_cpupri_active(cp->pri_active, idx) {
76 struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; 76 struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
77 cpumask_t mask;
78 77
79 if (idx >= task_pri) 78 if (idx >= task_pri)
80 break; 79 break;
81 80
82 cpus_and(mask, p->cpus_allowed, vec->mask); 81 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
83
84 if (cpus_empty(mask))
85 continue; 82 continue;
86 83
87 *lowest_mask = mask; 84 cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
88 return 1; 85 return 1;
89 } 86 }
90 87
@@ -126,7 +123,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
126 vec->count--; 123 vec->count--;
127 if (!vec->count) 124 if (!vec->count)
128 clear_bit(oldpri, cp->pri_active); 125 clear_bit(oldpri, cp->pri_active);
129 cpu_clear(cpu, vec->mask); 126 cpumask_clear_cpu(cpu, vec->mask);
130 127
131 spin_unlock_irqrestore(&vec->lock, flags); 128 spin_unlock_irqrestore(&vec->lock, flags);
132 } 129 }
@@ -136,7 +133,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
136 133
137 spin_lock_irqsave(&vec->lock, flags); 134 spin_lock_irqsave(&vec->lock, flags);
138 135
139 cpu_set(cpu, vec->mask); 136 cpumask_set_cpu(cpu, vec->mask);
140 vec->count++; 137 vec->count++;
141 if (vec->count == 1) 138 if (vec->count == 1)
142 set_bit(newpri, cp->pri_active); 139 set_bit(newpri, cp->pri_active);
@@ -150,10 +147,11 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
150/** 147/**
151 * cpupri_init - initialize the cpupri structure 148 * cpupri_init - initialize the cpupri structure
152 * @cp: The cpupri context 149 * @cp: The cpupri context
150 * @bootmem: true if allocations need to use bootmem
153 * 151 *
 154 * Returns: (void) 152 * Returns: -ENOMEM if memory allocation fails, 0 on success.
155 */ 153 */
156void cpupri_init(struct cpupri *cp) 154int cpupri_init(struct cpupri *cp, bool bootmem)
157{ 155{
158 int i; 156 int i;
159 157
@@ -164,11 +162,30 @@ void cpupri_init(struct cpupri *cp)
164 162
165 spin_lock_init(&vec->lock); 163 spin_lock_init(&vec->lock);
166 vec->count = 0; 164 vec->count = 0;
167 cpus_clear(vec->mask); 165 if (bootmem)
166 alloc_bootmem_cpumask_var(&vec->mask);
167 else if (!alloc_cpumask_var(&vec->mask, GFP_KERNEL))
168 goto cleanup;
168 } 169 }
169 170
170 for_each_possible_cpu(i) 171 for_each_possible_cpu(i)
171 cp->cpu_to_pri[i] = CPUPRI_INVALID; 172 cp->cpu_to_pri[i] = CPUPRI_INVALID;
173 return 0;
174
175cleanup:
176 for (i--; i >= 0; i--)
177 free_cpumask_var(cp->pri_to_cpu[i].mask);
178 return -ENOMEM;
172} 179}
173 180
181/**
182 * cpupri_cleanup - clean up the cpupri structure
183 * @cp: The cpupri context
184 */
185void cpupri_cleanup(struct cpupri *cp)
186{
187 int i;
174 188
189 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
190 free_cpumask_var(cp->pri_to_cpu[i].mask);
191}
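cpupri_init() can now fail, so its callers have to check the return value and pair the call with cpupri_cleanup(). A hypothetical caller sketch showing the new contract (the wrapper name is made up; kernel context assumed):

#include <linux/errno.h>
#include <linux/types.h>
#include "sched_cpupri.h"

/* Initialise a cpupri context, use it, and release the per-priority masks. */
static int example_setup_cpupri(struct cpupri *cp, bool bootmem)
{
	int err;

	err = cpupri_init(cp, bootmem);	/* -ENOMEM if a mask allocation fails */
	if (err)
		return err;

	/* ... cpupri_set() / cpupri_find() users go here ... */

	cpupri_cleanup(cp);		/* frees every pri_to_cpu[].mask */
	return 0;
}

Note the error path inside cpupri_init() itself: it walks back over the vectors already allocated and frees them before returning -ENOMEM, so a failed init leaves nothing behind.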
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index f25811b0f931..642a94ef8a0a 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -14,7 +14,7 @@
14struct cpupri_vec { 14struct cpupri_vec {
15 spinlock_t lock; 15 spinlock_t lock;
16 int count; 16 int count;
17 cpumask_t mask; 17 cpumask_var_t mask;
18}; 18};
19 19
20struct cpupri { 20struct cpupri {
@@ -27,7 +27,8 @@ struct cpupri {
27int cpupri_find(struct cpupri *cp, 27int cpupri_find(struct cpupri *cp,
28 struct task_struct *p, cpumask_t *lowest_mask); 28 struct task_struct *p, cpumask_t *lowest_mask);
29void cpupri_set(struct cpupri *cp, int cpu, int pri); 29void cpupri_set(struct cpupri *cp, int cpu, int pri);
30void cpupri_init(struct cpupri *cp); 30int cpupri_init(struct cpupri *cp, bool bootmem);
31void cpupri_cleanup(struct cpupri *cp);
31#else 32#else
32#define cpupri_set(cp, cpu, pri) do { } while (0) 33#define cpupri_set(cp, cpu, pri) do { } while (0)
33#define cpupri_init() do { } while (0) 34#define cpupri_init() do { } while (0)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5ad4440f0fc4..e0c0b4bc3f08 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -386,20 +386,6 @@ int sched_nr_latency_handler(struct ctl_table *table, int write,
386#endif 386#endif
387 387
388/* 388/*
389 * delta *= P[w / rw]
390 */
391static inline unsigned long
392calc_delta_weight(unsigned long delta, struct sched_entity *se)
393{
394 for_each_sched_entity(se) {
395 delta = calc_delta_mine(delta,
396 se->load.weight, &cfs_rq_of(se)->load);
397 }
398
399 return delta;
400}
401
402/*
403 * delta /= w 389 * delta /= w
404 */ 390 */
405static inline unsigned long 391static inline unsigned long
@@ -440,12 +426,20 @@ static u64 __sched_period(unsigned long nr_running)
440 */ 426 */
441static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) 427static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
442{ 428{
443 unsigned long nr_running = cfs_rq->nr_running; 429 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
444 430
445 if (unlikely(!se->on_rq)) 431 for_each_sched_entity(se) {
446 nr_running++; 432 struct load_weight *load = &cfs_rq->load;
433
434 if (unlikely(!se->on_rq)) {
435 struct load_weight lw = cfs_rq->load;
447 436
448 return calc_delta_weight(__sched_period(nr_running), se); 437 update_load_add(&lw, se->load.weight);
438 load = &lw;
439 }
440 slice = calc_delta_mine(slice, se->load.weight, load);
441 }
442 return slice;
449} 443}
450 444
451/* 445/*
@@ -1019,16 +1013,33 @@ static void yield_task_fair(struct rq *rq)
1019 * search starts with cpus closest then further out as needed, 1013 * search starts with cpus closest then further out as needed,
1020 * so we always favor a closer, idle cpu. 1014 * so we always favor a closer, idle cpu.
1021 * Domains may include CPUs that are not usable for migration, 1015 * Domains may include CPUs that are not usable for migration,
1022 * hence we need to mask them out (cpu_active_map) 1016 * hence we need to mask them out (cpu_active_mask)
1023 * 1017 *
1024 * Returns the CPU we should wake onto. 1018 * Returns the CPU we should wake onto.
1025 */ 1019 */
1026#if defined(ARCH_HAS_SCHED_WAKE_IDLE) 1020#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
1027static int wake_idle(int cpu, struct task_struct *p) 1021static int wake_idle(int cpu, struct task_struct *p)
1028{ 1022{
1029 cpumask_t tmp;
1030 struct sched_domain *sd; 1023 struct sched_domain *sd;
1031 int i; 1024 int i;
1025 unsigned int chosen_wakeup_cpu;
1026 int this_cpu;
1027
1028 /*
1029 * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
1030 * are idle and this is not a kernel thread and this task's affinity
1031 * allows it to be moved to preferred cpu, then just move!
1032 */
1033
1034 this_cpu = smp_processor_id();
1035 chosen_wakeup_cpu =
1036 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
1037
1038 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
1039 idle_cpu(cpu) && idle_cpu(this_cpu) &&
1040 p->mm && !(p->flags & PF_KTHREAD) &&
1041 cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
1042 return chosen_wakeup_cpu;
1032 1043
1033 /* 1044 /*
1034 * If it is idle, then it is the best cpu to run this task. 1045 * If it is idle, then it is the best cpu to run this task.
@@ -1046,10 +1057,9 @@ static int wake_idle(int cpu, struct task_struct *p)
1046 if ((sd->flags & SD_WAKE_IDLE) 1057 if ((sd->flags & SD_WAKE_IDLE)
1047 || ((sd->flags & SD_WAKE_IDLE_FAR) 1058 || ((sd->flags & SD_WAKE_IDLE_FAR)
1048 && !task_hot(p, task_rq(p)->clock, sd))) { 1059 && !task_hot(p, task_rq(p)->clock, sd))) {
1049 cpus_and(tmp, sd->span, p->cpus_allowed); 1060 for_each_cpu_and(i, sched_domain_span(sd),
1050 cpus_and(tmp, tmp, cpu_active_map); 1061 &p->cpus_allowed) {
1051 for_each_cpu_mask_nr(i, tmp) { 1062 if (cpu_active(i) && idle_cpu(i)) {
1052 if (idle_cpu(i)) {
1053 if (i != task_cpu(p)) { 1063 if (i != task_cpu(p)) {
1054 schedstat_inc(p, 1064 schedstat_inc(p,
1055 se.nr_wakeups_idle); 1065 se.nr_wakeups_idle);
@@ -1242,13 +1252,13 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
1242 * this_cpu and prev_cpu are present in: 1252 * this_cpu and prev_cpu are present in:
1243 */ 1253 */
1244 for_each_domain(this_cpu, sd) { 1254 for_each_domain(this_cpu, sd) {
1245 if (cpu_isset(prev_cpu, sd->span)) { 1255 if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
1246 this_sd = sd; 1256 this_sd = sd;
1247 break; 1257 break;
1248 } 1258 }
1249 } 1259 }
1250 1260
1251 if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) 1261 if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
1252 goto out; 1262 goto out;
1253 1263
1254 /* 1264 /*
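The wake_idle() hunk drops the on-stack temporary mask: instead of cpus_and()-ing the domain span, the task's affinity and cpu_active_map into a cpumask_t, it walks the intersection with for_each_cpu_and() and tests cpu_active() per CPU. A reduced sketch of that idiom (hypothetical helper, kernel context assumed):

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Return the first CPU that is in both masks, active and idle, or -1. */
static int example_first_active_idle(const struct cpumask *a,
				     const struct cpumask *b)
{
	int i;

	for_each_cpu_and(i, a, b) {
		if (cpu_active(i) && idle_cpu(i))
			return i;
	}
	return -1;
}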
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 51d2af3e6191..954e1a81b796 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
15 if (!rq->online) 15 if (!rq->online)
16 return; 16 return;
17 17
18 cpu_set(rq->cpu, rq->rd->rto_mask); 18 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
19 /* 19 /*
20 * Make sure the mask is visible before we set 20 * Make sure the mask is visible before we set
21 * the overload count. That is checked to determine 21 * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)
34 34
35 /* the order here really doesn't matter */ 35 /* the order here really doesn't matter */
36 atomic_dec(&rq->rd->rto_count); 36 atomic_dec(&rq->rd->rto_count);
37 cpu_clear(rq->cpu, rq->rd->rto_mask); 37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38} 38}
39 39
40static void update_rt_migration(struct rq *rq) 40static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
139} 139}
140 140
141#ifdef CONFIG_SMP 141#ifdef CONFIG_SMP
142static inline cpumask_t sched_rt_period_mask(void) 142static inline const struct cpumask *sched_rt_period_mask(void)
143{ 143{
144 return cpu_rq(smp_processor_id())->rd->span; 144 return cpu_rq(smp_processor_id())->rd->span;
145} 145}
146#else 146#else
147static inline cpumask_t sched_rt_period_mask(void) 147static inline const struct cpumask *sched_rt_period_mask(void)
148{ 148{
149 return cpu_online_map; 149 return cpu_online_mask;
150} 150}
151#endif 151#endif
152 152
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
212 return rt_rq->rt_throttled; 212 return rt_rq->rt_throttled;
213} 213}
214 214
215static inline cpumask_t sched_rt_period_mask(void) 215static inline const struct cpumask *sched_rt_period_mask(void)
216{ 216{
217 return cpu_online_map; 217 return cpu_online_mask;
218} 218}
219 219
220static inline 220static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
241 int i, weight, more = 0; 241 int i, weight, more = 0;
242 u64 rt_period; 242 u64 rt_period;
243 243
244 weight = cpus_weight(rd->span); 244 weight = cpumask_weight(rd->span);
245 245
246 spin_lock(&rt_b->rt_runtime_lock); 246 spin_lock(&rt_b->rt_runtime_lock);
247 rt_period = ktime_to_ns(rt_b->rt_period); 247 rt_period = ktime_to_ns(rt_b->rt_period);
248 for_each_cpu_mask_nr(i, rd->span) { 248 for_each_cpu(i, rd->span) {
249 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 249 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
250 s64 diff; 250 s64 diff;
251 251
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
324 /* 324 /*
325 * Greedy reclaim, take back as much as we can. 325 * Greedy reclaim, take back as much as we can.
326 */ 326 */
327 for_each_cpu_mask(i, rd->span) { 327 for_each_cpu(i, rd->span) {
328 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 328 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
329 s64 diff; 329 s64 diff;
330 330
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
429static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) 429static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
430{ 430{
431 int i, idle = 1; 431 int i, idle = 1;
432 cpumask_t span; 432 const struct cpumask *span;
433 433
434 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) 434 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
435 return 1; 435 return 1;
436 436
437 span = sched_rt_period_mask(); 437 span = sched_rt_period_mask();
438 for_each_cpu_mask(i, span) { 438 for_each_cpu(i, span) {
439 int enqueue = 0; 439 int enqueue = 0;
440 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 440 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
441 struct rq *rq = rq_of_rt_rq(rt_rq); 441 struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -805,17 +805,20 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
805 805
806static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 806static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
807{ 807{
808 cpumask_t mask; 808 cpumask_var_t mask;
809 809
810 if (rq->curr->rt.nr_cpus_allowed == 1) 810 if (rq->curr->rt.nr_cpus_allowed == 1)
811 return; 811 return;
812 812
813 if (p->rt.nr_cpus_allowed != 1 813 if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
814 && cpupri_find(&rq->rd->cpupri, p, &mask))
815 return; 814 return;
816 815
817 if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask)) 816 if (p->rt.nr_cpus_allowed != 1
818 return; 817 && cpupri_find(&rq->rd->cpupri, p, mask))
818 goto free;
819
820 if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
821 goto free;
819 822
820 /* 823 /*
821 * There appears to be other cpus that can accept 824 * There appears to be other cpus that can accept
@@ -824,6 +827,8 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
824 */ 827 */
825 requeue_task_rt(rq, p, 1); 828 requeue_task_rt(rq, p, 1);
826 resched_task(rq->curr); 829 resched_task(rq->curr);
830free:
831 free_cpumask_var(mask);
827} 832}
828 833
829#endif /* CONFIG_SMP */ 834#endif /* CONFIG_SMP */
@@ -914,7 +919,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
914static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 919static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
915{ 920{
916 if (!task_running(rq, p) && 921 if (!task_running(rq, p) &&
917 (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) && 922 (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
918 (p->rt.nr_cpus_allowed > 1)) 923 (p->rt.nr_cpus_allowed > 1))
919 return 1; 924 return 1;
920 return 0; 925 return 0;
@@ -953,7 +958,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
953 return next; 958 return next;
954} 959}
955 960
956static DEFINE_PER_CPU(cpumask_t, local_cpu_mask); 961static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
957 962
958static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) 963static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
959{ 964{
@@ -973,7 +978,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
973static int find_lowest_rq(struct task_struct *task) 978static int find_lowest_rq(struct task_struct *task)
974{ 979{
975 struct sched_domain *sd; 980 struct sched_domain *sd;
976 cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask); 981 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
977 int this_cpu = smp_processor_id(); 982 int this_cpu = smp_processor_id();
978 int cpu = task_cpu(task); 983 int cpu = task_cpu(task);
979 984
@@ -988,7 +993,7 @@ static int find_lowest_rq(struct task_struct *task)
988 * I guess we might want to change cpupri_find() to ignore those 993 * I guess we might want to change cpupri_find() to ignore those
989 * in the first place. 994 * in the first place.
990 */ 995 */
991 cpus_and(*lowest_mask, *lowest_mask, cpu_active_map); 996 cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
992 997
993 /* 998 /*
994 * At this point we have built a mask of cpus representing the 999 * At this point we have built a mask of cpus representing the
@@ -998,7 +1003,7 @@ static int find_lowest_rq(struct task_struct *task)
998 * We prioritize the last cpu that the task executed on since 1003 * We prioritize the last cpu that the task executed on since
999 * it is most likely cache-hot in that location. 1004 * it is most likely cache-hot in that location.
1000 */ 1005 */
1001 if (cpu_isset(cpu, *lowest_mask)) 1006 if (cpumask_test_cpu(cpu, lowest_mask))
1002 return cpu; 1007 return cpu;
1003 1008
1004 /* 1009 /*
@@ -1013,7 +1018,8 @@ static int find_lowest_rq(struct task_struct *task)
1013 cpumask_t domain_mask; 1018 cpumask_t domain_mask;
1014 int best_cpu; 1019 int best_cpu;
1015 1020
1016 cpus_and(domain_mask, sd->span, *lowest_mask); 1021 cpumask_and(&domain_mask, sched_domain_span(sd),
1022 lowest_mask);
1017 1023
1018 best_cpu = pick_optimal_cpu(this_cpu, 1024 best_cpu = pick_optimal_cpu(this_cpu,
1019 &domain_mask); 1025 &domain_mask);
@@ -1054,8 +1060,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1054 * Also make sure that it wasn't scheduled on its rq. 1060 * Also make sure that it wasn't scheduled on its rq.
1055 */ 1061 */
1056 if (unlikely(task_rq(task) != rq || 1062 if (unlikely(task_rq(task) != rq ||
1057 !cpu_isset(lowest_rq->cpu, 1063 !cpumask_test_cpu(lowest_rq->cpu,
1058 task->cpus_allowed) || 1064 &task->cpus_allowed) ||
1059 task_running(rq, task) || 1065 task_running(rq, task) ||
1060 !task->se.on_rq)) { 1066 !task->se.on_rq)) {
1061 1067
@@ -1176,7 +1182,7 @@ static int pull_rt_task(struct rq *this_rq)
1176 1182
1177 next = pick_next_task_rt(this_rq); 1183 next = pick_next_task_rt(this_rq);
1178 1184
1179 for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { 1185 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1180 if (this_cpu == cpu) 1186 if (this_cpu == cpu)
1181 continue; 1187 continue;
1182 1188
@@ -1305,9 +1311,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1305} 1311}
1306 1312
1307static void set_cpus_allowed_rt(struct task_struct *p, 1313static void set_cpus_allowed_rt(struct task_struct *p,
1308 const cpumask_t *new_mask) 1314 const struct cpumask *new_mask)
1309{ 1315{
1310 int weight = cpus_weight(*new_mask); 1316 int weight = cpumask_weight(new_mask);
1311 1317
1312 BUG_ON(!rt_task(p)); 1318 BUG_ON(!rt_task(p));
1313 1319
@@ -1328,7 +1334,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1328 update_rt_migration(rq); 1334 update_rt_migration(rq);
1329 } 1335 }
1330 1336
1331 p->cpus_allowed = *new_mask; 1337 cpumask_copy(&p->cpus_allowed, new_mask);
1332 p->rt.nr_cpus_allowed = weight; 1338 p->rt.nr_cpus_allowed = weight;
1333} 1339}
1334 1340
@@ -1371,6 +1377,15 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
1371 if (!rq->rt.rt_nr_running) 1377 if (!rq->rt.rt_nr_running)
1372 pull_rt_task(rq); 1378 pull_rt_task(rq);
1373} 1379}
1380
1381static inline void init_sched_rt_class(void)
1382{
1383 unsigned int i;
1384
1385 for_each_possible_cpu(i)
1386 alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1387 GFP_KERNEL, cpu_to_node(i));
1388}
1374#endif /* CONFIG_SMP */ 1389#endif /* CONFIG_SMP */
1375 1390
1376/* 1391/*
@@ -1541,3 +1556,4 @@ static void print_rt_stats(struct seq_file *m, int cpu)
1541 rcu_read_unlock(); 1556 rcu_read_unlock();
1542} 1557}
1543#endif /* CONFIG_SCHED_DEBUG */ 1558#endif /* CONFIG_SCHED_DEBUG */
1559
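local_cpu_mask becomes a per-CPU cpumask_var_t, and init_sched_rt_class() allocates one mask per possible CPU, node-locally, at boot. A sketch of that per-CPU allocation pattern with a hypothetical mask name (kernel context assumed; like the original, it ignores allocation failure at boot):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/topology.h>

/* One scratch mask per CPU, allocated on that CPU's home node. */
static DEFINE_PER_CPU(cpumask_var_t, example_scratch_mask);

static void __init example_alloc_scratch_masks(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		alloc_cpumask_var_node(&per_cpu(example_scratch_mask, cpu),
				       GFP_KERNEL, cpu_to_node(cpu));
}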
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 3b01098164c8..f2773b5d1226 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -42,7 +42,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
42 for_each_domain(cpu, sd) { 42 for_each_domain(cpu, sd) {
43 enum cpu_idle_type itype; 43 enum cpu_idle_type itype;
44 44
45 cpumask_scnprintf(mask_str, mask_len, sd->span); 45 cpumask_scnprintf(mask_str, mask_len,
46 sched_domain_span(sd));
46 seq_printf(seq, "domain%d %s", dcount++, mask_str); 47 seq_printf(seq, "domain%d %s", dcount++, mask_str);
47 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; 48 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
48 itype++) { 49 itype++) {
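sched_domain_span(sd) now hands back a const struct cpumask *, so show_schedstat() passes a pointer to cpumask_scnprintf() instead of a by-value cpumask_t. The same call shape with a globally visible mask, as a tiny sketch (hypothetical helper):

#include <linux/cpumask.h>

/* Render the online mask into buf, as done for each domain span above. */
static void example_print_online_mask(char *buf, int len)
{
	cpumask_scnprintf(buf, len, cpu_online_mask);
}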
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c8dde58c55..5cfa0e5e3e88 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -24,8 +24,8 @@ struct call_function_data {
24 struct call_single_data csd; 24 struct call_single_data csd;
25 spinlock_t lock; 25 spinlock_t lock;
26 unsigned int refs; 26 unsigned int refs;
27 cpumask_t cpumask;
28 struct rcu_head rcu_head; 27 struct rcu_head rcu_head;
28 unsigned long cpumask_bits[];
29}; 29};
30 30
31struct call_single_queue { 31struct call_single_queue {
@@ -110,13 +110,13 @@ void generic_smp_call_function_interrupt(void)
110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) { 110 list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
111 int refs; 111 int refs;
112 112
113 if (!cpu_isset(cpu, data->cpumask)) 113 if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
114 continue; 114 continue;
115 115
116 data->csd.func(data->csd.info); 116 data->csd.func(data->csd.info);
117 117
118 spin_lock(&data->lock); 118 spin_lock(&data->lock);
119 cpu_clear(cpu, data->cpumask); 119 cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
120 WARN_ON(data->refs == 0); 120 WARN_ON(data->refs == 0);
121 data->refs--; 121 data->refs--;
122 refs = data->refs; 122 refs = data->refs;
@@ -223,7 +223,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
223 local_irq_save(flags); 223 local_irq_save(flags);
224 func(info); 224 func(info);
225 local_irq_restore(flags); 225 local_irq_restore(flags);
226 } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { 226 } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
227 struct call_single_data *data = NULL; 227 struct call_single_data *data = NULL;
228 228
229 if (!wait) { 229 if (!wait) {
@@ -266,51 +266,19 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
266 generic_exec_single(cpu, data); 266 generic_exec_single(cpu, data);
267} 267}
268 268
269/* Dummy function */ 269/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
270static void quiesce_dummy(void *unused) 270#ifndef arch_send_call_function_ipi_mask
271{ 271#define arch_send_call_function_ipi_mask(maskp) \
272} 272 arch_send_call_function_ipi(*(maskp))
273 273#endif
274/*
275 * Ensure stack based data used in call function mask is safe to free.
276 *
277 * This is needed by smp_call_function_mask when using on-stack data, because
278 * a single call function queue is shared by all CPUs, and any CPU may pick up
279 * the data item on the queue at any time before it is deleted. So we need to
280 * ensure that all CPUs have transitioned through a quiescent state after
281 * this call.
282 *
283 * This is a very slow function, implemented by sending synchronous IPIs to
284 * all possible CPUs. For this reason, we have to alloc data rather than use
285 * stack based data even in the case of synchronous calls. The stack based
286 * data is then just used for deadlock/oom fallback which will be very rare.
287 *
288 * If a faster scheme can be made, we could go back to preferring stack based
289 * data -- the data allocation/free is non-zero cost.
290 */
291static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
292{
293 struct call_single_data data;
294 int cpu;
295
296 data.func = quiesce_dummy;
297 data.info = NULL;
298
299 for_each_cpu_mask(cpu, mask) {
300 data.flags = CSD_FLAG_WAIT;
301 generic_exec_single(cpu, &data);
302 }
303}
304 274
305/** 275/**
306 * smp_call_function_mask(): Run a function on a set of other CPUs. 276 * smp_call_function_many(): Run a function on a set of other CPUs.
307 * @mask: The set of cpus to run on. 277 * @mask: The set of cpus to run on (only runs on online subset).
308 * @func: The function to run. This must be fast and non-blocking. 278 * @func: The function to run. This must be fast and non-blocking.
309 * @info: An arbitrary pointer to pass to the function. 279 * @info: An arbitrary pointer to pass to the function.
310 * @wait: If true, wait (atomically) until function has completed on other CPUs. 280 * @wait: If true, wait (atomically) until function has completed on other CPUs.
311 * 281 *
312 * Returns 0 on success, else a negative status code.
313 *
314 * If @wait is true, then returns once @func has returned. Note that @wait 282 * If @wait is true, then returns once @func has returned. Note that @wait
315 * will be implicitly turned on in case of allocation failures, since 283 * will be implicitly turned on in case of allocation failures, since
316 * we fall back to on-stack allocation. 284 * we fall back to on-stack allocation.
@@ -319,53 +287,57 @@ static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
319 * hardware interrupt handler or from a bottom half handler. Preemption 287 * hardware interrupt handler or from a bottom half handler. Preemption
320 * must be disabled when calling this function. 288 * must be disabled when calling this function.
321 */ 289 */
322int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, 290void smp_call_function_many(const struct cpumask *mask,
323 int wait) 291 void (*func)(void *), void *info,
292 bool wait)
324{ 293{
325 struct call_function_data d; 294 struct call_function_data *data;
326 struct call_function_data *data = NULL;
327 cpumask_t allbutself;
328 unsigned long flags; 295 unsigned long flags;
329 int cpu, num_cpus; 296 int cpu, next_cpu;
330 int slowpath = 0;
331 297
332 /* Can deadlock when called with interrupts disabled */ 298 /* Can deadlock when called with interrupts disabled */
333 WARN_ON(irqs_disabled()); 299 WARN_ON(irqs_disabled());
334 300
335 cpu = smp_processor_id(); 301 /* So, what's a CPU they want? Ignoring this one. */
336 allbutself = cpu_online_map; 302 cpu = cpumask_first_and(mask, cpu_online_mask);
337 cpu_clear(cpu, allbutself); 303 if (cpu == smp_processor_id())
338 cpus_and(mask, mask, allbutself); 304 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
339 num_cpus = cpus_weight(mask); 305 /* No online cpus? We're done. */
340 306 if (cpu >= nr_cpu_ids)
341 /* 307 return;
342 * If zero CPUs, return. If just a single CPU, turn this request 308
343 * into a targetted single call instead since it's faster. 309 /* Do we have another CPU which isn't us? */
344 */ 310 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
345 if (!num_cpus) 311 if (next_cpu == smp_processor_id())
346 return 0; 312 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
347 else if (num_cpus == 1) { 313
348 cpu = first_cpu(mask); 314 /* Fastpath: do that cpu by itself. */
349 return smp_call_function_single(cpu, func, info, wait); 315 if (next_cpu >= nr_cpu_ids) {
316 smp_call_function_single(cpu, func, info, wait);
317 return;
350 } 318 }
351 319
352 data = kmalloc(sizeof(*data), GFP_ATOMIC); 320 data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
353 if (data) { 321 if (unlikely(!data)) {
354 data->csd.flags = CSD_FLAG_ALLOC; 322 /* Slow path. */
355 if (wait) 323 for_each_online_cpu(cpu) {
356 data->csd.flags |= CSD_FLAG_WAIT; 324 if (cpu == smp_processor_id())
357 } else { 325 continue;
358 data = &d; 326 if (cpumask_test_cpu(cpu, mask))
359 data->csd.flags = CSD_FLAG_WAIT; 327 smp_call_function_single(cpu, func, info, wait);
360 wait = 1; 328 }
361 slowpath = 1; 329 return;
362 } 330 }
363 331
364 spin_lock_init(&data->lock); 332 spin_lock_init(&data->lock);
333 data->csd.flags = CSD_FLAG_ALLOC;
334 if (wait)
335 data->csd.flags |= CSD_FLAG_WAIT;
365 data->csd.func = func; 336 data->csd.func = func;
366 data->csd.info = info; 337 data->csd.info = info;
367 data->refs = num_cpus; 338 cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
368 data->cpumask = mask; 339 cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
340 data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));
369 341
370 spin_lock_irqsave(&call_function_lock, flags); 342 spin_lock_irqsave(&call_function_lock, flags);
371 list_add_tail_rcu(&data->csd.list, &call_function_queue); 343 list_add_tail_rcu(&data->csd.list, &call_function_queue);
@@ -377,18 +349,13 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
377 smp_mb(); 349 smp_mb();
378 350
379 /* Send a message to all CPUs in the map */ 351 /* Send a message to all CPUs in the map */
380 arch_send_call_function_ipi(mask); 352 arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));
381 353
382 /* optionally wait for the CPUs to complete */ 354 /* optionally wait for the CPUs to complete */
383 if (wait) { 355 if (wait)
384 csd_flag_wait(&data->csd); 356 csd_flag_wait(&data->csd);
385 if (unlikely(slowpath))
386 smp_call_function_mask_quiesce_stack(mask);
387 }
388
389 return 0;
390} 357}
391EXPORT_SYMBOL(smp_call_function_mask); 358EXPORT_SYMBOL(smp_call_function_many);
392 359
393/** 360/**
394 * smp_call_function(): Run a function on all other CPUs. 361 * smp_call_function(): Run a function on all other CPUs.
@@ -396,7 +363,7 @@ EXPORT_SYMBOL(smp_call_function_mask);
396 * @info: An arbitrary pointer to pass to the function. 363 * @info: An arbitrary pointer to pass to the function.
397 * @wait: If true, wait (atomically) until function has completed on other CPUs. 364 * @wait: If true, wait (atomically) until function has completed on other CPUs.
398 * 365 *
399 * Returns 0 on success, else a negative status code. 366 * Returns 0.
400 * 367 *
401 * If @wait is true, then returns once @func has returned; otherwise 368 * If @wait is true, then returns once @func has returned; otherwise
402 * it returns just before the target cpu calls @func. In case of allocation 369 * it returns just before the target cpu calls @func. In case of allocation
@@ -407,12 +374,10 @@ EXPORT_SYMBOL(smp_call_function_mask);
407 */ 374 */
408int smp_call_function(void (*func)(void *), void *info, int wait) 375int smp_call_function(void (*func)(void *), void *info, int wait)
409{ 376{
410 int ret;
411
412 preempt_disable(); 377 preempt_disable();
413 ret = smp_call_function_mask(cpu_online_map, func, info, wait); 378 smp_call_function_many(cpu_online_mask, func, info, wait);
414 preempt_enable(); 379 preempt_enable();
415 return ret; 380 return 0;
416} 381}
417EXPORT_SYMBOL(smp_call_function); 382EXPORT_SYMBOL(smp_call_function);
418 383
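smp_call_function_mask() gives way to smp_call_function_many(): it takes a const struct cpumask *, returns void, and on kmalloc failure falls back to looping smp_call_function_single() over the online members of the mask instead of queueing stack data. A usage sketch (callback and wrapper names are hypothetical):

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>

static void example_remote_work(void *info)
{
	/* runs on each targeted remote CPU, in interrupt context */
}

static void example_kick(const struct cpumask *mask)
{
	preempt_disable();	/* the API requires preemption disabled */
	/* wait = true: return only after every targeted CPU ran the callback */
	smp_call_function_many(mask, example_remote_work, NULL, true);
	preempt_enable();
}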
diff --git a/kernel/softirq.c b/kernel/softirq.c
index e7c69a720d69..bdbe9de9cd8d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -102,20 +102,6 @@ void local_bh_disable(void)
102 102
103EXPORT_SYMBOL(local_bh_disable); 103EXPORT_SYMBOL(local_bh_disable);
104 104
105void __local_bh_enable(void)
106{
107 WARN_ON_ONCE(in_irq());
108
109 /*
110 * softirqs should never be enabled by __local_bh_enable(),
111 * it always nests inside local_bh_enable() sections:
112 */
113 WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);
114
115 sub_preempt_count(SOFTIRQ_OFFSET);
116}
117EXPORT_SYMBOL_GPL(__local_bh_enable);
118
119/* 105/*
120 * Special-case - softirqs can safely be enabled in 106 * Special-case - softirqs can safely be enabled in
121 * cond_resched_softirq(), or by __do_softirq(), 107 * cond_resched_softirq(), or by __do_softirq(),
@@ -269,6 +255,7 @@ void irq_enter(void)
269{ 255{
270 int cpu = smp_processor_id(); 256 int cpu = smp_processor_id();
271 257
258 rcu_irq_enter();
272 if (idle_cpu(cpu) && !in_interrupt()) { 259 if (idle_cpu(cpu) && !in_interrupt()) {
273 __irq_enter(); 260 __irq_enter();
274 tick_check_idle(cpu); 261 tick_check_idle(cpu);
@@ -295,9 +282,9 @@ void irq_exit(void)
295 282
296#ifdef CONFIG_NO_HZ 283#ifdef CONFIG_NO_HZ
297 /* Make sure that timer wheel updates are propagated */ 284 /* Make sure that timer wheel updates are propagated */
298 if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
299 tick_nohz_stop_sched_tick(0);
300 rcu_irq_exit(); 285 rcu_irq_exit();
286 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
287 tick_nohz_stop_sched_tick(0);
301#endif 288#endif
302 preempt_enable_no_resched(); 289 preempt_enable_no_resched();
303} 290}
@@ -746,7 +733,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
746 break; 733 break;
747 /* Unbind so it can run. Fall thru. */ 734 /* Unbind so it can run. Fall thru. */
748 kthread_bind(per_cpu(ksoftirqd, hotcpu), 735 kthread_bind(per_cpu(ksoftirqd, hotcpu),
749 any_online_cpu(cpu_online_map)); 736 cpumask_any(cpu_online_mask));
750 case CPU_DEAD: 737 case CPU_DEAD:
751 case CPU_DEAD_FROZEN: { 738 case CPU_DEAD_FROZEN: {
752 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 739 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -797,3 +784,23 @@ int on_each_cpu(void (*func) (void *info), void *info, int wait)
797} 784}
798EXPORT_SYMBOL(on_each_cpu); 785EXPORT_SYMBOL(on_each_cpu);
799#endif 786#endif
787
788/*
789 * [ These __weak aliases are kept in a separate compilation unit, so that
790 * GCC does not inline them incorrectly. ]
791 */
792
793int __init __weak early_irq_init(void)
794{
795 return 0;
796}
797
798int __init __weak arch_early_irq_init(void)
799{
800 return 0;
801}
802
803int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
804{
805 return 0;
806}
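The __weak stubs added at the bottom of softirq.c let generic IRQ setup call early_irq_init(), arch_early_irq_init() and arch_init_chip_data() unconditionally; an architecture that needs real work simply supplies a strong definition, which the linker prefers over the weak one. A hypothetical arch-side override:

#include <linux/init.h>
#include <linux/irq.h>

/* A strong definition in arch code silently replaces the __weak stub above. */
int __init arch_early_irq_init(void)
{
	/* arch-specific irq_desc / chip data setup would go here */
	return 0;
}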
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index dc0b3be6b7d5..d9188c66278a 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -164,7 +164,7 @@ unsigned long __read_mostly sysctl_hung_task_check_count = 1024;
164/* 164/*
165 * Zero means infinite timeout - no checking done: 165 * Zero means infinite timeout - no checking done:
166 */ 166 */
167unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; 167unsigned long __read_mostly sysctl_hung_task_timeout_secs = 480;
168 168
169unsigned long __read_mostly sysctl_hung_task_warnings = 10; 169unsigned long __read_mostly sysctl_hung_task_warnings = 10;
170 170
@@ -303,17 +303,15 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
303 break; 303 break;
304 case CPU_ONLINE: 304 case CPU_ONLINE:
305 case CPU_ONLINE_FROZEN: 305 case CPU_ONLINE_FROZEN:
306 check_cpu = any_online_cpu(cpu_online_map); 306 check_cpu = cpumask_any(cpu_online_mask);
307 wake_up_process(per_cpu(watchdog_task, hotcpu)); 307 wake_up_process(per_cpu(watchdog_task, hotcpu));
308 break; 308 break;
309#ifdef CONFIG_HOTPLUG_CPU 309#ifdef CONFIG_HOTPLUG_CPU
310 case CPU_DOWN_PREPARE: 310 case CPU_DOWN_PREPARE:
311 case CPU_DOWN_PREPARE_FROZEN: 311 case CPU_DOWN_PREPARE_FROZEN:
312 if (hotcpu == check_cpu) { 312 if (hotcpu == check_cpu) {
313 cpumask_t temp_cpu_online_map = cpu_online_map; 313 /* Pick any other online cpu. */
314 314 check_cpu = cpumask_any_but(cpu_online_mask, hotcpu);
315 cpu_clear(hotcpu, temp_cpu_online_map);
316 check_cpu = any_online_cpu(temp_cpu_online_map);
317 } 315 }
318 break; 316 break;
319 317
@@ -323,7 +321,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
323 break; 321 break;
324 /* Unbind so it can run. Fall thru. */ 322 /* Unbind so it can run. Fall thru. */
325 kthread_bind(per_cpu(watchdog_task, hotcpu), 323 kthread_bind(per_cpu(watchdog_task, hotcpu),
326 any_online_cpu(cpu_online_map)); 324 cpumask_any(cpu_online_mask));
327 case CPU_DEAD: 325 case CPU_DEAD:
328 case CPU_DEAD_FROZEN: 326 case CPU_DEAD_FROZEN:
329 p = per_cpu(watchdog_task, hotcpu); 327 p = per_cpu(watchdog_task, hotcpu);
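The DOWN_PREPARE path no longer copies cpu_online_map to the stack just to clear one bit; cpumask_any_but() picks an online CPU other than the one going away in a single call. The same idiom as a sketch (hypothetical helper):

#include <linux/cpumask.h>

/* Return any online CPU other than 'avoid', or -1 if there is none. */
static int example_other_online_cpu(unsigned int avoid)
{
	unsigned int cpu = cpumask_any_but(cpu_online_mask, avoid);

	return cpu < nr_cpu_ids ? (int)cpu : -1;
}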
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 94b527ef1d1e..eb212f8f8bc8 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 6 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
7 */ 7 */
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/kernel.h>
9#include <linux/module.h> 10#include <linux/module.h>
10#include <linux/kallsyms.h> 11#include <linux/kallsyms.h>
11#include <linux/stacktrace.h> 12#include <linux/stacktrace.h>
@@ -24,3 +25,13 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
24} 25}
25EXPORT_SYMBOL_GPL(print_stack_trace); 26EXPORT_SYMBOL_GPL(print_stack_trace);
26 27
28/*
29 * Architectures that do not implement save_stack_trace_tsk get this
30 * weak alias and a once-per-bootup warning (whenever this facility
31 * is utilized - for example by procfs):
32 */
33__weak void
34save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
35{
36 WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
37}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 24e8ceacc388..0cd415ee62a2 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -38,7 +38,10 @@ struct stop_machine_data {
38static unsigned int num_threads; 38static unsigned int num_threads;
39static atomic_t thread_ack; 39static atomic_t thread_ack;
40static DEFINE_MUTEX(lock); 40static DEFINE_MUTEX(lock);
41 41/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
42static DEFINE_MUTEX(setup_lock);
43/* Users of stop_machine. */
44static int refcount;
42static struct workqueue_struct *stop_machine_wq; 45static struct workqueue_struct *stop_machine_wq;
43static struct stop_machine_data active, idle; 46static struct stop_machine_data active, idle;
44static const cpumask_t *active_cpus; 47static const cpumask_t *active_cpus;
@@ -69,10 +72,10 @@ static void stop_cpu(struct work_struct *unused)
69 int err; 72 int err;
70 73
71 if (!active_cpus) { 74 if (!active_cpus) {
72 if (cpu == first_cpu(cpu_online_map)) 75 if (cpu == cpumask_first(cpu_online_mask))
73 smdata = &active; 76 smdata = &active;
74 } else { 77 } else {
75 if (cpu_isset(cpu, *active_cpus)) 78 if (cpumask_test_cpu(cpu, active_cpus))
76 smdata = &active; 79 smdata = &active;
77 } 80 }
78 /* Simple state machine */ 81 /* Simple state machine */
@@ -109,7 +112,44 @@ static int chill(void *unused)
109 return 0; 112 return 0;
110} 113}
111 114
112int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 115int stop_machine_create(void)
116{
117 mutex_lock(&setup_lock);
118 if (refcount)
119 goto done;
120 stop_machine_wq = create_rt_workqueue("kstop");
121 if (!stop_machine_wq)
122 goto err_out;
123 stop_machine_work = alloc_percpu(struct work_struct);
124 if (!stop_machine_work)
125 goto err_out;
126done:
127 refcount++;
128 mutex_unlock(&setup_lock);
129 return 0;
130
131err_out:
132 if (stop_machine_wq)
133 destroy_workqueue(stop_machine_wq);
134 mutex_unlock(&setup_lock);
135 return -ENOMEM;
136}
137EXPORT_SYMBOL_GPL(stop_machine_create);
138
139void stop_machine_destroy(void)
140{
141 mutex_lock(&setup_lock);
142 refcount--;
143 if (refcount)
144 goto done;
145 destroy_workqueue(stop_machine_wq);
146 free_percpu(stop_machine_work);
147done:
148 mutex_unlock(&setup_lock);
149}
150EXPORT_SYMBOL_GPL(stop_machine_destroy);
151
152int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
113{ 153{
114 struct work_struct *sm_work; 154 struct work_struct *sm_work;
115 int i, ret; 155 int i, ret;
@@ -142,23 +182,18 @@ int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
142 return ret; 182 return ret;
143} 183}
144 184
145int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) 185int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
146{ 186{
147 int ret; 187 int ret;
148 188
189 ret = stop_machine_create();
190 if (ret)
191 return ret;
149 /* No CPUs can come up or down during this. */ 192 /* No CPUs can come up or down during this. */
150 get_online_cpus(); 193 get_online_cpus();
151 ret = __stop_machine(fn, data, cpus); 194 ret = __stop_machine(fn, data, cpus);
152 put_online_cpus(); 195 put_online_cpus();
153 196 stop_machine_destroy();
154 return ret; 197 return ret;
155} 198}
156EXPORT_SYMBOL_GPL(stop_machine); 199EXPORT_SYMBOL_GPL(stop_machine);
157
158static int __init stop_machine_init(void)
159{
160 stop_machine_wq = create_rt_workqueue("kstop");
161 stop_machine_work = alloc_percpu(struct work_struct);
162 return 0;
163}
164core_initcall(stop_machine_init);
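stop_machine's workqueue is no longer created unconditionally at boot; users take a reference with stop_machine_create() and drop it with stop_machine_destroy(), and stop_machine() itself now wraps its body in that pair. A sketch of a caller driving __stop_machine() directly (callback and wrapper names are hypothetical):

#include <linux/stop_machine.h>
#include <linux/cpu.h>

static int example_atomic_update(void *data)
{
	/* runs while every online CPU is held in a known quiescent state */
	return 0;
}

static int example_run_update(void)
{
	int err;

	err = stop_machine_create();	/* may sleep, may return -ENOMEM */
	if (err)
		return err;

	get_online_cpus();
	/* NULL cpus: the callback runs on the first online CPU only */
	err = __stop_machine(example_atomic_update, NULL, NULL);
	put_online_cpus();

	stop_machine_destroy();		/* drop the workqueue reference */
	return err;
}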
diff --git a/kernel/sys.c b/kernel/sys.c
index ebe65c2c9873..61dbfd4a54df 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -907,8 +907,8 @@ void do_sys_times(struct tms *tms)
907 struct task_cputime cputime; 907 struct task_cputime cputime;
908 cputime_t cutime, cstime; 908 cputime_t cutime, cstime;
909 909
910 spin_lock_irq(&current->sighand->siglock);
911 thread_group_cputime(current, &cputime); 910 thread_group_cputime(current, &cputime);
911 spin_lock_irq(&current->sighand->siglock);
912 cutime = current->signal->cutime; 912 cutime = current->signal->cutime;
913 cstime = current->signal->cstime; 913 cstime = current->signal->cstime;
914 spin_unlock_irq(&current->sighand->siglock); 914 spin_unlock_irq(&current->sighand->siglock);
@@ -1627,6 +1627,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1627 utime = stime = cputime_zero; 1627 utime = stime = cputime_zero;
1628 1628
1629 if (who == RUSAGE_THREAD) { 1629 if (who == RUSAGE_THREAD) {
1630 utime = task_utime(current);
1631 stime = task_stime(current);
1630 accumulate_thread_rusage(p, r); 1632 accumulate_thread_rusage(p, r);
1631 goto out; 1633 goto out;
1632 } 1634 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0b627d9c93d8..ff6d45c7626f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -121,6 +121,10 @@ extern int sg_big_buff;
121#include <asm/system.h> 121#include <asm/system.h>
122#endif 122#endif
123 123
124#ifdef CONFIG_SPARC64
125extern int sysctl_tsb_ratio;
126#endif
127
124#ifdef __hppa__ 128#ifdef __hppa__
125extern int pwrsw_enabled; 129extern int pwrsw_enabled;
126extern int unaligned_enabled; 130extern int unaligned_enabled;
@@ -451,6 +455,16 @@ static struct ctl_table kern_table[] = {
451 .proc_handler = &proc_dointvec, 455 .proc_handler = &proc_dointvec,
452 }, 456 },
453#endif 457#endif
458#ifdef CONFIG_SPARC64
459 {
460 .ctl_name = CTL_UNNUMBERED,
461 .procname = "tsb-ratio",
462 .data = &sysctl_tsb_ratio,
463 .maxlen = sizeof (int),
464 .mode = 0644,
465 .proc_handler = &proc_dointvec,
466 },
467#endif
454#ifdef __hppa__ 468#ifdef __hppa__
455 { 469 {
456 .ctl_name = KERN_HPPA_PWRSW, 470 .ctl_name = KERN_HPPA_PWRSW,
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c35da23ab8fb..fafeb48f27c0 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -730,7 +730,6 @@ static const struct trans_ctl_table trans_fs_quota_table[] = {
730}; 730};
731 731
732static const struct trans_ctl_table trans_fs_xfs_table[] = { 732static const struct trans_ctl_table trans_fs_xfs_table[] = {
733 { XFS_RESTRICT_CHOWN, "restrict_chown" },
734 { XFS_SGID_INHERIT, "irix_sgid_inherit" }, 733 { XFS_SGID_INHERIT, "irix_sgid_inherit" },
735 { XFS_SYMLINK_MODE, "irix_symlink_mode" }, 734 { XFS_SYMLINK_MODE, "irix_symlink_mode" },
736 { XFS_PANIC_MASK, "panic_mask" }, 735 { XFS_PANIC_MASK, "panic_mask" },
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index bd6be76303cf..888adbcca30c 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -290,18 +290,17 @@ ret:
290 return; 290 return;
291} 291}
292 292
293static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) 293static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
294{ 294{
295 struct listener_list *listeners; 295 struct listener_list *listeners;
296 struct listener *s, *tmp; 296 struct listener *s, *tmp;
297 unsigned int cpu; 297 unsigned int cpu;
298 cpumask_t mask = *maskp;
299 298
300 if (!cpus_subset(mask, cpu_possible_map)) 299 if (!cpumask_subset(mask, cpu_possible_mask))
301 return -EINVAL; 300 return -EINVAL;
302 301
303 if (isadd == REGISTER) { 302 if (isadd == REGISTER) {
304 for_each_cpu_mask_nr(cpu, mask) { 303 for_each_cpu(cpu, mask) {
305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 304 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
306 cpu_to_node(cpu)); 305 cpu_to_node(cpu));
307 if (!s) 306 if (!s)
@@ -320,7 +319,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
320 319
321 /* Deregister or cleanup */ 320 /* Deregister or cleanup */
322cleanup: 321cleanup:
323 for_each_cpu_mask_nr(cpu, mask) { 322 for_each_cpu(cpu, mask) {
324 listeners = &per_cpu(listener_array, cpu); 323 listeners = &per_cpu(listener_array, cpu);
325 down_write(&listeners->sem); 324 down_write(&listeners->sem);
326 list_for_each_entry_safe(s, tmp, &listeners->list, list) { 325 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
@@ -335,7 +334,7 @@ cleanup:
335 return 0; 334 return 0;
336} 335}
337 336
338static int parse(struct nlattr *na, cpumask_t *mask) 337static int parse(struct nlattr *na, struct cpumask *mask)
339{ 338{
340 char *data; 339 char *data;
341 int len; 340 int len;
@@ -352,7 +351,7 @@ static int parse(struct nlattr *na, cpumask_t *mask)
352 if (!data) 351 if (!data)
353 return -ENOMEM; 352 return -ENOMEM;
354 nla_strlcpy(data, na, len); 353 nla_strlcpy(data, na, len);
355 ret = cpulist_parse(data, *mask); 354 ret = cpulist_parse(data, mask);
356 kfree(data); 355 kfree(data);
357 return ret; 356 return ret;
358} 357}
@@ -428,23 +427,33 @@ err:
428 427
429static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) 428static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
430{ 429{
431 int rc = 0; 430 int rc;
432 struct sk_buff *rep_skb; 431 struct sk_buff *rep_skb;
433 struct taskstats *stats; 432 struct taskstats *stats;
434 size_t size; 433 size_t size;
435 cpumask_t mask; 434 cpumask_var_t mask;
435
436 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
437 return -ENOMEM;
436 438
437 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask); 439 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
438 if (rc < 0) 440 if (rc < 0)
439 return rc; 441 goto free_return_rc;
440 if (rc == 0) 442 if (rc == 0) {
441 return add_del_listener(info->snd_pid, &mask, REGISTER); 443 rc = add_del_listener(info->snd_pid, mask, REGISTER);
444 goto free_return_rc;
445 }
442 446
443 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask); 447 rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
444 if (rc < 0) 448 if (rc < 0)
449 goto free_return_rc;
450 if (rc == 0) {
451 rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
452free_return_rc:
453 free_cpumask_var(mask);
445 return rc; 454 return rc;
446 if (rc == 0) 455 }
447 return add_del_listener(info->snd_pid, &mask, DEREGISTER); 456 free_cpumask_var(mask);
448 457
449 /* 458 /*
450 * Size includes space for nested attributes 459 * Size includes space for nested attributes
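[Editor's note: the taskstats conversion above replaces an on-stack cpumask_t with a dynamically allocated cpumask_var_t. A minimal sketch of that allocate/use/free pattern follows, assuming an in-kernel caller; demo_count_possible() is a hypothetical helper, not part of the patch.]

#include <linux/cpumask.h>
#include <linux/gfp.h>

static int demo_count_possible(void)
{
	cpumask_var_t mask;	/* off-stack with CONFIG_CPUMASK_OFFSTACK, on-stack otherwise */
	int cpu, count = 0;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_possible_mask);
	for_each_cpu(cpu, mask)		/* replaces for_each_cpu_mask_nr() in the hunk above */
		count++;

	free_cpumask_var(mask);		/* every alloc_cpumask_var() needs a matching free */
	return count;
}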
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index f8d968063cea..ea2f48af83cf 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -166,6 +166,8 @@ static void clockevents_notify_released(void)
166void clockevents_register_device(struct clock_event_device *dev) 166void clockevents_register_device(struct clock_event_device *dev)
167{ 167{
168 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 168 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
169 BUG_ON(!dev->cpumask);
170
169 /* 171 /*
170 * A nsec2cyc multiplicator of 0 is invalid and we'd crash 172 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
171 * on it, so fix it up and emit a warning: 173 * on it, so fix it up and emit a warning:
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 9ed2eec97526..ca89e1593f08 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,10 +145,11 @@ static void clocksource_watchdog(unsigned long data)
145 * Cycle through CPUs to check if the CPUs stay 145 * Cycle through CPUs to check if the CPUs stay
146 * synchronized to each other. 146 * synchronized to each other.
147 */ 147 */
148 int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); 148 int next_cpu = cpumask_next(raw_smp_processor_id(),
149 cpu_online_mask);
149 150
150 if (next_cpu >= nr_cpu_ids) 151 if (next_cpu >= nr_cpu_ids)
151 next_cpu = first_cpu(cpu_online_map); 152 next_cpu = cpumask_first(cpu_online_mask);
152 watchdog_timer.expires += WATCHDOG_INTERVAL; 153 watchdog_timer.expires += WATCHDOG_INTERVAL;
153 add_timer_on(&watchdog_timer, next_cpu); 154 add_timer_on(&watchdog_timer, next_cpu);
154 } 155 }
@@ -173,7 +174,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
173 watchdog_last = watchdog->read(); 174 watchdog_last = watchdog->read();
174 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 175 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
175 add_timer_on(&watchdog_timer, 176 add_timer_on(&watchdog_timer,
176 first_cpu(cpu_online_map)); 177 cpumask_first(cpu_online_mask));
177 } 178 }
178 } else { 179 } else {
179 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) 180 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
@@ -195,7 +196,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
195 watchdog_timer.expires = 196 watchdog_timer.expires =
196 jiffies + WATCHDOG_INTERVAL; 197 jiffies + WATCHDOG_INTERVAL;
197 add_timer_on(&watchdog_timer, 198 add_timer_on(&watchdog_timer,
198 first_cpu(cpu_online_map)); 199 cpumask_first(cpu_online_mask));
199 } 200 }
200 } 201 }
201 } 202 }
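[Editor's note: a compressed restatement of the watchdog rotation above, given only to make the wrap-around semantics explicit; cpumask_next() returns a value >= nr_cpu_ids once it runs past the last set bit. demo_next_watchdog_cpu() is a hypothetical name.]

#include <linux/cpumask.h>
#include <linux/smp.h>

static int demo_next_watchdog_cpu(void)
{
	int next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);

	if (next_cpu >= nr_cpu_ids)				/* walked off the end of the mask */
		next_cpu = cpumask_first(cpu_online_mask);	/* wrap to the first online CPU */
	return next_cpu;
}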
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8ff15e5d486b..f5f793d92415 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -131,7 +131,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
131{ 131{
132 enum hrtimer_restart res = HRTIMER_NORESTART; 132 enum hrtimer_restart res = HRTIMER_NORESTART;
133 133
134 write_seqlock_irq(&xtime_lock); 134 write_seqlock(&xtime_lock);
135 135
136 switch (time_state) { 136 switch (time_state) {
137 case TIME_OK: 137 case TIME_OK:
@@ -164,7 +164,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
164 } 164 }
165 update_vsyscall(&xtime, clock); 165 update_vsyscall(&xtime, clock);
166 166
167 write_sequnlock_irq(&xtime_lock); 167 write_sequnlock(&xtime_lock);
168 168
169 return res; 169 return res;
170} 170}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f98a1b7b16e9..118a3b3b3f9a 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -28,7 +28,9 @@
28 */ 28 */
29 29
30struct tick_device tick_broadcast_device; 30struct tick_device tick_broadcast_device;
31static cpumask_t tick_broadcast_mask; 31/* FIXME: Use cpumask_var_t. */
32static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
33static DECLARE_BITMAP(tmpmask, NR_CPUS);
32static DEFINE_SPINLOCK(tick_broadcast_lock); 34static DEFINE_SPINLOCK(tick_broadcast_lock);
33static int tick_broadcast_force; 35static int tick_broadcast_force;
34 36
@@ -46,9 +48,9 @@ struct tick_device *tick_get_broadcast_device(void)
46 return &tick_broadcast_device; 48 return &tick_broadcast_device;
47} 49}
48 50
49cpumask_t *tick_get_broadcast_mask(void) 51struct cpumask *tick_get_broadcast_mask(void)
50{ 52{
51 return &tick_broadcast_mask; 53 return to_cpumask(tick_broadcast_mask);
52} 54}
53 55
54/* 56/*
@@ -72,7 +74,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
72 74
73 clockevents_exchange_device(NULL, dev); 75 clockevents_exchange_device(NULL, dev);
74 tick_broadcast_device.evtdev = dev; 76 tick_broadcast_device.evtdev = dev;
75 if (!cpus_empty(tick_broadcast_mask)) 77 if (!cpumask_empty(tick_get_broadcast_mask()))
76 tick_broadcast_start_periodic(dev); 78 tick_broadcast_start_periodic(dev);
77 return 1; 79 return 1;
78} 80}
@@ -104,7 +106,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
104 */ 106 */
105 if (!tick_device_is_functional(dev)) { 107 if (!tick_device_is_functional(dev)) {
106 dev->event_handler = tick_handle_periodic; 108 dev->event_handler = tick_handle_periodic;
107 cpu_set(cpu, tick_broadcast_mask); 109 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
108 tick_broadcast_start_periodic(tick_broadcast_device.evtdev); 110 tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
109 ret = 1; 111 ret = 1;
110 } else { 112 } else {
@@ -116,7 +118,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
116 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { 118 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
117 int cpu = smp_processor_id(); 119 int cpu = smp_processor_id();
118 120
119 cpu_clear(cpu, tick_broadcast_mask); 121 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
120 tick_broadcast_clear_oneshot(cpu); 122 tick_broadcast_clear_oneshot(cpu);
121 } 123 }
122 } 124 }
@@ -125,9 +127,9 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
125} 127}
126 128
127/* 129/*
128 * Broadcast the event to the cpus, which are set in the mask 130 * Broadcast the event to the cpus, which are set in the mask (mangled).
129 */ 131 */
130static void tick_do_broadcast(cpumask_t mask) 132static void tick_do_broadcast(struct cpumask *mask)
131{ 133{
132 int cpu = smp_processor_id(); 134 int cpu = smp_processor_id();
133 struct tick_device *td; 135 struct tick_device *td;
@@ -135,21 +137,20 @@ static void tick_do_broadcast(cpumask_t mask)
135 /* 137 /*
136 * Check, if the current cpu is in the mask 138 * Check, if the current cpu is in the mask
137 */ 139 */
138 if (cpu_isset(cpu, mask)) { 140 if (cpumask_test_cpu(cpu, mask)) {
139 cpu_clear(cpu, mask); 141 cpumask_clear_cpu(cpu, mask);
140 td = &per_cpu(tick_cpu_device, cpu); 142 td = &per_cpu(tick_cpu_device, cpu);
141 td->evtdev->event_handler(td->evtdev); 143 td->evtdev->event_handler(td->evtdev);
142 } 144 }
143 145
144 if (!cpus_empty(mask)) { 146 if (!cpumask_empty(mask)) {
145 /* 147 /*
146 * It might be necessary to actually check whether the devices 148 * It might be necessary to actually check whether the devices
147 * have different broadcast functions. For now, just use the 149 * have different broadcast functions. For now, just use the
148 * one of the first device. This works as long as we have this 150 * one of the first device. This works as long as we have this
149 * misfeature only on x86 (lapic) 151 * misfeature only on x86 (lapic)
150 */ 152 */
151 cpu = first_cpu(mask); 153 td = &per_cpu(tick_cpu_device, cpumask_first(mask));
152 td = &per_cpu(tick_cpu_device, cpu);
153 td->evtdev->broadcast(mask); 154 td->evtdev->broadcast(mask);
154 } 155 }
155} 156}
@@ -160,12 +161,11 @@ static void tick_do_broadcast(cpumask_t mask)
160 */ 161 */
161static void tick_do_periodic_broadcast(void) 162static void tick_do_periodic_broadcast(void)
162{ 163{
163 cpumask_t mask;
164
165 spin_lock(&tick_broadcast_lock); 164 spin_lock(&tick_broadcast_lock);
166 165
167 cpus_and(mask, cpu_online_map, tick_broadcast_mask); 166 cpumask_and(to_cpumask(tmpmask),
168 tick_do_broadcast(mask); 167 cpu_online_mask, tick_get_broadcast_mask());
168 tick_do_broadcast(to_cpumask(tmpmask));
169 169
170 spin_unlock(&tick_broadcast_lock); 170 spin_unlock(&tick_broadcast_lock);
171} 171}
@@ -228,13 +228,13 @@ static void tick_do_broadcast_on_off(void *why)
228 if (!tick_device_is_functional(dev)) 228 if (!tick_device_is_functional(dev))
229 goto out; 229 goto out;
230 230
231 bc_stopped = cpus_empty(tick_broadcast_mask); 231 bc_stopped = cpumask_empty(tick_get_broadcast_mask());
232 232
233 switch (*reason) { 233 switch (*reason) {
234 case CLOCK_EVT_NOTIFY_BROADCAST_ON: 234 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: 235 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
236 if (!cpu_isset(cpu, tick_broadcast_mask)) { 236 if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
237 cpu_set(cpu, tick_broadcast_mask); 237 cpumask_set_cpu(cpu, tick_get_broadcast_mask());
238 if (tick_broadcast_device.mode == 238 if (tick_broadcast_device.mode ==
239 TICKDEV_MODE_PERIODIC) 239 TICKDEV_MODE_PERIODIC)
240 clockevents_shutdown(dev); 240 clockevents_shutdown(dev);
@@ -244,8 +244,8 @@ static void tick_do_broadcast_on_off(void *why)
244 break; 244 break;
245 case CLOCK_EVT_NOTIFY_BROADCAST_OFF: 245 case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
246 if (!tick_broadcast_force && 246 if (!tick_broadcast_force &&
247 cpu_isset(cpu, tick_broadcast_mask)) { 247 cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
248 cpu_clear(cpu, tick_broadcast_mask); 248 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
249 if (tick_broadcast_device.mode == 249 if (tick_broadcast_device.mode ==
250 TICKDEV_MODE_PERIODIC) 250 TICKDEV_MODE_PERIODIC)
251 tick_setup_periodic(dev, 0); 251 tick_setup_periodic(dev, 0);
@@ -253,7 +253,7 @@ static void tick_do_broadcast_on_off(void *why)
253 break; 253 break;
254 } 254 }
255 255
256 if (cpus_empty(tick_broadcast_mask)) { 256 if (cpumask_empty(tick_get_broadcast_mask())) {
257 if (!bc_stopped) 257 if (!bc_stopped)
258 clockevents_shutdown(bc); 258 clockevents_shutdown(bc);
259 } else if (bc_stopped) { 259 } else if (bc_stopped) {
@@ -272,7 +272,7 @@ out:
272 */ 272 */
273void tick_broadcast_on_off(unsigned long reason, int *oncpu) 273void tick_broadcast_on_off(unsigned long reason, int *oncpu)
274{ 274{
275 if (!cpu_isset(*oncpu, cpu_online_map)) 275 if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
276 printk(KERN_ERR "tick-broadcast: ignoring broadcast for " 276 printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
277 "offline CPU #%d\n", *oncpu); 277 "offline CPU #%d\n", *oncpu);
278 else 278 else
@@ -303,10 +303,10 @@ void tick_shutdown_broadcast(unsigned int *cpup)
303 spin_lock_irqsave(&tick_broadcast_lock, flags); 303 spin_lock_irqsave(&tick_broadcast_lock, flags);
304 304
305 bc = tick_broadcast_device.evtdev; 305 bc = tick_broadcast_device.evtdev;
306 cpu_clear(cpu, tick_broadcast_mask); 306 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
307 307
308 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { 308 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
309 if (bc && cpus_empty(tick_broadcast_mask)) 309 if (bc && cpumask_empty(tick_get_broadcast_mask()))
310 clockevents_shutdown(bc); 310 clockevents_shutdown(bc);
311 } 311 }
312 312
@@ -342,10 +342,10 @@ int tick_resume_broadcast(void)
342 342
343 switch (tick_broadcast_device.mode) { 343 switch (tick_broadcast_device.mode) {
344 case TICKDEV_MODE_PERIODIC: 344 case TICKDEV_MODE_PERIODIC:
345 if(!cpus_empty(tick_broadcast_mask)) 345 if (!cpumask_empty(tick_get_broadcast_mask()))
346 tick_broadcast_start_periodic(bc); 346 tick_broadcast_start_periodic(bc);
347 broadcast = cpu_isset(smp_processor_id(), 347 broadcast = cpumask_test_cpu(smp_processor_id(),
348 tick_broadcast_mask); 348 tick_get_broadcast_mask());
349 break; 349 break;
350 case TICKDEV_MODE_ONESHOT: 350 case TICKDEV_MODE_ONESHOT:
351 broadcast = tick_resume_broadcast_oneshot(bc); 351 broadcast = tick_resume_broadcast_oneshot(bc);
@@ -360,14 +360,15 @@ int tick_resume_broadcast(void)
360 360
361#ifdef CONFIG_TICK_ONESHOT 361#ifdef CONFIG_TICK_ONESHOT
362 362
363static cpumask_t tick_broadcast_oneshot_mask; 363/* FIXME: use cpumask_var_t. */
364static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);
364 365
365/* 366/*
366 * Debugging: see timer_list.c 367 * Exposed for debugging: see timer_list.c
367 */ 368 */
368cpumask_t *tick_get_broadcast_oneshot_mask(void) 369struct cpumask *tick_get_broadcast_oneshot_mask(void)
369{ 370{
370 return &tick_broadcast_oneshot_mask; 371 return to_cpumask(tick_broadcast_oneshot_mask);
371} 372}
372 373
373static int tick_broadcast_set_event(ktime_t expires, int force) 374static int tick_broadcast_set_event(ktime_t expires, int force)
@@ -389,7 +390,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
389 */ 390 */
390void tick_check_oneshot_broadcast(int cpu) 391void tick_check_oneshot_broadcast(int cpu)
391{ 392{
392 if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 393 if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
393 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); 394 struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
394 395
395 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); 396 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
@@ -402,7 +403,6 @@ void tick_check_oneshot_broadcast(int cpu)
402static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) 403static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
403{ 404{
404 struct tick_device *td; 405 struct tick_device *td;
405 cpumask_t mask;
406 ktime_t now, next_event; 406 ktime_t now, next_event;
407 int cpu; 407 int cpu;
408 408
@@ -410,13 +410,13 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
410again: 410again:
411 dev->next_event.tv64 = KTIME_MAX; 411 dev->next_event.tv64 = KTIME_MAX;
412 next_event.tv64 = KTIME_MAX; 412 next_event.tv64 = KTIME_MAX;
413 mask = CPU_MASK_NONE; 413 cpumask_clear(to_cpumask(tmpmask));
414 now = ktime_get(); 414 now = ktime_get();
415 /* Find all expired events */ 415 /* Find all expired events */
416 for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { 416 for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
417 td = &per_cpu(tick_cpu_device, cpu); 417 td = &per_cpu(tick_cpu_device, cpu);
418 if (td->evtdev->next_event.tv64 <= now.tv64) 418 if (td->evtdev->next_event.tv64 <= now.tv64)
419 cpu_set(cpu, mask); 419 cpumask_set_cpu(cpu, to_cpumask(tmpmask));
420 else if (td->evtdev->next_event.tv64 < next_event.tv64) 420 else if (td->evtdev->next_event.tv64 < next_event.tv64)
421 next_event.tv64 = td->evtdev->next_event.tv64; 421 next_event.tv64 = td->evtdev->next_event.tv64;
422 } 422 }
@@ -424,7 +424,7 @@ again:
424 /* 424 /*
425 * Wakeup the cpus which have an expired event. 425 * Wakeup the cpus which have an expired event.
426 */ 426 */
427 tick_do_broadcast(mask); 427 tick_do_broadcast(to_cpumask(tmpmask));
428 428
429 /* 429 /*
430 * Two reasons for reprogram: 430 * Two reasons for reprogram:
@@ -476,15 +476,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
476 goto out; 476 goto out;
477 477
478 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) { 478 if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
479 if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 479 if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
480 cpu_set(cpu, tick_broadcast_oneshot_mask); 480 cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
481 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 481 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
482 if (dev->next_event.tv64 < bc->next_event.tv64) 482 if (dev->next_event.tv64 < bc->next_event.tv64)
483 tick_broadcast_set_event(dev->next_event, 1); 483 tick_broadcast_set_event(dev->next_event, 1);
484 } 484 }
485 } else { 485 } else {
486 if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) { 486 if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
487 cpu_clear(cpu, tick_broadcast_oneshot_mask); 487 cpumask_clear_cpu(cpu,
488 tick_get_broadcast_oneshot_mask());
488 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT); 489 clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
489 if (dev->next_event.tv64 != KTIME_MAX) 490 if (dev->next_event.tv64 != KTIME_MAX)
490 tick_program_event(dev->next_event, 1); 491 tick_program_event(dev->next_event, 1);
@@ -502,15 +503,16 @@ out:
502 */ 503 */
503static void tick_broadcast_clear_oneshot(int cpu) 504static void tick_broadcast_clear_oneshot(int cpu)
504{ 505{
505 cpu_clear(cpu, tick_broadcast_oneshot_mask); 506 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
506} 507}
507 508
508static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) 509static void tick_broadcast_init_next_event(struct cpumask *mask,
510 ktime_t expires)
509{ 511{
510 struct tick_device *td; 512 struct tick_device *td;
511 int cpu; 513 int cpu;
512 514
513 for_each_cpu_mask_nr(cpu, *mask) { 515 for_each_cpu(cpu, mask) {
514 td = &per_cpu(tick_cpu_device, cpu); 516 td = &per_cpu(tick_cpu_device, cpu);
515 if (td->evtdev) 517 if (td->evtdev)
516 td->evtdev->next_event = expires; 518 td->evtdev->next_event = expires;
@@ -526,7 +528,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
526 if (bc->event_handler != tick_handle_oneshot_broadcast) { 528 if (bc->event_handler != tick_handle_oneshot_broadcast) {
527 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 529 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
528 int cpu = smp_processor_id(); 530 int cpu = smp_processor_id();
529 cpumask_t mask;
530 531
531 bc->event_handler = tick_handle_oneshot_broadcast; 532 bc->event_handler = tick_handle_oneshot_broadcast;
532 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); 533 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
@@ -540,13 +541,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
540 * oneshot_mask bits for those and program the 541 * oneshot_mask bits for those and program the
541 * broadcast device to fire. 542 * broadcast device to fire.
542 */ 543 */
543 mask = tick_broadcast_mask; 544 cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
544 cpu_clear(cpu, mask); 545 cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
545 cpus_or(tick_broadcast_oneshot_mask, 546 cpumask_or(tick_get_broadcast_oneshot_mask(),
546 tick_broadcast_oneshot_mask, mask); 547 tick_get_broadcast_oneshot_mask(),
547 548 to_cpumask(tmpmask));
548 if (was_periodic && !cpus_empty(mask)) { 549
549 tick_broadcast_init_next_event(&mask, tick_next_period); 550 if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
551 tick_broadcast_init_next_event(to_cpumask(tmpmask),
552 tick_next_period);
550 tick_broadcast_set_event(tick_next_period, 1); 553 tick_broadcast_set_event(tick_next_period, 1);
551 } else 554 } else
552 bc->next_event.tv64 = KTIME_MAX; 555 bc->next_event.tv64 = KTIME_MAX;
@@ -585,7 +588,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
585 * Clear the broadcast mask flag for the dead cpu, but do not 588 * Clear the broadcast mask flag for the dead cpu, but do not
586 * stop the broadcast device! 589 * stop the broadcast device!
587 */ 590 */
588 cpu_clear(cpu, tick_broadcast_oneshot_mask); 591 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
589 592
590 spin_unlock_irqrestore(&tick_broadcast_lock, flags); 593 spin_unlock_irqrestore(&tick_broadcast_lock, flags);
591} 594}
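[Editor's note: the broadcast masks above move from static cpumask_t variables to static bitmaps wrapped with to_cpumask(), the interim step flagged by the FIXME comments. A hedged sketch of that pattern; demo_mask and demo_toggle_self() are hypothetical.]

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Static NR_CPUS-sized bitmap, usable through the struct cpumask API. */
static DECLARE_BITMAP(demo_mask, NR_CPUS);

static void demo_toggle_self(void)
{
	int cpu = raw_smp_processor_id();

	cpumask_set_cpu(cpu, to_cpumask(demo_mask));
	if (cpumask_test_cpu(cpu, to_cpumask(demo_mask)))
		cpumask_clear_cpu(cpu, to_cpumask(demo_mask));
}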
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index df12434b43ca..63e05d423a09 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -136,7 +136,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
136 */ 136 */
137static void tick_setup_device(struct tick_device *td, 137static void tick_setup_device(struct tick_device *td,
138 struct clock_event_device *newdev, int cpu, 138 struct clock_event_device *newdev, int cpu,
139 const cpumask_t *cpumask) 139 const struct cpumask *cpumask)
140{ 140{
141 ktime_t next_event; 141 ktime_t next_event;
142 void (*handler)(struct clock_event_device *) = NULL; 142 void (*handler)(struct clock_event_device *) = NULL;
@@ -171,8 +171,8 @@ static void tick_setup_device(struct tick_device *td,
171 * When the device is not per cpu, pin the interrupt to the 171 * When the device is not per cpu, pin the interrupt to the
172 * current cpu: 172 * current cpu:
173 */ 173 */
174 if (!cpus_equal(newdev->cpumask, *cpumask)) 174 if (!cpumask_equal(newdev->cpumask, cpumask))
175 irq_set_affinity(newdev->irq, *cpumask); 175 irq_set_affinity(newdev->irq, cpumask);
176 176
177 /* 177 /*
178 * When global broadcasting is active, check if the current 178 * When global broadcasting is active, check if the current
@@ -202,14 +202,14 @@ static int tick_check_new_device(struct clock_event_device *newdev)
202 spin_lock_irqsave(&tick_device_lock, flags); 202 spin_lock_irqsave(&tick_device_lock, flags);
203 203
204 cpu = smp_processor_id(); 204 cpu = smp_processor_id();
205 if (!cpu_isset(cpu, newdev->cpumask)) 205 if (!cpumask_test_cpu(cpu, newdev->cpumask))
206 goto out_bc; 206 goto out_bc;
207 207
208 td = &per_cpu(tick_cpu_device, cpu); 208 td = &per_cpu(tick_cpu_device, cpu);
209 curdev = td->evtdev; 209 curdev = td->evtdev;
210 210
211 /* cpu local device ? */ 211 /* cpu local device ? */
212 if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) { 212 if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
213 213
214 /* 214 /*
215 * If the cpu affinity of the device interrupt can not 215 * If the cpu affinity of the device interrupt can not
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
222 * If we have a cpu local device already, do not replace it 222 * If we have a cpu local device already, do not replace it
223 * by a non cpu local device 223 * by a non cpu local device
224 */ 224 */
225 if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu))) 225 if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
226 goto out_bc; 226 goto out_bc;
227 } 227 }
228 228
@@ -254,7 +254,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
254 curdev = NULL; 254 curdev = NULL;
255 } 255 }
256 clockevents_exchange_device(curdev, newdev); 256 clockevents_exchange_device(curdev, newdev);
257 tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); 257 tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
259 tick_oneshot_notify(); 259 tick_oneshot_notify();
260 260
@@ -299,9 +299,9 @@ static void tick_shutdown(unsigned int *cpup)
299 } 299 }
300 /* Transfer the do_timer job away from this cpu */ 300 /* Transfer the do_timer job away from this cpu */
301 if (*cpup == tick_do_timer_cpu) { 301 if (*cpup == tick_do_timer_cpu) {
302 int cpu = first_cpu(cpu_online_map); 302 int cpu = cpumask_first(cpu_online_mask);
303 303
304 tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : 304 tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
305 TICK_DO_TIMER_NONE; 305 TICK_DO_TIMER_NONE;
306 } 306 }
307 spin_unlock_irqrestore(&tick_device_lock, flags); 307 spin_unlock_irqrestore(&tick_device_lock, flags);
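[Editor's note: the tick_shutdown() change above reflects the new API's sentinel: cpumask_first()/cpumask_next() report "no CPU found" as a value >= nr_cpu_ids rather than == NR_CPUS. A tiny hedged sketch; demo_pick_cpu() and its -1 fallback are hypothetical stand-ins.]

#include <linux/cpumask.h>

static int demo_pick_cpu(void)
{
	int cpu = cpumask_first(cpu_online_mask);

	/* the old code compared against NR_CPUS; the new check is "< nr_cpu_ids" */
	return (cpu < nr_cpu_ids) ? cpu : -1;
}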
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 342fc9ccab46..1b6c05bd0d0a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -144,7 +144,7 @@ void tick_nohz_update_jiffies(void)
144 if (!ts->tick_stopped) 144 if (!ts->tick_stopped)
145 return; 145 return;
146 146
147 cpu_clear(cpu, nohz_cpu_mask); 147 cpumask_clear_cpu(cpu, nohz_cpu_mask);
148 now = ktime_get(); 148 now = ktime_get();
149 ts->idle_waketime = now; 149 ts->idle_waketime = now;
150 150
@@ -247,7 +247,7 @@ void tick_nohz_stop_sched_tick(int inidle)
247 if (need_resched()) 247 if (need_resched())
248 goto end; 248 goto end;
249 249
250 if (unlikely(local_softirq_pending())) { 250 if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
251 static int ratelimit; 251 static int ratelimit;
252 252
253 if (ratelimit < 10) { 253 if (ratelimit < 10) {
@@ -282,8 +282,31 @@ void tick_nohz_stop_sched_tick(int inidle)
282 /* Schedule the tick, if we are at least one jiffie off */ 282 /* Schedule the tick, if we are at least one jiffie off */
283 if ((long)delta_jiffies >= 1) { 283 if ((long)delta_jiffies >= 1) {
284 284
285 /*
286 * calculate the expiry time for the next timer wheel
287 * timer
288 */
289 expires = ktime_add_ns(last_update, tick_period.tv64 *
290 delta_jiffies);
291
292 /*
293 * If this cpu is the one which updates jiffies, then
294 * give up the assignment and let it be taken by the
295 * cpu which runs the tick timer next, which might be
296 * this cpu as well. If we don't drop this here the
297 * jiffies might be stale and do_timer() never
298 * invoked.
299 */
300 if (cpu == tick_do_timer_cpu)
301 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
302
285 if (delta_jiffies > 1) 303 if (delta_jiffies > 1)
286 cpu_set(cpu, nohz_cpu_mask); 304 cpumask_set_cpu(cpu, nohz_cpu_mask);
305
306 /* Skip reprogram of event if its not changed */
307 if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
308 goto out;
309
287 /* 310 /*
288 * nohz_stop_sched_tick can be called several times before 311 * nohz_stop_sched_tick can be called several times before
289 * the nohz_restart_sched_tick is called. This happens when 312 * the nohz_restart_sched_tick is called. This happens when
@@ -296,7 +319,7 @@ void tick_nohz_stop_sched_tick(int inidle)
296 /* 319 /*
297 * sched tick not stopped! 320 * sched tick not stopped!
298 */ 321 */
299 cpu_clear(cpu, nohz_cpu_mask); 322 cpumask_clear_cpu(cpu, nohz_cpu_mask);
300 goto out; 323 goto out;
301 } 324 }
302 325
@@ -306,17 +329,6 @@ void tick_nohz_stop_sched_tick(int inidle)
306 rcu_enter_nohz(); 329 rcu_enter_nohz();
307 } 330 }
308 331
309 /*
310 * If this cpu is the one which updates jiffies, then
311 * give up the assignment and let it be taken by the
312 * cpu which runs the tick timer next, which might be
313 * this cpu as well. If we don't drop this here the
314 * jiffies might be stale and do_timer() never
315 * invoked.
316 */
317 if (cpu == tick_do_timer_cpu)
318 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
319
320 ts->idle_sleeps++; 332 ts->idle_sleeps++;
321 333
322 /* 334 /*
@@ -332,12 +344,7 @@ void tick_nohz_stop_sched_tick(int inidle)
332 goto out; 344 goto out;
333 } 345 }
334 346
335 /* 347 /* Mark expiries */
336 * calculate the expiry time for the next timer wheel
337 * timer
338 */
339 expires = ktime_add_ns(last_update, tick_period.tv64 *
340 delta_jiffies);
341 ts->idle_expires = expires; 348 ts->idle_expires = expires;
342 349
343 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { 350 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
@@ -354,7 +361,7 @@ void tick_nohz_stop_sched_tick(int inidle)
354 * softirq. 361 * softirq.
355 */ 362 */
356 tick_do_update_jiffies64(ktime_get()); 363 tick_do_update_jiffies64(ktime_get());
357 cpu_clear(cpu, nohz_cpu_mask); 364 cpumask_clear_cpu(cpu, nohz_cpu_mask);
358 } 365 }
359 raise_softirq_irqoff(TIMER_SOFTIRQ); 366 raise_softirq_irqoff(TIMER_SOFTIRQ);
360out: 367out:
@@ -412,7 +419,9 @@ void tick_nohz_restart_sched_tick(void)
412{ 419{
413 int cpu = smp_processor_id(); 420 int cpu = smp_processor_id();
414 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 421 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
422#ifndef CONFIG_VIRT_CPU_ACCOUNTING
415 unsigned long ticks; 423 unsigned long ticks;
424#endif
416 ktime_t now; 425 ktime_t now;
417 426
418 local_irq_disable(); 427 local_irq_disable();
@@ -432,8 +441,9 @@ void tick_nohz_restart_sched_tick(void)
432 select_nohz_load_balancer(0); 441 select_nohz_load_balancer(0);
433 now = ktime_get(); 442 now = ktime_get();
434 tick_do_update_jiffies64(now); 443 tick_do_update_jiffies64(now);
435 cpu_clear(cpu, nohz_cpu_mask); 444 cpumask_clear_cpu(cpu, nohz_cpu_mask);
436 445
446#ifndef CONFIG_VIRT_CPU_ACCOUNTING
437 /* 447 /*
438 * We stopped the tick in idle. Update process times would miss the 448 * We stopped the tick in idle. Update process times would miss the
439 * time we slept as update_process_times does only a 1 tick 449 * time we slept as update_process_times does only a 1 tick
@@ -443,12 +453,9 @@ void tick_nohz_restart_sched_tick(void)
443 /* 453 /*
444 * We might be one off. Do not randomly account a huge number of ticks! 454 * We might be one off. Do not randomly account a huge number of ticks!
445 */ 455 */
446 if (ticks && ticks < LONG_MAX) { 456 if (ticks && ticks < LONG_MAX)
447 add_preempt_count(HARDIRQ_OFFSET); 457 account_idle_ticks(ticks);
448 account_system_time(current, HARDIRQ_OFFSET, 458#endif
449 jiffies_to_cputime(ticks));
450 sub_preempt_count(HARDIRQ_OFFSET);
451 }
452 459
453 touch_softlockup_watchdog(); 460 touch_softlockup_watchdog();
454 /* 461 /*
@@ -681,7 +688,6 @@ void tick_setup_sched_timer(void)
681 */ 688 */
682 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 689 hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
683 ts->sched_timer.function = tick_sched_timer; 690 ts->sched_timer.function = tick_sched_timer;
684 ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
685 691
686 /* Get the next period (per cpu) */ 692 /* Get the next period (per cpu) */
687 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); 693 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index fa05e88aa76f..900f1b6598d1 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -46,6 +46,9 @@ struct timespec xtime __attribute__ ((aligned (16)));
46struct timespec wall_to_monotonic __attribute__ ((aligned (16))); 46struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
47static unsigned long total_sleep_time; /* seconds */ 47static unsigned long total_sleep_time; /* seconds */
48 48
49/* flag for if timekeeping is suspended */
50int __read_mostly timekeeping_suspended;
51
49static struct timespec xtime_cache __attribute__ ((aligned (16))); 52static struct timespec xtime_cache __attribute__ ((aligned (16)));
50void update_xtime_cache(u64 nsec) 53void update_xtime_cache(u64 nsec)
51{ 54{
@@ -92,6 +95,8 @@ void getnstimeofday(struct timespec *ts)
92 unsigned long seq; 95 unsigned long seq;
93 s64 nsecs; 96 s64 nsecs;
94 97
98 WARN_ON(timekeeping_suspended);
99
95 do { 100 do {
96 seq = read_seqbegin(&xtime_lock); 101 seq = read_seqbegin(&xtime_lock);
97 102
@@ -299,8 +304,6 @@ void __init timekeeping_init(void)
299 write_sequnlock_irqrestore(&xtime_lock, flags); 304 write_sequnlock_irqrestore(&xtime_lock, flags);
300} 305}
301 306
302/* flag for if timekeeping is suspended */
303static int timekeeping_suspended;
304/* time in seconds when suspend began */ 307/* time in seconds when suspend began */
305static unsigned long timekeeping_suspend_time; 308static unsigned long timekeeping_suspend_time;
306 309
diff --git a/kernel/timer.c b/kernel/timer.c
index 566257d1dc10..dee3f641a7a7 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1018,21 +1018,6 @@ unsigned long get_next_timer_interrupt(unsigned long now)
1018} 1018}
1019#endif 1019#endif
1020 1020
1021#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1022void account_process_tick(struct task_struct *p, int user_tick)
1023{
1024 cputime_t one_jiffy = jiffies_to_cputime(1);
1025
1026 if (user_tick) {
1027 account_user_time(p, one_jiffy);
1028 account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
1029 } else {
1030 account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
1031 account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
1032 }
1033}
1034#endif
1035
1036/* 1021/*
1037 * Called from the timer interrupt handler to charge one tick to the current 1022 * Called from the timer interrupt handler to charge one tick to the current
1038 * process. user_tick is 1 if the tick is user time, 0 for system. 1023 * process. user_tick is 1 if the tick is user time, 0 for system.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 76f34c0ef29c..a9d9760dc7b6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -69,6 +69,7 @@ void tracing_on(void)
69{ 69{
70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); 70 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
71} 71}
72EXPORT_SYMBOL_GPL(tracing_on);
72 73
73/** 74/**
74 * tracing_off - turn off all tracing buffers 75 * tracing_off - turn off all tracing buffers
@@ -82,6 +83,7 @@ void tracing_off(void)
82{ 83{
83 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); 84 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
84} 85}
86EXPORT_SYMBOL_GPL(tracing_off);
85 87
86/** 88/**
87 * tracing_off_permanent - permanently disable ring buffers 89 * tracing_off_permanent - permanently disable ring buffers
@@ -111,12 +113,14 @@ u64 ring_buffer_time_stamp(int cpu)
111 113
112 return time; 114 return time;
113} 115}
116EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
114 117
115void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) 118void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
116{ 119{
117 /* Just stupid testing the normalize function and deltas */ 120 /* Just stupid testing the normalize function and deltas */
118 *ts >>= DEBUG_SHIFT; 121 *ts >>= DEBUG_SHIFT;
119} 122}
123EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
120 124
121#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) 125#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
122#define RB_ALIGNMENT_SHIFT 2 126#define RB_ALIGNMENT_SHIFT 2
@@ -166,6 +170,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event)
166{ 170{
167 return rb_event_length(event); 171 return rb_event_length(event);
168} 172}
173EXPORT_SYMBOL_GPL(ring_buffer_event_length);
169 174
170/* inline for ring buffer fast paths */ 175/* inline for ring buffer fast paths */
171static inline void * 176static inline void *
@@ -187,9 +192,10 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
187{ 192{
188 return rb_event_data(event); 193 return rb_event_data(event);
189} 194}
195EXPORT_SYMBOL_GPL(ring_buffer_event_data);
190 196
191#define for_each_buffer_cpu(buffer, cpu) \ 197#define for_each_buffer_cpu(buffer, cpu) \
192 for_each_cpu_mask(cpu, buffer->cpumask) 198 for_each_cpu(cpu, buffer->cpumask)
193 199
194#define TS_SHIFT 27 200#define TS_SHIFT 27
195#define TS_MASK ((1ULL << TS_SHIFT) - 1) 201#define TS_MASK ((1ULL << TS_SHIFT) - 1)
@@ -261,7 +267,7 @@ struct ring_buffer {
261 unsigned pages; 267 unsigned pages;
262 unsigned flags; 268 unsigned flags;
263 int cpus; 269 int cpus;
264 cpumask_t cpumask; 270 cpumask_var_t cpumask;
265 atomic_t record_disabled; 271 atomic_t record_disabled;
266 272
267 struct mutex mutex; 273 struct mutex mutex;
@@ -427,7 +433,7 @@ extern int ring_buffer_page_too_big(void);
427 433
428/** 434/**
429 * ring_buffer_alloc - allocate a new ring_buffer 435 * ring_buffer_alloc - allocate a new ring_buffer
430 * @size: the size in bytes that is needed. 436 * @size: the size in bytes per cpu that is needed.
431 * @flags: attributes to set for the ring buffer. 437 * @flags: attributes to set for the ring buffer.
432 * 438 *
433 * Currently the only flag that is available is the RB_FL_OVERWRITE 439 * Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -452,6 +458,9 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
452 if (!buffer) 458 if (!buffer)
453 return NULL; 459 return NULL;
454 460
461 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
462 goto fail_free_buffer;
463
455 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 464 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
456 buffer->flags = flags; 465 buffer->flags = flags;
457 466
@@ -459,14 +468,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
459 if (buffer->pages == 1) 468 if (buffer->pages == 1)
460 buffer->pages++; 469 buffer->pages++;
461 470
462 buffer->cpumask = cpu_possible_map; 471 cpumask_copy(buffer->cpumask, cpu_possible_mask);
463 buffer->cpus = nr_cpu_ids; 472 buffer->cpus = nr_cpu_ids;
464 473
465 bsize = sizeof(void *) * nr_cpu_ids; 474 bsize = sizeof(void *) * nr_cpu_ids;
466 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()), 475 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
467 GFP_KERNEL); 476 GFP_KERNEL);
468 if (!buffer->buffers) 477 if (!buffer->buffers)
469 goto fail_free_buffer; 478 goto fail_free_cpumask;
470 479
471 for_each_buffer_cpu(buffer, cpu) { 480 for_each_buffer_cpu(buffer, cpu) {
472 buffer->buffers[cpu] = 481 buffer->buffers[cpu] =
@@ -486,10 +495,14 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
486 } 495 }
487 kfree(buffer->buffers); 496 kfree(buffer->buffers);
488 497
498 fail_free_cpumask:
499 free_cpumask_var(buffer->cpumask);
500
489 fail_free_buffer: 501 fail_free_buffer:
490 kfree(buffer); 502 kfree(buffer);
491 return NULL; 503 return NULL;
492} 504}
505EXPORT_SYMBOL_GPL(ring_buffer_alloc);
493 506
494/** 507/**
495 * ring_buffer_free - free a ring buffer. 508 * ring_buffer_free - free a ring buffer.
@@ -503,8 +516,11 @@ ring_buffer_free(struct ring_buffer *buffer)
503 for_each_buffer_cpu(buffer, cpu) 516 for_each_buffer_cpu(buffer, cpu)
504 rb_free_cpu_buffer(buffer->buffers[cpu]); 517 rb_free_cpu_buffer(buffer->buffers[cpu]);
505 518
519 free_cpumask_var(buffer->cpumask);
520
506 kfree(buffer); 521 kfree(buffer);
507} 522}
523EXPORT_SYMBOL_GPL(ring_buffer_free);
508 524
509static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); 525static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
510 526
@@ -680,6 +696,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
680 mutex_unlock(&buffer->mutex); 696 mutex_unlock(&buffer->mutex);
681 return -ENOMEM; 697 return -ENOMEM;
682} 698}
699EXPORT_SYMBOL_GPL(ring_buffer_resize);
683 700
684static inline int rb_null_event(struct ring_buffer_event *event) 701static inline int rb_null_event(struct ring_buffer_event *event)
685{ 702{
@@ -1274,7 +1291,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1274 1291
1275 cpu = raw_smp_processor_id(); 1292 cpu = raw_smp_processor_id();
1276 1293
1277 if (!cpu_isset(cpu, buffer->cpumask)) 1294 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1278 goto out; 1295 goto out;
1279 1296
1280 cpu_buffer = buffer->buffers[cpu]; 1297 cpu_buffer = buffer->buffers[cpu];
@@ -1304,6 +1321,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
1304 ftrace_preempt_enable(resched); 1321 ftrace_preempt_enable(resched);
1305 return NULL; 1322 return NULL;
1306} 1323}
1324EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
1307 1325
1308static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, 1326static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1309 struct ring_buffer_event *event) 1327 struct ring_buffer_event *event)
@@ -1350,6 +1368,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1350 1368
1351 return 0; 1369 return 0;
1352} 1370}
1371EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
1353 1372
1354/** 1373/**
1355 * ring_buffer_write - write data to the buffer without reserving 1374 * ring_buffer_write - write data to the buffer without reserving
@@ -1385,7 +1404,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1385 1404
1386 cpu = raw_smp_processor_id(); 1405 cpu = raw_smp_processor_id();
1387 1406
1388 if (!cpu_isset(cpu, buffer->cpumask)) 1407 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1389 goto out; 1408 goto out;
1390 1409
1391 cpu_buffer = buffer->buffers[cpu]; 1410 cpu_buffer = buffer->buffers[cpu];
@@ -1411,6 +1430,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
1411 1430
1412 return ret; 1431 return ret;
1413} 1432}
1433EXPORT_SYMBOL_GPL(ring_buffer_write);
1414 1434
1415static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) 1435static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1416{ 1436{
@@ -1437,6 +1457,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
1437{ 1457{
1438 atomic_inc(&buffer->record_disabled); 1458 atomic_inc(&buffer->record_disabled);
1439} 1459}
1460EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
1440 1461
1441/** 1462/**
1442 * ring_buffer_record_enable - enable writes to the buffer 1463 * ring_buffer_record_enable - enable writes to the buffer
@@ -1449,6 +1470,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
1449{ 1470{
1450 atomic_dec(&buffer->record_disabled); 1471 atomic_dec(&buffer->record_disabled);
1451} 1472}
1473EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
1452 1474
1453/** 1475/**
1454 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer 1476 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1464,12 +1486,13 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1464{ 1486{
1465 struct ring_buffer_per_cpu *cpu_buffer; 1487 struct ring_buffer_per_cpu *cpu_buffer;
1466 1488
1467 if (!cpu_isset(cpu, buffer->cpumask)) 1489 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1468 return; 1490 return;
1469 1491
1470 cpu_buffer = buffer->buffers[cpu]; 1492 cpu_buffer = buffer->buffers[cpu];
1471 atomic_inc(&cpu_buffer->record_disabled); 1493 atomic_inc(&cpu_buffer->record_disabled);
1472} 1494}
1495EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
1473 1496
1474/** 1497/**
1475 * ring_buffer_record_enable_cpu - enable writes to the buffer 1498 * ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1483,12 +1506,13 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1483{ 1506{
1484 struct ring_buffer_per_cpu *cpu_buffer; 1507 struct ring_buffer_per_cpu *cpu_buffer;
1485 1508
1486 if (!cpu_isset(cpu, buffer->cpumask)) 1509 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1487 return; 1510 return;
1488 1511
1489 cpu_buffer = buffer->buffers[cpu]; 1512 cpu_buffer = buffer->buffers[cpu];
1490 atomic_dec(&cpu_buffer->record_disabled); 1513 atomic_dec(&cpu_buffer->record_disabled);
1491} 1514}
1515EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1492 1516
1493/** 1517/**
1494 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer 1518 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1499,12 +1523,13 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1499{ 1523{
1500 struct ring_buffer_per_cpu *cpu_buffer; 1524 struct ring_buffer_per_cpu *cpu_buffer;
1501 1525
1502 if (!cpu_isset(cpu, buffer->cpumask)) 1526 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1503 return 0; 1527 return 0;
1504 1528
1505 cpu_buffer = buffer->buffers[cpu]; 1529 cpu_buffer = buffer->buffers[cpu];
1506 return cpu_buffer->entries; 1530 return cpu_buffer->entries;
1507} 1531}
1532EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
1508 1533
1509/** 1534/**
1510 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer 1535 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1515,12 +1540,13 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1515{ 1540{
1516 struct ring_buffer_per_cpu *cpu_buffer; 1541 struct ring_buffer_per_cpu *cpu_buffer;
1517 1542
1518 if (!cpu_isset(cpu, buffer->cpumask)) 1543 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1519 return 0; 1544 return 0;
1520 1545
1521 cpu_buffer = buffer->buffers[cpu]; 1546 cpu_buffer = buffer->buffers[cpu];
1522 return cpu_buffer->overrun; 1547 return cpu_buffer->overrun;
1523} 1548}
1549EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
1524 1550
1525/** 1551/**
1526 * ring_buffer_entries - get the number of entries in a buffer 1552 * ring_buffer_entries - get the number of entries in a buffer
@@ -1543,6 +1569,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1543 1569
1544 return entries; 1570 return entries;
1545} 1571}
1572EXPORT_SYMBOL_GPL(ring_buffer_entries);
1546 1573
1547/** 1574/**
1548 * ring_buffer_overrun_cpu - get the number of overruns in buffer 1575 * ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1565,6 +1592,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1565 1592
1566 return overruns; 1593 return overruns;
1567} 1594}
1595EXPORT_SYMBOL_GPL(ring_buffer_overruns);
1568 1596
1569static void rb_iter_reset(struct ring_buffer_iter *iter) 1597static void rb_iter_reset(struct ring_buffer_iter *iter)
1570{ 1598{
@@ -1600,6 +1628,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1600 rb_iter_reset(iter); 1628 rb_iter_reset(iter);
1601 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 1629 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1602} 1630}
1631EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
1603 1632
1604/** 1633/**
1605 * ring_buffer_iter_empty - check if an iterator has no more to read 1634 * ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1614,6 +1643,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1614 return iter->head_page == cpu_buffer->commit_page && 1643 return iter->head_page == cpu_buffer->commit_page &&
1615 iter->head == rb_commit_index(cpu_buffer); 1644 iter->head == rb_commit_index(cpu_buffer);
1616} 1645}
1646EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
1617 1647
1618static void 1648static void
1619rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, 1649rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1828,7 +1858,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1828 struct buffer_page *reader; 1858 struct buffer_page *reader;
1829 int nr_loops = 0; 1859 int nr_loops = 0;
1830 1860
1831 if (!cpu_isset(cpu, buffer->cpumask)) 1861 if (!cpumask_test_cpu(cpu, buffer->cpumask))
1832 return NULL; 1862 return NULL;
1833 1863
1834 cpu_buffer = buffer->buffers[cpu]; 1864 cpu_buffer = buffer->buffers[cpu];
@@ -1880,6 +1910,7 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1880 1910
1881 return NULL; 1911 return NULL;
1882} 1912}
1913EXPORT_SYMBOL_GPL(ring_buffer_peek);
1883 1914
1884static struct ring_buffer_event * 1915static struct ring_buffer_event *
1885rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) 1916rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
@@ -1940,6 +1971,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1940 1971
1941 return NULL; 1972 return NULL;
1942} 1973}
1974EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
1943 1975
1944/** 1976/**
1945 * ring_buffer_peek - peek at the next event to be read 1977 * ring_buffer_peek - peek at the next event to be read
@@ -2001,7 +2033,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2001 struct ring_buffer_event *event; 2033 struct ring_buffer_event *event;
2002 unsigned long flags; 2034 unsigned long flags;
2003 2035
2004 if (!cpu_isset(cpu, buffer->cpumask)) 2036 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2005 return NULL; 2037 return NULL;
2006 2038
2007 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2039 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2017,6 +2049,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2017 2049
2018 return event; 2050 return event;
2019} 2051}
2052EXPORT_SYMBOL_GPL(ring_buffer_consume);
2020 2053
2021/** 2054/**
2022 * ring_buffer_read_start - start a non consuming read of the buffer 2055 * ring_buffer_read_start - start a non consuming read of the buffer
@@ -2037,7 +2070,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2037 struct ring_buffer_iter *iter; 2070 struct ring_buffer_iter *iter;
2038 unsigned long flags; 2071 unsigned long flags;
2039 2072
2040 if (!cpu_isset(cpu, buffer->cpumask)) 2073 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2041 return NULL; 2074 return NULL;
2042 2075
2043 iter = kmalloc(sizeof(*iter), GFP_KERNEL); 2076 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
@@ -2059,6 +2092,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2059 2092
2060 return iter; 2093 return iter;
2061} 2094}
2095EXPORT_SYMBOL_GPL(ring_buffer_read_start);
2062 2096
2063/** 2097/**
2064 * ring_buffer_finish - finish reading the iterator of the buffer 2098 * ring_buffer_finish - finish reading the iterator of the buffer
@@ -2075,6 +2109,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
2075 atomic_dec(&cpu_buffer->record_disabled); 2109 atomic_dec(&cpu_buffer->record_disabled);
2076 kfree(iter); 2110 kfree(iter);
2077} 2111}
2112EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
2078 2113
2079/** 2114/**
2080 * ring_buffer_read - read the next item in the ring buffer by the iterator 2115 * ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -2101,6 +2136,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2101 2136
2102 return event; 2137 return event;
2103} 2138}
2139EXPORT_SYMBOL_GPL(ring_buffer_read);
2104 2140
2105/** 2141/**
2106 * ring_buffer_size - return the size of the ring buffer (in bytes) 2142 * ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -2110,6 +2146,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
2110{ 2146{
2111 return BUF_PAGE_SIZE * buffer->pages; 2147 return BUF_PAGE_SIZE * buffer->pages;
2112} 2148}
2149EXPORT_SYMBOL_GPL(ring_buffer_size);
2113 2150
2114static void 2151static void
2115rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 2152rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2143,7 +2180,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2143 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; 2180 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2144 unsigned long flags; 2181 unsigned long flags;
2145 2182
2146 if (!cpu_isset(cpu, buffer->cpumask)) 2183 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2147 return; 2184 return;
2148 2185
2149 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2186 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2156,6 +2193,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2156 2193
2157 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2194 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2158} 2195}
2196EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
2159 2197
2160/** 2198/**
2161 * ring_buffer_reset - reset a ring buffer 2199 * ring_buffer_reset - reset a ring buffer
@@ -2168,6 +2206,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
2168 for_each_buffer_cpu(buffer, cpu) 2206 for_each_buffer_cpu(buffer, cpu)
2169 ring_buffer_reset_cpu(buffer, cpu); 2207 ring_buffer_reset_cpu(buffer, cpu);
2170} 2208}
2209EXPORT_SYMBOL_GPL(ring_buffer_reset);
2171 2210
2172/** 2211/**
2173 * rind_buffer_empty - is the ring buffer empty? 2212 * rind_buffer_empty - is the ring buffer empty?
@@ -2186,6 +2225,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
2186 } 2225 }
2187 return 1; 2226 return 1;
2188} 2227}
2228EXPORT_SYMBOL_GPL(ring_buffer_empty);
2189 2229
2190/** 2230/**
2191 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 2231 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2196,12 +2236,13 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2196{ 2236{
2197 struct ring_buffer_per_cpu *cpu_buffer; 2237 struct ring_buffer_per_cpu *cpu_buffer;
2198 2238
2199 if (!cpu_isset(cpu, buffer->cpumask)) 2239 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2200 return 1; 2240 return 1;
2201 2241
2202 cpu_buffer = buffer->buffers[cpu]; 2242 cpu_buffer = buffer->buffers[cpu];
2203 return rb_per_cpu_empty(cpu_buffer); 2243 return rb_per_cpu_empty(cpu_buffer);
2204} 2244}
2245EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
2205 2246
2206/** 2247/**
2207 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers 2248 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2219,8 +2260,8 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2219 struct ring_buffer_per_cpu *cpu_buffer_a; 2260 struct ring_buffer_per_cpu *cpu_buffer_a;
2220 struct ring_buffer_per_cpu *cpu_buffer_b; 2261 struct ring_buffer_per_cpu *cpu_buffer_b;
2221 2262
2222 if (!cpu_isset(cpu, buffer_a->cpumask) || 2263 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
2223 !cpu_isset(cpu, buffer_b->cpumask)) 2264 !cpumask_test_cpu(cpu, buffer_b->cpumask))
2224 return -EINVAL; 2265 return -EINVAL;
2225 2266
2226 /* At least make sure the two buffers are somewhat the same */ 2267 /* At least make sure the two buffers are somewhat the same */
@@ -2250,6 +2291,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2250 2291
2251 return 0; 2292 return 0;
2252} 2293}
2294EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
2253 2295
2254static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, 2296static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
2255 struct buffer_data_page *bpage) 2297 struct buffer_data_page *bpage)
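[Editor's note: ring_buffer_alloc() above gains a cpumask_var_t member plus a fail_free_cpumask unwind label. A hedged, stripped-down sketch of that allocate-then-unwind shape; struct demo_buffer and demo_buffer_alloc() are hypothetical.]

#include <linux/cpumask.h>
#include <linux/slab.h>

struct demo_buffer {
	cpumask_var_t	cpumask;
	void		**buffers;
};

static struct demo_buffer *demo_buffer_alloc(void)
{
	struct demo_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return NULL;
	if (!alloc_cpumask_var(&b->cpumask, GFP_KERNEL))
		goto fail_free_buffer;		/* mirrors the new error path above */

	cpumask_copy(b->cpumask, cpu_possible_mask);

	b->buffers = kzalloc(sizeof(void *) * nr_cpu_ids, GFP_KERNEL);
	if (!b->buffers)
		goto fail_free_cpumask;
	return b;

fail_free_cpumask:
	free_cpumask_var(b->cpumask);
fail_free_buffer:
	kfree(b);
	return NULL;
}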
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f4bb3800318b..c580233add95 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -30,7 +30,6 @@
30#include <linux/gfp.h> 30#include <linux/gfp.h>
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/kprobes.h> 32#include <linux/kprobes.h>
33#include <linux/seq_file.h>
34#include <linux/writeback.h> 33#include <linux/writeback.h>
35 34
36#include <linux/stacktrace.h> 35#include <linux/stacktrace.h>
@@ -90,10 +89,10 @@ static inline void ftrace_enable_cpu(void)
90 preempt_enable(); 89 preempt_enable();
91} 90}
92 91
93static cpumask_t __read_mostly tracing_buffer_mask; 92static cpumask_var_t __read_mostly tracing_buffer_mask;
94 93
95#define for_each_tracing_cpu(cpu) \ 94#define for_each_tracing_cpu(cpu) \
96 for_each_cpu_mask(cpu, tracing_buffer_mask) 95 for_each_cpu(cpu, tracing_buffer_mask)
97 96
98/* 97/*
99 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops 98 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -1310,7 +1309,7 @@ enum trace_file_type {
1310 TRACE_FILE_ANNOTATE = 2, 1309 TRACE_FILE_ANNOTATE = 2,
1311}; 1310};
1312 1311
1313static void trace_iterator_increment(struct trace_iterator *iter, int cpu) 1312static void trace_iterator_increment(struct trace_iterator *iter)
1314{ 1313{
1315 /* Don't allow ftrace to trace into the ring buffers */ 1314 /* Don't allow ftrace to trace into the ring buffers */
1316 ftrace_disable_cpu(); 1315 ftrace_disable_cpu();
@@ -1389,7 +1388,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
1389 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts); 1388 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
1390 1389
1391 if (iter->ent) 1390 if (iter->ent)
1392 trace_iterator_increment(iter, iter->cpu); 1391 trace_iterator_increment(iter);
1393 1392
1394 return iter->ent ? iter : NULL; 1393 return iter->ent ? iter : NULL;
1395} 1394}
@@ -1812,10 +1811,10 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
1812 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) 1811 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
1813 return; 1812 return;
1814 1813
1815 if (cpu_isset(iter->cpu, iter->started)) 1814 if (cpumask_test_cpu(iter->cpu, iter->started))
1816 return; 1815 return;
1817 1816
1818 cpu_set(iter->cpu, iter->started); 1817 cpumask_set_cpu(iter->cpu, iter->started);
1819 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); 1818 trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
1820} 1819}
1821 1820
@@ -2647,13 +2646,7 @@ static struct file_operations show_traces_fops = {
2647/* 2646/*
2648 * Only trace on a CPU if the bitmask is set: 2647 * Only trace on a CPU if the bitmask is set:
2649 */ 2648 */
2650static cpumask_t tracing_cpumask = CPU_MASK_ALL; 2649static cpumask_var_t tracing_cpumask;
2651
2652/*
2653 * When tracing/tracing_cpu_mask is modified then this holds
2654 * the new bitmask we are about to install:
2655 */
2656static cpumask_t tracing_cpumask_new;
2657 2650
2658/* 2651/*
2659 * The tracer itself will not take this lock, but still we want 2652 * The tracer itself will not take this lock, but still we want
@@ -2694,6 +2687,10 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2694 size_t count, loff_t *ppos) 2687 size_t count, loff_t *ppos)
2695{ 2688{
2696 int err, cpu; 2689 int err, cpu;
2690 cpumask_var_t tracing_cpumask_new;
2691
2692 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2693 return -ENOMEM;
2697 2694
2698 mutex_lock(&tracing_cpumask_update_lock); 2695 mutex_lock(&tracing_cpumask_update_lock);
2699 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); 2696 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
@@ -2707,26 +2704,28 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2707 * Increase/decrease the disabled counter if we are 2704 * Increase/decrease the disabled counter if we are
2708 * about to flip a bit in the cpumask: 2705 * about to flip a bit in the cpumask:
2709 */ 2706 */
2710 if (cpu_isset(cpu, tracing_cpumask) && 2707 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2711 !cpu_isset(cpu, tracing_cpumask_new)) { 2708 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2712 atomic_inc(&global_trace.data[cpu]->disabled); 2709 atomic_inc(&global_trace.data[cpu]->disabled);
2713 } 2710 }
2714 if (!cpu_isset(cpu, tracing_cpumask) && 2711 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2715 cpu_isset(cpu, tracing_cpumask_new)) { 2712 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2716 atomic_dec(&global_trace.data[cpu]->disabled); 2713 atomic_dec(&global_trace.data[cpu]->disabled);
2717 } 2714 }
2718 } 2715 }
2719 __raw_spin_unlock(&ftrace_max_lock); 2716 __raw_spin_unlock(&ftrace_max_lock);
2720 local_irq_enable(); 2717 local_irq_enable();
2721 2718
2722 tracing_cpumask = tracing_cpumask_new; 2719 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2723 2720
2724 mutex_unlock(&tracing_cpumask_update_lock); 2721 mutex_unlock(&tracing_cpumask_update_lock);
2722 free_cpumask_var(tracing_cpumask_new);
2725 2723
2726 return count; 2724 return count;
2727 2725
2728err_unlock: 2726err_unlock:
2729 mutex_unlock(&tracing_cpumask_update_lock); 2727 mutex_unlock(&tracing_cpumask_update_lock);
2728 free_cpumask_var(tracing_cpumask);
2730 2729
2731 return err; 2730 return err;
2732} 2731}
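
In tracing_cpumask_write(), the file-scope cpumask_t tracing_cpumask_new is replaced by a cpumask_var_t allocated per call: allocate, parse the user buffer into it, adjust the per-CPU disabled counters, cpumask_copy() the result over the installed mask, then free the temporary. (As shown, the err_unlock path frees tracing_cpumask rather than the temporary tracing_cpumask_new; the sketch below frees the temporary on both paths.) A condensed sketch with illustrative names (my_mask_write(), my_installed_mask), not the actual trace.c code:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Illustrative stand-in for tracing_cpumask; allocated during init. */
    static cpumask_var_t my_installed_mask;

    static ssize_t my_mask_write(const char __user *ubuf, size_t count)
    {
            cpumask_var_t new_mask;
            int err;

            /* The temporary now lives per call instead of at file scope. */
            if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                    return -ENOMEM;

            err = cpumask_parse_user(ubuf, count, new_mask);
            if (err) {
                    free_cpumask_var(new_mask);
                    return err;
            }

            /* ... compare old and new bits, adjust per-cpu counters ... */

            cpumask_copy(my_installed_mask, new_mask);
            free_cpumask_var(new_mask);     /* temporary freed on every exit path */

            return count;
    }
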
@@ -3115,10 +3114,15 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
3115 if (!iter) 3114 if (!iter)
3116 return -ENOMEM; 3115 return -ENOMEM;
3117 3116
3117 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3118 kfree(iter);
3119 return -ENOMEM;
3120 }
3121
3118 mutex_lock(&trace_types_lock); 3122 mutex_lock(&trace_types_lock);
3119 3123
3120 /* trace pipe does not show start of buffer */ 3124 /* trace pipe does not show start of buffer */
3121 cpus_setall(iter->started); 3125 cpumask_setall(iter->started);
3122 3126
3123 iter->tr = &global_trace; 3127 iter->tr = &global_trace;
3124 iter->trace = current_trace; 3128 iter->trace = current_trace;
@@ -3135,6 +3139,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
3135{ 3139{
3136 struct trace_iterator *iter = file->private_data; 3140 struct trace_iterator *iter = file->private_data;
3137 3141
3142 free_cpumask_var(iter->started);
3138 kfree(iter); 3143 kfree(iter);
3139 atomic_dec(&tracing_reader); 3144 atomic_dec(&tracing_reader);
3140 3145
@@ -3753,7 +3758,6 @@ void ftrace_dump(void)
3753 static DEFINE_SPINLOCK(ftrace_dump_lock); 3758 static DEFINE_SPINLOCK(ftrace_dump_lock);
3754 /* use static because iter can be a bit big for the stack */ 3759 /* use static because iter can be a bit big for the stack */
3755 static struct trace_iterator iter; 3760 static struct trace_iterator iter;
3756 static cpumask_t mask;
3757 static int dump_ran; 3761 static int dump_ran;
3758 unsigned long flags; 3762 unsigned long flags;
3759 int cnt = 0, cpu; 3763 int cnt = 0, cpu;
@@ -3787,8 +3791,6 @@ void ftrace_dump(void)
3787 * and then release the locks again. 3791 * and then release the locks again.
3788 */ 3792 */
3789 3793
3790 cpus_clear(mask);
3791
3792 while (!trace_empty(&iter)) { 3794 while (!trace_empty(&iter)) {
3793 3795
3794 if (!cnt) 3796 if (!cnt)
@@ -3824,19 +3826,28 @@ __init static int tracer_alloc_buffers(void)
3824{ 3826{
3825 struct trace_array_cpu *data; 3827 struct trace_array_cpu *data;
3826 int i; 3828 int i;
3829 int ret = -ENOMEM;
3827 3830
3828 /* TODO: make the number of buffers hot pluggable with CPUS */ 3831 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
3829 tracing_buffer_mask = cpu_possible_map; 3832 goto out;
3833
3834 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
3835 goto out_free_buffer_mask;
3836
3837 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
3838 cpumask_copy(tracing_cpumask, cpu_all_mask);
3830 3839
3840 /* TODO: make the number of buffers hot pluggable with CPUS */
3831 global_trace.buffer = ring_buffer_alloc(trace_buf_size, 3841 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3832 TRACE_BUFFER_FLAGS); 3842 TRACE_BUFFER_FLAGS);
3833 if (!global_trace.buffer) { 3843 if (!global_trace.buffer) {
3834 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 3844 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3835 WARN_ON(1); 3845 WARN_ON(1);
3836 return 0; 3846 goto out_free_cpumask;
3837 } 3847 }
3838 global_trace.entries = ring_buffer_size(global_trace.buffer); 3848 global_trace.entries = ring_buffer_size(global_trace.buffer);
3839 3849
3850
3840#ifdef CONFIG_TRACER_MAX_TRACE 3851#ifdef CONFIG_TRACER_MAX_TRACE
3841 max_tr.buffer = ring_buffer_alloc(trace_buf_size, 3852 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3842 TRACE_BUFFER_FLAGS); 3853 TRACE_BUFFER_FLAGS);
@@ -3844,7 +3855,7 @@ __init static int tracer_alloc_buffers(void)
3844 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); 3855 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3845 WARN_ON(1); 3856 WARN_ON(1);
3846 ring_buffer_free(global_trace.buffer); 3857 ring_buffer_free(global_trace.buffer);
3847 return 0; 3858 goto out_free_cpumask;
3848 } 3859 }
3849 max_tr.entries = ring_buffer_size(max_tr.buffer); 3860 max_tr.entries = ring_buffer_size(max_tr.buffer);
3850 WARN_ON(max_tr.entries != global_trace.entries); 3861 WARN_ON(max_tr.entries != global_trace.entries);
@@ -3874,8 +3885,14 @@ __init static int tracer_alloc_buffers(void)
3874 &trace_panic_notifier); 3885 &trace_panic_notifier);
3875 3886
3876 register_die_notifier(&trace_die_notifier); 3887 register_die_notifier(&trace_die_notifier);
3888 ret = 0;
3877 3889
3878 return 0; 3890out_free_cpumask:
3891 free_cpumask_var(tracing_cpumask);
3892out_free_buffer_mask:
3893 free_cpumask_var(tracing_buffer_mask);
3894out:
3895 return ret;
3879} 3896}
3880early_initcall(tracer_alloc_buffers); 3897early_initcall(tracer_alloc_buffers);
3881fs_initcall(tracer_init_debugfs); 3898fs_initcall(tracer_init_debugfs);
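
tracer_alloc_buffers() now has to allocate two cpumask_var_t masks before the ring buffers, so its bare "return 0" failure exits become a goto-based unwind that frees whatever was already allocated. (As shown, the success path also falls through the free labels after "ret = 0"; the sketch below returns before the unwind so the masks stay allocated for later use.) A minimal sketch of the allocate-then-unwind shape, with hypothetical names throughout:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/init.h>
    #include <linux/types.h>

    /* Illustrative stand-ins for tracing_buffer_mask and tracing_cpumask. */
    static cpumask_var_t my_buffer_mask, my_trace_mask;

    /* Hypothetical stand-in for the ring buffer allocations. */
    static bool __init my_alloc_ring_buffers(void)
    {
            return true;
    }

    static int __init my_alloc_buffers(void)
    {
            int ret = -ENOMEM;

            if (!alloc_cpumask_var(&my_buffer_mask, GFP_KERNEL))
                    goto out;
            if (!alloc_cpumask_var(&my_trace_mask, GFP_KERNEL))
                    goto out_free_buffer_mask;

            cpumask_copy(my_buffer_mask, cpu_possible_mask);
            cpumask_copy(my_trace_mask, cpu_all_mask);

            if (!my_alloc_ring_buffers())
                    goto out_free_trace_mask;

            return 0;

    out_free_trace_mask:
            free_cpumask_var(my_trace_mask);
    out_free_buffer_mask:
            free_cpumask_var(my_buffer_mask);
    out:
            return ret;
    }
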
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index cc7a4f864036..4d3d381bfd95 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -368,7 +368,7 @@ struct trace_iterator {
368 loff_t pos; 368 loff_t pos;
369 long idx; 369 long idx;
370 370
371 cpumask_t started; 371 cpumask_var_t started;
372}; 372};
373 373
374int tracing_is_enabled(void); 374int tracing_is_enabled(void);
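
struct trace_iterator::started becomes a cpumask_var_t, which with CONFIG_CPUMASK_OFFSTACK=y is a pointer rather than embedded storage, so it must be allocated when the iterator is set up and freed when it is torn down, exactly what the tracing_open_pipe()/tracing_release_pipe() hunks above add. A minimal sketch of that lifecycle, using a hypothetical my_iter type:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct my_iter {
            cpumask_var_t started;  /* CPUs whose buffer start was announced */
    };

    static struct my_iter *my_iter_create(void)
    {
            struct my_iter *iter = kzalloc(sizeof(*iter), GFP_KERNEL);

            if (!iter)
                    return NULL;
            if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
                    kfree(iter);
                    return NULL;
            }
            cpumask_setall(iter->started);  /* don't announce pre-existing CPUs */
            return iter;
    }

    static void my_iter_destroy(struct my_iter *iter)
    {
            free_cpumask_var(iter->started);
            kfree(iter);
    }
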
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index 3ccebde28482..366c8c333e13 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -42,7 +42,7 @@ static int boot_trace_init(struct trace_array *tr)
42 int cpu; 42 int cpu;
43 boot_trace = tr; 43 boot_trace = tr;
44 44
45 for_each_cpu_mask(cpu, cpu_possible_map) 45 for_each_cpu(cpu, cpu_possible_mask)
46 tracing_reset(tr, cpu); 46 tracing_reset(tr, cpu);
47 47
48 tracing_sched_switch_assign_trace(tr); 48 tracing_sched_switch_assign_trace(tr);
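
boot_trace_init() (and, below, the hw-branch and power tracers) swaps for_each_cpu_mask() over the cpu_possible_map value for for_each_cpu() over cpu_possible_mask, which takes a const struct cpumask * and so works with both static masks and cpumask_var_t. A sketch of the iteration pattern, with my_reset_cpu() standing in for tracing_reset():

    #include <linux/cpumask.h>

    static void my_reset_cpu(int cpu)
    {
            /* stand-in for tracing_reset(tr, cpu) */
    }

    static void my_reset_all_possible(void)
    {
            int cpu;

            /* previously: for_each_cpu_mask(cpu, cpu_possible_map) */
            for_each_cpu(cpu, cpu_possible_mask)
                    my_reset_cpu(cpu);
    }
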
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4bf39fcae97a..930c08e5b38e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -79,7 +79,7 @@ print_graph_cpu(struct trace_seq *s, int cpu)
79 int i; 79 int i;
80 int ret; 80 int ret;
81 int log10_this = log10_cpu(cpu); 81 int log10_this = log10_cpu(cpu);
82 int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); 82 int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
83 83
84 84
85 /* 85 /*
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index b6a3e20a49a9..649df22d435f 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -46,7 +46,7 @@ static void bts_trace_start(struct trace_array *tr)
46 46
47 tracing_reset_online_cpus(tr); 47 tracing_reset_online_cpus(tr);
48 48
49 for_each_cpu_mask(cpu, cpu_possible_map) 49 for_each_cpu(cpu, cpu_possible_mask)
50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); 50 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
51} 51}
52 52
@@ -62,7 +62,7 @@ static void bts_trace_stop(struct trace_array *tr)
62{ 62{
63 int cpu; 63 int cpu;
64 64
65 for_each_cpu_mask(cpu, cpu_possible_map) 65 for_each_cpu(cpu, cpu_possible_mask)
66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); 66 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
67} 67}
68 68
@@ -172,7 +172,7 @@ static void trace_bts_prepare(struct trace_iterator *iter)
172{ 172{
173 int cpu; 173 int cpu;
174 174
175 for_each_cpu_mask(cpu, cpu_possible_map) 175 for_each_cpu(cpu, cpu_possible_mask)
176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); 176 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
177} 177}
178 178
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index a7172a352f62..7bda248daf55 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -39,7 +39,7 @@ static int power_trace_init(struct trace_array *tr)
39 39
40 trace_power_enabled = 1; 40 trace_power_enabled = 1;
41 41
42 for_each_cpu_mask(cpu, cpu_possible_map) 42 for_each_cpu(cpu, cpu_possible_mask)
43 tracing_reset(tr, cpu); 43 tracing_reset(tr, cpu);
44 return 0; 44 return 0;
45} 45}
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 01becf1f19ff..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,27 +196,19 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
196 return HRTIMER_RESTART; 196 return HRTIMER_RESTART;
197} 197}
198 198
199static void start_stack_timer(int cpu) 199static void start_stack_timer(void *unused)
200{ 200{
201 struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); 201 struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
202 202
203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
204 hrtimer->function = stack_trace_timer_fn; 204 hrtimer->function = stack_trace_timer_fn;
205 hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
206 205
207 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); 206 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
208} 207}
209 208
210static void start_stack_timers(void) 209static void start_stack_timers(void)
211{ 210{
212 cpumask_t saved_mask = current->cpus_allowed; 211 on_each_cpu(start_stack_timer, NULL, 1);
213 int cpu;
214
215 for_each_online_cpu(cpu) {
216 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
217 start_stack_timer(cpu);
218 }
219 set_cpus_allowed_ptr(current, &saved_mask);
220} 212}
221 213
222static void stop_stack_timer(int cpu) 214static void stop_stack_timer(int cpu)
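
start_stack_timers() no longer bounces the current task across CPUs with set_cpus_allowed_ptr(); on_each_cpu() runs the start routine on every online CPU in that CPU's own context, so the routine now takes a void * and looks up its own per-CPU data. A reduced sketch of that shape, with my_pcpu_state standing in for stack_trace_hrtimer and a flag standing in for the hrtimer setup:

    #include <linux/percpu.h>
    #include <linux/smp.h>

    /* Illustrative per-CPU state standing in for stack_trace_hrtimer. */
    struct my_state {
            int armed;
    };
    static DEFINE_PER_CPU(struct my_state, my_pcpu_state);

    /* Runs on every online CPU via on_each_cpu(); must not sleep. */
    static void my_start_one(void *unused)
    {
            struct my_state *s = &__get_cpu_var(my_pcpu_state);

            s->armed = 1;   /* stands in for hrtimer_init()/hrtimer_start() */
    }

    static void my_start_all(void)
    {
            /* wait=1: return only after every CPU has run the callback */
            on_each_cpu(my_start_one, NULL, 1);
    }
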
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4952322cba45..2f445833ae37 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(workqueue_lock);
73static LIST_HEAD(workqueues); 73static LIST_HEAD(workqueues);
74 74
75static int singlethread_cpu __read_mostly; 75static int singlethread_cpu __read_mostly;
76static cpumask_t cpu_singlethread_map __read_mostly; 76static const struct cpumask *cpu_singlethread_map __read_mostly;
77/* 77/*
78 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD 78 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
79 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work 79 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
@@ -81,7 +81,7 @@ static cpumask_t cpu_singlethread_map __read_mostly;
81 * use cpu_possible_map, the cpumask below is more a documentation 81 * use cpu_possible_map, the cpumask below is more a documentation
82 * than optimization. 82 * than optimization.
83 */ 83 */
84static cpumask_t cpu_populated_map __read_mostly; 84static cpumask_var_t cpu_populated_map __read_mostly;
85 85
86/* If it's single threaded, it isn't in the list of workqueues. */ 86/* If it's single threaded, it isn't in the list of workqueues. */
87static inline int is_wq_single_threaded(struct workqueue_struct *wq) 87static inline int is_wq_single_threaded(struct workqueue_struct *wq)
@@ -89,10 +89,10 @@ static inline int is_wq_single_threaded(struct workqueue_struct *wq)
89 return wq->singlethread; 89 return wq->singlethread;
90} 90}
91 91
92static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) 92static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
93{ 93{
94 return is_wq_single_threaded(wq) 94 return is_wq_single_threaded(wq)
95 ? &cpu_singlethread_map : &cpu_populated_map; 95 ? cpu_singlethread_map : cpu_populated_map;
96} 96}
97 97
98static 98static
@@ -410,7 +410,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
410 */ 410 */
411void flush_workqueue(struct workqueue_struct *wq) 411void flush_workqueue(struct workqueue_struct *wq)
412{ 412{
413 const cpumask_t *cpu_map = wq_cpu_map(wq); 413 const struct cpumask *cpu_map = wq_cpu_map(wq);
414 int cpu; 414 int cpu;
415 415
416 might_sleep(); 416 might_sleep();
@@ -532,7 +532,7 @@ static void wait_on_work(struct work_struct *work)
532{ 532{
533 struct cpu_workqueue_struct *cwq; 533 struct cpu_workqueue_struct *cwq;
534 struct workqueue_struct *wq; 534 struct workqueue_struct *wq;
535 const cpumask_t *cpu_map; 535 const struct cpumask *cpu_map;
536 int cpu; 536 int cpu;
537 537
538 might_sleep(); 538 might_sleep();
@@ -903,7 +903,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
903 */ 903 */
904void destroy_workqueue(struct workqueue_struct *wq) 904void destroy_workqueue(struct workqueue_struct *wq)
905{ 905{
906 const cpumask_t *cpu_map = wq_cpu_map(wq); 906 const struct cpumask *cpu_map = wq_cpu_map(wq);
907 int cpu; 907 int cpu;
908 908
909 cpu_maps_update_begin(); 909 cpu_maps_update_begin();
@@ -933,7 +933,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
933 933
934 switch (action) { 934 switch (action) {
935 case CPU_UP_PREPARE: 935 case CPU_UP_PREPARE:
936 cpu_set(cpu, cpu_populated_map); 936 cpumask_set_cpu(cpu, cpu_populated_map);
937 } 937 }
938undo: 938undo:
939 list_for_each_entry(wq, &workqueues, list) { 939 list_for_each_entry(wq, &workqueues, list) {
@@ -964,7 +964,7 @@ undo:
964 switch (action) { 964 switch (action) {
965 case CPU_UP_CANCELED: 965 case CPU_UP_CANCELED:
966 case CPU_POST_DEAD: 966 case CPU_POST_DEAD:
967 cpu_clear(cpu, cpu_populated_map); 967 cpumask_clear_cpu(cpu, cpu_populated_map);
968 } 968 }
969 969
970 return ret; 970 return ret;
@@ -1017,9 +1017,11 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
1017 1017
1018void __init init_workqueues(void) 1018void __init init_workqueues(void)
1019{ 1019{
1020 cpu_populated_map = cpu_online_map; 1020 alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
1021 singlethread_cpu = first_cpu(cpu_possible_map); 1021
1022 cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu); 1022 cpumask_copy(cpu_populated_map, cpu_online_mask);
1023 singlethread_cpu = cpumask_first(cpu_possible_mask);
1024 cpu_singlethread_map = cpumask_of(singlethread_cpu);
1023 hotcpu_notifier(workqueue_cpu_callback, 0); 1025 hotcpu_notifier(workqueue_cpu_callback, 0);
1024 keventd_wq = create_workqueue("events"); 1026 keventd_wq = create_workqueue("events");
1025 BUG_ON(!keventd_wq); 1027 BUG_ON(!keventd_wq);
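
In workqueue.c, cpu_populated_map becomes a cpumask_var_t allocated in init_workqueues(), and cpu_singlethread_map becomes a const struct cpumask * that simply points at cpumask_of(singlethread_cpu) instead of holding a copied single-bit mask; wq_cpu_map() then returns a pointer in either case. A condensed sketch of that initialisation with illustrative my_* names (the diff does not check the alloc_cpumask_var() return value; the comment below flags that assumption):

    #include <linux/cache.h>
    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/init.h>

    static int my_singlethread_cpu __read_mostly;
    static const struct cpumask *my_singlethread_map __read_mostly;
    static cpumask_var_t my_populated_map __read_mostly;

    static void __init my_init_maps(void)
    {
            /* The diff relies on this early-boot allocation succeeding. */
            alloc_cpumask_var(&my_populated_map, GFP_KERNEL);

            cpumask_copy(my_populated_map, cpu_online_mask);
            my_singlethread_cpu = cpumask_first(cpu_possible_mask);
            /* cpumask_of() returns a const single-bit mask; no copy needed. */
            my_singlethread_map = cpumask_of(my_singlethread_cpu);
    }
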