Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                |  2
-rw-r--r--  kernel/cgroup.c                | 54
-rw-r--r--  kernel/exit.c                  |  2
-rw-r--r--  kernel/fork.c                  |  9
-rw-r--r--  kernel/freezer.c               |  9
-rw-r--r--  kernel/hrtimer.c               |  2
-rw-r--r--  kernel/irq/manage.c            |  2
-rw-r--r--  kernel/irq_work.c              | 18
-rw-r--r--  kernel/kprobes.c               |  8
-rw-r--r--  kernel/kthread.c               |  2
-rw-r--r--  kernel/power/Makefile          |  5
-rw-r--r--  kernel/power/hibernate.c       |  7
-rw-r--r--  kernel/power/process.c         |  8
-rw-r--r--  kernel/power/suspend.c         |  2
-rw-r--r--  kernel/printk.c                | 22
-rw-r--r--  kernel/rcutree.c               |  4
-rw-r--r--  kernel/sched.c                 | 45
-rw-r--r--  kernel/sched_autogroup.c       |  8
-rw-r--r--  kernel/softirq.c               | 44
-rw-r--r--  kernel/taskstats.c             |  5
-rw-r--r--  kernel/time/clocksource.c      |  1
-rw-r--r--  kernel/time/tick-common.c      |  2
-rw-r--r--  kernel/time/tick-oneshot.c     |  4
-rw-r--r--  kernel/trace/trace_selftest.c  |  2
-rw-r--r--  kernel/watchdog.c              | 36
-rw-r--r--  kernel/workqueue.c             | 60
26 files changed, 227 insertions(+), 136 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index e0f2831634b4..5669f71dfdd5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -122,7 +122,7 @@ $(obj)/configs.o: $(obj)/config_data.h
 # config_data.h contains the same information as ikconfig.h but gzipped.
 # Info from config_data can be extracted from /proc/config*
 targets += config_data.gz
-$(obj)/config_data.gz: .config FORCE
+$(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
 	$(call if_changed,gzip)
 
 quiet_cmd_ikconfiggz = IKCFG   $@
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 66a416b42c18..51cddc11cd85 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -763,6 +763,8 @@ EXPORT_SYMBOL_GPL(cgroup_unlock);
  * -> cgroup_mkdir.
  */
 
+static struct dentry *cgroup_lookup(struct inode *dir,
+			struct dentry *dentry, struct nameidata *nd);
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
@@ -874,25 +876,29 @@ static void cgroup_clear_directory(struct dentry *dentry)
 	struct list_head *node;
 
 	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
-	spin_lock(&dcache_lock);
+	spin_lock(&dentry->d_lock);
 	node = dentry->d_subdirs.next;
 	while (node != &dentry->d_subdirs) {
 		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+
+		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
 		list_del_init(node);
 		if (d->d_inode) {
 			/* This should never be called on a cgroup
 			 * directory with child cgroups */
 			BUG_ON(d->d_inode->i_mode & S_IFDIR);
-			d = dget_locked(d);
-			spin_unlock(&dcache_lock);
+			dget_dlock(d);
+			spin_unlock(&d->d_lock);
+			spin_unlock(&dentry->d_lock);
 			d_delete(d);
 			simple_unlink(dentry->d_inode, d);
 			dput(d);
-			spin_lock(&dcache_lock);
-		}
+			spin_lock(&dentry->d_lock);
+		} else
+			spin_unlock(&d->d_lock);
 		node = dentry->d_subdirs.next;
 	}
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
 }
 
 /*
@@ -900,11 +906,16 @@ static void cgroup_clear_directory(struct dentry *dentry)
  */
 static void cgroup_d_remove_dir(struct dentry *dentry)
 {
+	struct dentry *parent;
+
 	cgroup_clear_directory(dentry);
 
-	spin_lock(&dcache_lock);
+	parent = dentry->d_parent;
+	spin_lock(&parent->d_lock);
+	spin_lock(&dentry->d_lock);
 	list_del_init(&dentry->d_u.d_child);
-	spin_unlock(&dcache_lock);
+	spin_unlock(&dentry->d_lock);
+	spin_unlock(&parent->d_lock);
 	remove_dir(dentry);
 }
 
@@ -2180,7 +2191,7 @@ static const struct file_operations cgroup_file_operations = {
 };
 
 static const struct inode_operations cgroup_dir_inode_operations = {
-	.lookup = simple_lookup,
+	.lookup = cgroup_lookup,
 	.mkdir = cgroup_mkdir,
 	.rmdir = cgroup_rmdir,
 	.rename = cgroup_rename,
@@ -2196,13 +2207,29 @@ static inline struct cftype *__file_cft(struct file *file)
 	return __d_cft(file->f_dentry);
 }
 
-static int cgroup_create_file(struct dentry *dentry, mode_t mode,
-			      struct super_block *sb)
+static int cgroup_delete_dentry(const struct dentry *dentry)
+{
+	return 1;
+}
+
+static struct dentry *cgroup_lookup(struct inode *dir,
+			struct dentry *dentry, struct nameidata *nd)
 {
-	static const struct dentry_operations cgroup_dops = {
+	static const struct dentry_operations cgroup_dentry_operations = {
+		.d_delete = cgroup_delete_dentry,
 		.d_iput = cgroup_diput,
 	};
 
+	if (dentry->d_name.len > NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);
+	d_set_d_op(dentry, &cgroup_dentry_operations);
+	d_add(dentry, NULL);
+	return NULL;
+}
+
+static int cgroup_create_file(struct dentry *dentry, mode_t mode,
+			      struct super_block *sb)
+{
 	struct inode *inode;
 
 	if (!dentry)
@@ -2228,7 +2255,6 @@ static int cgroup_create_file(struct dentry *dentry, mode_t mode,
 		inode->i_size = 0;
 		inode->i_fop = &cgroup_file_operations;
 	}
-	dentry->d_op = &cgroup_dops;
 	d_instantiate(dentry, inode);
 	dget(dentry);	/* Extra count - pin the dentry in core */
 	return 0;
@@ -3638,9 +3664,7 @@ again:
 	list_del(&cgrp->sibling);
 	cgroup_unlock_hierarchy(cgrp->root);
 
-	spin_lock(&cgrp->dentry->d_lock);
 	d = dget(cgrp->dentry);
-	spin_unlock(&d->d_lock);
 
 	cgroup_d_remove_dir(d);
 	dput(d);
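
The cgroup changes above follow the dcache-scalability conversion: the global dcache_lock is gone, so a directory's children are walked under the parent's d_lock, taking each child's d_lock with the nested annotation. A minimal, hypothetical sketch of that lock ordering (the helper name is illustrative and not part of the patch):

#include <linux/dcache.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical helper: walk a directory's children with the
 * post-dcache_lock ordering used by cgroup_clear_directory() above. */
static void drop_children(struct dentry *parent)
{
	struct dentry *d, *tmp;

	spin_lock(&parent->d_lock);
	list_for_each_entry_safe(d, tmp, &parent->d_subdirs, d_u.d_child) {
		/* child lock is taken after the parent's, with nesting level */
		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
		/* ... operate on the child here ... */
		spin_unlock(&d->d_lock);
	}
	spin_unlock(&parent->d_lock);
}
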
diff --git a/kernel/exit.c b/kernel/exit.c
index 8cb89045ecf3..f9a45ebcc7b1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 
 		list_del_rcu(&p->tasks);
 		list_del_init(&p->sibling);
-		__get_cpu_var(process_counts)--;
+		__this_cpu_dec(process_counts);
 	}
 	list_del_rcu(&p->thread_group);
 }
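
Many hunks in this merge replace __get_cpu_var() arithmetic with the __this_cpu_*() operations, which allow the architecture to use a single per-cpu instruction instead of first computing the per-cpu address. A minimal sketch of the pattern, using a hypothetical counter:

#include <linux/percpu.h>

/* Hypothetical per-cpu counter; the name is illustrative only. */
static DEFINE_PER_CPU(unsigned long, demo_counts);

static void demo_account_fork(void)
{
	/* old style: __get_cpu_var(demo_counts)++; */
	__this_cpu_inc(demo_counts);
}

static void demo_account_exit(void)
{
	/* old style: __get_cpu_var(demo_counts)--; */
	__this_cpu_dec(demo_counts);
}
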
diff --git a/kernel/fork.c b/kernel/fork.c
index 7d164e25b0f0..d9b44f20b6b0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -169,15 +169,14 @@ EXPORT_SYMBOL(free_task);
 static inline void free_signal_struct(struct signal_struct *sig)
 {
 	taskstats_tgid_free(sig);
+	sched_autogroup_exit(sig);
 	kmem_cache_free(signal_cachep, sig);
 }
 
 static inline void put_signal_struct(struct signal_struct *sig)
 {
-	if (atomic_dec_and_test(&sig->sigcnt)) {
-		sched_autogroup_exit(sig);
+	if (atomic_dec_and_test(&sig->sigcnt))
 		free_signal_struct(sig);
-	}
 }
 
 void __put_task_struct(struct task_struct *tsk)
@@ -1286,7 +1285,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		attach_pid(p, PIDTYPE_SID, task_session(current));
 		list_add_tail(&p->sibling, &p->real_parent->children);
 		list_add_tail_rcu(&p->tasks, &init_task.tasks);
-		__get_cpu_var(process_counts)++;
+		__this_cpu_inc(process_counts);
 	}
 	attach_pid(p, PIDTYPE_PID, pid);
 	nr_threads++;
@@ -1318,7 +1317,7 @@ bad_fork_cleanup_mm:
 	}
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
-		put_signal_struct(p->signal);
+		free_signal_struct(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
diff --git a/kernel/freezer.c b/kernel/freezer.c
index bd1d42b17cb2..66ecd2ead215 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -104,8 +104,13 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 	}
 
 	if (should_send_signal(p)) {
-		if (!signal_pending(p))
-			fake_signal_wake_up(p);
+		fake_signal_wake_up(p);
+		/*
+		 * fake_signal_wake_up() goes through p's scheduler
+		 * lock and guarantees that TASK_STOPPED/TRACED ->
+		 * TASK_RUNNING transition can't race with task state
+		 * testing in try_to_freeze_tasks().
+		 */
 	} else if (sig_only) {
 		return false;
 	} else {
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f2429fc3438c..45da2b6920ab 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-	return __get_cpu_var(hrtimer_bases).hres_active;
+	return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 91a5fa25054e..0caa59f747dd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -577,7 +577,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  */
 static int irq_thread(void *data)
 {
-	static struct sched_param param = {
+	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
 	struct irqaction *action = data;
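
sched_setscheduler() takes a const struct sched_param *, so the parameter blocks converted here can be declared static const and live in read-only data. A small hypothetical example of the resulting pattern (the thread function is illustrative):

#include <linux/sched.h>

static int demo_thread(void *data)
{
	static const struct sched_param demo_param = {
		.sched_priority = MAX_USER_RT_PRIO / 2,
	};

	sched_setscheduler(current, SCHED_FIFO, &demo_param);
	/* ... thread work ... */
	return 0;
}
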
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 90f881904bb1..c58fa7da8aef 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -77,21 +77,21 @@ void __weak arch_irq_work_raise(void)
  */
 static void __irq_work_queue(struct irq_work *entry)
 {
-	struct irq_work **head, *next;
+	struct irq_work *next;
 
-	head = &get_cpu_var(irq_work_list);
+	preempt_disable();
 
 	do {
-		next = *head;
+		next = __this_cpu_read(irq_work_list);
 		/* Can assign non-atomic because we keep the flags set. */
 		entry->next = next_flags(next, IRQ_WORK_FLAGS);
-	} while (cmpxchg(head, next, entry) != next);
+	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
 
 	/* The list was empty, raise self-interrupt to start processing. */
 	if (!irq_work_next(entry))
 		arch_irq_work_raise();
 
-	put_cpu_var(irq_work_list);
+	preempt_enable();
 }
 
 /*
@@ -120,16 +120,16 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-	struct irq_work *list, **head;
+	struct irq_work *list;
 
-	head = &__get_cpu_var(irq_work_list);
-	if (*head == NULL)
+	if (this_cpu_read(irq_work_list) == NULL)
 		return;
 
 	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
-	list = xchg(head, NULL);
+	list = this_cpu_xchg(irq_work_list, NULL);
+
 	while (list != NULL) {
 		struct irq_work *entry = list;
 
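
The irq_work conversion keeps the list head in per-cpu storage and manipulates it with this_cpu_cmpxchg()/this_cpu_xchg() rather than taking the per-cpu address. A hedged sketch of the same lockless push on a hypothetical per-cpu stack:

#include <linux/percpu.h>
#include <linux/preempt.h>

struct demo_node {
	struct demo_node *next;
};

/* Hypothetical per-cpu stack head. */
static DEFINE_PER_CPU(struct demo_node *, demo_stack);

static void demo_push(struct demo_node *node)
{
	struct demo_node *old;

	preempt_disable();
	do {
		old = __this_cpu_read(demo_stack);
		node->next = old;
	} while (this_cpu_cmpxchg(demo_stack, old, node) != old);
	preempt_enable();
}
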
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7663e5df0e6f..77981813a1e7 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -317,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
-	__get_cpu_var(kprobe_instance) = kp;
+	__this_cpu_write(kprobe_instance, kp);
 }
 
 static inline void reset_kprobe_instance(void)
 {
-	__get_cpu_var(kprobe_instance) = NULL;
+	__this_cpu_write(kprobe_instance, NULL);
 }
 
 /*
@@ -965,7 +965,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -980,7 +980,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	struct kprobe *cur = __this_cpu_read(kprobe_instance);
 	int ret = 0;
 
 	if (cur && cur->break_handler) {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5355cfd44a3f..c55afba990a3 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -148,7 +148,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
-		static struct sched_param param = { .sched_priority = 0 };
+		static const struct sched_param param = { .sched_priority = 0 };
 		va_list args;
 
 		va_start(args, namefmt);
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index f9063c6b185d..b75597235d85 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -1,7 +1,4 @@
-
-ifeq ($(CONFIG_PM_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PM_DEBUG)	:= -DDEBUG
 
 obj-$(CONFIG_PM)		+= main.o
 obj-$(CONFIG_PM_SLEEP)		+= console.o
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 048d0b514831..870f72bc72ae 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -62,7 +62,7 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
 {
 	if (ops && !(ops->begin && ops->end && ops->pre_snapshot
 	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
-	    && ops->restore_cleanup)) {
+	    && ops->restore_cleanup && ops->leave)) {
 		WARN_ON(1);
 		return;
 	}
@@ -278,7 +278,7 @@ static int create_image(int platform_mode)
 		goto Enable_irqs;
 	}
 
-	if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events())
+	if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
 		goto Power_up;
 
 	in_suspend = 1;
@@ -516,7 +516,7 @@ int hibernation_platform_enter(void)
 
 	local_irq_disable();
 	sysdev_suspend(PMSG_HIBERNATE);
-	if (!pm_check_wakeup_events()) {
+	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
 		goto Power_up;
 	}
@@ -647,6 +647,7 @@ int hibernate(void)
 		swsusp_free();
 		if (!error)
 			power_down();
+		in_suspend = 0;
 		pm_restore_gfp_mask();
 	} else {
 		pr_debug("PM: Image restored successfully.\n");
diff --git a/kernel/power/process.c b/kernel/power/process.c
index e50b4c1b2a0f..d6d2a10320e0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -64,6 +64,12 @@ static int try_to_freeze_tasks(bool sig_only)
 			 * perturb a task in TASK_STOPPED or TASK_TRACED.
 			 * It is "frozen enough". If the task does wake
 			 * up, it will immediately call try_to_freeze.
+			 *
+			 * Because freeze_task() goes through p's
+			 * scheduler lock after setting TIF_FREEZE, it's
+			 * guaranteed that either we see TASK_RUNNING or
+			 * try_to_stop() after schedule() in ptrace/signal
+			 * stop sees TIF_FREEZE.
 			 */
 			if (!task_is_stopped_or_traced(p) &&
 			    !freezer_should_skip(p))
@@ -79,7 +85,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		if (!todo || time_after(jiffies, end_time))
 			break;
 
-		if (!pm_check_wakeup_events()) {
+		if (pm_wakeup_pending()) {
 			wakeup = true;
 			break;
 		}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 031d5e3a6197..8850df68794d 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -164,7 +164,7 @@ static int suspend_enter(suspend_state_t state)
 
 	error = sysdev_suspend(PMSG_SUSPEND);
 	if (!error) {
-		if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
+		if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
 			error = suspend_ops->enter(state);
 			events_check_enabled = false;
 		}
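
pm_check_wakeup_events() is replaced by pm_wakeup_pending() throughout, which inverts the polarity: the new helper returns true when a wakeup event has been recorded and the transition should be aborted. A hypothetical late-suspend check using it (the function name is illustrative):

#include <linux/suspend.h>

/* Hypothetical platform hook: back out instead of sleeping when a
 * wakeup source has already fired. */
static int demo_enter_sleep(void)
{
	if (pm_wakeup_pending())
		return -EAGAIN;
	/* ... program the hardware for the low-power state ... */
	return 0;
}
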
diff --git a/kernel/printk.c b/kernel/printk.c
index ab3ffc5b3b64..f64b8997fc76 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -43,12 +43,6 @@
 #include <asm/uaccess.h>
 
 /*
- * for_each_console() allows you to iterate on each console
- */
-#define for_each_console(con) \
-	for (con = console_drivers; con != NULL; con = con->next)
-
-/*
  * Architectures can override it:
  */
 void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
@@ -279,12 +273,12 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 	 * at open time.
 	 */
 	if (type == SYSLOG_ACTION_OPEN || !from_file) {
-		if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
-			return -EPERM;
+		if (dmesg_restrict && !capable(CAP_SYSLOG))
+			goto warn; /* switch to return -EPERM after 2.6.39 */
 		if ((type != SYSLOG_ACTION_READ_ALL &&
 		     type != SYSLOG_ACTION_SIZE_BUFFER) &&
-		    !capable(CAP_SYS_ADMIN))
-			return -EPERM;
+		    !capable(CAP_SYSLOG))
+			goto warn; /* switch to return -EPERM after 2.6.39 */
 	}
 
 	error = security_syslog(type);
@@ -428,6 +422,12 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 	}
 out:
 	return error;
+warn:
+	/* remove after 2.6.39 */
+	if (capable(CAP_SYS_ADMIN))
+		WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+			  "but no CAP_SYSLOG (deprecated and denied).\n");
+	return -EPERM;
 }
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
@@ -1359,6 +1359,7 @@ void register_console(struct console *newcon)
 		spin_unlock_irqrestore(&logbuf_lock, flags);
 	}
 	release_console_sem();
+	console_sysfs_notify();
 
 	/*
 	 * By unregistering the bootconsoles after we enable the real console
@@ -1417,6 +1418,7 @@ int unregister_console(struct console *console)
 		console_drivers->flags |= CON_CONSDEV;
 
 	release_console_sem();
+	console_sysfs_notify();
 	return res;
 }
 EXPORT_SYMBOL(unregister_console);
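
The syslog change introduces CAP_SYSLOG and keeps CAP_SYS_ADMIN working for one more release via the warn: label. A sketch of what the permission check is intended to look like once that transition period ends, per the in-line comments above (the helper name is illustrative and assumes it lives in printk.c, where dmesg_restrict is defined):

#include <linux/capability.h>
#include <linux/syslog.h>

static int demo_syslog_permission(int type, bool from_file)
{
	if (type != SYSLOG_ACTION_OPEN && from_file)
		return 0;
	if (dmesg_restrict && !capable(CAP_SYSLOG))
		return -EPERM;
	if (type != SYSLOG_ACTION_READ_ALL &&
	    type != SYSLOG_ACTION_SIZE_BUFFER && !capable(CAP_SYSLOG))
		return -EPERM;
	return 0;
}
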
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index d0ddfea6579d..dd4aea806f8e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -364,8 +364,8 @@ void rcu_irq_exit(void)
 	WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
 	/* If the interrupt queued a callback, get out of dyntick mode. */
-	if (__get_cpu_var(rcu_sched_data).nxtlist ||
-	    __get_cpu_var(rcu_bh_data).nxtlist)
+	if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+	    __this_cpu_read(rcu_bh_data.nxtlist))
 		set_need_resched();
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 04949089e760..a0eb0941fa84 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -278,14 +278,12 @@ struct task_group {
 #endif
 };
 
-#define root_task_group init_task_group
-
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 #define MIN_SHARES	2
 #define MAX_SHARES	(1UL << 18)
 
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group;
+struct task_group root_task_group;
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
@@ -743,7 +741,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	buf[cnt] = 0;
 	cmp = strstrip(buf);
 
-	if (strncmp(buf, "NO_", 3) == 0) {
+	if (strncmp(cmp, "NO_", 3) == 0) {
 		neg = 1;
 		cmp += 3;
 	}
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	cfs_rq->tg = tg;
 
 	tg->se[cpu] = se;
-	/* se could be NULL for init_task_group */
+	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
 
@@ -7908,18 +7906,18 @@ void __init sched_init(void)
 	ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	init_task_group.se = (struct sched_entity **)ptr;
+	root_task_group.se = (struct sched_entity **)ptr;
 	ptr += nr_cpu_ids * sizeof(void **);
 
-	init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+	root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 	ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+	root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 	ptr += nr_cpu_ids * sizeof(void **);
 
-	init_task_group.rt_rq = (struct rt_rq **)ptr;
+	root_task_group.rt_rq = (struct rt_rq **)ptr;
 	ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
 			global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_rt_bandwidth(&init_task_group.rt_bandwidth,
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
-	list_add(&init_task_group.list, &task_groups);
-	INIT_LIST_HEAD(&init_task_group.children);
+	list_add(&root_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&root_task_group.children);
 	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -7960,34 +7958,34 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.shares = init_task_group_load;
+		root_task_group.shares = root_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		/*
-		 * How much cpu bandwidth does init_task_group get?
+		 * How much cpu bandwidth does root_task_group get?
 		 *
 		 * In case of task-groups formed thr' the cgroup filesystem, it
 		 * gets 100% of the cpu resources in the system. This overall
 		 * system cpu resource is divided among the tasks of
-		 * init_task_group and its child task-groups in a fair manner,
+		 * root_task_group and its child task-groups in a fair manner,
 		 * based on each entity's (task or task-group's) weight
 		 * (se->load.weight).
 		 *
-		 * In other words, if init_task_group has 10 tasks of weight
+		 * In other words, if root_task_group has 10 tasks of weight
 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 		 * then A0's share of the cpu resource is:
 		 *
 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 		 *
-		 * We achieve this by letting init_task_group's tasks sit
-		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+		 * We achieve this by letting root_task_group's tasks sit
+		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8379,6 +8377,7 @@ static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
+	autogroup_free(tg);
 	kfree(tg);
 }
 
@@ -8812,7 +8811,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
-		return &init_task_group.css;
+		return &root_task_group.css;
 	}
 
 	parent = cgroup_tg(cgrp->parent);
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index c80fedcd476b..32a723b8f84c 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -9,10 +9,10 @@ unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
 static struct autogroup autogroup_default;
 static atomic_t autogroup_seq_nr;
 
-static void autogroup_init(struct task_struct *init_task)
+static void __init autogroup_init(struct task_struct *init_task)
 {
-	autogroup_default.tg = &init_task_group;
-	init_task_group.autogroup = &autogroup_default;
+	autogroup_default.tg = &root_task_group;
+	root_task_group.autogroup = &autogroup_default;
 	kref_init(&autogroup_default.kref);
 	init_rwsem(&autogroup_default.lock);
 	init_task->signal->autogroup = &autogroup_default;
@@ -63,7 +63,7 @@ static inline struct autogroup *autogroup_create(void)
 	if (!ag)
 		goto out_fail;
 
-	tg = sched_create_group(&init_task_group);
+	tg = sched_create_group(&root_task_group);
 
 	if (IS_ERR(tg))
 		goto out_free;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d4d918a91881..0823778f87fc 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 static void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
-	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
 	if (tsk && tsk->state != TASK_RUNNING)
 		wake_up_process(tsk);
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_vec).tail = t;
-	__get_cpu_var(tasklet_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_vec.tail) = t;
+	__this_cpu_write(tasklet_vec.tail, &(t->next));
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
 	local_irq_save(flags);
 	t->next = NULL;
-	*__get_cpu_var(tasklet_hi_vec).tail = t;
-	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+	*__this_cpu_read(tasklet_hi_vec.tail) = t;
+	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
 
-	t->next = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = t;
+	t->next = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_vec).head;
-	__get_cpu_var(tasklet_vec).head = NULL;
-	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+	list = __this_cpu_read(tasklet_vec.head);
+	__this_cpu_write(tasklet_vec.head, NULL);
+	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_vec).tail = t;
-		__get_cpu_var(tasklet_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_vec.tail) = t;
+		__this_cpu_write(tasklet_vec.tail, &(t->next));
 		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
 	struct tasklet_struct *list;
 
 	local_irq_disable();
-	list = __get_cpu_var(tasklet_hi_vec).head;
-	__get_cpu_var(tasklet_hi_vec).head = NULL;
-	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+	list = __this_cpu_read(tasklet_hi_vec.head);
+	__this_cpu_write(tasklet_hi_vec.head, NULL);
+	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
 	local_irq_enable();
 
 	while (list) {
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
 		local_irq_disable();
 		t->next = NULL;
-		*__get_cpu_var(tasklet_hi_vec).tail = t;
-		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+		*__this_cpu_read(tasklet_hi_vec.tail) = t;
+		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
 		__raise_softirq_irqoff(HI_SOFTIRQ);
 		local_irq_enable();
 	}
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)
 
 	/* Find end, append list for that CPU. */
 	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
 		per_cpu(tasklet_vec, cpu).head = NULL;
 		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
 	}
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
 	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
 		per_cpu(tasklet_hi_vec, cpu).head = NULL;
 		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
 	}
@@ -853,7 +853,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 					cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: {
-		static struct sched_param param = {
+		static const struct sched_param param = {
 			.sched_priority = MAX_RT_PRIO-1
 		};
 
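
The tasklet conversion reads and writes individual fields of a per-cpu struct with __this_cpu_read()/__this_cpu_write() instead of dereferencing the whole per-cpu variable. A hypothetical cut-down version of the grab-the-list step, with illustrative names:

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct demo_pending {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};
static DEFINE_PER_CPU(struct demo_pending, demo_pending);

static struct tasklet_struct *demo_grab_list(void)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(demo_pending.head);
	__this_cpu_write(demo_pending.head, NULL);
	__this_cpu_write(demo_pending.tail, &__get_cpu_var(demo_pending).head);
	local_irq_enable();

	return list;
}
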
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 3308fd7f1b52..69691eb4b715 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -89,8 +89,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
 		return -ENOMEM;
 
 	if (!info) {
-		int seq = get_cpu_var(taskstats_seqnum)++;
-		put_cpu_var(taskstats_seqnum);
+		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;
 
 		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
 	} else
@@ -612,7 +611,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = &__raw_get_cpu_var(listener_array);
+	listeners = __this_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
 
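
this_cpu_inc_return() folds the increment and the read into one preempt-safe operation; subtracting one recovers the old post-increment value. A tiny hypothetical equivalent of the sequence-number allocation above:

#include <linux/percpu.h>

/* Hypothetical per-cpu sequence counter. */
static DEFINE_PER_CPU(int, demo_seqnum);

static int demo_next_seq(void)
{
	/* old style: seq = get_cpu_var(demo_seqnum)++; put_cpu_var(demo_seqnum); */
	return this_cpu_inc_return(demo_seqnum) - 1;
}
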
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c18d7efa1b4b..df140cd3ea47 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -152,6 +152,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
 	 */
 	for (sft = 32; sft > 0; sft--) {
 		tmp = (u64) to << sft;
+		tmp += from / 2;
 		do_div(tmp, from);
 		if ((tmp >> sftacc) == 0)
 			break;
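
The clocksource change rounds the mult computation to the nearest value instead of truncating, by adding half the divisor before dividing. The arithmetic, as a self-contained plain-C sketch (the kernel uses do_div() for the 64/32 division):

#include <stdint.h>

/* Round-to-nearest scaling: roughly mult = round((to << sft) / from). */
static uint64_t demo_calc_mult(uint32_t to, uint32_t from, unsigned int sft)
{
	uint64_t tmp = (uint64_t)to << sft;

	tmp += from / 2;	/* the line the patch adds */
	return tmp / from;	/* do_div(tmp, from) in clocks_calc_mult_shift() */
}
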
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index b6b898d2eeef..051bc80a0c43 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index aada0e52680a..5cbc101f908b 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int tick_program_event(ktime_t expires, int force)
 {
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
 	return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
 	int ret;
 
 	local_irq_save(flags);
-	ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+	ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
 	local_irq_restore(flags);
 
 	return ret;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 562c56e048fd..659732eba07c 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -558,7 +558,7 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
 static int trace_wakeup_test_thread(void *data)
 {
 	/* Make this a RT thread, doesn't need to be too high */
-	static struct sched_param param = { .sched_priority = 5 };
+	static const struct sched_param param = { .sched_priority = 5 };
 	struct completion *x = data;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e7b575ac33c..d7ebdf4cea98 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -118,12 +118,12 @@ static void __touch_watchdog(void)
 {
 	int this_cpu = smp_processor_id();
 
-	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(watchdog_touch_ts) = 0;
+	__this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -167,12 +167,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
 		return 1;
 
-	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return 0;
 }
 #endif
@@ -205,8 +205,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (__get_cpu_var(watchdog_nmi_touch) == true) {
-		__get_cpu_var(watchdog_nmi_touch) = false;
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
 	}
 
@@ -220,7 +220,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		int this_cpu = smp_processor_id();
 
 		/* only print hardlockups once */
-		if (__get_cpu_var(hard_watchdog_warn) == true)
+		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
 		if (hardlockup_panic)
@@ -228,16 +228,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-		__get_cpu_var(hard_watchdog_warn) = true;
+		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
 
-	__get_cpu_var(hard_watchdog_warn) = false;
+	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
 static void watchdog_interrupt_count(void)
 {
-	__get_cpu_var(hrtimer_interrupts)++;
+	__this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -246,7 +246,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
 
@@ -254,18 +254,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	watchdog_interrupt_count();
 
 	/* kick the softlockup detector */
-	wake_up_process(__get_cpu_var(softlockup_watchdog));
+	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
 	/* .. and repeat */
 	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
 	if (touch_ts == 0) {
-		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
 			/*
 			 * If the time stamp was touched atomically
 			 * make sure the scheduler tick is up to date.
 			 */
-			__get_cpu_var(softlockup_touch_sync) = false;
+			__this_cpu_write(softlockup_touch_sync, false);
 			sched_clock_tick();
 		}
 		__touch_watchdog();
@@ -281,7 +281,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	duration = is_softlockup(touch_ts);
 	if (unlikely(duration)) {
 		/* only warn once */
-		if (__get_cpu_var(soft_watchdog_warn) == true)
+		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -296,9 +296,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
-		__get_cpu_var(soft_watchdog_warn) = true;
+		__this_cpu_write(soft_watchdog_warn, true);
 	} else
-		__get_cpu_var(soft_watchdog_warn) = false;
+		__this_cpu_write(soft_watchdog_warn, false);
 
 	return HRTIMER_RESTART;
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e785b0f2aea5..8ee6ec82f88a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -932,6 +932,38 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	wake_up_worker(gcwq);
 }
 
+/*
+ * Test whether @work is being queued from another work executing on the
+ * same workqueue.  This is rather expensive and should only be used from
+ * cold paths.
+ */
+static bool is_chained_work(struct workqueue_struct *wq)
+{
+	unsigned long flags;
+	unsigned int cpu;
+
+	for_each_gcwq_cpu(cpu) {
+		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker *worker;
+		struct hlist_node *pos;
+		int i;
+
+		spin_lock_irqsave(&gcwq->lock, flags);
+		for_each_busy_worker(worker, i, pos, gcwq) {
+			if (worker->task != current)
+				continue;
+			spin_unlock_irqrestore(&gcwq->lock, flags);
+			/*
+			 * I'm @worker, no locking necessary.  See if @work
+			 * is headed to the same workqueue.
+			 */
+			return worker->current_cwq->wq == wq;
+		}
+		spin_unlock_irqrestore(&gcwq->lock, flags);
+	}
+	return false;
+}
+
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -943,7 +975,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	debug_work_activate(work);
 
-	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+	/* if dying, only works from the same workqueue are allowed */
+	if (unlikely(wq->flags & WQ_DYING) &&
+	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
 	/* determine gcwq to use */
@@ -2936,11 +2970,35 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
  */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
+	unsigned int flush_cnt = 0;
 	unsigned int cpu;
 
+	/*
+	 * Mark @wq dying and drain all pending works.  Once WQ_DYING is
+	 * set, only chain queueing is allowed.  IOW, only currently
+	 * pending or running work items on @wq can queue further work
+	 * items on it.  @wq is flushed repeatedly until it becomes empty.
+	 * The number of flushing is detemined by the depth of chaining and
+	 * should be relatively short.  Whine if it takes too long.
+	 */
 	wq->flags |= WQ_DYING;
+reflush:
 	flush_workqueue(wq);
 
+	for_each_cwq_cpu(cpu, wq) {
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+			continue;
+
+		if (++flush_cnt == 10 ||
+		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+			printk(KERN_WARNING "workqueue %s: flush on "
+			       "destruction isn't complete after %u tries\n",
+			       wq->name, flush_cnt);
+		goto reflush;
+	}
+
 	/*
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
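
With WQ_DYING now only rejecting queueing from outside the workqueue, a work item may keep re-queueing itself while destroy_workqueue() repeatedly flushes. A hypothetical self-requeueing (chained) work item that the relaxed check is meant to allow, with illustrative names:

#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static void demo_fn(struct work_struct *work);
static DECLARE_WORK(demo_work, demo_fn);
static int demo_remaining = 3;	/* illustrative only */

static void demo_fn(struct work_struct *work)
{
	if (--demo_remaining > 0)
		queue_work(demo_wq, &demo_work);	/* chained queueing, allowed while WQ_DYING */
}

destroy_workqueue(demo_wq) would then flush repeatedly until demo_fn() stops re-queueing, warning if the number of passes grows suspiciously large.
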