path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.hz | 2
-rw-r--r--  kernel/Makefile | 6
-rw-r--r--  kernel/acct.c | 222
-rw-r--r--  kernel/audit.c | 13
-rw-r--r--  kernel/auditfilter.c | 10
-rw-r--r--  kernel/auditsc.c | 14
-rw-r--r--  kernel/capability.c | 338
-rw-r--r--  kernel/cgroup.c | 331
-rw-r--r--  kernel/cpu.c | 88
-rw-r--r--  kernel/cpuset.c | 432
-rw-r--r--  kernel/delayacct.c | 16
-rw-r--r--  kernel/dma-coherent.c | 153
-rw-r--r--  kernel/exec_domain.c | 3
-rw-r--r--  kernel/exit.c | 92
-rw-r--r--  kernel/fork.c | 133
-rw-r--r--  kernel/irq/chip.c | 12
-rw-r--r--  kernel/irq/manage.c | 111
-rw-r--r--  kernel/kallsyms.c | 2
-rw-r--r--  kernel/kexec.c | 104
-rw-r--r--  kernel/kgdb.c | 94
-rw-r--r--  kernel/kmod.c | 15
-rw-r--r--  kernel/kprobes.c | 132
-rw-r--r--  kernel/kthread.c | 6
-rw-r--r--  kernel/lockdep.c | 295
-rw-r--r--  kernel/lockdep_internals.h | 6
-rw-r--r--  kernel/lockdep_proc.c | 37
-rw-r--r--  kernel/marker.c | 37
-rw-r--r--  kernel/module.c | 343
-rw-r--r--  kernel/mutex.c | 1
-rw-r--r--  kernel/ns_cgroup.c | 8
-rw-r--r--  kernel/nsproxy.c | 8
-rw-r--r--  kernel/panic.c | 22
-rw-r--r--  kernel/pid.c | 10
-rw-r--r--  kernel/pid_namespace.c | 10
-rw-r--r--  kernel/pm_qos_params.c | 16
-rw-r--r--  kernel/posix-timers.c | 21
-rw-r--r--  kernel/power/Kconfig | 13
-rw-r--r--  kernel/power/main.c | 201
-rw-r--r--  kernel/power/power.h | 2
-rw-r--r--  kernel/power/poweroff.c | 4
-rw-r--r--  kernel/power/process.c | 2
-rw-r--r--  kernel/power/snapshot.c | 88
-rw-r--r--  kernel/printk.c | 23
-rw-r--r--  kernel/profile.c | 4
-rw-r--r--  kernel/ptrace.c | 2
-rw-r--r--  kernel/rcuclassic.c | 6
-rw-r--r--  kernel/rcupreempt.c | 10
-rw-r--r--  kernel/relay.c | 182
-rw-r--r--  kernel/res_counter.c | 48
-rw-r--r--  kernel/resource.c | 2
-rw-r--r--  kernel/rtmutex-tester.c | 7
-rw-r--r--  kernel/sched.c | 446
-rw-r--r--  kernel/sched_fair.c | 10
-rw-r--r--  kernel/sched_rt.c | 91
-rw-r--r--  kernel/semaphore.c | 4
-rw-r--r--  kernel/signal.c | 179
-rw-r--r--  kernel/smp.c | 58
-rw-r--r--  kernel/softirq.c | 3
-rw-r--r--  kernel/softlockup.c | 70
-rw-r--r--  kernel/spinlock.c | 11
-rw-r--r--  kernel/stop_machine.c | 287
-rw-r--r--  kernel/sys.c | 35
-rw-r--r--  kernel/sys_ni.c | 7
-rw-r--r--  kernel/sysctl.c | 206
-rw-r--r--  kernel/sysctl_check.c | 2
-rw-r--r--  kernel/taskstats.c | 6
-rw-r--r--  kernel/time/clocksource.c | 12
-rw-r--r--  kernel/time/tick-broadcast.c | 3
-rw-r--r--  kernel/time/tick-common.c | 14
-rw-r--r--  kernel/time/tick-sched.c | 4
-rw-r--r--  kernel/trace/ftrace.c | 6
-rw-r--r--  kernel/trace/trace.c | 4
-rw-r--r--  kernel/trace/trace_irqsoff.c | 8
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 27
-rw-r--r--  kernel/trace/trace_sysprof.c | 2
-rw-r--r--  kernel/tsacct.c | 33
-rw-r--r--  kernel/workqueue.c | 182
77 files changed, 3435 insertions, 2012 deletions
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 526128a2e622..382dd5a8b2d7 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ
 	default 1000 if HZ_1000
 
 config SCHED_HRTICK
-	def_bool HIGH_RES_TIMERS && X86
+	def_bool HIGH_RES_TIMERS && USE_GENERIC_SMP_HELPERS
diff --git a/kernel/Makefile b/kernel/Makefile
index 985ddb7da4d0..4e1d7df7c3e2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
+obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
 	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
@@ -11,6 +11,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o
 
+CFLAGS_REMOVE_sched.o = -mno-spe
+
 ifdef CONFIG_FTRACE
 # Do not trace debug files and internal ftrace files
 CFLAGS_REMOVE_lockdep.o = -pg
@@ -22,6 +24,7 @@ CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -mno-spe -pg
 endif
 
+obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
@@ -81,6 +84,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FTRACE) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 91e1cfd734d2..dd68b9059418 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -75,37 +75,39 @@ int acct_parm[3] = {4, 2, 30};
 /*
  * External references and all of the globals.
  */
-static void do_acct_process(struct pid_namespace *ns, struct file *);
+static void do_acct_process(struct bsd_acct_struct *acct,
+		struct pid_namespace *ns, struct file *);
 
 /*
  * This structure is used so that all the data protected by lock
  * can be placed in the same cache line as the lock. This primes
  * the cache line to have the data after getting the lock.
  */
-struct acct_glbs {
-	spinlock_t lock;
+struct bsd_acct_struct {
 	volatile int active;
 	volatile int needcheck;
 	struct file *file;
 	struct pid_namespace *ns;
 	struct timer_list timer;
+	struct list_head list;
 };
 
-static struct acct_glbs acct_globals __cacheline_aligned =
-	{__SPIN_LOCK_UNLOCKED(acct_globals.lock)};
+static DEFINE_SPINLOCK(acct_lock);
+static LIST_HEAD(acct_list);
 
 /*
  * Called whenever the timer says to check the free space.
  */
-static void acct_timeout(unsigned long unused)
+static void acct_timeout(unsigned long x)
 {
-	acct_globals.needcheck = 1;
+	struct bsd_acct_struct *acct = (struct bsd_acct_struct *)x;
+	acct->needcheck = 1;
 }
 
 /*
  * Check the amount of free space and suspend/resume accordingly.
  */
-static int check_free_space(struct file *file)
+static int check_free_space(struct bsd_acct_struct *acct, struct file *file)
 {
 	struct kstatfs sbuf;
 	int res;
@@ -113,11 +115,11 @@ static int check_free_space(struct file *file)
 	sector_t resume;
 	sector_t suspend;
 
-	spin_lock(&acct_globals.lock);
-	res = acct_globals.active;
-	if (!file || !acct_globals.needcheck)
+	spin_lock(&acct_lock);
+	res = acct->active;
+	if (!file || !acct->needcheck)
 		goto out;
-	spin_unlock(&acct_globals.lock);
+	spin_unlock(&acct_lock);
 
 	/* May block */
 	if (vfs_statfs(file->f_path.dentry, &sbuf))
@@ -136,35 +138,35 @@ static int check_free_space(struct file *file)
 		act = 0;
 
 	/*
-	 * If some joker switched acct_globals.file under us we'ld better be
+	 * If some joker switched acct->file under us we'ld better be
 	 * silent and _not_ touch anything.
 	 */
-	spin_lock(&acct_globals.lock);
-	if (file != acct_globals.file) {
+	spin_lock(&acct_lock);
+	if (file != acct->file) {
 		if (act)
 			res = act>0;
 		goto out;
 	}
 
-	if (acct_globals.active) {
+	if (acct->active) {
 		if (act < 0) {
-			acct_globals.active = 0;
+			acct->active = 0;
 			printk(KERN_INFO "Process accounting paused\n");
 		}
 	} else {
 		if (act > 0) {
-			acct_globals.active = 1;
+			acct->active = 1;
 			printk(KERN_INFO "Process accounting resumed\n");
 		}
 	}
 
-	del_timer(&acct_globals.timer);
-	acct_globals.needcheck = 0;
-	acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ;
-	add_timer(&acct_globals.timer);
-	res = acct_globals.active;
+	del_timer(&acct->timer);
+	acct->needcheck = 0;
+	acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
+	add_timer(&acct->timer);
+	res = acct->active;
 out:
-	spin_unlock(&acct_globals.lock);
+	spin_unlock(&acct_lock);
 	return res;
 }
 
@@ -172,39 +174,41 @@ out:
  * Close the old accounting file (if currently open) and then replace
  * it with file (if non-NULL).
  *
- * NOTE: acct_globals.lock MUST be held on entry and exit.
+ * NOTE: acct_lock MUST be held on entry and exit.
  */
-static void acct_file_reopen(struct file *file)
+static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file,
+		struct pid_namespace *ns)
 {
 	struct file *old_acct = NULL;
 	struct pid_namespace *old_ns = NULL;
 
-	if (acct_globals.file) {
-		old_acct = acct_globals.file;
-		old_ns = acct_globals.ns;
-		del_timer(&acct_globals.timer);
-		acct_globals.active = 0;
-		acct_globals.needcheck = 0;
-		acct_globals.file = NULL;
+	if (acct->file) {
+		old_acct = acct->file;
+		old_ns = acct->ns;
+		del_timer(&acct->timer);
+		acct->active = 0;
+		acct->needcheck = 0;
+		acct->file = NULL;
+		acct->ns = NULL;
+		list_del(&acct->list);
 	}
 	if (file) {
-		acct_globals.file = file;
-		acct_globals.ns = get_pid_ns(task_active_pid_ns(current));
-		acct_globals.needcheck = 0;
-		acct_globals.active = 1;
+		acct->file = file;
+		acct->ns = ns;
+		acct->needcheck = 0;
+		acct->active = 1;
+		list_add(&acct->list, &acct_list);
 		/* It's been deleted if it was used before so this is safe */
-		init_timer(&acct_globals.timer);
-		acct_globals.timer.function = acct_timeout;
-		acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ;
-		add_timer(&acct_globals.timer);
+		setup_timer(&acct->timer, acct_timeout, (unsigned long)acct);
+		acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ;
+		add_timer(&acct->timer);
 	}
 	if (old_acct) {
 		mnt_unpin(old_acct->f_path.mnt);
-		spin_unlock(&acct_globals.lock);
-		do_acct_process(old_ns, old_acct);
+		spin_unlock(&acct_lock);
+		do_acct_process(acct, old_ns, old_acct);
 		filp_close(old_acct, NULL);
-		put_pid_ns(old_ns);
-		spin_lock(&acct_globals.lock);
+		spin_lock(&acct_lock);
 	}
 }
 
@@ -212,6 +216,8 @@ static int acct_on(char *name)
 {
 	struct file *file;
 	int error;
+	struct pid_namespace *ns;
+	struct bsd_acct_struct *acct = NULL;
 
 	/* Difference from BSD - they don't do O_APPEND */
 	file = filp_open(name, O_WRONLY|O_APPEND|O_LARGEFILE, 0);
@@ -228,18 +234,34 @@ static int acct_on(char *name)
 		return -EIO;
 	}
 
+	ns = task_active_pid_ns(current);
+	if (ns->bacct == NULL) {
+		acct = kzalloc(sizeof(struct bsd_acct_struct), GFP_KERNEL);
+		if (acct == NULL) {
+			filp_close(file, NULL);
+			return -ENOMEM;
+		}
+	}
+
 	error = security_acct(file);
 	if (error) {
+		kfree(acct);
 		filp_close(file, NULL);
 		return error;
 	}
 
-	spin_lock(&acct_globals.lock);
+	spin_lock(&acct_lock);
+	if (ns->bacct == NULL) {
+		ns->bacct = acct;
+		acct = NULL;
+	}
+
 	mnt_pin(file->f_path.mnt);
-	acct_file_reopen(file);
-	spin_unlock(&acct_globals.lock);
+	acct_file_reopen(ns->bacct, file, ns);
+	spin_unlock(&acct_lock);
 
 	mntput(file->f_path.mnt); /* it's pinned, now give up active reference */
+	kfree(acct);
 
 	return 0;
 }
@@ -269,11 +291,17 @@ asmlinkage long sys_acct(const char __user *name)
 		error = acct_on(tmp);
 		putname(tmp);
 	} else {
+		struct bsd_acct_struct *acct;
+
+		acct = task_active_pid_ns(current)->bacct;
+		if (acct == NULL)
+			return 0;
+
 		error = security_acct(NULL);
 		if (!error) {
-			spin_lock(&acct_globals.lock);
-			acct_file_reopen(NULL);
-			spin_unlock(&acct_globals.lock);
+			spin_lock(&acct_lock);
+			acct_file_reopen(acct, NULL, NULL);
+			spin_unlock(&acct_lock);
 		}
 	}
 	return error;
@@ -288,10 +316,16 @@ asmlinkage long sys_acct(const char __user *name)
  */
 void acct_auto_close_mnt(struct vfsmount *m)
 {
-	spin_lock(&acct_globals.lock);
-	if (acct_globals.file && acct_globals.file->f_path.mnt == m)
-		acct_file_reopen(NULL);
-	spin_unlock(&acct_globals.lock);
+	struct bsd_acct_struct *acct;
+
+	spin_lock(&acct_lock);
+restart:
+	list_for_each_entry(acct, &acct_list, list)
+		if (acct->file && acct->file->f_path.mnt == m) {
+			acct_file_reopen(acct, NULL, NULL);
+			goto restart;
+		}
+	spin_unlock(&acct_lock);
 }
 
 /**
@@ -303,12 +337,31 @@ void acct_auto_close_mnt(struct vfsmount *m)
  */
 void acct_auto_close(struct super_block *sb)
 {
-	spin_lock(&acct_globals.lock);
-	if (acct_globals.file &&
-	    acct_globals.file->f_path.mnt->mnt_sb == sb) {
-		acct_file_reopen(NULL);
+	struct bsd_acct_struct *acct;
+
+	spin_lock(&acct_lock);
+restart:
+	list_for_each_entry(acct, &acct_list, list)
+		if (acct->file && acct->file->f_path.mnt->mnt_sb == sb) {
+			acct_file_reopen(acct, NULL, NULL);
+			goto restart;
+		}
+	spin_unlock(&acct_lock);
+}
+
+void acct_exit_ns(struct pid_namespace *ns)
+{
+	struct bsd_acct_struct *acct;
+
+	spin_lock(&acct_lock);
+	acct = ns->bacct;
+	if (acct != NULL) {
+		if (acct->file != NULL)
+			acct_file_reopen(acct, NULL, NULL);
+
+		kfree(acct);
 	}
-	spin_unlock(&acct_globals.lock);
+	spin_unlock(&acct_lock);
 }
 
 /*
@@ -425,7 +478,8 @@ static u32 encode_float(u64 value)
 /*
  * do_acct_process does all actual work. Caller holds the reference to file.
  */
-static void do_acct_process(struct pid_namespace *ns, struct file *file)
+static void do_acct_process(struct bsd_acct_struct *acct,
+		struct pid_namespace *ns, struct file *file)
 {
 	struct pacct_struct *pacct = &current->signal->pacct;
 	acct_t ac;
@@ -440,7 +494,7 @@ static void do_acct_process(struct pid_namespace *ns, struct file *file)
 	 * First check to see if there is enough free_space to continue
 	 * the process accounting system.
 	 */
-	if (!check_free_space(file))
+	if (!check_free_space(acct, file))
 		return;
 
 	/*
@@ -577,34 +631,46 @@ void acct_collect(long exitcode, int group_dead)
 	spin_unlock_irq(&current->sighand->siglock);
 }
 
-/**
- * acct_process - now just a wrapper around do_acct_process
- * @exitcode: task exit code
- *
- * handles process accounting for an exiting task
- */
-void acct_process(void)
+static void acct_process_in_ns(struct pid_namespace *ns)
 {
 	struct file *file = NULL;
-	struct pid_namespace *ns;
+	struct bsd_acct_struct *acct;
 
+	acct = ns->bacct;
 	/*
 	 * accelerate the common fastpath:
 	 */
-	if (!acct_globals.file)
+	if (!acct || !acct->file)
 		return;
 
-	spin_lock(&acct_globals.lock);
-	file = acct_globals.file;
+	spin_lock(&acct_lock);
+	file = acct->file;
 	if (unlikely(!file)) {
-		spin_unlock(&acct_globals.lock);
+		spin_unlock(&acct_lock);
 		return;
 	}
 	get_file(file);
-	ns = get_pid_ns(acct_globals.ns);
-	spin_unlock(&acct_globals.lock);
+	spin_unlock(&acct_lock);
 
-	do_acct_process(ns, file);
+	do_acct_process(acct, ns, file);
 	fput(file);
-	put_pid_ns(ns);
+}
+
+/**
+ * acct_process - now just a wrapper around acct_process_in_ns,
+ * which in turn is a wrapper around do_acct_process.
+ *
+ * handles process accounting for an exiting task
+ */
+void acct_process(void)
+{
+	struct pid_namespace *ns;
+
+	/*
+	 * This loop is safe lockless, since current is still
+	 * alive and holds its namespace, which in turn holds
+	 * its parent.
+	 */
+	for (ns = task_active_pid_ns(current); ns != NULL; ns = ns->parent)
+		acct_process_in_ns(ns);
 }
diff --git a/kernel/audit.c b/kernel/audit.c
index e092f1c0ce30..4414e93d8750 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -707,12 +707,14 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (status_get->mask & AUDIT_STATUS_ENABLED) {
 		err = audit_set_enabled(status_get->enabled,
 					loginuid, sessionid, sid);
-		if (err < 0) return err;
+		if (err < 0)
+			return err;
 	}
 	if (status_get->mask & AUDIT_STATUS_FAILURE) {
 		err = audit_set_failure(status_get->failure,
 					loginuid, sessionid, sid);
-		if (err < 0) return err;
+		if (err < 0)
+			return err;
 	}
 	if (status_get->mask & AUDIT_STATUS_PID) {
 		int new_pid = status_get->pid;
@@ -725,9 +727,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		audit_pid = new_pid;
 		audit_nlk_pid = NETLINK_CB(skb).pid;
 	}
-	if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
+	if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) {
 		err = audit_set_rate_limit(status_get->rate_limit,
 					   loginuid, sessionid, sid);
+		if (err < 0)
+			return err;
+	}
 	if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
 		err = audit_set_backlog_limit(status_get->backlog_limit,
 					      loginuid, sessionid, sid);
@@ -1366,7 +1371,7 @@ int audit_string_contains_control(const char *string, size_t len)
 {
 	const unsigned char *p;
 	for (p = string; p < (const unsigned char *)string + len && *p; p++) {
-		if (*p == '"' || *p < 0x21 || *p > 0x7f)
+		if (*p == '"' || *p < 0x21 || *p > 0x7e)
 			return 1;
 	}
 	return 0;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 98c50cc671bb..b7d354e2b0ef 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1022,8 +1022,11 @@ static void audit_update_watch(struct audit_parent *parent,
 			struct audit_buffer *ab;
 			ab = audit_log_start(NULL, GFP_KERNEL,
 				AUDIT_CONFIG_CHANGE);
+			audit_log_format(ab, "auid=%u ses=%u",
+				audit_get_loginuid(current),
+				audit_get_sessionid(current));
 			audit_log_format(ab,
-				"op=updated rules specifying path=");
+				" op=updated rules specifying path=");
 			audit_log_untrustedstring(ab, owatch->path);
 			audit_log_format(ab, " with dev=%u ino=%lu\n",
 				 dev, ino);
@@ -1058,7 +1061,10 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 			struct audit_buffer *ab;
 			ab = audit_log_start(NULL, GFP_KERNEL,
 				AUDIT_CONFIG_CHANGE);
-			audit_log_format(ab, "op=remove rule path=");
+			audit_log_format(ab, "auid=%u ses=%u",
+				audit_get_loginuid(current),
+				audit_get_sessionid(current));
+			audit_log_format(ab, " op=remove rule path=");
 			audit_log_untrustedstring(ab, w->path);
 			if (r->filterkey) {
 				audit_log_format(ab, " key=");
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c10e7aae04d7..972f8e61d36a 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -243,6 +243,9 @@ static inline int open_arg(int flags, int mask)
 
 static int audit_match_perm(struct audit_context *ctx, int mask)
 {
+	if (unlikely(!ctx))
+		return 0;
+
 	unsigned n = ctx->major;
 	switch (audit_classify_syscall(ctx->arch, n)) {
 	case 0: /* native */
@@ -284,6 +287,10 @@ static int audit_match_filetype(struct audit_context *ctx, int which)
 {
 	unsigned index = which & ~S_IFMT;
 	mode_t mode = which & S_IFMT;
+
+	if (unlikely(!ctx))
+		return 0;
+
 	if (index >= ctx->name_count)
 		return 0;
 	if (ctx->names[index].ino == -1)
@@ -610,7 +617,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 			if (!result)
 				return 0;
 		}
-	if (rule->filterkey)
+	if (rule->filterkey && ctx)
 		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
 	switch (rule->action) {
 	case AUDIT_NEVER: *state = AUDIT_DISABLED; break;
@@ -1476,7 +1483,8 @@ void audit_syscall_entry(int arch, int major,
 	struct audit_context *context = tsk->audit_context;
 	enum audit_state state;
 
-	BUG_ON(!context);
+	if (unlikely(!context))
+		return;
 
 	/*
 	 * This happens only on certain architectures that make system
@@ -2374,7 +2382,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 	struct audit_context *ctx = tsk->audit_context;
 
 	if (audit_pid && t->tgid == audit_pid) {
-		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) {
+		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
 			audit_sig_pid = tsk->pid;
 			if (tsk->loginuid != -1)
 				audit_sig_uid = tsk->loginuid;
diff --git a/kernel/capability.c b/kernel/capability.c
index 901e0fdc3fff..0101e847603e 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -115,11 +115,208 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
 	return 0;
 }
 
+#ifndef CONFIG_SECURITY_FILE_CAPABILITIES
+
+/*
+ * Without filesystem capability support, we nominally support one process
+ * setting the capabilities of another
+ */
+static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
+				     kernel_cap_t *pIp, kernel_cap_t *pPp)
+{
+	struct task_struct *target;
+	int ret;
+
+	spin_lock(&task_capability_lock);
+	read_lock(&tasklist_lock);
+
+	if (pid && pid != task_pid_vnr(current)) {
+		target = find_task_by_vpid(pid);
+		if (!target) {
+			ret = -ESRCH;
+			goto out;
+		}
+	} else
+		target = current;
+
+	ret = security_capget(target, pEp, pIp, pPp);
+
+out:
+	read_unlock(&tasklist_lock);
+	spin_unlock(&task_capability_lock);
+
+	return ret;
+}
+
+/*
+ * cap_set_pg - set capabilities for all processes in a given process
+ * group.  We call this holding task_capability_lock and tasklist_lock.
+ */
+static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
+			     kernel_cap_t *inheritable,
+			     kernel_cap_t *permitted)
+{
+	struct task_struct *g, *target;
+	int ret = -EPERM;
+	int found = 0;
+	struct pid *pgrp;
+
+	spin_lock(&task_capability_lock);
+	read_lock(&tasklist_lock);
+
+	pgrp = find_vpid(pgrp_nr);
+	do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
+		target = g;
+		while_each_thread(g, target) {
+			if (!security_capset_check(target, effective,
+						   inheritable, permitted)) {
+				security_capset_set(target, effective,
+						    inheritable, permitted);
+				ret = 0;
+			}
+			found = 1;
+		}
+	} while_each_pid_task(pgrp, PIDTYPE_PGID, g);
+
+	read_unlock(&tasklist_lock);
+	spin_unlock(&task_capability_lock);
+
+	if (!found)
+		ret = 0;
+	return ret;
+}
+
 /*
- * For sys_getproccap() and sys_setproccap(), any of the three
- * capability set pointers may be NULL -- indicating that that set is
- * uninteresting and/or not to be changed.
+ * cap_set_all - set capabilities for all processes other than init
+ * and self.  We call this holding task_capability_lock and tasklist_lock.
  */
+static inline int cap_set_all(kernel_cap_t *effective,
+			      kernel_cap_t *inheritable,
+			      kernel_cap_t *permitted)
+{
+	struct task_struct *g, *target;
+	int ret = -EPERM;
+	int found = 0;
+
+	spin_lock(&task_capability_lock);
+	read_lock(&tasklist_lock);
+
+	do_each_thread(g, target) {
+		if (target == current
+		    || is_container_init(target->group_leader))
+			continue;
+		found = 1;
+		if (security_capset_check(target, effective, inheritable,
+					  permitted))
+			continue;
+		ret = 0;
+		security_capset_set(target, effective, inheritable, permitted);
+	} while_each_thread(g, target);
+
+	read_unlock(&tasklist_lock);
+	spin_unlock(&task_capability_lock);
+
+	if (!found)
+		ret = 0;
+
+	return ret;
+}
+
+/*
+ * Given the target pid does not refer to the current process we
+ * need more elaborate support... (This support is not present when
+ * filesystem capabilities are configured.)
+ */
+static inline int do_sys_capset_other_tasks(pid_t pid, kernel_cap_t *effective,
+					    kernel_cap_t *inheritable,
+					    kernel_cap_t *permitted)
+{
+	struct task_struct *target;
+	int ret;
+
+	if (!capable(CAP_SETPCAP))
+		return -EPERM;
+
+	if (pid == -1)	  /* all procs other than current and init */
+		return cap_set_all(effective, inheritable, permitted);
+
+	else if (pid < 0) /* all procs in process group */
+		return cap_set_pg(-pid, effective, inheritable, permitted);
+
+	/* target != current */
+	spin_lock(&task_capability_lock);
+	read_lock(&tasklist_lock);
+
+	target = find_task_by_vpid(pid);
+	if (!target)
+		ret = -ESRCH;
+	else {
+		ret = security_capset_check(target, effective, inheritable,
+					    permitted);
+
+		/* having verified that the proposed changes are legal,
+		   we now put them into effect. */
+		if (!ret)
+			security_capset_set(target, effective, inheritable,
+					    permitted);
+	}
+
+	read_unlock(&tasklist_lock);
+	spin_unlock(&task_capability_lock);
+
+	return ret;
+}
+
+#else /* ie., def CONFIG_SECURITY_FILE_CAPABILITIES */
+
+/*
+ * If we have configured with filesystem capability support, then the
+ * only thing that can change the capabilities of the current process
+ * is the current process. As such, we can't be in this code at the
+ * same time as we are in the process of setting capabilities in this
+ * process.  The net result is that we can limit our use of locks to
+ * when we are reading the caps of another process.
+ */
+static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
+				     kernel_cap_t *pIp, kernel_cap_t *pPp)
+{
+	int ret;
+
+	if (pid && (pid != task_pid_vnr(current))) {
+		struct task_struct *target;
+
+		spin_lock(&task_capability_lock);
+		read_lock(&tasklist_lock);
+
+		target = find_task_by_vpid(pid);
+		if (!target)
+			ret = -ESRCH;
+		else
+			ret = security_capget(target, pEp, pIp, pPp);
+
+		read_unlock(&tasklist_lock);
+		spin_unlock(&task_capability_lock);
+	} else
+		ret = security_capget(current, pEp, pIp, pPp);
+
+	return ret;
+}
+
+/*
+ * With filesystem capability support configured, the kernel does not
+ * permit the changing of capabilities in one process by another
+ * process. (CAP_SETPCAP has much less broad semantics when configured
+ * this way.)
+ */
+static inline int do_sys_capset_other_tasks(pid_t pid,
+					    kernel_cap_t *effective,
+					    kernel_cap_t *inheritable,
+					    kernel_cap_t *permitted)
+{
+	return -EPERM;
+}
+
+#endif /* ie., ndef CONFIG_SECURITY_FILE_CAPABILITIES */
 
 /*
  * Atomically modify the effective capabilities returning the original
@@ -155,7 +352,6 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 {
 	int ret = 0;
 	pid_t pid;
-	struct task_struct *target;
 	unsigned tocopy;
 	kernel_cap_t pE, pI, pP;
 
@@ -169,23 +365,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 	if (pid < 0)
 		return -EINVAL;
 
-	spin_lock(&task_capability_lock);
-	read_lock(&tasklist_lock);
-
-	if (pid && pid != task_pid_vnr(current)) {
-		target = find_task_by_vpid(pid);
-		if (!target) {
-			ret = -ESRCH;
-			goto out;
-		}
-	} else
-		target = current;
-
-	ret = security_capget(target, &pE, &pI, &pP);
-
-out:
-	read_unlock(&tasklist_lock);
-	spin_unlock(&task_capability_lock);
+	ret = cap_get_target_pid(pid, &pE, &pI, &pP);
 
 	if (!ret) {
 		struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
@@ -216,7 +396,6 @@ out:
 	 * before modification is attempted and the application
 	 * fails.
 	 */
-
 	if (copy_to_user(dataptr, kdata, tocopy
 			 * sizeof(struct __user_cap_data_struct))) {
 		return -EFAULT;
@@ -226,70 +405,8 @@ out:
 	return ret;
 }
 
-/*
- * cap_set_pg - set capabilities for all processes in a given process
- * group.  We call this holding task_capability_lock and tasklist_lock.
- */
-static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
-			     kernel_cap_t *inheritable,
-			     kernel_cap_t *permitted)
-{
-	struct task_struct *g, *target;
-	int ret = -EPERM;
-	int found = 0;
-	struct pid *pgrp;
-
-	pgrp = find_vpid(pgrp_nr);
-	do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
-		target = g;
-		while_each_thread(g, target) {
-			if (!security_capset_check(target, effective,
-						   inheritable,
-						   permitted)) {
-				security_capset_set(target, effective,
-						    inheritable,
-						    permitted);
-				ret = 0;
-			}
-			found = 1;
-		}
-	} while_each_pid_task(pgrp, PIDTYPE_PGID, g);
-
-	if (!found)
-		ret = 0;
-	return ret;
-}
-
-/*
- * cap_set_all - set capabilities for all processes other than init
- * and self.  We call this holding task_capability_lock and tasklist_lock.
- */
-static inline int cap_set_all(kernel_cap_t *effective,
-			      kernel_cap_t *inheritable,
-			      kernel_cap_t *permitted)
-{
-	struct task_struct *g, *target;
-	int ret = -EPERM;
-	int found = 0;
-
-	do_each_thread(g, target) {
-		if (target == current || is_container_init(target->group_leader))
-			continue;
-		found = 1;
-		if (security_capset_check(target, effective, inheritable,
-					  permitted))
-			continue;
-		ret = 0;
-		security_capset_set(target, effective, inheritable, permitted);
-	} while_each_thread(g, target);
-
-	if (!found)
-		ret = 0;
-	return ret;
-}
-
 /**
- * sys_capset - set capabilities for a process or a group of processes
+ * sys_capset - set capabilities for a process or (*) a group of processes
  * @header: pointer to struct that contains capability version and
  *	target pid data
  * @data: pointer to struct that contains the effective, permitted,
@@ -313,7 +430,6 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 	struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 	unsigned i, tocopy;
 	kernel_cap_t inheritable, permitted, effective;
-	struct task_struct *target;
 	int ret;
 	pid_t pid;
 
@@ -324,9 +440,6 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 	if (get_user(pid, &header->pid))
 		return -EFAULT;
 
-	if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP))
-		return -EPERM;
-
 	if (copy_from_user(&kdata, data, tocopy
 			   * sizeof(struct __user_cap_data_struct))) {
 		return -EFAULT;
@@ -344,40 +457,31 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 		i++;
 	}
 
-	spin_lock(&task_capability_lock);
-	read_lock(&tasklist_lock);
-
-	if (pid > 0 && pid != task_pid_vnr(current)) {
-		target = find_task_by_vpid(pid);
-		if (!target) {
-			ret = -ESRCH;
-			goto out;
-		}
-	} else
-		target = current;
-
-	ret = 0;
-
-	/* having verified that the proposed changes are legal,
-	   we now put them into effect. */
-	if (pid < 0) {
-		if (pid == -1)	/* all procs other than current and init */
-			ret = cap_set_all(&effective, &inheritable, &permitted);
+	if (pid && (pid != task_pid_vnr(current)))
+		ret = do_sys_capset_other_tasks(pid, &effective, &inheritable,
+						&permitted);
+	else {
+		/*
+		 * This lock is required even when filesystem
+		 * capability support is configured - it protects the
+		 * sys_capget() call from returning incorrect data in
+		 * the case that the targeted process is not the
+		 * current one.
+		 */
+		spin_lock(&task_capability_lock);
 
-		else		/* all procs in process group */
-			ret = cap_set_pg(-pid, &effective, &inheritable,
-					 &permitted);
-	} else {
-		ret = security_capset_check(target, &effective, &inheritable,
-				       &permitted);
+		ret = security_capset_check(current, &effective, &inheritable,
+					    &permitted);
+		/*
+		 * Having verified that the proposed changes are
+		 * legal, we now put them into effect.
+		 */
 		if (!ret)
-			security_capset_set(target, &effective, &inheritable,
-					    &permitted);
+			security_capset_set(current, &effective, &inheritable,
+					    &permitted);
+		spin_unlock(&task_capability_lock);
 	}
 
-out:
-	read_unlock(&tasklist_lock);
-	spin_unlock(&task_capability_lock);
 
 	return ret;
 }
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 15ac0e1e4f4d..13932abde159 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -45,6 +45,7 @@
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
 #include <linux/hash.h>
+#include <linux/namei.h>
 
 #include <asm/atomic.h>
 
@@ -89,11 +90,7 @@ struct cgroupfs_root {
 	/* Hierarchy-specific flags */
 	unsigned long flags;
 
-	/* The path to use for release notifications. No locking
-	 * between setting and use - so if userspace updates this
-	 * while child cgroups exist, you could miss a
-	 * notification. We ensure that it's always a valid
-	 * NUL-terminated string */
+	/* The path to use for release notifications. */
 	char release_agent_path[PATH_MAX];
 };
 
@@ -118,7 +115,7 @@ static int root_count;
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
-static int need_forkexit_callback;
+static int need_forkexit_callback __read_mostly;
 static int need_mm_owner_callback __read_mostly;
 
 /* convenient tests for these bits */
@@ -220,7 +217,7 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
 * task until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use */
-static int use_task_css_set_links;
+static int use_task_css_set_links __read_mostly;
 
 /* When we create or destroy a css_set, the operation simply
 * takes/releases a reference count on all the cgroups referenced
@@ -241,17 +238,20 @@ static int use_task_css_set_links;
 */
 static void unlink_css_set(struct css_set *cg)
 {
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
+
 	write_lock(&css_set_lock);
 	hlist_del(&cg->hlist);
 	css_set_count--;
-	while (!list_empty(&cg->cg_links)) {
-		struct cg_cgroup_link *link;
-		link = list_entry(cg->cg_links.next,
-				  struct cg_cgroup_link, cg_link_list);
+
+	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
+				 cg_link_list) {
 		list_del(&link->cg_link_list);
 		list_del(&link->cgrp_link_list);
 		kfree(link);
 	}
+
 	write_unlock(&css_set_lock);
 }
 
@@ -355,6 +355,17 @@ static struct css_set *find_existing_css_set(
 	return NULL;
 }
 
+static void free_cg_links(struct list_head *tmp)
+{
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
+
+	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
+		list_del(&link->cgrp_link_list);
+		kfree(link);
+	}
+}
+
 /*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
@@ -368,13 +379,7 @@ static int allocate_cg_links(int count, struct list_head *tmp)
 	for (i = 0; i < count; i++) {
 		link = kmalloc(sizeof(*link), GFP_KERNEL);
 		if (!link) {
-			while (!list_empty(tmp)) {
-				link = list_entry(tmp->next,
-						  struct cg_cgroup_link,
-						  cgrp_link_list);
-				list_del(&link->cgrp_link_list);
-				kfree(link);
-			}
+			free_cg_links(tmp);
 			return -ENOMEM;
 		}
 		list_add(&link->cgrp_link_list, tmp);
@@ -382,18 +387,6 @@ static int allocate_cg_links(int count, struct list_head *tmp)
 	return 0;
 }
 
-static void free_cg_links(struct list_head *tmp)
-{
-	while (!list_empty(tmp)) {
-		struct cg_cgroup_link *link;
-		link = list_entry(tmp->next,
-				  struct cg_cgroup_link,
-				  cgrp_link_list);
-		list_del(&link->cgrp_link_list);
-		kfree(link);
-	}
-}
-
 /*
 * find_css_set() takes an existing cgroup group and a
 * cgroup object, and returns a css_set object that's
@@ -415,11 +408,11 @@ static struct css_set *find_css_set(
 
 	/* First see if we already have a cgroup group that matches
 	 * the desired set */
-	write_lock(&css_set_lock);
+	read_lock(&css_set_lock);
 	res = find_existing_css_set(oldcg, cgrp, template);
 	if (res)
 		get_css_set(res);
-	write_unlock(&css_set_lock);
+	read_unlock(&css_set_lock);
 
 	if (res)
 		return res;
@@ -507,10 +500,6 @@ static struct css_set *find_css_set(
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
- * The cgroup_common_file_write handler for operations that modify
- * the cgroup hierarchy holds cgroup_mutex across the entire operation,
- * single threading all such cgroup modifications across the system.
- *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
@@ -962,7 +951,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 	struct super_block *sb;
 	struct cgroupfs_root *root;
 	struct list_head tmp_cg_links;
-	INIT_LIST_HEAD(&tmp_cg_links);
 
 	/* First find the desired set of subsystems */
 	ret = parse_cgroupfs_options(data, &opts);
@@ -1093,6 +1081,8 @@ static void cgroup_kill_sb(struct super_block *sb) {
 	struct cgroupfs_root *root = sb->s_fs_info;
 	struct cgroup *cgrp = &root->top_cgroup;
 	int ret;
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
 
 	BUG_ON(!root);
 
@@ -1112,10 +1102,9 @@ static void cgroup_kill_sb(struct super_block *sb) {
 	 * root cgroup
 	 */
 	write_lock(&css_set_lock);
-	while (!list_empty(&cgrp->css_sets)) {
-		struct cg_cgroup_link *link;
-		link = list_entry(cgrp->css_sets.next,
-				  struct cg_cgroup_link, cgrp_link_list);
+
+	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
+				 cgrp_link_list) {
 		list_del(&link->cg_link_list);
 		list_del(&link->cgrp_link_list);
 		kfree(link);
@@ -1281,18 +1270,14 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 }
 
 /*
- * Attach task with pid 'pid' to cgroup 'cgrp'. Call with
- * cgroup_mutex, may take task_lock of task
+ * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
+ * held. May take task_lock of task
 */
-static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
+static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
 {
-	pid_t pid;
 	struct task_struct *tsk;
 	int ret;
 
-	if (sscanf(pidbuf, "%d", &pid) != 1)
-		return -EIO;
-
 	if (pid) {
 		rcu_read_lock();
 		tsk = find_task_by_vpid(pid);
@@ -1318,6 +1303,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
 	return ret;
 }
 
+static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
+{
+	int ret;
+	if (!cgroup_lock_live_group(cgrp))
+		return -ENODEV;
+	ret = attach_task_by_pid(cgrp, pid);
+	cgroup_unlock();
+	return ret;
+}
+
 /* The various types of files and directories in a cgroup file system */
 enum cgroup_filetype {
 	FILE_ROOT,
@@ -1327,12 +1322,54 @@ enum cgroup_filetype {
 	FILE_RELEASE_AGENT,
 };
 
+/**
+ * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
+ * @cgrp: the cgroup to be checked for liveness
+ *
+ * On success, returns true; the lock should be later released with
+ * cgroup_unlock(). On failure returns false with no lock held.
+ */
+bool cgroup_lock_live_group(struct cgroup *cgrp)
+{
+	mutex_lock(&cgroup_mutex);
+	if (cgroup_is_removed(cgrp)) {
+		mutex_unlock(&cgroup_mutex);
+		return false;
+	}
+	return true;
+}
+
+static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
+				      const char *buffer)
+{
+	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+	if (!cgroup_lock_live_group(cgrp))
+		return -ENODEV;
+	strcpy(cgrp->root->release_agent_path, buffer);
+	cgroup_unlock();
+	return 0;
+}
+
+static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
+				     struct seq_file *seq)
+{
+	if (!cgroup_lock_live_group(cgrp))
+		return -ENODEV;
+	seq_puts(seq, cgrp->root->release_agent_path);
+	seq_putc(seq, '\n');
+	cgroup_unlock();
+	return 0;
+}
+
+/* A buffer size big enough for numbers or short strings */
+#define CGROUP_LOCAL_BUFFER_SIZE 64
+
 static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
 				struct file *file,
 				const char __user *userbuf,
 				size_t nbytes, loff_t *unused_ppos)
 {
-	char buffer[64];
+	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
 	int retval = 0;
 	char *end;
 
@@ -1361,68 +1398,39 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
 	return retval;
 }
 
-static ssize_t cgroup_common_file_write(struct cgroup *cgrp,
-					struct cftype *cft,
-					struct file *file,
-					const char __user *userbuf,
-					size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
+				   struct file *file,
+				   const char __user *userbuf,
+				   size_t nbytes, loff_t *unused_ppos)
 {
-	enum cgroup_filetype type = cft->private;
-	char *buffer;
+	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
 	int retval = 0;
+	size_t max_bytes = cft->max_write_len;
+	char *buffer = local_buffer;
 
-	if (nbytes >= PATH_MAX)
+	if (!max_bytes)
+		max_bytes = sizeof(local_buffer) - 1;
+	if (nbytes >= max_bytes)
 		return -E2BIG;
-
-	/* +1 for nul-terminator */
-	buffer = kmalloc(nbytes + 1, GFP_KERNEL);
-	if (buffer == NULL)
-		return -ENOMEM;
-
-	if (copy_from_user(buffer, userbuf, nbytes)) {
+	/* Allocate a dynamic buffer if we need one */
+	if (nbytes >= sizeof(local_buffer)) {
+		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
+		if (buffer == NULL)
+			return -ENOMEM;
+	}
+	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
 		retval = -EFAULT;
-		goto out1;
+		goto out;
 	}
-	buffer[nbytes] = 0;	/* nul-terminate */
-	strstrip(buffer);	/* strip -just- trailing whitespace */
 
-	mutex_lock(&cgroup_mutex);
-
-	/*
-	 * This was already checked for in cgroup_file_write(), but
-	 * check again now we're holding cgroup_mutex.
-	 */
-	if (cgroup_is_removed(cgrp)) {
-		retval = -ENODEV;
-		goto out2;
-	}
-
-	switch (type) {
-	case FILE_TASKLIST:
-		retval = attach_task_by_pid(cgrp, buffer);
-		break;
-	case FILE_NOTIFY_ON_RELEASE:
-		clear_bit(CGRP_RELEASABLE, &cgrp->flags);
-		if (simple_strtoul(buffer, NULL, 10) != 0)
-			set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
-		else
-			clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
-		break;
-	case FILE_RELEASE_AGENT:
-		BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
-		strcpy(cgrp->root->release_agent_path, buffer);
-		break;
-	default:
-		retval = -EINVAL;
-		goto out2;
-	}
-
-	if (retval == 0)
+	buffer[nbytes] = 0;	/* nul-terminate */
+	strstrip(buffer);
+	retval = cft->write_string(cgrp, cft, buffer);
+	if (!retval)
 		retval = nbytes;
-out2:
-	mutex_unlock(&cgroup_mutex);
-out1:
-	kfree(buffer);
+out:
+	if (buffer != local_buffer)
+		kfree(buffer);
 	return retval;
 }
 
@@ -1438,6 +1446,8 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
 		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
 	if (cft->write_u64 || cft->write_s64)
 		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+	if (cft->write_string)
+		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
 	if (cft->trigger) {
 		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
 		return ret ? ret : nbytes;
@@ -1450,7 +1460,7 @@ static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
 			       char __user *buf, size_t nbytes,
 			       loff_t *ppos)
 {
-	char tmp[64];
+	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
 	u64 val = cft->read_u64(cgrp, cft);
 	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
 
@@ -1462,56 +1472,13 @@ static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
 			       char __user *buf, size_t nbytes,
 			       loff_t *ppos)
 {
-	char tmp[64];
+	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
 	s64 val = cft->read_s64(cgrp, cft);
 	int len = sprintf(tmp, "%lld\n", (long long) val);
 
 	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
-static ssize_t cgroup_common_file_read(struct cgroup *cgrp,
-					  struct cftype *cft,
-					  struct file *file,
-					  char __user *buf,
-					  size_t nbytes, loff_t *ppos)
-{
-	enum cgroup_filetype type = cft->private;
-	char *page;
-	ssize_t retval = 0;
-	char *s;
-
-	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
-		return -ENOMEM;
-
-	s = page;
-
-	switch (type) {
-	case FILE_RELEASE_AGENT:
-	{
-		struct cgroupfs_root *root;
-		size_t n;
-		mutex_lock(&cgroup_mutex);
-		root = cgrp->root;
-		n = strnlen(root->release_agent_path,
-			    sizeof(root->release_agent_path));
-		n = min(n, (size_t) PAGE_SIZE);
-		strncpy(s, root->release_agent_path, n);
-		mutex_unlock(&cgroup_mutex);
-		s += n;
-		break;
-	}
-	default:
-		retval = -EINVAL;
-		goto out;
-	}
-	*s++ = '\n';
-
-	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
-out:
-	free_page((unsigned long)page);
-	return retval;
-}
-
 static ssize_t cgroup_file_read(struct file *file, char __user *buf,
 				   size_t nbytes, loff_t *ppos)
 {
@@ -1560,7 +1527,7 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
 	return cft->read_seq_string(state->cgroup, cft, m);
 }
 
-int cgroup_seqfile_release(struct inode *inode, struct file *file)
+static int cgroup_seqfile_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq = file->private_data;
 	kfree(seq->private);
@@ -1569,6 +1536,7 @@ int cgroup_seqfile_release(struct inode *inode, struct file *file)
 
 static struct file_operations cgroup_seqfile_operations = {
 	.read = seq_read,
+	.write = cgroup_file_write,
 	.llseek = seq_lseek,
 	.release = cgroup_seqfile_release,
 };
@@ -1756,15 +1724,11 @@ int cgroup_add_files(struct cgroup *cgrp,
 int cgroup_task_count(const struct cgroup *cgrp)
 {
 	int count = 0;
-	struct list_head *l;
+	struct cg_cgroup_link *link;
 
 	read_lock(&css_set_lock);
-	l = cgrp->css_sets.next;
-	while (l != &cgrp->css_sets) {
-		struct cg_cgroup_link *link =
-			list_entry(l, struct cg_cgroup_link, cgrp_link_list);
+	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
 		count += atomic_read(&link->cg->ref.refcount);
-		l = l->next;
 	}
 	read_unlock(&css_set_lock);
 	return count;
@@ -2227,6 +2191,18 @@ static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
 	return notify_on_release(cgrp);
 }
 
+static int cgroup_write_notify_on_release(struct cgroup *cgrp,
+					  struct cftype *cft,
+					  u64 val)
+{
+	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
+	if (val)
+		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+	else
+		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+	return 0;
+}
+
 /*
 * for the common functions, 'private' gives the type of file
 */
@@ -2235,7 +2211,7 @@ static struct cftype files[] = {
 		.name = "tasks",
 		.open = cgroup_tasks_open,
 		.read = cgroup_tasks_read,
-		.write = cgroup_common_file_write,
+		.write_u64 = cgroup_tasks_write,
 		.release = cgroup_tasks_release,
 		.private = FILE_TASKLIST,
 	},
@@ -2243,15 +2219,16 @@ static struct cftype files[] = {
 	{
 		.name = "notify_on_release",
 		.read_u64 = cgroup_read_notify_on_release,
-		.write = cgroup_common_file_write,
+		.write_u64 = cgroup_write_notify_on_release,
 		.private = FILE_NOTIFY_ON_RELEASE,
 	},
 };
 
 static struct cftype cft_release_agent = {
 	.name = "release_agent",
-	.read = cgroup_common_file_read,
-	.write = cgroup_common_file_write,
+	.read_seq_string = cgroup_release_agent_show,
+	.write_string = cgroup_release_agent_write,
+	.max_write_len = PATH_MAX,
 	.private = FILE_RELEASE_AGENT,
 };
 
@@ -2391,7 +2368,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
 }
 
-static inline int cgroup_has_css_refs(struct cgroup *cgrp)
+static int cgroup_has_css_refs(struct cgroup *cgrp)
 {
 	/* Check the reference count on each subsystem. Since we
 	 * already established that there are no tasks in the
@@ -2869,16 +2846,17 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 * cgroup_clone - clone the cgroup the given subsystem is attached to
 * @tsk: the task to be moved
2871 * @subsys: the given subsystem 2848 * @subsys: the given subsystem
2849 * @nodename: the name for the new cgroup
2872 * 2850 *
2873 * Duplicate the current cgroup in the hierarchy that the given 2851 * Duplicate the current cgroup in the hierarchy that the given
2874 * subsystem is attached to, and move this task into the new 2852 * subsystem is attached to, and move this task into the new
2875 * child. 2853 * child.
2876 */ 2854 */
2877int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) 2855int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
2856 char *nodename)
2878{ 2857{
2879 struct dentry *dentry; 2858 struct dentry *dentry;
2880 int ret = 0; 2859 int ret = 0;
2881 char nodename[MAX_CGROUP_TYPE_NAMELEN];
2882 struct cgroup *parent, *child; 2860 struct cgroup *parent, *child;
2883 struct inode *inode; 2861 struct inode *inode;
2884 struct css_set *cg; 2862 struct css_set *cg;
@@ -2903,8 +2881,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
2903 cg = tsk->cgroups; 2881 cg = tsk->cgroups;
2904 parent = task_cgroup(tsk, subsys->subsys_id); 2882 parent = task_cgroup(tsk, subsys->subsys_id);
2905 2883
2906 snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid);
2907
2908 /* Pin the hierarchy */ 2884 /* Pin the hierarchy */
2909 atomic_inc(&parent->root->sb->s_active); 2885 atomic_inc(&parent->root->sb->s_active);
2910 2886
@@ -3078,27 +3054,24 @@ static void cgroup_release_agent(struct work_struct *work)
3078 while (!list_empty(&release_list)) { 3054 while (!list_empty(&release_list)) {
3079 char *argv[3], *envp[3]; 3055 char *argv[3], *envp[3];
3080 int i; 3056 int i;
3081 char *pathbuf; 3057 char *pathbuf = NULL, *agentbuf = NULL;
3082 struct cgroup *cgrp = list_entry(release_list.next, 3058 struct cgroup *cgrp = list_entry(release_list.next,
3083 struct cgroup, 3059 struct cgroup,
3084 release_list); 3060 release_list);
3085 list_del_init(&cgrp->release_list); 3061 list_del_init(&cgrp->release_list);
3086 spin_unlock(&release_list_lock); 3062 spin_unlock(&release_list_lock);
3087 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 3063 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
3088 if (!pathbuf) { 3064 if (!pathbuf)
3089 spin_lock(&release_list_lock); 3065 goto continue_free;
3090 continue; 3066 if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
3091 } 3067 goto continue_free;
3092 3068 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
3093 if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) { 3069 if (!agentbuf)
3094 kfree(pathbuf); 3070 goto continue_free;
3095 spin_lock(&release_list_lock);
3096 continue;
3097 }
3098 3071
3099 i = 0; 3072 i = 0;
3100 argv[i++] = cgrp->root->release_agent_path; 3073 argv[i++] = agentbuf;
3101 argv[i++] = (char *)pathbuf; 3074 argv[i++] = pathbuf;
3102 argv[i] = NULL; 3075 argv[i] = NULL;
3103 3076
3104 i = 0; 3077 i = 0;
@@ -3112,8 +3085,10 @@ static void cgroup_release_agent(struct work_struct *work)
3112 * be a slow process */ 3085 * be a slow process */
3113 mutex_unlock(&cgroup_mutex); 3086 mutex_unlock(&cgroup_mutex);
3114 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); 3087 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
3115 kfree(pathbuf);
3116 mutex_lock(&cgroup_mutex); 3088 mutex_lock(&cgroup_mutex);
3089 continue_free:
3090 kfree(pathbuf);
3091 kfree(agentbuf);
3117 spin_lock(&release_list_lock); 3092 spin_lock(&release_list_lock);
3118 } 3093 }
3119 spin_unlock(&release_list_lock); 3094 spin_unlock(&release_list_lock);
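With the conversion above, the cgroup core now copies and parses the user buffer itself, so per-file handlers see either an already-parsed integer (read_u64/write_u64) or a bounded, NUL-terminated kernel string (write_string, limited by max_write_len). A minimal sketch of what a controller-side file looks like under the typed interface; foo_cs() and the "limit" file are illustrative, not part of this patch:

static u64 foo_limit_read(struct cgroup *cgrp, struct cftype *cft)
{
	/* the core formats the returned value as "%llu\n" */
	return foo_cs(cgrp)->limit;
}

static int foo_limit_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	/* val arrives already parsed; no __user buffer handling here */
	foo_cs(cgrp)->limit = val;
	return 0;
}

static struct cftype foo_files[] = {
	{
		.name = "limit",
		.read_u64 = foo_limit_read,
		.write_u64 = foo_limit_write,
	},
};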
diff --git a/kernel/cpu.c b/kernel/cpu.c
index cfb1d43ab801..e202a68d1cc1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ void __init cpu_hotplug_init(void)
64 cpu_hotplug.refcount = 0; 64 cpu_hotplug.refcount = 0;
65} 65}
66 66
67cpumask_t cpu_active_map;
68
67#ifdef CONFIG_HOTPLUG_CPU 69#ifdef CONFIG_HOTPLUG_CPU
68 70
69void get_online_cpus(void) 71void get_online_cpus(void)
@@ -214,7 +216,6 @@ static int __ref take_cpu_down(void *_param)
214static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 216static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
215{ 217{
216 int err, nr_calls = 0; 218 int err, nr_calls = 0;
217 struct task_struct *p;
218 cpumask_t old_allowed, tmp; 219 cpumask_t old_allowed, tmp;
219 void *hcpu = (void *)(long)cpu; 220 void *hcpu = (void *)(long)cpu;
220 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 221 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -247,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
247 cpus_setall(tmp); 248 cpus_setall(tmp);
248 cpu_clear(cpu, tmp); 249 cpu_clear(cpu, tmp);
249 set_cpus_allowed_ptr(current, &tmp); 250 set_cpus_allowed_ptr(current, &tmp);
251 tmp = cpumask_of_cpu(cpu);
250 252
251 p = __stop_machine_run(take_cpu_down, &tcd_param, cpu); 253 err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
252 254 if (err) {
253 if (IS_ERR(p) || cpu_online(cpu)) {
254 /* CPU didn't die: tell everyone. Can't complain. */ 255 /* CPU didn't die: tell everyone. Can't complain. */
255 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 256 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
256 hcpu) == NOTIFY_BAD) 257 hcpu) == NOTIFY_BAD)
257 BUG(); 258 BUG();
258 259
259 if (IS_ERR(p)) { 260 goto out_allowed;
260 err = PTR_ERR(p);
261 goto out_allowed;
262 }
263 goto out_thread;
264 } 261 }
262 BUG_ON(cpu_online(cpu));
265 263
266 /* Wait for it to sleep (leaving idle task). */ 264 /* Wait for it to sleep (leaving idle task). */
267 while (!idle_cpu(cpu)) 265 while (!idle_cpu(cpu))
@@ -277,12 +275,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
277 275
278 check_for_tasks(cpu); 276 check_for_tasks(cpu);
279 277
280out_thread:
281 err = kthread_stop(p);
282out_allowed: 278out_allowed:
283 set_cpus_allowed_ptr(current, &old_allowed); 279 set_cpus_allowed_ptr(current, &old_allowed);
284out_release: 280out_release:
285 cpu_hotplug_done(); 281 cpu_hotplug_done();
282 if (!err) {
283 if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
284 hcpu) == NOTIFY_BAD)
285 BUG();
286 }
286 return err; 287 return err;
287} 288}
288 289
@@ -291,11 +292,30 @@ int __ref cpu_down(unsigned int cpu)
291 int err = 0; 292 int err = 0;
292 293
293 cpu_maps_update_begin(); 294 cpu_maps_update_begin();
294 if (cpu_hotplug_disabled) 295
296 if (cpu_hotplug_disabled) {
295 err = -EBUSY; 297 err = -EBUSY;
296 else 298 goto out;
297 err = _cpu_down(cpu, 0); 299 }
300
301 cpu_clear(cpu, cpu_active_map);
302
303 /*
304 * Make sure all the cpus did the reschedule and are not
305 * using a stale version of the cpu_active_map.
306 * This is not strictly necessary because stop_machine(),
307 * which we run down the line, already provides the required
308 * synchronization. But it's really a side effect and we do not
309 * want to depend on the innards of stop_machine here.
310 */
311 synchronize_sched();
312
313 err = _cpu_down(cpu, 0);
314
315 if (cpu_online(cpu))
316 cpu_set(cpu, cpu_active_map);
298 317
318out:
299 cpu_maps_update_done(); 319 cpu_maps_update_done();
300 return err; 320 return err;
301} 321}
@@ -355,11 +375,18 @@ int __cpuinit cpu_up(unsigned int cpu)
355 } 375 }
356 376
357 cpu_maps_update_begin(); 377 cpu_maps_update_begin();
358 if (cpu_hotplug_disabled) 378
379 if (cpu_hotplug_disabled) {
359 err = -EBUSY; 380 err = -EBUSY;
360 else 381 goto out;
361 err = _cpu_up(cpu, 0); 382 }
362 383
384 err = _cpu_up(cpu, 0);
385
386 if (cpu_online(cpu))
387 cpu_set(cpu, cpu_active_map);
388
389out:
363 cpu_maps_update_done(); 390 cpu_maps_update_done();
364 return err; 391 return err;
365} 392}
@@ -413,7 +440,7 @@ void __ref enable_nonboot_cpus(void)
413 goto out; 440 goto out;
414 441
415 printk("Enabling non-boot CPUs ...\n"); 442 printk("Enabling non-boot CPUs ...\n");
416 for_each_cpu_mask(cpu, frozen_cpus) { 443 for_each_cpu_mask_nr(cpu, frozen_cpus) {
417 error = _cpu_up(cpu, 1); 444 error = _cpu_up(cpu, 1);
418 if (!error) { 445 if (!error) {
419 printk("CPU%d is up\n", cpu); 446 printk("CPU%d is up\n", cpu);
@@ -428,3 +455,28 @@ out:
428#endif /* CONFIG_PM_SLEEP_SMP */ 455#endif /* CONFIG_PM_SLEEP_SMP */
429 456
430#endif /* CONFIG_SMP */ 457#endif /* CONFIG_SMP */
458
459/*
460 * cpu_bit_bitmap[] is a special, "compressed" data structure that
461 * represents all NR_CPUS bits binary values of 1<<nr.
462 *
463 * It is used by cpumask_of_cpu() to get a constant address to a CPU
464 * mask value that has a single bit set only.
465 */
466
467/* cpu_bit_bitmap[0] is empty - so we can back into it */
468#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
469#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
470#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
471#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
472
473const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
474
475 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
476 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
477#if BITS_PER_LONG > 32
478 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
479 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
480#endif
481};
482EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
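The comment block above describes the layout of cpu_bit_bitmap; the payoff is that cpumask_of_cpu() can return a constant pointer instead of building a mask on the stack. A sketch of the lookup (the real helper sits in include/linux/cpumask.h; treat the exact form here as an approximation):

static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
{
	/* row 1 + (cpu % BITS_PER_LONG) has bit (cpu % BITS_PER_LONG)
	 * set in its word 0; stepping the pointer back by
	 * cpu / BITS_PER_LONG words puts that set bit into the word
	 * that covers 'cpu'.  The empty row 0 is what makes backing
	 * into the previous row safe. */
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return (const cpumask_t *)p;
}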
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 459d601947a8..d5ab79cf516d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -54,7 +54,6 @@
54#include <asm/uaccess.h> 54#include <asm/uaccess.h>
55#include <asm/atomic.h> 55#include <asm/atomic.h>
56#include <linux/mutex.h> 56#include <linux/mutex.h>
57#include <linux/kfifo.h>
58#include <linux/workqueue.h> 57#include <linux/workqueue.h>
59#include <linux/cgroup.h> 58#include <linux/cgroup.h>
60 59
@@ -227,10 +226,6 @@ static struct cpuset top_cpuset = {
227 * The task_struct fields mems_allowed and mems_generation may only 226 * The task_struct fields mems_allowed and mems_generation may only
228 * be accessed in the context of that task, so require no locks. 227 * be accessed in the context of that task, so require no locks.
229 * 228 *
230 * The cpuset_common_file_write handler for operations that modify
231 * the cpuset hierarchy holds cgroup_mutex across the entire operation,
232 * single threading all such cpuset modifications across the system.
233 *
234 * The cpuset_common_file_read() handlers only hold callback_mutex across 229 * The cpuset_common_file_read() handlers only hold callback_mutex across
235 * small pieces of code, such as when reading out possibly multi-word 230 * small pieces of code, such as when reading out possibly multi-word
236 * cpumasks and nodemasks. 231 * cpumasks and nodemasks.
@@ -369,7 +364,7 @@ void cpuset_update_task_memory_state(void)
369 my_cpusets_mem_gen = top_cpuset.mems_generation; 364 my_cpusets_mem_gen = top_cpuset.mems_generation;
370 } else { 365 } else {
371 rcu_read_lock(); 366 rcu_read_lock();
372 my_cpusets_mem_gen = task_cs(current)->mems_generation; 367 my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
373 rcu_read_unlock(); 368 rcu_read_unlock();
374 } 369 }
375 370
@@ -490,21 +485,51 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
490static void 485static void
491update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) 486update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
492{ 487{
493 if (!dattr)
494 return;
495 if (dattr->relax_domain_level < c->relax_domain_level) 488 if (dattr->relax_domain_level < c->relax_domain_level)
496 dattr->relax_domain_level = c->relax_domain_level; 489 dattr->relax_domain_level = c->relax_domain_level;
497 return; 490 return;
498} 491}
499 492
493static void
494update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
495{
496 LIST_HEAD(q);
497
498 list_add(&c->stack_list, &q);
499 while (!list_empty(&q)) {
500 struct cpuset *cp;
501 struct cgroup *cont;
502 struct cpuset *child;
503
504 cp = list_first_entry(&q, struct cpuset, stack_list);
505 list_del(q.next);
506
507 if (cpus_empty(cp->cpus_allowed))
508 continue;
509
510 if (is_sched_load_balance(cp))
511 update_domain_attr(dattr, cp);
512
513 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
514 child = cgroup_cs(cont);
515 list_add_tail(&child->stack_list, &q);
516 }
517 }
518}
519
500/* 520/*
501 * rebuild_sched_domains() 521 * rebuild_sched_domains()
502 * 522 *
503 * If the flag 'sched_load_balance' of any cpuset with non-empty 523 * This routine will be called to rebuild the scheduler's dynamic
504 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset 524 * sched domains:
505 * which has that flag enabled, or if any cpuset with a non-empty 525 * - if the flag 'sched_load_balance' of any cpuset with non-empty
506 * 'cpus' is removed, then call this routine to rebuild the 526 * 'cpus' changes,
507 * scheduler's dynamic sched domains. 527 * - or if the 'cpus' allowed changes in any cpuset which has that
528 * flag enabled,
529 * - or if the 'sched_relax_domain_level' of any cpuset which has
530 * that flag enabled and with non-empty 'cpus' changes,
531 * - or if any cpuset with non-empty 'cpus' is removed,
532 * - or if a cpu gets offlined.
508 * 533 *
509 * This routine builds a partial partition of the systems CPUs 534 * This routine builds a partial partition of the systems CPUs
510 * (the set of non-overlapping cpumask_t's in the array 'part' 535 * (the set of non-overlapping cpumask_t's in the array 'part'
@@ -531,7 +556,7 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
531 * So the reverse nesting would risk an ABBA deadlock. 556 * So the reverse nesting would risk an ABBA deadlock.
532 * 557 *
533 * The three key local variables below are: 558 * The three key local variables below are:
534 * q - a kfifo queue of cpuset pointers, used to implement a 559 * q - a linked-list queue of cpuset pointers, used to implement a
535 * top-down scan of all cpusets. This scan loads a pointer 560 * top-down scan of all cpusets. This scan loads a pointer
536 * to each cpuset marked is_sched_load_balance into the 561 * to each cpuset marked is_sched_load_balance into the
537 * array 'csa'. For our purposes, rebuilding the schedulers 562 * array 'csa'. For our purposes, rebuilding the schedulers
@@ -564,9 +589,9 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
564 * partition_sched_domains(). 589 * partition_sched_domains().
565 */ 590 */
566 591
567static void rebuild_sched_domains(void) 592void rebuild_sched_domains(void)
568{ 593{
569 struct kfifo *q; /* queue of cpusets to be scanned */ 594 LIST_HEAD(q); /* queue of cpusets to be scanned*/
570 struct cpuset *cp; /* scans q */ 595 struct cpuset *cp; /* scans q */
571 struct cpuset **csa; /* array of all cpuset ptrs */ 596 struct cpuset **csa; /* array of all cpuset ptrs */
572 int csn; /* how many cpuset ptrs in csa so far */ 597 int csn; /* how many cpuset ptrs in csa so far */
@@ -576,7 +601,6 @@ static void rebuild_sched_domains(void)
576 int ndoms; /* number of sched domains in result */ 601 int ndoms; /* number of sched domains in result */
577 int nslot; /* next empty doms[] cpumask_t slot */ 602 int nslot; /* next empty doms[] cpumask_t slot */
578 603
579 q = NULL;
580 csa = NULL; 604 csa = NULL;
581 doms = NULL; 605 doms = NULL;
582 dattr = NULL; 606 dattr = NULL;
@@ -590,30 +614,42 @@ static void rebuild_sched_domains(void)
590 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); 614 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
591 if (dattr) { 615 if (dattr) {
592 *dattr = SD_ATTR_INIT; 616 *dattr = SD_ATTR_INIT;
593 update_domain_attr(dattr, &top_cpuset); 617 update_domain_attr_tree(dattr, &top_cpuset);
594 } 618 }
595 *doms = top_cpuset.cpus_allowed; 619 *doms = top_cpuset.cpus_allowed;
596 goto rebuild; 620 goto rebuild;
597 } 621 }
598 622
599 q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
600 if (IS_ERR(q))
601 goto done;
602 csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); 623 csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
603 if (!csa) 624 if (!csa)
604 goto done; 625 goto done;
605 csn = 0; 626 csn = 0;
606 627
607 cp = &top_cpuset; 628 list_add(&top_cpuset.stack_list, &q);
608 __kfifo_put(q, (void *)&cp, sizeof(cp)); 629 while (!list_empty(&q)) {
609 while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
610 struct cgroup *cont; 630 struct cgroup *cont;
611 struct cpuset *child; /* scans child cpusets of cp */ 631 struct cpuset *child; /* scans child cpusets of cp */
612 if (is_sched_load_balance(cp)) 632
633 cp = list_first_entry(&q, struct cpuset, stack_list);
634 list_del(q.next);
635
636 if (cpus_empty(cp->cpus_allowed))
637 continue;
638
639 /*
640 * All child cpusets contain a subset of the parent's cpus, so
641 * just skip them, and then we call update_domain_attr_tree()
642 * to calc relax_domain_level of the corresponding sched
643 * domain.
644 */
645 if (is_sched_load_balance(cp)) {
613 csa[csn++] = cp; 646 csa[csn++] = cp;
647 continue;
648 }
649
614 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { 650 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
615 child = cgroup_cs(cont); 651 child = cgroup_cs(cont);
616 __kfifo_put(q, (void *)&child, sizeof(cp)); 652 list_add_tail(&child->stack_list, &q);
617 } 653 }
618 } 654 }
619 655
@@ -679,7 +715,9 @@ restart:
679 if (apn == b->pn) { 715 if (apn == b->pn) {
680 cpus_or(*dp, *dp, b->cpus_allowed); 716 cpus_or(*dp, *dp, b->cpus_allowed);
681 b->pn = -1; 717 b->pn = -1;
682 update_domain_attr(dattr, b); 718 if (dattr)
719 update_domain_attr_tree(dattr
720 + nslot, b);
683 } 721 }
684 } 722 }
685 nslot++; 723 nslot++;
@@ -694,43 +732,11 @@ rebuild:
694 put_online_cpus(); 732 put_online_cpus();
695 733
696done: 734done:
697 if (q && !IS_ERR(q))
698 kfifo_free(q);
699 kfree(csa); 735 kfree(csa);
700 /* Don't kfree(doms) -- partition_sched_domains() does that. */ 736 /* Don't kfree(doms) -- partition_sched_domains() does that. */
701 /* Don't kfree(dattr) -- partition_sched_domains() does that. */ 737 /* Don't kfree(dattr) -- partition_sched_domains() does that. */
702} 738}
703 739
704static inline int started_after_time(struct task_struct *t1,
705 struct timespec *time,
706 struct task_struct *t2)
707{
708 int start_diff = timespec_compare(&t1->start_time, time);
709 if (start_diff > 0) {
710 return 1;
711 } else if (start_diff < 0) {
712 return 0;
713 } else {
714 /*
715 * Arbitrarily, if two processes started at the same
716 * time, we'll say that the lower pointer value
717 * started first. Note that t2 may have exited by now
718 * so this may not be a valid pointer any longer, but
719 * that's fine - it still serves to distinguish
720 * between two tasks started (effectively)
721 * simultaneously.
722 */
723 return t1 > t2;
724 }
725}
726
727static inline int started_after(void *p1, void *p2)
728{
729 struct task_struct *t1 = p1;
730 struct task_struct *t2 = p2;
731 return started_after_time(t1, &t2->start_time, t2);
732}
733
734/** 740/**
735 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's 741 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
736 * @tsk: task to test 742 * @tsk: task to test
@@ -766,15 +772,49 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
766} 772}
767 773
768/** 774/**
775 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
776 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
777 *
778 * Called with cgroup_mutex held
779 *
780 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
781 * calling callback functions for each.
782 *
783 * Return 0 if successful, -errno if not.
784 */
785static int update_tasks_cpumask(struct cpuset *cs)
786{
787 struct cgroup_scanner scan;
788 struct ptr_heap heap;
789 int retval;
790
791 /*
792 * cgroup_scan_tasks() will initialize heap->gt for us.
793 * heap_init() is still needed here because we should not change
794 * cs->cpus_allowed when heap_init() fails.
795 */
796 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
797 if (retval)
798 return retval;
799
800 scan.cg = cs->css.cgroup;
801 scan.test_task = cpuset_test_cpumask;
802 scan.process_task = cpuset_change_cpumask;
803 scan.heap = &heap;
804 retval = cgroup_scan_tasks(&scan);
805
806 heap_free(&heap);
807 return retval;
808}
809
810/**
769 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it 811 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
770 * @cs: the cpuset to consider 812 * @cs: the cpuset to consider
771 * @buf: buffer of cpu numbers written to this cpuset 813 * @buf: buffer of cpu numbers written to this cpuset
772 */ 814 */
773static int update_cpumask(struct cpuset *cs, char *buf) 815static int update_cpumask(struct cpuset *cs, const char *buf)
774{ 816{
775 struct cpuset trialcs; 817 struct cpuset trialcs;
776 struct cgroup_scanner scan;
777 struct ptr_heap heap;
778 int retval; 818 int retval;
779 int is_load_balanced; 819 int is_load_balanced;
780 820
@@ -790,7 +830,6 @@ static int update_cpumask(struct cpuset *cs, char *buf)
790 * that parsing. The validate_change() call ensures that cpusets 830 * that parsing. The validate_change() call ensures that cpusets
791 * with tasks have cpus. 831 * with tasks have cpus.
792 */ 832 */
793 buf = strstrip(buf);
794 if (!*buf) { 833 if (!*buf) {
795 cpus_clear(trialcs.cpus_allowed); 834 cpus_clear(trialcs.cpus_allowed);
796 } else { 835 } else {
@@ -809,10 +848,6 @@ static int update_cpumask(struct cpuset *cs, char *buf)
809 if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) 848 if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
810 return 0; 849 return 0;
811 850
812 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
813 if (retval)
814 return retval;
815
816 is_load_balanced = is_sched_load_balance(&trialcs); 851 is_load_balanced = is_sched_load_balance(&trialcs);
817 852
818 mutex_lock(&callback_mutex); 853 mutex_lock(&callback_mutex);
@@ -823,12 +858,9 @@ static int update_cpumask(struct cpuset *cs, char *buf)
823 * Scan tasks in the cpuset, and update the cpumasks of any 858 * Scan tasks in the cpuset, and update the cpumasks of any
824 * that need an update. 859 * that need an update.
825 */ 860 */
826 scan.cg = cs->css.cgroup; 861 retval = update_tasks_cpumask(cs);
827 scan.test_task = cpuset_test_cpumask; 862 if (retval < 0)
828 scan.process_task = cpuset_change_cpumask; 863 return retval;
829 scan.heap = &heap;
830 cgroup_scan_tasks(&scan);
831 heap_free(&heap);
832 864
833 if (is_load_balanced) 865 if (is_load_balanced)
834 rebuild_sched_domains(); 866 rebuild_sched_domains();
@@ -884,74 +916,25 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
884 mutex_unlock(&callback_mutex); 916 mutex_unlock(&callback_mutex);
885} 917}
886 918
887/*
888 * Handle user request to change the 'mems' memory placement
889 * of a cpuset. Needs to validate the request, update the
890 * cpusets mems_allowed and mems_generation, and for each
891 * task in the cpuset, rebind any vma mempolicies and if
892 * the cpuset is marked 'memory_migrate', migrate the tasks
893 * pages to the new memory.
894 *
895 * Call with cgroup_mutex held. May take callback_mutex during call.
896 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
897 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
898 * their mempolicies to the cpusets new mems_allowed.
899 */
900
901static void *cpuset_being_rebound; 919static void *cpuset_being_rebound;
902 920
903static int update_nodemask(struct cpuset *cs, char *buf) 921/**
922 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
923 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
924 * @oldmem: old mems_allowed of cpuset cs
925 *
926 * Called with cgroup_mutex held
927 * Return 0 if successful, -errno if not.
928 */
929static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
904{ 930{
905 struct cpuset trialcs;
906 nodemask_t oldmem;
907 struct task_struct *p; 931 struct task_struct *p;
908 struct mm_struct **mmarray; 932 struct mm_struct **mmarray;
909 int i, n, ntasks; 933 int i, n, ntasks;
910 int migrate; 934 int migrate;
911 int fudge; 935 int fudge;
912 int retval;
913 struct cgroup_iter it; 936 struct cgroup_iter it;
914 937 int retval;
915 /*
916 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
917 * it's read-only
918 */
919 if (cs == &top_cpuset)
920 return -EACCES;
921
922 trialcs = *cs;
923
924 /*
925 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
926 * Since nodelist_parse() fails on an empty mask, we special case
927 * that parsing. The validate_change() call ensures that cpusets
928 * with tasks have memory.
929 */
930 buf = strstrip(buf);
931 if (!*buf) {
932 nodes_clear(trialcs.mems_allowed);
933 } else {
934 retval = nodelist_parse(buf, trialcs.mems_allowed);
935 if (retval < 0)
936 goto done;
937
938 if (!nodes_subset(trialcs.mems_allowed,
939 node_states[N_HIGH_MEMORY]))
940 return -EINVAL;
941 }
942 oldmem = cs->mems_allowed;
943 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
944 retval = 0; /* Too easy - nothing to do */
945 goto done;
946 }
947 retval = validate_change(cs, &trialcs);
948 if (retval < 0)
949 goto done;
950
951 mutex_lock(&callback_mutex);
952 cs->mems_allowed = trialcs.mems_allowed;
953 cs->mems_generation = cpuset_mems_generation++;
954 mutex_unlock(&callback_mutex);
955 938
956 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 939 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
957 940
@@ -1018,7 +1001,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
1018 1001
1019 mpol_rebind_mm(mm, &cs->mems_allowed); 1002 mpol_rebind_mm(mm, &cs->mems_allowed);
1020 if (migrate) 1003 if (migrate)
1021 cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed); 1004 cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1022 mmput(mm); 1005 mmput(mm);
1023 } 1006 }
1024 1007
@@ -1030,6 +1013,70 @@ done:
1030 return retval; 1013 return retval;
1031} 1014}
1032 1015
1016/*
1017 * Handle user request to change the 'mems' memory placement
1018 * of a cpuset. Needs to validate the request, update the
1019 * cpusets mems_allowed and mems_generation, and for each
1020 * task in the cpuset, rebind any vma mempolicies and if
1021 * the cpuset is marked 'memory_migrate', migrate the tasks
1022 * pages to the new memory.
1023 *
1024 * Call with cgroup_mutex held. May take callback_mutex during call.
1025 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1026 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
1027 * their mempolicies to the cpusets new mems_allowed.
1028 */
1029static int update_nodemask(struct cpuset *cs, const char *buf)
1030{
1031 struct cpuset trialcs;
1032 nodemask_t oldmem;
1033 int retval;
1034
1035 /*
1036 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
1037 * it's read-only
1038 */
1039 if (cs == &top_cpuset)
1040 return -EACCES;
1041
1042 trialcs = *cs;
1043
1044 /*
1045 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1046 * Since nodelist_parse() fails on an empty mask, we special case
1047 * that parsing. The validate_change() call ensures that cpusets
1048 * with tasks have memory.
1049 */
1050 if (!*buf) {
1051 nodes_clear(trialcs.mems_allowed);
1052 } else {
1053 retval = nodelist_parse(buf, trialcs.mems_allowed);
1054 if (retval < 0)
1055 goto done;
1056
1057 if (!nodes_subset(trialcs.mems_allowed,
1058 node_states[N_HIGH_MEMORY]))
1059 return -EINVAL;
1060 }
1061 oldmem = cs->mems_allowed;
1062 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
1063 retval = 0; /* Too easy - nothing to do */
1064 goto done;
1065 }
1066 retval = validate_change(cs, &trialcs);
1067 if (retval < 0)
1068 goto done;
1069
1070 mutex_lock(&callback_mutex);
1071 cs->mems_allowed = trialcs.mems_allowed;
1072 cs->mems_generation = cpuset_mems_generation++;
1073 mutex_unlock(&callback_mutex);
1074
1075 retval = update_tasks_nodemask(cs, &oldmem);
1076done:
1077 return retval;
1078}
1079
1033int current_cpuset_is_being_rebound(void) 1080int current_cpuset_is_being_rebound(void)
1034{ 1081{
1035 return task_cs(current) == cpuset_being_rebound; 1082 return task_cs(current) == cpuset_being_rebound;
@@ -1042,7 +1089,8 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
1042 1089
1043 if (val != cs->relax_domain_level) { 1090 if (val != cs->relax_domain_level) {
1044 cs->relax_domain_level = val; 1091 cs->relax_domain_level = val;
1045 rebuild_sched_domains(); 1092 if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
1093 rebuild_sched_domains();
1046 } 1094 }
1047 1095
1048 return 0; 1096 return 0;
@@ -1254,72 +1302,14 @@ typedef enum {
1254 FILE_SPREAD_SLAB, 1302 FILE_SPREAD_SLAB,
1255} cpuset_filetype_t; 1303} cpuset_filetype_t;
1256 1304
1257static ssize_t cpuset_common_file_write(struct cgroup *cont,
1258 struct cftype *cft,
1259 struct file *file,
1260 const char __user *userbuf,
1261 size_t nbytes, loff_t *unused_ppos)
1262{
1263 struct cpuset *cs = cgroup_cs(cont);
1264 cpuset_filetype_t type = cft->private;
1265 char *buffer;
1266 int retval = 0;
1267
1268 /* Crude upper limit on largest legitimate cpulist user might write. */
1269 if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES))
1270 return -E2BIG;
1271
1272 /* +1 for nul-terminator */
1273 buffer = kmalloc(nbytes + 1, GFP_KERNEL);
1274 if (!buffer)
1275 return -ENOMEM;
1276
1277 if (copy_from_user(buffer, userbuf, nbytes)) {
1278 retval = -EFAULT;
1279 goto out1;
1280 }
1281 buffer[nbytes] = 0; /* nul-terminate */
1282
1283 cgroup_lock();
1284
1285 if (cgroup_is_removed(cont)) {
1286 retval = -ENODEV;
1287 goto out2;
1288 }
1289
1290 switch (type) {
1291 case FILE_CPULIST:
1292 retval = update_cpumask(cs, buffer);
1293 break;
1294 case FILE_MEMLIST:
1295 retval = update_nodemask(cs, buffer);
1296 break;
1297 default:
1298 retval = -EINVAL;
1299 goto out2;
1300 }
1301
1302 if (retval == 0)
1303 retval = nbytes;
1304out2:
1305 cgroup_unlock();
1306out1:
1307 kfree(buffer);
1308 return retval;
1309}
1310
1311static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) 1305static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1312{ 1306{
1313 int retval = 0; 1307 int retval = 0;
1314 struct cpuset *cs = cgroup_cs(cgrp); 1308 struct cpuset *cs = cgroup_cs(cgrp);
1315 cpuset_filetype_t type = cft->private; 1309 cpuset_filetype_t type = cft->private;
1316 1310
1317 cgroup_lock(); 1311 if (!cgroup_lock_live_group(cgrp))
1318
1319 if (cgroup_is_removed(cgrp)) {
1320 cgroup_unlock();
1321 return -ENODEV; 1312 return -ENODEV;
1322 }
1323 1313
1324 switch (type) { 1314 switch (type) {
1325 case FILE_CPU_EXCLUSIVE: 1315 case FILE_CPU_EXCLUSIVE:
@@ -1365,12 +1355,9 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1365 struct cpuset *cs = cgroup_cs(cgrp); 1355 struct cpuset *cs = cgroup_cs(cgrp);
1366 cpuset_filetype_t type = cft->private; 1356 cpuset_filetype_t type = cft->private;
1367 1357
1368 cgroup_lock(); 1358 if (!cgroup_lock_live_group(cgrp))
1369
1370 if (cgroup_is_removed(cgrp)) {
1371 cgroup_unlock();
1372 return -ENODEV; 1359 return -ENODEV;
1373 } 1360
1374 switch (type) { 1361 switch (type) {
1375 case FILE_SCHED_RELAX_DOMAIN_LEVEL: 1362 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1376 retval = update_relax_domain_level(cs, val); 1363 retval = update_relax_domain_level(cs, val);
@@ -1384,6 +1371,32 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1384} 1371}
1385 1372
1386/* 1373/*
1374 * Common handling for a write to a "cpus" or "mems" file.
1375 */
1376static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1377 const char *buf)
1378{
1379 int retval = 0;
1380
1381 if (!cgroup_lock_live_group(cgrp))
1382 return -ENODEV;
1383
1384 switch (cft->private) {
1385 case FILE_CPULIST:
1386 retval = update_cpumask(cgroup_cs(cgrp), buf);
1387 break;
1388 case FILE_MEMLIST:
1389 retval = update_nodemask(cgroup_cs(cgrp), buf);
1390 break;
1391 default:
1392 retval = -EINVAL;
1393 break;
1394 }
1395 cgroup_unlock();
1396 return retval;
1397}
1398
1399/*
1387 * These ascii lists should be read in a single call, by using a user 1400 * These ascii lists should be read in a single call, by using a user
1388 * buffer large enough to hold the entire map. If read in smaller 1401 * buffer large enough to hold the entire map. If read in smaller
1389 * chunks, there is no guarantee of atomicity. Since the display format 1402 * chunks, there is no guarantee of atomicity. Since the display format
@@ -1502,14 +1515,16 @@ static struct cftype files[] = {
1502 { 1515 {
1503 .name = "cpus", 1516 .name = "cpus",
1504 .read = cpuset_common_file_read, 1517 .read = cpuset_common_file_read,
1505 .write = cpuset_common_file_write, 1518 .write_string = cpuset_write_resmask,
1519 .max_write_len = (100U + 6 * NR_CPUS),
1506 .private = FILE_CPULIST, 1520 .private = FILE_CPULIST,
1507 }, 1521 },
1508 1522
1509 { 1523 {
1510 .name = "mems", 1524 .name = "mems",
1511 .read = cpuset_common_file_read, 1525 .read = cpuset_common_file_read,
1512 .write = cpuset_common_file_write, 1526 .write_string = cpuset_write_resmask,
1527 .max_write_len = (100U + 6 * MAX_NUMNODES),
1513 .private = FILE_MEMLIST, 1528 .private = FILE_MEMLIST,
1514 }, 1529 },
1515 1530
@@ -1790,7 +1805,7 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1790 scan.scan.heap = NULL; 1805 scan.scan.heap = NULL;
1791 scan.to = to->css.cgroup; 1806 scan.to = to->css.cgroup;
1792 1807
1793 if (cgroup_scan_tasks((struct cgroup_scanner *)&scan)) 1808 if (cgroup_scan_tasks(&scan.scan))
1794 printk(KERN_ERR "move_member_tasks_to_cpuset: " 1809 printk(KERN_ERR "move_member_tasks_to_cpuset: "
1795 "cgroup_scan_tasks failed\n"); 1810 "cgroup_scan_tasks failed\n");
1796} 1811}
@@ -1846,29 +1861,29 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
1846 */ 1861 */
1847static void scan_for_empty_cpusets(const struct cpuset *root) 1862static void scan_for_empty_cpusets(const struct cpuset *root)
1848{ 1863{
1864 LIST_HEAD(queue);
1849 struct cpuset *cp; /* scans cpusets being updated */ 1865 struct cpuset *cp; /* scans cpusets being updated */
1850 struct cpuset *child; /* scans child cpusets of cp */ 1866 struct cpuset *child; /* scans child cpusets of cp */
1851 struct list_head queue;
1852 struct cgroup *cont; 1867 struct cgroup *cont;
1853 1868 nodemask_t oldmems;
1854 INIT_LIST_HEAD(&queue);
1855 1869
1856 list_add_tail((struct list_head *)&root->stack_list, &queue); 1870 list_add_tail((struct list_head *)&root->stack_list, &queue);
1857 1871
1858 while (!list_empty(&queue)) { 1872 while (!list_empty(&queue)) {
1859 cp = container_of(queue.next, struct cpuset, stack_list); 1873 cp = list_first_entry(&queue, struct cpuset, stack_list);
1860 list_del(queue.next); 1874 list_del(queue.next);
1861 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { 1875 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
1862 child = cgroup_cs(cont); 1876 child = cgroup_cs(cont);
1863 list_add_tail(&child->stack_list, &queue); 1877 list_add_tail(&child->stack_list, &queue);
1864 } 1878 }
1865 cont = cp->css.cgroup;
1866 1879
1867 /* Continue past cpusets with all cpus, mems online */ 1880 /* Continue past cpusets with all cpus, mems online */
1868 if (cpus_subset(cp->cpus_allowed, cpu_online_map) && 1881 if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
1869 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) 1882 nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
1870 continue; 1883 continue;
1871 1884
1885 oldmems = cp->mems_allowed;
1886
1872 /* Remove offline cpus and mems from this cpuset. */ 1887 /* Remove offline cpus and mems from this cpuset. */
1873 mutex_lock(&callback_mutex); 1888 mutex_lock(&callback_mutex);
1874 cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map); 1889 cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
@@ -1880,6 +1895,10 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
1880 if (cpus_empty(cp->cpus_allowed) || 1895 if (cpus_empty(cp->cpus_allowed) ||
1881 nodes_empty(cp->mems_allowed)) 1896 nodes_empty(cp->mems_allowed))
1882 remove_tasks_in_empty_cpuset(cp); 1897 remove_tasks_in_empty_cpuset(cp);
1898 else {
1899 update_tasks_cpumask(cp);
1900 update_tasks_nodemask(cp, &oldmems);
1901 }
1883 } 1902 }
1884} 1903}
1885 1904
@@ -1972,7 +1991,6 @@ void __init cpuset_init_smp(void)
1972} 1991}
1973 1992
1974/** 1993/**
1975
1976 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. 1994 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
1977 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. 1995 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
1978 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. 1996 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
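Several hunks above replace the kfifo-based scans with a queue built from each cpuset's embedded stack_list node, so the top-down walk needs no allocation and has no failure path. Reduced to a sketch, the shared shape of update_domain_attr_tree(), rebuild_sched_domains() and scan_for_empty_cpusets() is:

static void cpuset_walk_sketch(struct cpuset *root)
{
	LIST_HEAD(q);

	list_add(&root->stack_list, &q);
	while (!list_empty(&q)) {
		struct cpuset *cp;
		struct cgroup *cont;
		struct cpuset *child;

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);

		/* ... visit cp here ... */

		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &q);
		}
	}
}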
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 10e43fd8b721..b3179dad71be 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -145,8 +145,11 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
145 d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; 145 d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
146 tmp = d->swapin_delay_total + tsk->delays->swapin_delay; 146 tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
147 d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; 147 d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
148 tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
149 d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
148 d->blkio_count += tsk->delays->blkio_count; 150 d->blkio_count += tsk->delays->blkio_count;
149 d->swapin_count += tsk->delays->swapin_count; 151 d->swapin_count += tsk->delays->swapin_count;
152 d->freepages_count += tsk->delays->freepages_count;
150 spin_unlock_irqrestore(&tsk->delays->lock, flags); 153 spin_unlock_irqrestore(&tsk->delays->lock, flags);
151 154
152done: 155done:
@@ -165,3 +168,16 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
165 return ret; 168 return ret;
166} 169}
167 170
171void __delayacct_freepages_start(void)
172{
173 delayacct_start(&current->delays->freepages_start);
174}
175
176void __delayacct_freepages_end(void)
177{
178 delayacct_end(&current->delays->freepages_start,
179 &current->delays->freepages_end,
180 &current->delays->freepages_delay,
181 &current->delays->freepages_count);
182}
183
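The two hooks above only bracket an interval; the bookkeeping is the same delayacct_start()/delayacct_end() pattern already used for blkio and swapin delays. A hedged sketch of the expected call site — the real one lives in mm/ (direct reclaim), outside this kernel/ diff, behind inline wrappers that first check current->delays:

static unsigned long reclaim_with_accounting(struct zonelist *zonelist,
					     int order, gfp_t gfp_mask)
{
	unsigned long progress;

	delayacct_freepages_start();	/* assumed wrapper: records start time */
	progress = try_to_free_pages(zonelist, order, gfp_mask);
	delayacct_freepages_end();	/* adds elapsed time to
					 * current->delays->freepages_delay */
	return progress;
}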
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
new file mode 100644
index 000000000000..c1d4d5b4c61c
--- /dev/null
+++ b/kernel/dma-coherent.c
@@ -0,0 +1,153 @@
1/*
2 * Coherent per-device memory handling.
3 * Borrowed from i386
4 */
5#include <linux/kernel.h>
6#include <linux/dma-mapping.h>
7
8struct dma_coherent_mem {
9 void *virt_base;
10 u32 device_base;
11 int size;
12 int flags;
13 unsigned long *bitmap;
14};
15
16int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
17 dma_addr_t device_addr, size_t size, int flags)
18{
19 void __iomem *mem_base = NULL;
20 int pages = size >> PAGE_SHIFT;
21 int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
22
23 if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
24 goto out;
25 if (!size)
26 goto out;
27 if (dev->dma_mem)
28 goto out;
29
30 /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
31
32 mem_base = ioremap(bus_addr, size);
33 if (!mem_base)
34 goto out;
35
36 dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
37 if (!dev->dma_mem)
38 goto out;
39 dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
40 if (!dev->dma_mem->bitmap)
41 goto free1_out;
42
43 dev->dma_mem->virt_base = mem_base;
44 dev->dma_mem->device_base = device_addr;
45 dev->dma_mem->size = pages;
46 dev->dma_mem->flags = flags;
47
48 if (flags & DMA_MEMORY_MAP)
49 return DMA_MEMORY_MAP;
50
51 return DMA_MEMORY_IO;
52
53 free1_out:
54 kfree(dev->dma_mem);
55 out:
56 if (mem_base)
57 iounmap(mem_base);
58 return 0;
59}
60EXPORT_SYMBOL(dma_declare_coherent_memory);
61
62void dma_release_declared_memory(struct device *dev)
63{
64 struct dma_coherent_mem *mem = dev->dma_mem;
65
66 if (!mem)
67 return;
68 dev->dma_mem = NULL;
69 iounmap(mem->virt_base);
70 kfree(mem->bitmap);
71 kfree(mem);
72}
73EXPORT_SYMBOL(dma_release_declared_memory);
74
75void *dma_mark_declared_memory_occupied(struct device *dev,
76 dma_addr_t device_addr, size_t size)
77{
78 struct dma_coherent_mem *mem = dev->dma_mem;
79 int pos, err;
80
81 size += device_addr & ~PAGE_MASK;
82
83 if (!mem)
84 return ERR_PTR(-EINVAL);
85
86 pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
87 err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
88 if (err != 0)
89 return ERR_PTR(err);
90 return mem->virt_base + (pos << PAGE_SHIFT);
91}
92EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
93
94/**
95 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
96 *
97 * @dev: device from which we allocate memory
98 * @size: size of requested memory area
99 * @dma_handle: This will be filled with the correct dma handle
100 * @ret: This pointer will be filled with the virtual address
101 * of the allocated area.
102 *
103 * This function should be only called from per-arch dma_alloc_coherent()
104 * to support allocation from per-device coherent memory pools.
105 *
106 * Returns 0 if dma_alloc_coherent should continue with allocating from
107 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
108 */
109int dma_alloc_from_coherent(struct device *dev, ssize_t size,
110 dma_addr_t *dma_handle, void **ret)
111{
112 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
113 int order = get_order(size);
114
115 if (mem) {
116 int page = bitmap_find_free_region(mem->bitmap, mem->size,
117 order);
118 if (page >= 0) {
119 *dma_handle = mem->device_base + (page << PAGE_SHIFT);
120 *ret = mem->virt_base + (page << PAGE_SHIFT);
121 memset(*ret, 0, size);
122 } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
123 *ret = NULL;
124 }
125 return (mem != NULL);
126}
127
128/**
129 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
130 * @dev: device from which the memory was allocated
131 * @order: the order of pages allocated
132 * @vaddr: virtual address of allocated pages
133 *
134 * This checks whether the memory was allocated from the per-device
135 * coherent memory pool and if so, releases that memory.
136 *
137 * Returns 1 if we correctly released the memory, or 0 if
138 * dma_release_coherent() should proceed with releasing memory from
139 * generic pools.
140 */
141int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
142{
143 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
144
145 if (mem && vaddr >= mem->virt_base && vaddr <
146 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
147 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
148
149 bitmap_release_region(mem->bitmap, page, order);
150 return 1;
151 }
152 return 0;
153}
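As the kerneldoc above says, these helpers are meant to be called from an architecture's dma_alloc_coherent()/dma_free_coherent(). A rough sketch of that glue; the arch_* names and the fallback paths are illustrative only:

void *arch_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret = NULL;

	/* a declared per-device pool takes priority; a nonzero return
	 * means the request was handled there (ret may still be NULL
	 * if an exclusive pool is exhausted) */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* ... otherwise allocate from the generic allocator ... */
	return ret;
}

void arch_dma_free_coherent(struct device *dev, size_t size,
			    void *vaddr, dma_addr_t dma_handle)
{
	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	/* ... otherwise release through the generic path ... */
}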
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index a9e6bad9f706..0d407e886735 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -65,7 +65,7 @@ lookup_exec_domain(u_long personality)
65 goto out; 65 goto out;
66 } 66 }
67 67
68#ifdef CONFIG_KMOD 68#ifdef CONFIG_MODULES
69 read_unlock(&exec_domains_lock); 69 read_unlock(&exec_domains_lock);
70 request_module("personality-%ld", pers); 70 request_module("personality-%ld", pers);
71 read_lock(&exec_domains_lock); 71 read_lock(&exec_domains_lock);
@@ -168,7 +168,6 @@ __set_personality(u_long personality)
168 current->personality = personality; 168 current->personality = personality;
169 oep = current_thread_info()->exec_domain; 169 oep = current_thread_info()->exec_domain;
170 current_thread_info()->exec_domain = ep; 170 current_thread_info()->exec_domain = ep;
171 set_fs_altroot();
172 171
173 module_put(oep->module); 172 module_put(oep->module);
174 return 0; 173 return 0;
diff --git a/kernel/exit.c b/kernel/exit.c
index 93d2711b9381..38ec40630149 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
46#include <linux/resource.h> 46#include <linux/resource.h>
47#include <linux/blkdev.h> 47#include <linux/blkdev.h>
48#include <linux/task_io_accounting_ops.h> 48#include <linux/task_io_accounting_ops.h>
49#include <linux/tracehook.h>
49 50
50#include <asm/uaccess.h> 51#include <asm/uaccess.h>
51#include <asm/unistd.h> 52#include <asm/unistd.h>
@@ -85,7 +86,6 @@ static void __exit_signal(struct task_struct *tsk)
85 BUG_ON(!sig); 86 BUG_ON(!sig);
86 BUG_ON(!atomic_read(&sig->count)); 87 BUG_ON(!atomic_read(&sig->count));
87 88
88 rcu_read_lock();
89 sighand = rcu_dereference(tsk->sighand); 89 sighand = rcu_dereference(tsk->sighand);
90 spin_lock(&sighand->siglock); 90 spin_lock(&sighand->siglock);
91 91
@@ -121,6 +121,7 @@ static void __exit_signal(struct task_struct *tsk)
121 sig->nivcsw += tsk->nivcsw; 121 sig->nivcsw += tsk->nivcsw;
122 sig->inblock += task_io_get_inblock(tsk); 122 sig->inblock += task_io_get_inblock(tsk);
123 sig->oublock += task_io_get_oublock(tsk); 123 sig->oublock += task_io_get_oublock(tsk);
124 task_io_accounting_add(&sig->ioac, &tsk->ioac);
124 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 125 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
125 sig = NULL; /* Marker for below. */ 126 sig = NULL; /* Marker for below. */
126 } 127 }
@@ -136,7 +137,6 @@ static void __exit_signal(struct task_struct *tsk)
136 tsk->signal = NULL; 137 tsk->signal = NULL;
137 tsk->sighand = NULL; 138 tsk->sighand = NULL;
138 spin_unlock(&sighand->siglock); 139 spin_unlock(&sighand->siglock);
139 rcu_read_unlock();
140 140
141 __cleanup_sighand(sighand); 141 __cleanup_sighand(sighand);
142 clear_tsk_thread_flag(tsk,TIF_SIGPENDING); 142 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
@@ -152,27 +152,17 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
152 put_task_struct(container_of(rhp, struct task_struct, rcu)); 152 put_task_struct(container_of(rhp, struct task_struct, rcu));
153} 153}
154 154
155/*
156 * Do final ptrace-related cleanup of a zombie being reaped.
157 *
158 * Called with write_lock(&tasklist_lock) held.
159 */
160static void ptrace_release_task(struct task_struct *p)
161{
162 BUG_ON(!list_empty(&p->ptraced));
163 ptrace_unlink(p);
164 BUG_ON(!list_empty(&p->ptrace_entry));
165}
166 155
167void release_task(struct task_struct * p) 156void release_task(struct task_struct * p)
168{ 157{
169 struct task_struct *leader; 158 struct task_struct *leader;
170 int zap_leader; 159 int zap_leader;
171repeat: 160repeat:
161 tracehook_prepare_release_task(p);
172 atomic_dec(&p->user->processes); 162 atomic_dec(&p->user->processes);
173 proc_flush_task(p); 163 proc_flush_task(p);
174 write_lock_irq(&tasklist_lock); 164 write_lock_irq(&tasklist_lock);
175 ptrace_release_task(p); 165 tracehook_finish_release_task(p);
176 __exit_signal(p); 166 __exit_signal(p);
177 167
178 /* 168 /*
@@ -194,6 +184,13 @@ repeat:
194 * that case. 184 * that case.
195 */ 185 */
196 zap_leader = task_detached(leader); 186 zap_leader = task_detached(leader);
187
188 /*
189 * This maintains the invariant that release_task()
190 * only runs on a task in EXIT_DEAD, just for sanity.
191 */
192 if (zap_leader)
193 leader->exit_state = EXIT_DEAD;
197 } 194 }
198 195
199 write_unlock_irq(&tasklist_lock); 196 write_unlock_irq(&tasklist_lock);
@@ -432,7 +429,7 @@ void daemonize(const char *name, ...)
432 * We don't want to have TIF_FREEZE set if the system-wide hibernation 429 * We don't want to have TIF_FREEZE set if the system-wide hibernation
433 * or suspend transition begins right now. 430 * or suspend transition begins right now.
434 */ 431 */
435 current->flags |= PF_NOFREEZE; 432 current->flags |= (PF_NOFREEZE | PF_KTHREAD);
436 433
437 if (current->nsproxy != &init_nsproxy) { 434 if (current->nsproxy != &init_nsproxy) {
438 get_nsproxy(&init_nsproxy); 435 get_nsproxy(&init_nsproxy);
@@ -557,8 +554,6 @@ void put_fs_struct(struct fs_struct *fs)
557 if (atomic_dec_and_test(&fs->count)) { 554 if (atomic_dec_and_test(&fs->count)) {
558 path_put(&fs->root); 555 path_put(&fs->root);
559 path_put(&fs->pwd); 556 path_put(&fs->pwd);
560 if (fs->altroot.dentry)
561 path_put(&fs->altroot);
562 kmem_cache_free(fs_cachep, fs); 557 kmem_cache_free(fs_cachep, fs);
563 } 558 }
564} 559}
@@ -666,26 +661,40 @@ assign_new_owner:
666static void exit_mm(struct task_struct * tsk) 661static void exit_mm(struct task_struct * tsk)
667{ 662{
668 struct mm_struct *mm = tsk->mm; 663 struct mm_struct *mm = tsk->mm;
664 struct core_state *core_state;
669 665
670 mm_release(tsk, mm); 666 mm_release(tsk, mm);
671 if (!mm) 667 if (!mm)
672 return; 668 return;
673 /* 669 /*
674 * Serialize with any possible pending coredump. 670 * Serialize with any possible pending coredump.
675 * We must hold mmap_sem around checking core_waiters 671 * We must hold mmap_sem around checking core_state
676 * and clearing tsk->mm. The core-inducing thread 672 * and clearing tsk->mm. The core-inducing thread
677 * will increment core_waiters for each thread in the 673 * will increment ->nr_threads for each thread in the
678 * group with ->mm != NULL. 674 * group with ->mm != NULL.
679 */ 675 */
680 down_read(&mm->mmap_sem); 676 down_read(&mm->mmap_sem);
681 if (mm->core_waiters) { 677 core_state = mm->core_state;
678 if (core_state) {
679 struct core_thread self;
682 up_read(&mm->mmap_sem); 680 up_read(&mm->mmap_sem);
683 down_write(&mm->mmap_sem);
684 if (!--mm->core_waiters)
685 complete(mm->core_startup_done);
686 up_write(&mm->mmap_sem);
687 681
688 wait_for_completion(&mm->core_done); 682 self.task = tsk;
683 self.next = xchg(&core_state->dumper.next, &self);
684 /*
685 * Implies mb(), the result of xchg() must be visible
686 * to core_state->dumper.
687 */
688 if (atomic_dec_and_test(&core_state->nr_threads))
689 complete(&core_state->startup);
690
691 for (;;) {
692 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
693 if (!self.task) /* see coredump_finish() */
694 break;
695 schedule();
696 }
697 __set_task_state(tsk, TASK_RUNNING);
689 down_read(&mm->mmap_sem); 698 down_read(&mm->mmap_sem);
690 } 699 }
691 atomic_inc(&mm->mm_count); 700 atomic_inc(&mm->mm_count);
@@ -863,7 +872,8 @@ static void forget_original_parent(struct task_struct *father)
863 */ 872 */
864static void exit_notify(struct task_struct *tsk, int group_dead) 873static void exit_notify(struct task_struct *tsk, int group_dead)
865{ 874{
866 int state; 875 int signal;
876 void *cookie;
867 877
868 /* 878 /*
869 * This does two things: 879 * This does two things:
@@ -900,22 +910,11 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
900 !capable(CAP_KILL)) 910 !capable(CAP_KILL))
901 tsk->exit_signal = SIGCHLD; 911 tsk->exit_signal = SIGCHLD;
902 912
903 /* If something other than our normal parent is ptracing us, then 913 signal = tracehook_notify_death(tsk, &cookie, group_dead);
904 * send it a SIGCHLD instead of honoring exit_signal. exit_signal 914 if (signal >= 0)
905 * only has special meaning to our real parent. 915 signal = do_notify_parent(tsk, signal);
906 */
907 if (!task_detached(tsk) && thread_group_empty(tsk)) {
908 int signal = ptrace_reparented(tsk) ?
909 SIGCHLD : tsk->exit_signal;
910 do_notify_parent(tsk, signal);
911 } else if (tsk->ptrace) {
912 do_notify_parent(tsk, SIGCHLD);
913 }
914 916
915 state = EXIT_ZOMBIE; 917 tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
916 if (task_detached(tsk) && likely(!tsk->ptrace))
917 state = EXIT_DEAD;
918 tsk->exit_state = state;
919 918
920 /* mt-exec, de_thread() is waiting for us */ 919 /* mt-exec, de_thread() is waiting for us */
921 if (thread_group_leader(tsk) && 920 if (thread_group_leader(tsk) &&
@@ -925,8 +924,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
925 924
926 write_unlock_irq(&tasklist_lock); 925 write_unlock_irq(&tasklist_lock);
927 926
927 tracehook_report_death(tsk, signal, cookie, group_dead);
928
928 /* If the process is dead, release it - nobody will wait for it */ 929 /* If the process is dead, release it - nobody will wait for it */
929 if (state == EXIT_DEAD) 930 if (signal == DEATH_REAP)
930 release_task(tsk); 931 release_task(tsk);
931} 932}
932 933
@@ -1005,10 +1006,7 @@ NORET_TYPE void do_exit(long code)
1005 if (unlikely(!tsk->pid)) 1006 if (unlikely(!tsk->pid))
1006 panic("Attempted to kill the idle task!"); 1007 panic("Attempted to kill the idle task!");
1007 1008
1008 if (unlikely(current->ptrace & PT_TRACE_EXIT)) { 1009 tracehook_report_exit(&code);
1009 current->ptrace_message = code;
1010 ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
1011 }
1012 1010
1013 /* 1011 /*
1014 * We're taking recursive faults here in do_exit. Safest is to just 1012 * We're taking recursive faults here in do_exit. Safest is to just
@@ -1354,6 +1352,8 @@ static int wait_task_zombie(struct task_struct *p, int options,
1354 psig->coublock += 1352 psig->coublock +=
1355 task_io_get_oublock(p) + 1353 task_io_get_oublock(p) +
1356 sig->oublock + sig->coublock; 1354 sig->oublock + sig->coublock;
1355 task_io_accounting_add(&psig->ioac, &p->ioac);
1356 task_io_accounting_add(&psig->ioac, &sig->ioac);
1357 spin_unlock_irq(&p->parent->sighand->siglock); 1357 spin_unlock_irq(&p->parent->sighand->siglock);
1358 } 1358 }
1359 1359
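
The exit.c hunks above replace mm->core_waiters with a core_state that the dumper and the exiting threads share: each thread xchg()s itself onto core_state->dumper (the exchange doubles as the memory barrier the new comment mentions), the last arrival completes core_state->startup, and every thread then parks until coredump_finish() clears its entry. Below is a minimal userspace sketch of that handshake, assuming C11 atomics and a POSIX semaphore in place of the kernel's completion and scheduler; all names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <semaphore.h>

struct core_waiter {
        struct core_waiter *next;
        _Atomic bool parked;                    /* cleared by the dumper when it is done */
};

struct core_state {
        _Atomic(struct core_waiter *) dumper;   /* head of the waiter list */
        atomic_int nr_threads;                  /* threads still to check in */
        sem_t startup;                          /* stands in for the kernel completion */
};

static void core_wait(struct core_state *cs, struct core_waiter *self)
{
        atomic_store(&self->parked, true);

        /* Push self onto the list; atomic_exchange is the xchg() with the
         * implied full barrier, so the dumper sees a fully initialised node. */
        self->next = atomic_exchange(&cs->dumper, self);

        /* Last thread to arrive wakes the dumper. */
        if (atomic_fetch_sub(&cs->nr_threads, 1) == 1)
                sem_post(&cs->startup);

        /* The real code sleeps in TASK_UNINTERRUPTIBLE and is woken by
         * coredump_finish(); a userspace sketch can only poll the flag. */
        while (atomic_load(&self->parked))
                ;
}

The dumper side would wait on startup, walk the list from core_state->dumper, and clear each waiter's parked flag once the dump is written, which is what coredump_finish() does with self.task in the real code.
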
diff --git a/kernel/fork.c b/kernel/fork.c
index adefc1131f27..7ce2ebe84796 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -27,15 +27,18 @@
27#include <linux/key.h> 27#include <linux/key.h>
28#include <linux/binfmts.h> 28#include <linux/binfmts.h>
29#include <linux/mman.h> 29#include <linux/mman.h>
30#include <linux/mmu_notifier.h>
30#include <linux/fs.h> 31#include <linux/fs.h>
31#include <linux/nsproxy.h> 32#include <linux/nsproxy.h>
32#include <linux/capability.h> 33#include <linux/capability.h>
33#include <linux/cpu.h> 34#include <linux/cpu.h>
34#include <linux/cgroup.h> 35#include <linux/cgroup.h>
35#include <linux/security.h> 36#include <linux/security.h>
37#include <linux/hugetlb.h>
36#include <linux/swap.h> 38#include <linux/swap.h>
37#include <linux/syscalls.h> 39#include <linux/syscalls.h>
38#include <linux/jiffies.h> 40#include <linux/jiffies.h>
41#include <linux/tracehook.h>
39#include <linux/futex.h> 42#include <linux/futex.h>
40#include <linux/task_io_accounting_ops.h> 43#include <linux/task_io_accounting_ops.h>
41#include <linux/rcupdate.h> 44#include <linux/rcupdate.h>
@@ -92,6 +95,23 @@ int nr_processes(void)
92static struct kmem_cache *task_struct_cachep; 95static struct kmem_cache *task_struct_cachep;
93#endif 96#endif
94 97
98#ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
99static inline struct thread_info *alloc_thread_info(struct task_struct *tsk)
100{
101#ifdef CONFIG_DEBUG_STACK_USAGE
102 gfp_t mask = GFP_KERNEL | __GFP_ZERO;
103#else
104 gfp_t mask = GFP_KERNEL;
105#endif
106 return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
107}
108
109static inline void free_thread_info(struct thread_info *ti)
110{
111 free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
112}
113#endif
114
95/* SLAB cache for signal_struct structures (tsk->signal) */ 115/* SLAB cache for signal_struct structures (tsk->signal) */
96static struct kmem_cache *signal_cachep; 116static struct kmem_cache *signal_cachep;
97 117
@@ -307,6 +327,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
307 } 327 }
308 328
309 /* 329 /*
330 * Clear hugetlb-related page reserves for children. This only
331 * affects MAP_PRIVATE mappings. Faults generated by the child
332 * are not guaranteed to succeed, even if read-only
333 */
334 if (is_vm_hugetlb_page(tmp))
335 reset_vma_resv_huge_pages(tmp);
336
337 /*
310 * Link in the new vma and copy the page table entries. 338 * Link in the new vma and copy the page table entries.
311 */ 339 */
312 *pprev = tmp; 340 *pprev = tmp;
@@ -374,7 +402,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
374 INIT_LIST_HEAD(&mm->mmlist); 402 INIT_LIST_HEAD(&mm->mmlist);
375 mm->flags = (current->mm) ? current->mm->flags 403 mm->flags = (current->mm) ? current->mm->flags
376 : MMF_DUMP_FILTER_DEFAULT; 404 : MMF_DUMP_FILTER_DEFAULT;
377 mm->core_waiters = 0; 405 mm->core_state = NULL;
378 mm->nr_ptes = 0; 406 mm->nr_ptes = 0;
379 set_mm_counter(mm, file_rss, 0); 407 set_mm_counter(mm, file_rss, 0);
380 set_mm_counter(mm, anon_rss, 0); 408 set_mm_counter(mm, anon_rss, 0);
@@ -387,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
387 415
388 if (likely(!mm_alloc_pgd(mm))) { 416 if (likely(!mm_alloc_pgd(mm))) {
389 mm->def_flags = 0; 417 mm->def_flags = 0;
418 mmu_notifier_mm_init(mm);
390 return mm; 419 return mm;
391 } 420 }
392 421
@@ -419,6 +448,7 @@ void __mmdrop(struct mm_struct *mm)
419 BUG_ON(mm == &init_mm); 448 BUG_ON(mm == &init_mm);
420 mm_free_pgd(mm); 449 mm_free_pgd(mm);
421 destroy_context(mm); 450 destroy_context(mm);
451 mmu_notifier_mm_destroy(mm);
422 free_mm(mm); 452 free_mm(mm);
423} 453}
424EXPORT_SYMBOL_GPL(__mmdrop); 454EXPORT_SYMBOL_GPL(__mmdrop);
@@ -448,7 +478,7 @@ EXPORT_SYMBOL_GPL(mmput);
448/** 478/**
449 * get_task_mm - acquire a reference to the task's mm 479 * get_task_mm - acquire a reference to the task's mm
450 * 480 *
451 * Returns %NULL if the task has no mm. Checks PF_BORROWED_MM (meaning 481 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
452 * this kernel workthread has transiently adopted a user mm with use_mm, 482 * this kernel workthread has transiently adopted a user mm with use_mm,
453 * to do its AIO) is not set and if so returns a reference to it, after 483 * to do its AIO) is not set and if so returns a reference to it, after
454 * bumping up the use count. User must release the mm via mmput() 484 * bumping up the use count. User must release the mm via mmput()
@@ -461,7 +491,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
461 task_lock(task); 491 task_lock(task);
462 mm = task->mm; 492 mm = task->mm;
463 if (mm) { 493 if (mm) {
464 if (task->flags & PF_BORROWED_MM) 494 if (task->flags & PF_KTHREAD)
465 mm = NULL; 495 mm = NULL;
466 else 496 else
467 atomic_inc(&mm->mm_users); 497 atomic_inc(&mm->mm_users);
@@ -630,13 +660,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
630 path_get(&old->root); 660 path_get(&old->root);
631 fs->pwd = old->pwd; 661 fs->pwd = old->pwd;
632 path_get(&old->pwd); 662 path_get(&old->pwd);
633 if (old->altroot.dentry) {
634 fs->altroot = old->altroot;
635 path_get(&old->altroot);
636 } else {
637 fs->altroot.mnt = NULL;
638 fs->altroot.dentry = NULL;
639 }
640 read_unlock(&old->lock); 663 read_unlock(&old->lock);
641 } 664 }
642 return fs; 665 return fs;
@@ -786,6 +809,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
786 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; 809 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
787 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; 810 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
788 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; 811 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
812 task_io_accounting_init(&sig->ioac);
789 sig->sum_sched_runtime = 0; 813 sig->sum_sched_runtime = 0;
790 INIT_LIST_HEAD(&sig->cpu_timers[0]); 814 INIT_LIST_HEAD(&sig->cpu_timers[0]);
791 INIT_LIST_HEAD(&sig->cpu_timers[1]); 815 INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -833,8 +857,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
833 857
834 new_flags &= ~PF_SUPERPRIV; 858 new_flags &= ~PF_SUPERPRIV;
835 new_flags |= PF_FORKNOEXEC; 859 new_flags |= PF_FORKNOEXEC;
836 if (!(clone_flags & CLONE_PTRACE)) 860 new_flags |= PF_STARTING;
837 p->ptrace = 0;
838 p->flags = new_flags; 861 p->flags = new_flags;
839 clear_freeze_flag(p); 862 clear_freeze_flag(p);
840} 863}
@@ -875,7 +898,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
875 struct pt_regs *regs, 898 struct pt_regs *regs,
876 unsigned long stack_size, 899 unsigned long stack_size,
877 int __user *child_tidptr, 900 int __user *child_tidptr,
878 struct pid *pid) 901 struct pid *pid,
902 int trace)
879{ 903{
880 int retval; 904 int retval;
881 struct task_struct *p; 905 struct task_struct *p;
@@ -968,13 +992,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
968 p->last_switch_timestamp = 0; 992 p->last_switch_timestamp = 0;
969#endif 993#endif
970 994
971#ifdef CONFIG_TASK_XACCT 995 task_io_accounting_init(&p->ioac);
972 p->rchar = 0; /* I/O counter: bytes read */
973 p->wchar = 0; /* I/O counter: bytes written */
974 p->syscr = 0; /* I/O counter: read syscalls */
975 p->syscw = 0; /* I/O counter: write syscalls */
976#endif
977 task_io_accounting_init(p);
978 acct_clear_integrals(p); 996 acct_clear_integrals(p);
979 997
980 p->it_virt_expires = cputime_zero; 998 p->it_virt_expires = cputime_zero;
@@ -1081,6 +1099,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1081 if (clone_flags & CLONE_THREAD) 1099 if (clone_flags & CLONE_THREAD)
1082 p->tgid = current->tgid; 1100 p->tgid = current->tgid;
1083 1101
1102 if (current->nsproxy != p->nsproxy) {
1103 retval = ns_cgroup_clone(p, pid);
1104 if (retval)
1105 goto bad_fork_free_pid;
1106 }
1107
1084 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; 1108 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1085 /* 1109 /*
1086 * Clear TID on mm_release()? 1110 * Clear TID on mm_release()?
@@ -1125,8 +1149,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1125 */ 1149 */
1126 p->group_leader = p; 1150 p->group_leader = p;
1127 INIT_LIST_HEAD(&p->thread_group); 1151 INIT_LIST_HEAD(&p->thread_group);
1128 INIT_LIST_HEAD(&p->ptrace_entry);
1129 INIT_LIST_HEAD(&p->ptraced);
1130 1152
1131 /* Now that the task is set up, run cgroup callbacks if 1153 /* Now that the task is set up, run cgroup callbacks if
1132 * necessary. We need to run them before the task is visible 1154 * necessary. We need to run them before the task is visible
@@ -1157,7 +1179,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1157 p->real_parent = current->real_parent; 1179 p->real_parent = current->real_parent;
1158 else 1180 else
1159 p->real_parent = current; 1181 p->real_parent = current;
1160 p->parent = p->real_parent;
1161 1182
1162 spin_lock(&current->sighand->siglock); 1183 spin_lock(&current->sighand->siglock);
1163 1184
@@ -1199,8 +1220,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1199 1220
1200 if (likely(p->pid)) { 1221 if (likely(p->pid)) {
1201 list_add_tail(&p->sibling, &p->real_parent->children); 1222 list_add_tail(&p->sibling, &p->real_parent->children);
1202 if (unlikely(p->ptrace & PT_PTRACED)) 1223 tracehook_finish_clone(p, clone_flags, trace);
1203 __ptrace_link(p, current->parent);
1204 1224
1205 if (thread_group_leader(p)) { 1225 if (thread_group_leader(p)) {
1206 if (clone_flags & CLONE_NEWPID) 1226 if (clone_flags & CLONE_NEWPID)
@@ -1285,29 +1305,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
1285 struct pt_regs regs; 1305 struct pt_regs regs;
1286 1306
1287 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, 1307 task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1288 &init_struct_pid); 1308 &init_struct_pid, 0);
1289 if (!IS_ERR(task)) 1309 if (!IS_ERR(task))
1290 init_idle(task, cpu); 1310 init_idle(task, cpu);
1291 1311
1292 return task; 1312 return task;
1293} 1313}
1294 1314
1295static int fork_traceflag(unsigned clone_flags)
1296{
1297 if (clone_flags & CLONE_UNTRACED)
1298 return 0;
1299 else if (clone_flags & CLONE_VFORK) {
1300 if (current->ptrace & PT_TRACE_VFORK)
1301 return PTRACE_EVENT_VFORK;
1302 } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1303 if (current->ptrace & PT_TRACE_CLONE)
1304 return PTRACE_EVENT_CLONE;
1305 } else if (current->ptrace & PT_TRACE_FORK)
1306 return PTRACE_EVENT_FORK;
1307
1308 return 0;
1309}
1310
1311/* 1315/*
1312 * Ok, this is the main fork-routine. 1316 * Ok, this is the main fork-routine.
1313 * 1317 *
@@ -1342,14 +1346,14 @@ long do_fork(unsigned long clone_flags,
1342 } 1346 }
1343 } 1347 }
1344 1348
1345 if (unlikely(current->ptrace)) { 1349 /*
1346 trace = fork_traceflag (clone_flags); 1350 * When called from kernel_thread, don't do user tracing stuff.
1347 if (trace) 1351 */
1348 clone_flags |= CLONE_PTRACE; 1352 if (likely(user_mode(regs)))
1349 } 1353 trace = tracehook_prepare_clone(clone_flags);
1350 1354
1351 p = copy_process(clone_flags, stack_start, regs, stack_size, 1355 p = copy_process(clone_flags, stack_start, regs, stack_size,
1352 child_tidptr, NULL); 1356 child_tidptr, NULL, trace);
1353 /* 1357 /*
1354 * Do this prior waking up the new thread - the thread pointer 1358 * Do this prior waking up the new thread - the thread pointer
1355 * might get invalid after that point, if the thread exits quickly. 1359 * might get invalid after that point, if the thread exits quickly.
@@ -1367,32 +1371,35 @@ long do_fork(unsigned long clone_flags,
1367 init_completion(&vfork); 1371 init_completion(&vfork);
1368 } 1372 }
1369 1373
1370 if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { 1374 tracehook_report_clone(trace, regs, clone_flags, nr, p);
1375
1376 /*
1377 * We set PF_STARTING at creation in case tracing wants to
1378 * use this to distinguish a fully live task from one that
1379 * hasn't gotten to tracehook_report_clone() yet. Now we
1380 * clear it and set the child going.
1381 */
1382 p->flags &= ~PF_STARTING;
1383
1384 if (unlikely(clone_flags & CLONE_STOPPED)) {
1371 /* 1385 /*
1372 * We'll start up with an immediate SIGSTOP. 1386 * We'll start up with an immediate SIGSTOP.
1373 */ 1387 */
1374 sigaddset(&p->pending.signal, SIGSTOP); 1388 sigaddset(&p->pending.signal, SIGSTOP);
1375 set_tsk_thread_flag(p, TIF_SIGPENDING); 1389 set_tsk_thread_flag(p, TIF_SIGPENDING);
1376 }
1377
1378 if (!(clone_flags & CLONE_STOPPED))
1379 wake_up_new_task(p, clone_flags);
1380 else
1381 __set_task_state(p, TASK_STOPPED); 1390 __set_task_state(p, TASK_STOPPED);
1382 1391 } else {
1383 if (unlikely (trace)) { 1392 wake_up_new_task(p, clone_flags);
1384 current->ptrace_message = nr;
1385 ptrace_notify ((trace << 8) | SIGTRAP);
1386 } 1393 }
1387 1394
1395 tracehook_report_clone_complete(trace, regs,
1396 clone_flags, nr, p);
1397
1388 if (clone_flags & CLONE_VFORK) { 1398 if (clone_flags & CLONE_VFORK) {
1389 freezer_do_not_count(); 1399 freezer_do_not_count();
1390 wait_for_completion(&vfork); 1400 wait_for_completion(&vfork);
1391 freezer_count(); 1401 freezer_count();
1392 if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { 1402 tracehook_report_vfork_done(p, nr);
1393 current->ptrace_message = nr;
1394 ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
1395 }
1396 } 1403 }
1397 } else { 1404 } else {
1398 nr = PTR_ERR(p); 1405 nr = PTR_ERR(p);
@@ -1404,7 +1411,7 @@ long do_fork(unsigned long clone_flags,
1404#define ARCH_MIN_MMSTRUCT_ALIGN 0 1411#define ARCH_MIN_MMSTRUCT_ALIGN 0
1405#endif 1412#endif
1406 1413
1407static void sighand_ctor(struct kmem_cache *cachep, void *data) 1414static void sighand_ctor(void *data)
1408{ 1415{
1409 struct sighand_struct *sighand = data; 1416 struct sighand_struct *sighand = data;
1410 1417
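
fork.c above gains a generic alloc_thread_info()/free_thread_info() fallback whose gfp mask is chosen at compile time: __GFP_ZERO is added only under CONFIG_DEBUG_STACK_USAGE, so stack-usage accounting sees a known fill without slowing the common path. A userspace analogue of the same pattern, purely illustrative (DEBUG_STACK_USAGE and STACK_SIZE here are assumptions, not kernel values):

#include <stdlib.h>

#define STACK_SIZE      (2 * 4096)      /* assumed THREAD_SIZE stand-in */

static void *alloc_stack(void)
{
#ifdef DEBUG_STACK_USAGE
        /* Zeroed allocation: a checker can later scan for untouched bytes. */
        return calloc(1, STACK_SIZE);
#else
        /* Plain allocation on the fast path. */
        return malloc(STACK_SIZE);
#endif
}

static void free_stack(void *stack)
{
        free(stack);
}

The debug build pays for the zeroing; the normal build keeps the cheap allocation.
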
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 964964baefa2..3cd441ebf5d2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -28,8 +28,7 @@ void dynamic_irq_init(unsigned int irq)
28 unsigned long flags; 28 unsigned long flags;
29 29
30 if (irq >= NR_IRQS) { 30 if (irq >= NR_IRQS) {
31 printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); 31 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
32 WARN_ON(1);
33 return; 32 return;
34 } 33 }
35 34
@@ -62,8 +61,7 @@ void dynamic_irq_cleanup(unsigned int irq)
62 unsigned long flags; 61 unsigned long flags;
63 62
64 if (irq >= NR_IRQS) { 63 if (irq >= NR_IRQS) {
65 printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); 64 WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
66 WARN_ON(1);
67 return; 65 return;
68 } 66 }
69 67
@@ -71,9 +69,8 @@ void dynamic_irq_cleanup(unsigned int irq)
71 spin_lock_irqsave(&desc->lock, flags); 69 spin_lock_irqsave(&desc->lock, flags);
72 if (desc->action) { 70 if (desc->action) {
73 spin_unlock_irqrestore(&desc->lock, flags); 71 spin_unlock_irqrestore(&desc->lock, flags);
74 printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n", 72 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
75 irq); 73 irq);
76 WARN_ON(1);
77 return; 74 return;
78 } 75 }
79 desc->msi_desc = NULL; 76 desc->msi_desc = NULL;
@@ -96,8 +93,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
96 unsigned long flags; 93 unsigned long flags;
97 94
98 if (irq >= NR_IRQS) { 95 if (irq >= NR_IRQS) {
99 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq); 96 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
100 WARN_ON(1);
101 return -EINVAL; 97 return -EINVAL;
102 } 98 }
103 99
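
The chip.c hunks above fold each printk() + WARN_ON(1) pair into a single WARN(1, ...) call. A plain-C sketch of the shape such a macro takes, assuming GCC statement expressions (which the kernel already requires); this is not the kernel's definition, only an approximation of its behaviour:

#include <stdio.h>

/* Evaluates to the condition, so callers can report and branch in one step:
 *
 *      if (WARN(irq >= nr_irqs, "Trying to initialize invalid IRQ%d\n", irq))
 *              return;
 */
#define WARN(cond, ...) ({                                              \
        int __ret_warn = !!(cond);                                      \
        if (__ret_warn) {                                               \
                fprintf(stderr, "WARNING: %s:%d: ", __FILE__, __LINE__);\
                fprintf(stderr, __VA_ARGS__);                           \
        }                                                               \
        __ret_warn;                                                     \
})
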
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 77a51be36010..0314074fa232 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -177,8 +177,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
177{ 177{
178 switch (desc->depth) { 178 switch (desc->depth) {
179 case 0: 179 case 0:
180 printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 180 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
181 WARN_ON(1);
182 break; 181 break;
183 case 1: { 182 case 1: {
184 unsigned int status = desc->status & ~IRQ_DISABLED; 183 unsigned int status = desc->status & ~IRQ_DISABLED;
@@ -217,6 +216,17 @@ void enable_irq(unsigned int irq)
217} 216}
218EXPORT_SYMBOL(enable_irq); 217EXPORT_SYMBOL(enable_irq);
219 218
219int set_irq_wake_real(unsigned int irq, unsigned int on)
220{
221 struct irq_desc *desc = irq_desc + irq;
222 int ret = -ENXIO;
223
224 if (desc->chip->set_wake)
225 ret = desc->chip->set_wake(irq, on);
226
227 return ret;
228}
229
220/** 230/**
221 * set_irq_wake - control irq power management wakeup 231 * set_irq_wake - control irq power management wakeup
222 * @irq: interrupt to control 232 * @irq: interrupt to control
@@ -233,30 +243,32 @@ int set_irq_wake(unsigned int irq, unsigned int on)
233{ 243{
234 struct irq_desc *desc = irq_desc + irq; 244 struct irq_desc *desc = irq_desc + irq;
235 unsigned long flags; 245 unsigned long flags;
236 int ret = -ENXIO; 246 int ret = 0;
237 int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake;
238 247
239 /* wakeup-capable irqs can be shared between drivers that 248 /* wakeup-capable irqs can be shared between drivers that
240 * don't need to have the same sleep mode behaviors. 249 * don't need to have the same sleep mode behaviors.
241 */ 250 */
242 spin_lock_irqsave(&desc->lock, flags); 251 spin_lock_irqsave(&desc->lock, flags);
243 if (on) { 252 if (on) {
244 if (desc->wake_depth++ == 0) 253 if (desc->wake_depth++ == 0) {
245 desc->status |= IRQ_WAKEUP; 254 ret = set_irq_wake_real(irq, on);
246 else 255 if (ret)
247 set_wake = NULL; 256 desc->wake_depth = 0;
257 else
258 desc->status |= IRQ_WAKEUP;
259 }
248 } else { 260 } else {
249 if (desc->wake_depth == 0) { 261 if (desc->wake_depth == 0) {
250 printk(KERN_WARNING "Unbalanced IRQ %d " 262 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
251 "wake disable\n", irq); 263 } else if (--desc->wake_depth == 0) {
252 WARN_ON(1); 264 ret = set_irq_wake_real(irq, on);
253 } else if (--desc->wake_depth == 0) 265 if (ret)
254 desc->status &= ~IRQ_WAKEUP; 266 desc->wake_depth = 1;
255 else 267 else
256 set_wake = NULL; 268 desc->status &= ~IRQ_WAKEUP;
269 }
257 } 270 }
258 if (set_wake) 271
259 ret = desc->chip->set_wake(irq, on);
260 spin_unlock_irqrestore(&desc->lock, flags); 272 spin_unlock_irqrestore(&desc->lock, flags);
261 return ret; 273 return ret;
262} 274}
@@ -293,6 +305,31 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
293 desc->handle_irq = NULL; 305 desc->handle_irq = NULL;
294} 306}
295 307
308static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
309 unsigned long flags)
310{
311 int ret;
312
313 if (!chip || !chip->set_type) {
314 /*
315 * IRQF_TRIGGER_* but the PIC does not support multiple
316 * flow-types?
317 */
318 pr_warning("No set_type function for IRQ %d (%s)\n", irq,
319 chip ? (chip->name ? : "unknown") : "unknown");
320 return 0;
321 }
322
323 ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
324
325 if (ret)
326 pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
327 (int)(flags & IRQF_TRIGGER_MASK),
328 irq, chip->set_type);
329
330 return ret;
331}
332
296/* 333/*
297 * Internal function to register an irqaction - typically used to 334 * Internal function to register an irqaction - typically used to
298 * allocate special interrupts that are part of the architecture. 335 * allocate special interrupts that are part of the architecture.
@@ -304,6 +341,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
304 const char *old_name = NULL; 341 const char *old_name = NULL;
305 unsigned long flags; 342 unsigned long flags;
306 int shared = 0; 343 int shared = 0;
344 int ret;
307 345
308 if (irq >= NR_IRQS) 346 if (irq >= NR_IRQS)
309 return -EINVAL; 347 return -EINVAL;
@@ -361,35 +399,23 @@ int setup_irq(unsigned int irq, struct irqaction *new)
361 shared = 1; 399 shared = 1;
362 } 400 }
363 401
364 *p = new;
365
366 /* Exclude IRQ from balancing */
367 if (new->flags & IRQF_NOBALANCING)
368 desc->status |= IRQ_NO_BALANCING;
369
370 if (!shared) { 402 if (!shared) {
371 irq_chip_set_defaults(desc->chip); 403 irq_chip_set_defaults(desc->chip);
372 404
373#if defined(CONFIG_IRQ_PER_CPU)
374 if (new->flags & IRQF_PERCPU)
375 desc->status |= IRQ_PER_CPU;
376#endif
377
378 /* Setup the type (level, edge polarity) if configured: */ 405 /* Setup the type (level, edge polarity) if configured: */
379 if (new->flags & IRQF_TRIGGER_MASK) { 406 if (new->flags & IRQF_TRIGGER_MASK) {
380 if (desc->chip->set_type) 407 ret = __irq_set_trigger(desc->chip, irq, new->flags);
381 desc->chip->set_type(irq, 408
382 new->flags & IRQF_TRIGGER_MASK); 409 if (ret) {
383 else 410 spin_unlock_irqrestore(&desc->lock, flags);
384 /* 411 return ret;
385 * IRQF_TRIGGER_* but the PIC does not support 412 }
386 * multiple flow-types?
387 */
388 printk(KERN_WARNING "No IRQF_TRIGGER set_type "
389 "function for IRQ %d (%s)\n", irq,
390 desc->chip->name);
391 } else 413 } else
392 compat_irq_chip_set_default_handler(desc); 414 compat_irq_chip_set_default_handler(desc);
415#if defined(CONFIG_IRQ_PER_CPU)
416 if (new->flags & IRQF_PERCPU)
417 desc->status |= IRQ_PER_CPU;
418#endif
393 419
394 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | 420 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
395 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); 421 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
@@ -408,6 +434,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
408 /* Set default affinity mask once everything is setup */ 434 /* Set default affinity mask once everything is setup */
409 irq_select_affinity(irq); 435 irq_select_affinity(irq);
410 } 436 }
437
438 *p = new;
439
440 /* Exclude IRQ from balancing */
441 if (new->flags & IRQF_NOBALANCING)
442 desc->status |= IRQ_NO_BALANCING;
443
411 /* Reset broken irq detection when installing new handler */ 444 /* Reset broken irq detection when installing new handler */
412 desc->irq_count = 0; 445 desc->irq_count = 0;
413 desc->irqs_unhandled = 0; 446 desc->irqs_unhandled = 0;
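
The set_irq_wake() rework above keeps the wake_depth reference count honest when the chip callback fails: a failed enable rolls the depth back to 0, a failed disable leaves it at 1 so the wake source stays accounted for. The same pattern on a made-up device handle (struct wake_ctl and hw_set are assumptions for the sketch, not kernel types):

#include <errno.h>

struct wake_ctl {
        unsigned int depth;             /* how many users asked for wakeups */
        int (*hw_set)(int on);          /* may fail, e.g. with -ENXIO */
};

static int wake_enable(struct wake_ctl *w)
{
        int ret = 0;

        if (w->depth++ == 0) {
                ret = w->hw_set(1);
                if (ret)
                        w->depth = 0;   /* failed: undo the increment */
        }
        return ret;
}

static int wake_disable(struct wake_ctl *w)
{
        int ret = 0;

        if (w->depth == 0)
                return -EINVAL;         /* unbalanced disable */
        if (--w->depth == 0) {
                ret = w->hw_set(0);
                if (ret)
                        w->depth = 1;   /* failed: keep the wake source armed */
        }
        return ret;
}
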
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 6fc0040f3e3a..38fc10ac7541 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -176,7 +176,7 @@ static unsigned long get_symbol_pos(unsigned long addr,
176 high = kallsyms_num_syms; 176 high = kallsyms_num_syms;
177 177
178 while (high - low > 1) { 178 while (high - low > 1) {
179 mid = (low + high) / 2; 179 mid = low + (high - low) / 2;
180 if (kallsyms_addresses[mid] <= addr) 180 if (kallsyms_addresses[mid] <= addr)
181 low = mid; 181 low = mid;
182 else 182 else
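
The one-line kallsyms change swaps (low + high) / 2 for low + (high - low) / 2, the standard overflow-safe midpoint. A standalone sketch of the same search shape (the array and its meaning are illustrative, not the kallsyms tables):

#include <stddef.h>

/* Find the largest index whose address is <= addr in a sorted array.
 * low + (high - low) / 2 cannot overflow, unlike (low + high) / 2. */
static size_t largest_at_or_below(const unsigned long *addrs, size_t n,
                                  unsigned long addr)
{
        size_t low = 0, high = n;       /* caller guarantees addrs[0] <= addr */

        while (high - low > 1) {
                size_t mid = low + (high - low) / 2;

                if (addrs[mid] <= addr)
                        low = mid;
                else
                        high = mid;
        }
        return low;
}
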
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 1c5fcacbcf33..c8a4370e2a34 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -24,6 +24,12 @@
24#include <linux/utsrelease.h> 24#include <linux/utsrelease.h>
25#include <linux/utsname.h> 25#include <linux/utsname.h>
26#include <linux/numa.h> 26#include <linux/numa.h>
27#include <linux/suspend.h>
28#include <linux/device.h>
29#include <linux/freezer.h>
30#include <linux/pm.h>
31#include <linux/cpu.h>
32#include <linux/console.h>
27 33
28#include <asm/page.h> 34#include <asm/page.h>
29#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -242,6 +248,12 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
242 goto out; 248 goto out;
243 } 249 }
244 250
251 image->swap_page = kimage_alloc_control_pages(image, 0);
252 if (!image->swap_page) {
253 printk(KERN_ERR "Could not allocate swap buffer\n");
254 goto out;
255 }
256
245 result = 0; 257 result = 0;
246 out: 258 out:
247 if (result == 0) 259 if (result == 0)
@@ -589,14 +601,12 @@ static void kimage_free_extra_pages(struct kimage *image)
589 kimage_free_page_list(&image->unuseable_pages); 601 kimage_free_page_list(&image->unuseable_pages);
590 602
591} 603}
592static int kimage_terminate(struct kimage *image) 604static void kimage_terminate(struct kimage *image)
593{ 605{
594 if (*image->entry != 0) 606 if (*image->entry != 0)
595 image->entry++; 607 image->entry++;
596 608
597 *image->entry = IND_DONE; 609 *image->entry = IND_DONE;
598
599 return 0;
600} 610}
601 611
602#define for_each_kimage_entry(image, ptr, entry) \ 612#define for_each_kimage_entry(image, ptr, entry) \
@@ -988,6 +998,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
988 if (result) 998 if (result)
989 goto out; 999 goto out;
990 1000
1001 if (flags & KEXEC_PRESERVE_CONTEXT)
1002 image->preserve_context = 1;
991 result = machine_kexec_prepare(image); 1003 result = machine_kexec_prepare(image);
992 if (result) 1004 if (result)
993 goto out; 1005 goto out;
@@ -997,9 +1009,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
997 if (result) 1009 if (result)
998 goto out; 1010 goto out;
999 } 1011 }
1000 result = kimage_terminate(image); 1012 kimage_terminate(image);
1001 if (result)
1002 goto out;
1003 } 1013 }
1004 /* Install the new kernel, and Uninstall the old */ 1014 /* Install the new kernel, and Uninstall the old */
1005 image = xchg(dest_image, image); 1015 image = xchg(dest_image, image);
@@ -1415,3 +1425,85 @@ static int __init crash_save_vmcoreinfo_init(void)
1415} 1425}
1416 1426
1417module_init(crash_save_vmcoreinfo_init) 1427module_init(crash_save_vmcoreinfo_init)
1428
1429/**
1430 * kernel_kexec - reboot the system
1431 *
1432 * Move into place and start executing a preloaded standalone
1433 * executable. If nothing was preloaded return an error.
1434 */
1435int kernel_kexec(void)
1436{
1437 int error = 0;
1438
1439 if (xchg(&kexec_lock, 1))
1440 return -EBUSY;
1441 if (!kexec_image) {
1442 error = -EINVAL;
1443 goto Unlock;
1444 }
1445
1446 if (kexec_image->preserve_context) {
1447#ifdef CONFIG_KEXEC_JUMP
1448 mutex_lock(&pm_mutex);
1449 pm_prepare_console();
1450 error = freeze_processes();
1451 if (error) {
1452 error = -EBUSY;
1453 goto Restore_console;
1454 }
1455 suspend_console();
1456 error = device_suspend(PMSG_FREEZE);
1457 if (error)
1458 goto Resume_console;
1459 error = disable_nonboot_cpus();
1460 if (error)
1461 goto Resume_devices;
1462 local_irq_disable();
1463 /* At this point, device_suspend() has been called,
1464 * but *not* device_power_down(). We *must*
1465 * device_power_down() now. Otherwise, drivers for
1466 * some devices (e.g. interrupt controllers) become
1467 * desynchronized with the actual state of the
1468 * hardware at resume time, and evil weirdness ensues.
1469 */
1470 error = device_power_down(PMSG_FREEZE);
1471 if (error)
1472 goto Enable_irqs;
1473 save_processor_state();
1474#endif
1475 } else {
1476 blocking_notifier_call_chain(&reboot_notifier_list,
1477 SYS_RESTART, NULL);
1478 system_state = SYSTEM_RESTART;
1479 device_shutdown();
1480 sysdev_shutdown();
1481 printk(KERN_EMERG "Starting new kernel\n");
1482 machine_shutdown();
1483 }
1484
1485 machine_kexec(kexec_image);
1486
1487 if (kexec_image->preserve_context) {
1488#ifdef CONFIG_KEXEC_JUMP
1489 restore_processor_state();
1490 device_power_up(PMSG_RESTORE);
1491 Enable_irqs:
1492 local_irq_enable();
1493 enable_nonboot_cpus();
1494 Resume_devices:
1495 device_resume(PMSG_RESTORE);
1496 Resume_console:
1497 resume_console();
1498 thaw_processes();
1499 Restore_console:
1500 pm_restore_console();
1501 mutex_unlock(&pm_mutex);
1502#endif
1503 }
1504
1505 Unlock:
1506 xchg(&kexec_lock, 0);
1507
1508 return error;
1509}
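
The new kernel_kexec() above is mostly a suspend-style teardown whose error handling is a goto ladder: every step that succeeds gets a matching label on the unwind path, executed in reverse order, and the success path falls through the same labels after the jump. Stripped of the PM specifics, the shape looks like this (the step_*() helpers are placeholders, not kernel APIs):

static int step_freeze(void)            { return 0; }
static int step_suspend_devices(void)   { return 0; }
static int step_disable_cpus(void)      { return 0; }
static void step_enable_cpus(void)      { }
static void step_resume_devices(void)   { }
static void step_thaw(void)             { }
static void do_the_jump(void)           { }     /* machine_kexec() in the real code */

static int do_transition(void)
{
        int error;

        error = step_freeze();
        if (error)
                return error;
        error = step_suspend_devices();
        if (error)
                goto thaw;
        error = step_disable_cpus();
        if (error)
                goto resume_devices;

        do_the_jump();

        step_enable_cpus();
resume_devices:
        step_resume_devices();
thaw:
        step_thaw();
        return error;
}
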
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 3ec23c3ec97f..eaa21fc9ad1d 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -56,12 +56,14 @@
56 56
57static int kgdb_break_asap; 57static int kgdb_break_asap;
58 58
59#define KGDB_MAX_THREAD_QUERY 17
59struct kgdb_state { 60struct kgdb_state {
60 int ex_vector; 61 int ex_vector;
61 int signo; 62 int signo;
62 int err_code; 63 int err_code;
63 int cpu; 64 int cpu;
64 int pass_exception; 65 int pass_exception;
66 unsigned long thr_query;
65 unsigned long threadid; 67 unsigned long threadid;
66 long kgdb_usethreadid; 68 long kgdb_usethreadid;
67 struct pt_regs *linux_regs; 69 struct pt_regs *linux_regs;
@@ -166,13 +168,6 @@ early_param("nokgdbroundup", opt_nokgdbroundup);
166 * Weak aliases for breakpoint management, 168 * Weak aliases for breakpoint management,
167 * can be overridden by architectures when needed: 169
168 */ 170 */
169int __weak kgdb_validate_break_address(unsigned long addr)
170{
171 char tmp_variable[BREAK_INSTR_SIZE];
172
173 return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE);
174}
175
176int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) 171int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
177{ 172{
178 int err; 173 int err;
@@ -191,6 +186,25 @@ int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
191 (char *)bundle, BREAK_INSTR_SIZE); 186 (char *)bundle, BREAK_INSTR_SIZE);
192} 187}
193 188
189int __weak kgdb_validate_break_address(unsigned long addr)
190{
191 char tmp_variable[BREAK_INSTR_SIZE];
192 int err;
193 /* Validate setting the breakpoint and then removing it. If the
194 * remove fails, the kernel needs to emit a bad message because we
195 * are in deep trouble, not being able to put things back the way
196 * we found them.
197 */
198 err = kgdb_arch_set_breakpoint(addr, tmp_variable);
199 if (err)
200 return err;
201 err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
202 if (err)
203 printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
204 "memory destroyed at: %lx", addr);
205 return err;
206}
207
194unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) 208unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
195{ 209{
196 return instruction_pointer(regs); 210 return instruction_pointer(regs);
@@ -433,9 +447,14 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
433{ 447{
434 int hex_val; 448 int hex_val;
435 int num = 0; 449 int num = 0;
450 int negate = 0;
436 451
437 *long_val = 0; 452 *long_val = 0;
438 453
454 if (**ptr == '-') {
455 negate = 1;
456 (*ptr)++;
457 }
439 while (**ptr) { 458 while (**ptr) {
440 hex_val = hex(**ptr); 459 hex_val = hex(**ptr);
441 if (hex_val < 0) 460 if (hex_val < 0)
@@ -446,6 +465,9 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
446 (*ptr)++; 465 (*ptr)++;
447 } 466 }
448 467
468 if (negate)
469 *long_val = -*long_val;
470
449 return num; 471 return num;
450} 472}
451 473
@@ -515,10 +537,16 @@ static void int_to_threadref(unsigned char *id, int value)
515static struct task_struct *getthread(struct pt_regs *regs, int tid) 537static struct task_struct *getthread(struct pt_regs *regs, int tid)
516{ 538{
517 /* 539 /*
518 * Non-positive TIDs are remapped idle tasks: 540 * Non-positive TIDs are remapped to the cpu shadow information
519 */ 541 */
520 if (tid <= 0) 542 if (tid == 0 || tid == -1)
521 return idle_task(-tid); 543 tid = -atomic_read(&kgdb_active) - 2;
544 if (tid < 0) {
545 if (kgdb_info[-tid - 2].task)
546 return kgdb_info[-tid - 2].task;
547 else
548 return idle_task(-tid - 2);
549 }
522 550
523 /* 551 /*
524 * find_task_by_pid_ns() does not take the tasklist lock anymore 552 * find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -725,14 +753,15 @@ setundefined:
725} 753}
726 754
727/* 755/*
728 * Remap normal tasks to their real PID, idle tasks to -1 ... -NR_CPUs: 756 * Remap normal tasks to their real PID,
757 * CPU shadow threads are mapped to -CPU - 2
729 */ 758 */
730static inline int shadow_pid(int realpid) 759static inline int shadow_pid(int realpid)
731{ 760{
732 if (realpid) 761 if (realpid)
733 return realpid; 762 return realpid;
734 763
735 return -1-raw_smp_processor_id(); 764 return -raw_smp_processor_id() - 2;
736} 765}
737 766
738static char gdbmsgbuf[BUFMAX + 1]; 767static char gdbmsgbuf[BUFMAX + 1];
@@ -826,7 +855,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks)
826 local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo; 855 local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo;
827 } else { 856 } else {
828 local_debuggerinfo = NULL; 857 local_debuggerinfo = NULL;
829 for (i = 0; i < NR_CPUS; i++) { 858 for_each_online_cpu(i) {
830 /* 859 /*
831 * Try to find the task on some other 860 * Try to find the task on some other
832 * or possibly this node if we do not 861 * or possibly this node if we do not
@@ -960,10 +989,13 @@ static int gdb_cmd_reboot(struct kgdb_state *ks)
960/* Handle the 'q' query packets */ 989/* Handle the 'q' query packets */
961static void gdb_cmd_query(struct kgdb_state *ks) 990static void gdb_cmd_query(struct kgdb_state *ks)
962{ 991{
963 struct task_struct *thread; 992 struct task_struct *g;
993 struct task_struct *p;
964 unsigned char thref[8]; 994 unsigned char thref[8];
965 char *ptr; 995 char *ptr;
966 int i; 996 int i;
997 int cpu;
998 int finished = 0;
967 999
968 switch (remcom_in_buffer[1]) { 1000 switch (remcom_in_buffer[1]) {
969 case 's': 1001 case 's':
@@ -973,22 +1005,34 @@ static void gdb_cmd_query(struct kgdb_state *ks)
973 break; 1005 break;
974 } 1006 }
975 1007
976 if (remcom_in_buffer[1] == 'f') 1008 i = 0;
977 ks->threadid = 1;
978
979 remcom_out_buffer[0] = 'm'; 1009 remcom_out_buffer[0] = 'm';
980 ptr = remcom_out_buffer + 1; 1010 ptr = remcom_out_buffer + 1;
981 1011 if (remcom_in_buffer[1] == 'f') {
982 for (i = 0; i < 17; ks->threadid++) { 1012 /* Each cpu is a shadow thread */
983 thread = getthread(ks->linux_regs, ks->threadid); 1013 for_each_online_cpu(cpu) {
984 if (thread) { 1014 ks->thr_query = 0;
985 int_to_threadref(thref, ks->threadid); 1015 int_to_threadref(thref, -cpu - 2);
986 pack_threadid(ptr, thref); 1016 pack_threadid(ptr, thref);
987 ptr += BUF_THREAD_ID_SIZE; 1017 ptr += BUF_THREAD_ID_SIZE;
988 *(ptr++) = ','; 1018 *(ptr++) = ',';
989 i++; 1019 i++;
990 } 1020 }
991 } 1021 }
1022
1023 do_each_thread(g, p) {
1024 if (i >= ks->thr_query && !finished) {
1025 int_to_threadref(thref, p->pid);
1026 pack_threadid(ptr, thref);
1027 ptr += BUF_THREAD_ID_SIZE;
1028 *(ptr++) = ',';
1029 ks->thr_query++;
1030 if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0)
1031 finished = 1;
1032 }
1033 i++;
1034 } while_each_thread(g, p);
1035
992 *(--ptr) = '\0'; 1036 *(--ptr) = '\0';
993 break; 1037 break;
994 1038
@@ -1011,15 +1055,15 @@ static void gdb_cmd_query(struct kgdb_state *ks)
1011 error_packet(remcom_out_buffer, -EINVAL); 1055 error_packet(remcom_out_buffer, -EINVAL);
1012 break; 1056 break;
1013 } 1057 }
1014 if (ks->threadid > 0) { 1058 if ((int)ks->threadid > 0) {
1015 kgdb_mem2hex(getthread(ks->linux_regs, 1059 kgdb_mem2hex(getthread(ks->linux_regs,
1016 ks->threadid)->comm, 1060 ks->threadid)->comm,
1017 remcom_out_buffer, 16); 1061 remcom_out_buffer, 16);
1018 } else { 1062 } else {
1019 static char tmpstr[23 + BUF_THREAD_ID_SIZE]; 1063 static char tmpstr[23 + BUF_THREAD_ID_SIZE];
1020 1064
1021 sprintf(tmpstr, "Shadow task %d for pid 0", 1065 sprintf(tmpstr, "shadowCPU%d",
1022 (int)(-ks->threadid-1)); 1066 (int)(-ks->threadid - 2));
1023 kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr)); 1067 kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
1024 } 1068 }
1025 break; 1069 break;
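
kgdb_hex2long() above learns to accept a leading '-', since the shadow-CPU thread IDs (-CPU - 2) that the query code now reports can come back negative from gdb. A self-contained sketch of the same parse, assuming ctype.h digit handling in place of kgdb's hex() helper:

#include <ctype.h>

/* Parse an optionally negated hex number, advancing *ptr past the digits.
 * Returns the number of hex digits consumed. */
static int hex2long(char **ptr, long *val)
{
        int num = 0;
        int negate = 0;

        *val = 0;
        if (**ptr == '-') {
                negate = 1;
                (*ptr)++;
        }
        while (isxdigit((unsigned char)**ptr)) {
                int c = tolower((unsigned char)**ptr);
                int digit = isdigit(c) ? c - '0' : c - 'a' + 10;

                *val = (*val << 4) | digit;
                num++;
                (*ptr)++;
        }
        if (negate)
                *val = -*val;
        return num;
}
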
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 8df97d3dfda8..2456d1a0befb 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -42,7 +42,7 @@ extern int max_threads;
42 42
43static struct workqueue_struct *khelper_wq; 43static struct workqueue_struct *khelper_wq;
44 44
45#ifdef CONFIG_KMOD 45#ifdef CONFIG_MODULES
46 46
47/* 47/*
48 modprobe_path is set via /proc/sys. 48 modprobe_path is set via /proc/sys.
@@ -352,16 +352,17 @@ static inline void register_pm_notifier_callback(void) {}
352 * @path: path to usermode executable 352 * @path: path to usermode executable
353 * @argv: arg vector for process 353 * @argv: arg vector for process
354 * @envp: environment for process 354 * @envp: environment for process
355 * @gfp_mask: gfp mask for memory allocation
355 * 356 *
356 * Returns either %NULL on allocation failure, or a subprocess_info 357 * Returns either %NULL on allocation failure, or a subprocess_info
357 * structure. This should be passed to call_usermodehelper_exec to 358 * structure. This should be passed to call_usermodehelper_exec to
358 * exec the process and free the structure. 359 * exec the process and free the structure.
359 */ 360 */
360struct subprocess_info *call_usermodehelper_setup(char *path, 361struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
361 char **argv, char **envp) 362 char **envp, gfp_t gfp_mask)
362{ 363{
363 struct subprocess_info *sub_info; 364 struct subprocess_info *sub_info;
364 sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC); 365 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
365 if (!sub_info) 366 if (!sub_info)
366 goto out; 367 goto out;
367 368
@@ -417,12 +418,12 @@ int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info,
417{ 418{
418 struct file *f; 419 struct file *f;
419 420
420 f = create_write_pipe(); 421 f = create_write_pipe(0);
421 if (IS_ERR(f)) 422 if (IS_ERR(f))
422 return PTR_ERR(f); 423 return PTR_ERR(f);
423 *filp = f; 424 *filp = f;
424 425
425 f = create_read_pipe(f); 426 f = create_read_pipe(f, 0);
426 if (IS_ERR(f)) { 427 if (IS_ERR(f)) {
427 free_write_pipe(*filp); 428 free_write_pipe(*filp);
428 return PTR_ERR(f); 429 return PTR_ERR(f);
@@ -494,7 +495,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
494 struct subprocess_info *sub_info; 495 struct subprocess_info *sub_info;
495 int ret; 496 int ret;
496 497
497 sub_info = call_usermodehelper_setup(path, argv, envp); 498 sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
498 if (sub_info == NULL) 499 if (sub_info == NULL)
499 return -ENOMEM; 500 return -ENOMEM;
500 501
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1485ca8d0e00..75bc2cd9ebc6 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -62,6 +62,7 @@
62 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) 62 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
63#endif 63#endif
64 64
65static int kprobes_initialized;
65static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 66static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
66static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; 67static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
67 68
@@ -69,8 +70,15 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
69static bool kprobe_enabled; 70static bool kprobe_enabled;
70 71
71DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ 72DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
72DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
73static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 73static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
74static struct {
75 spinlock_t lock ____cacheline_aligned;
76} kretprobe_table_locks[KPROBE_TABLE_SIZE];
77
78static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
79{
80 return &(kretprobe_table_locks[hash].lock);
81}
74 82
75/* 83/*
76 * Normally, functions that we'd want to prohibit kprobes in, are marked 84 * Normally, functions that we'd want to prohibit kprobes in, are marked
@@ -368,26 +376,53 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
368 return; 376 return;
369} 377}
370 378
371/* Called with kretprobe_lock held */
372void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, 379void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
373 struct hlist_head *head) 380 struct hlist_head *head)
374{ 381{
382 struct kretprobe *rp = ri->rp;
383
375 /* remove rp inst off the rprobe_inst_table */ 384 /* remove rp inst off the rprobe_inst_table */
376 hlist_del(&ri->hlist); 385 hlist_del(&ri->hlist);
377 if (ri->rp) { 386 INIT_HLIST_NODE(&ri->hlist);
378 /* remove rp inst off the used list */ 387 if (likely(rp)) {
379 hlist_del(&ri->uflist); 388 spin_lock(&rp->lock);
380 /* put rp inst back onto the free list */ 389 hlist_add_head(&ri->hlist, &rp->free_instances);
381 INIT_HLIST_NODE(&ri->uflist); 390 spin_unlock(&rp->lock);
382 hlist_add_head(&ri->uflist, &ri->rp->free_instances);
383 } else 391 } else
384 /* Unregistering */ 392 /* Unregistering */
385 hlist_add_head(&ri->hlist, head); 393 hlist_add_head(&ri->hlist, head);
386} 394}
387 395
388struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) 396void kretprobe_hash_lock(struct task_struct *tsk,
397 struct hlist_head **head, unsigned long *flags)
389{ 398{
390 return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; 399 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
400 spinlock_t *hlist_lock;
401
402 *head = &kretprobe_inst_table[hash];
403 hlist_lock = kretprobe_table_lock_ptr(hash);
404 spin_lock_irqsave(hlist_lock, *flags);
405}
406
407void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
408{
409 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
410 spin_lock_irqsave(hlist_lock, *flags);
411}
412
413void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
414{
415 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
416 spinlock_t *hlist_lock;
417
418 hlist_lock = kretprobe_table_lock_ptr(hash);
419 spin_unlock_irqrestore(hlist_lock, *flags);
420}
421
422void kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
423{
424 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
425 spin_unlock_irqrestore(hlist_lock, *flags);
391} 426}
392 427
393/* 428/*
@@ -401,17 +436,21 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
401 struct kretprobe_instance *ri; 436 struct kretprobe_instance *ri;
402 struct hlist_head *head, empty_rp; 437 struct hlist_head *head, empty_rp;
403 struct hlist_node *node, *tmp; 438 struct hlist_node *node, *tmp;
404 unsigned long flags = 0; 439 unsigned long hash, flags = 0;
405 440
406 INIT_HLIST_HEAD(&empty_rp); 441 if (unlikely(!kprobes_initialized))
407 spin_lock_irqsave(&kretprobe_lock, flags); 442 /* Early boot. kretprobe_table_locks not yet initialized. */
408 head = kretprobe_inst_table_head(tk); 443 return;
444
445 hash = hash_ptr(tk, KPROBE_HASH_BITS);
446 head = &kretprobe_inst_table[hash];
447 kretprobe_table_lock(hash, &flags);
409 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 448 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
410 if (ri->task == tk) 449 if (ri->task == tk)
411 recycle_rp_inst(ri, &empty_rp); 450 recycle_rp_inst(ri, &empty_rp);
412 } 451 }
413 spin_unlock_irqrestore(&kretprobe_lock, flags); 452 kretprobe_table_unlock(hash, &flags);
414 453 INIT_HLIST_HEAD(&empty_rp);
415 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { 454 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
416 hlist_del(&ri->hlist); 455 hlist_del(&ri->hlist);
417 kfree(ri); 456 kfree(ri);
@@ -423,24 +462,29 @@ static inline void free_rp_inst(struct kretprobe *rp)
423 struct kretprobe_instance *ri; 462 struct kretprobe_instance *ri;
424 struct hlist_node *pos, *next; 463 struct hlist_node *pos, *next;
425 464
426 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) { 465 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
427 hlist_del(&ri->uflist); 466 hlist_del(&ri->hlist);
428 kfree(ri); 467 kfree(ri);
429 } 468 }
430} 469}
431 470
432static void __kprobes cleanup_rp_inst(struct kretprobe *rp) 471static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
433{ 472{
434 unsigned long flags; 473 unsigned long flags, hash;
435 struct kretprobe_instance *ri; 474 struct kretprobe_instance *ri;
436 struct hlist_node *pos, *next; 475 struct hlist_node *pos, *next;
476 struct hlist_head *head;
477
437 /* No race here */ 478 /* No race here */
438 spin_lock_irqsave(&kretprobe_lock, flags); 479 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
439 hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) { 480 kretprobe_table_lock(hash, &flags);
440 ri->rp = NULL; 481 head = &kretprobe_inst_table[hash];
441 hlist_del(&ri->uflist); 482 hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
483 if (ri->rp == rp)
484 ri->rp = NULL;
485 }
486 kretprobe_table_unlock(hash, &flags);
442 } 487 }
443 spin_unlock_irqrestore(&kretprobe_lock, flags);
444 free_rp_inst(rp); 488 free_rp_inst(rp);
445} 489}
446 490
@@ -831,32 +875,37 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
831 struct pt_regs *regs) 875 struct pt_regs *regs)
832{ 876{
833 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 877 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
834 unsigned long flags = 0; 878 unsigned long hash, flags = 0;
879 struct kretprobe_instance *ri;
835 880
836 /* TODO: consider swapping the RA only after the last pre_handler has fired */ 881
837 spin_lock_irqsave(&kretprobe_lock, flags); 882 hash = hash_ptr(current, KPROBE_HASH_BITS);
883 spin_lock_irqsave(&rp->lock, flags);
838 if (!hlist_empty(&rp->free_instances)) { 884 if (!hlist_empty(&rp->free_instances)) {
839 struct kretprobe_instance *ri;
840
841 ri = hlist_entry(rp->free_instances.first, 885 ri = hlist_entry(rp->free_instances.first,
842 struct kretprobe_instance, uflist); 886 struct kretprobe_instance, hlist);
887 hlist_del(&ri->hlist);
888 spin_unlock_irqrestore(&rp->lock, flags);
889
843 ri->rp = rp; 890 ri->rp = rp;
844 ri->task = current; 891 ri->task = current;
845 892
846 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 893 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
847 spin_unlock_irqrestore(&kretprobe_lock, flags); 894 spin_unlock_irqrestore(&rp->lock, flags);
848 return 0; 895 return 0;
849 } 896 }
850 897
851 arch_prepare_kretprobe(ri, regs); 898 arch_prepare_kretprobe(ri, regs);
852 899
853 /* XXX(hch): why is there no hlist_move_head? */ 900 /* XXX(hch): why is there no hlist_move_head? */
854 hlist_del(&ri->uflist); 901 INIT_HLIST_NODE(&ri->hlist);
855 hlist_add_head(&ri->uflist, &ri->rp->used_instances); 902 kretprobe_table_lock(hash, &flags);
856 hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task)); 903 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
857 } else 904 kretprobe_table_unlock(hash, &flags);
905 } else {
858 rp->nmissed++; 906 rp->nmissed++;
859 spin_unlock_irqrestore(&kretprobe_lock, flags); 907 spin_unlock_irqrestore(&rp->lock, flags);
908 }
860 return 0; 909 return 0;
861} 910}
862 911
@@ -892,7 +941,7 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp,
892 rp->maxactive = NR_CPUS; 941 rp->maxactive = NR_CPUS;
893#endif 942#endif
894 } 943 }
895 INIT_HLIST_HEAD(&rp->used_instances); 944 spin_lock_init(&rp->lock);
896 INIT_HLIST_HEAD(&rp->free_instances); 945 INIT_HLIST_HEAD(&rp->free_instances);
897 for (i = 0; i < rp->maxactive; i++) { 946 for (i = 0; i < rp->maxactive; i++) {
898 inst = kmalloc(sizeof(struct kretprobe_instance) + 947 inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -901,8 +950,8 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp,
901 free_rp_inst(rp); 950 free_rp_inst(rp);
902 return -ENOMEM; 951 return -ENOMEM;
903 } 952 }
904 INIT_HLIST_NODE(&inst->uflist); 953 INIT_HLIST_NODE(&inst->hlist);
905 hlist_add_head(&inst->uflist, &rp->free_instances); 954 hlist_add_head(&inst->hlist, &rp->free_instances);
906 } 955 }
907 956
908 rp->nmissed = 0; 957 rp->nmissed = 0;
@@ -1009,6 +1058,7 @@ static int __init init_kprobes(void)
1009 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1058 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1010 INIT_HLIST_HEAD(&kprobe_table[i]); 1059 INIT_HLIST_HEAD(&kprobe_table[i]);
1011 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 1060 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
1061 spin_lock_init(&(kretprobe_table_locks[i].lock));
1012 } 1062 }
1013 1063
1014 /* 1064 /*
@@ -1050,6 +1100,7 @@ static int __init init_kprobes(void)
1050 err = arch_init_kprobes(); 1100 err = arch_init_kprobes();
1051 if (!err) 1101 if (!err)
1052 err = register_die_notifier(&kprobe_exceptions_nb); 1102 err = register_die_notifier(&kprobe_exceptions_nb);
1103 kprobes_initialized = (err == 0);
1053 1104
1054 if (!err) 1105 if (!err)
1055 init_test_probes(); 1106 init_test_probes();
@@ -1286,13 +1337,8 @@ EXPORT_SYMBOL_GPL(register_jprobe);
1286EXPORT_SYMBOL_GPL(unregister_jprobe); 1337EXPORT_SYMBOL_GPL(unregister_jprobe);
1287EXPORT_SYMBOL_GPL(register_jprobes); 1338EXPORT_SYMBOL_GPL(register_jprobes);
1288EXPORT_SYMBOL_GPL(unregister_jprobes); 1339EXPORT_SYMBOL_GPL(unregister_jprobes);
1289#ifdef CONFIG_KPROBES
1290EXPORT_SYMBOL_GPL(jprobe_return); 1340EXPORT_SYMBOL_GPL(jprobe_return);
1291#endif
1292
1293#ifdef CONFIG_KPROBES
1294EXPORT_SYMBOL_GPL(register_kretprobe); 1341EXPORT_SYMBOL_GPL(register_kretprobe);
1295EXPORT_SYMBOL_GPL(unregister_kretprobe); 1342EXPORT_SYMBOL_GPL(unregister_kretprobe);
1296EXPORT_SYMBOL_GPL(register_kretprobes); 1343EXPORT_SYMBOL_GPL(register_kretprobes);
1297EXPORT_SYMBOL_GPL(unregister_kretprobes); 1344EXPORT_SYMBOL_GPL(unregister_kretprobes);
1298#endif
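
The kretprobe rework above retires the single global kretprobe_lock in favour of a per-bucket lock table indexed by a hash of the task pointer, plus a per-kretprobe lock for the free list. A userspace analogue of the bucket-lock part (the hash constant and table size are assumptions; pthread mutexes stand in for spinlocks):

#include <pthread.h>
#include <stdint.h>

#define LOCK_TABLE_BITS 6
#define LOCK_TABLE_SIZE (1u << LOCK_TABLE_BITS)

static pthread_mutex_t bucket_locks[LOCK_TABLE_SIZE];

static void bucket_locks_init(void)
{
        for (unsigned int i = 0; i < LOCK_TABLE_SIZE; i++)
                pthread_mutex_init(&bucket_locks[i], NULL);
}

/* Multiplicative pointer hash, folded down to LOCK_TABLE_BITS. */
static unsigned int hash_ptr_bits(const void *p)
{
        return (unsigned int)(((uintptr_t)p * 0x9E3779B97F4A7C15ull)
                              >> (64 - LOCK_TABLE_BITS));
}

static pthread_mutex_t *lock_for(const void *task)
{
        return &bucket_locks[hash_ptr_bits(task)];
}

/* Usage: two unrelated tasks usually hash to different buckets, so their
 * return-probe bookkeeping no longer serialises on one global lock.
 *
 *      pthread_mutex_lock(lock_for(task));
 *      ... walk or modify this task's instance list ...
 *      pthread_mutex_unlock(lock_for(task));
 */
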
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ac3fb7326641..96cff2f8710b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -106,7 +106,7 @@ static void create_kthread(struct kthread_create_info *create)
106 */ 106 */
107 sched_setscheduler(create->result, SCHED_NORMAL, &param); 107 sched_setscheduler(create->result, SCHED_NORMAL, &param);
108 set_user_nice(create->result, KTHREAD_NICE_LEVEL); 108 set_user_nice(create->result, KTHREAD_NICE_LEVEL);
109 set_cpus_allowed(create->result, CPU_MASK_ALL); 109 set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR);
110 } 110 }
111 complete(&create->done); 111 complete(&create->done);
112} 112}
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu)
176 return; 176 return;
177 } 177 }
178 /* Must have done schedule() in kthread() before we set_task_cpu */ 178 /* Must have done schedule() in kthread() before we set_task_cpu */
179 wait_task_inactive(k); 179 wait_task_inactive(k, 0);
180 set_task_cpu(k, cpu); 180 set_task_cpu(k, cpu);
181 k->cpus_allowed = cpumask_of_cpu(cpu); 181 k->cpus_allowed = cpumask_of_cpu(cpu);
182 k->rt.nr_cpus_allowed = 1; 182 k->rt.nr_cpus_allowed = 1;
@@ -233,7 +233,7 @@ int kthreadd(void *unused)
233 set_task_comm(tsk, "kthreadd"); 233 set_task_comm(tsk, "kthreadd");
234 ignore_signals(tsk); 234 ignore_signals(tsk);
235 set_user_nice(tsk, KTHREAD_NICE_LEVEL); 235 set_user_nice(tsk, KTHREAD_NICE_LEVEL);
236 set_cpus_allowed(tsk, CPU_MASK_ALL); 236 set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR);
237 237
238 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; 238 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
239 239
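
The kthread.c changes above switch from set_cpus_allowed(), which takes a cpumask_t by value, to set_cpus_allowed_ptr(). With NR_CPUS configurable up to 4096 a cpumask is 512 bytes, so passing it by value copies half a kilobyte onto the stack per call. A trivial sketch of the difference, with an assumed mask size rather than the kernel's:

#define MASK_BITS       1024
#define MASK_WORDS      (MASK_BITS / (8 * sizeof(unsigned long)))

struct big_mask {
        unsigned long bits[MASK_WORDS]; /* large enough that copying by value hurts */
};

/* Old style: the whole mask is copied at every call site. */
static void set_allowed_by_value(struct big_mask mask)
{
        (void)mask;
}

/* New style: only a pointer travels, mirroring set_cpus_allowed_ptr(). */
static void set_allowed_by_ptr(const struct big_mask *mask)
{
        (void)mask;
}
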
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a64362973..1aa91fd6b06e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
124unsigned long nr_lock_classes; 124unsigned long nr_lock_classes;
125static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; 125static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
126 126
127static inline struct lock_class *hlock_class(struct held_lock *hlock)
128{
129 if (!hlock->class_idx) {
130 DEBUG_LOCKS_WARN_ON(1);
131 return NULL;
132 }
133 return lock_classes + hlock->class_idx - 1;
134}
135
127#ifdef CONFIG_LOCK_STAT 136#ifdef CONFIG_LOCK_STAT
128static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); 137static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
129 138
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
222 231
223 holdtime = sched_clock() - hlock->holdtime_stamp; 232 holdtime = sched_clock() - hlock->holdtime_stamp;
224 233
225 stats = get_lock_stats(hlock->class); 234 stats = get_lock_stats(hlock_class(hlock));
226 if (hlock->read) 235 if (hlock->read)
227 lock_time_inc(&stats->read_holdtime, holdtime); 236 lock_time_inc(&stats->read_holdtime, holdtime);
228 else 237 else
@@ -372,6 +381,19 @@ unsigned int nr_process_chains;
372unsigned int max_lockdep_depth; 381unsigned int max_lockdep_depth;
373unsigned int max_recursion_depth; 382unsigned int max_recursion_depth;
374 383
384static unsigned int lockdep_dependency_gen_id;
385
386static bool lockdep_dependency_visit(struct lock_class *source,
387 unsigned int depth)
388{
389 if (!depth)
390 lockdep_dependency_gen_id++;
391 if (source->dep_gen_id == lockdep_dependency_gen_id)
392 return true;
393 source->dep_gen_id = lockdep_dependency_gen_id;
394 return false;
395}
396
375#ifdef CONFIG_DEBUG_LOCKDEP 397#ifdef CONFIG_DEBUG_LOCKDEP
376/* 398/*
377 * We cannot printk in early bootup code. Not even early_printk() 399 * We cannot printk in early bootup code. Not even early_printk()
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
505 527
506static void print_lock(struct held_lock *hlock) 528static void print_lock(struct held_lock *hlock)
507{ 529{
508 print_lock_name(hlock->class); 530 print_lock_name(hlock_class(hlock));
509 printk(", at: "); 531 printk(", at: ");
510 print_ip_sym(hlock->acquire_ip); 532 print_ip_sym(hlock->acquire_ip);
511} 533}
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
558{ 580{
559 struct lock_list *entry; 581 struct lock_list *entry;
560 582
583 if (lockdep_dependency_visit(class, depth))
584 return;
585
561 if (DEBUG_LOCKS_WARN_ON(depth >= 20)) 586 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
562 return; 587 return;
563 588
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
932 if (debug_locks_silent) 957 if (debug_locks_silent)
933 return 0; 958 return 0;
934 959
935 this.class = check_source->class; 960 this.class = hlock_class(check_source);
936 if (!save_trace(&this.trace)) 961 if (!save_trace(&this.trace))
937 return 0; 962 return 0;
938 963
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void)
959 return 0; 984 return 0;
960} 985}
961 986
987unsigned long __lockdep_count_forward_deps(struct lock_class *class,
988 unsigned int depth)
989{
990 struct lock_list *entry;
991 unsigned long ret = 1;
992
993 if (lockdep_dependency_visit(class, depth))
994 return 0;
995
996 /*
997 * Recurse this class's dependency list:
998 */
999 list_for_each_entry(entry, &class->locks_after, entry)
1000 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1001
1002 return ret;
1003}
1004
1005unsigned long lockdep_count_forward_deps(struct lock_class *class)
1006{
1007 unsigned long ret, flags;
1008
1009 local_irq_save(flags);
1010 __raw_spin_lock(&lockdep_lock);
1011 ret = __lockdep_count_forward_deps(class, 0);
1012 __raw_spin_unlock(&lockdep_lock);
1013 local_irq_restore(flags);
1014
1015 return ret;
1016}
1017
1018unsigned long __lockdep_count_backward_deps(struct lock_class *class,
1019 unsigned int depth)
1020{
1021 struct lock_list *entry;
1022 unsigned long ret = 1;
1023
1024 if (lockdep_dependency_visit(class, depth))
1025 return 0;
1026 /*
1027 * Recurse this class's dependency list:
1028 */
1029 list_for_each_entry(entry, &class->locks_before, entry)
1030 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1031
1032 return ret;
1033}
1034
1035unsigned long lockdep_count_backward_deps(struct lock_class *class)
1036{
1037 unsigned long ret, flags;
1038
1039 local_irq_save(flags);
1040 __raw_spin_lock(&lockdep_lock);
1041 ret = __lockdep_count_backward_deps(class, 0);
1042 __raw_spin_unlock(&lockdep_lock);
1043 local_irq_restore(flags);
1044
1045 return ret;
1046}
1047
962/* 1048/*
963 * Prove that the dependency graph starting at <entry> can not 1049 * Prove that the dependency graph starting at <entry> can not
964 * lead to <target>. Print an error and return 0 if it does. 1050 * lead to <target>. Print an error and return 0 if it does.
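
lockdep_dependency_visit() above lets the new dependency counters walk the lock graph without revisiting classes: each node carries a generation stamp, and starting a traversal (depth 0) simply bumps the global generation instead of clearing every node's flag. The same trick on a generic graph (struct node is an illustration, not struct lock_class):

#include <stddef.h>

struct node {
        unsigned int dep_gen_id;        /* generation this node was last seen in */
        size_t nr_edges;
        struct node **edges;
};

static unsigned int dependency_gen_id;

static int dependency_visit(struct node *n, unsigned int depth)
{
        if (!depth)
                dependency_gen_id++;    /* depth 0: a fresh traversal begins */
        if (n->dep_gen_id == dependency_gen_id)
                return 1;               /* already visited this time around */
        n->dep_gen_id = dependency_gen_id;
        return 0;
}

/* Count this node plus everything reachable from it, each node once. */
static unsigned long count_deps(struct node *n, unsigned int depth)
{
        unsigned long ret = 1;

        if (dependency_visit(n, depth))
                return 0;
        for (size_t i = 0; i < n->nr_edges; i++)
                ret += count_deps(n->edges[i], depth + 1);
        return ret;
}
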
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
968{ 1054{
969 struct lock_list *entry; 1055 struct lock_list *entry;
970 1056
1057 if (lockdep_dependency_visit(source, depth))
1058 return 1;
1059
971 debug_atomic_inc(&nr_cyclic_check_recursions); 1060 debug_atomic_inc(&nr_cyclic_check_recursions);
972 if (depth > max_recursion_depth) 1061 if (depth > max_recursion_depth)
973 max_recursion_depth = depth; 1062 max_recursion_depth = depth;
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
977 * Check this lock's dependency list: 1066 * Check this lock's dependency list:
978 */ 1067 */
979 list_for_each_entry(entry, &source->locks_after, entry) { 1068 list_for_each_entry(entry, &source->locks_after, entry) {
980 if (entry->class == check_target->class) 1069 if (entry->class == hlock_class(check_target))
981 return print_circular_bug_header(entry, depth+1); 1070 return print_circular_bug_header(entry, depth+1);
982 debug_atomic_inc(&nr_cyclic_checks); 1071 debug_atomic_inc(&nr_cyclic_checks);
983 if (!check_noncircular(entry->class, depth+1)) 1072 if (!check_noncircular(entry->class, depth+1))
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
1011 struct lock_list *entry; 1100 struct lock_list *entry;
1012 int ret; 1101 int ret;
1013 1102
1103 if (lockdep_dependency_visit(source, depth))
1104 return 1;
1105
1014 if (depth > max_recursion_depth) 1106 if (depth > max_recursion_depth)
1015 max_recursion_depth = depth; 1107 max_recursion_depth = depth;
1016 if (depth >= RECURSION_LIMIT) 1108 if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
1050 struct lock_list *entry; 1142 struct lock_list *entry;
1051 int ret; 1143 int ret;
1052 1144
1145 if (lockdep_dependency_visit(source, depth))
1146 return 1;
1147
1053 if (!__raw_spin_is_locked(&lockdep_lock)) 1148 if (!__raw_spin_is_locked(&lockdep_lock))
1054 return DEBUG_LOCKS_WARN_ON(1); 1149 return DEBUG_LOCKS_WARN_ON(1);
1055 1150
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
1064 return 2; 1159 return 2;
1065 } 1160 }
1066 1161
1162 if (!source && debug_locks_off_graph_unlock()) {
1163 WARN_ON(1);
1164 return 0;
1165 }
1166
1067 /* 1167 /*
1068 * Check this lock's dependency list: 1168 * Check this lock's dependency list:
1069 */ 1169 */
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
1103 printk("\nand this task is already holding:\n"); 1203 printk("\nand this task is already holding:\n");
1104 print_lock(prev); 1204 print_lock(prev);
1105 printk("which would create a new lock dependency:\n"); 1205 printk("which would create a new lock dependency:\n");
1106 print_lock_name(prev->class); 1206 print_lock_name(hlock_class(prev));
1107 printk(" ->"); 1207 printk(" ->");
1108 print_lock_name(next->class); 1208 print_lock_name(hlock_class(next));
1109 printk("\n"); 1209 printk("\n");
1110 1210
1111 printk("\nbut this new dependency connects a %s-irq-safe lock:\n", 1211 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
1146 1246
1147 find_usage_bit = bit_backwards; 1247 find_usage_bit = bit_backwards;
1148 /* fills in <backwards_match> */ 1248 /* fills in <backwards_match> */
1149 ret = find_usage_backwards(prev->class, 0); 1249 ret = find_usage_backwards(hlock_class(prev), 0);
1150 if (!ret || ret == 1) 1250 if (!ret || ret == 1)
1151 return ret; 1251 return ret;
1152 1252
1153 find_usage_bit = bit_forwards; 1253 find_usage_bit = bit_forwards;
1154 ret = find_usage_forwards(next->class, 0); 1254 ret = find_usage_forwards(hlock_class(next), 0);
1155 if (!ret || ret == 1) 1255 if (!ret || ret == 1)
1156 return ret; 1256 return ret;
1157 /* ret == 2 */ 1257 /* ret == 2 */
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
1272 struct lockdep_map *next_instance, int read) 1372 struct lockdep_map *next_instance, int read)
1273{ 1373{
1274 struct held_lock *prev; 1374 struct held_lock *prev;
1375 struct held_lock *nest = NULL;
1275 int i; 1376 int i;
1276 1377
1277 for (i = 0; i < curr->lockdep_depth; i++) { 1378 for (i = 0; i < curr->lockdep_depth; i++) {
1278 prev = curr->held_locks + i; 1379 prev = curr->held_locks + i;
1279 if (prev->class != next->class) 1380
1381 if (prev->instance == next->nest_lock)
1382 nest = prev;
1383
1384 if (hlock_class(prev) != hlock_class(next))
1280 continue; 1385 continue;
1386
1281 /* 1387 /*
1282 * Allow read-after-read recursion of the same 1388 * Allow read-after-read recursion of the same
1283 * lock class (i.e. read_lock(lock)+read_lock(lock)): 1389 * lock class (i.e. read_lock(lock)+read_lock(lock)):
1284 */ 1390 */
1285 if ((read == 2) && prev->read) 1391 if ((read == 2) && prev->read)
1286 return 2; 1392 return 2;
1393
1394 /*
1395 * We're holding the nest_lock, which serializes this lock's
1396 * nesting behaviour.
1397 */
1398 if (nest)
1399 return 2;
1400
1287 return print_deadlock_bug(curr, prev, next); 1401 return print_deadlock_bug(curr, prev, next);
1288 } 1402 }
1289 return 1; 1403 return 1;
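With nest_lock plumbed into held_lock, check_deadlock() now tolerates taking the same lock class twice when one of the already-held locks is the declared nest lock of the new acquisition, since that outer lock serializes the nested takers. A stripped-down restatement of the rule in plain C (field and function names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>

struct lockmap;				/* stand-in for struct lockdep_map */

struct heldlock {
	const struct lockmap *instance;	/* the lock actually taken */
	const struct lockmap *nest;	/* nest_lock passed at acquire time */
	int class;			/* lock class id */
	int read;
};

/* A same-class acquisition is not flagged when some already-held lock
 * is the declared nest_lock of the new one. */
static bool same_class_is_ok(const struct heldlock *held, size_t depth,
			     const struct heldlock *next)
{
	const struct heldlock *nest = NULL;
	size_t i;

	for (i = 0; i < depth; i++) {
		if (held[i].instance == next->nest)
			nest = &held[i];

		if (held[i].class != next->class)
			continue;

		/* read-after-read recursion stays allowed */
		if (next->read == 2 && held[i].read)
			return true;

		/* serialized by the nest lock -> not a deadlock report */
		return nest != NULL;
	}
	return true;	/* no same-class lock held at all */
}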
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1329 */ 1443 */
1330 check_source = next; 1444 check_source = next;
1331 check_target = prev; 1445 check_target = prev;
1332 if (!(check_noncircular(next->class, 0))) 1446 if (!(check_noncircular(hlock_class(next), 0)))
1333 return print_circular_bug_tail(); 1447 return print_circular_bug_tail();
1334 1448
1335 if (!check_prev_add_irq(curr, prev, next)) 1449 if (!check_prev_add_irq(curr, prev, next))
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1353 * chains - the second one will be new, but L1 already has 1467 * chains - the second one will be new, but L1 already has
1354 * L2 added to its dependency list, due to the first chain.) 1468 * L2 added to its dependency list, due to the first chain.)
1355 */ 1469 */
1356 list_for_each_entry(entry, &prev->class->locks_after, entry) { 1470 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1357 if (entry->class == next->class) { 1471 if (entry->class == hlock_class(next)) {
1358 if (distance == 1) 1472 if (distance == 1)
1359 entry->distance = 1; 1473 entry->distance = 1;
1360 return 2; 1474 return 2;
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1365 * Ok, all validations passed, add the new lock 1479 * Ok, all validations passed, add the new lock
1366 * to the previous lock's dependency list: 1480 * to the previous lock's dependency list:
1367 */ 1481 */
1368 ret = add_lock_to_list(prev->class, next->class, 1482 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1369 &prev->class->locks_after, next->acquire_ip, distance); 1483 &hlock_class(prev)->locks_after,
1484 next->acquire_ip, distance);
1370 1485
1371 if (!ret) 1486 if (!ret)
1372 return 0; 1487 return 0;
1373 1488
1374 ret = add_lock_to_list(next->class, prev->class, 1489 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1375 &next->class->locks_before, next->acquire_ip, distance); 1490 &hlock_class(next)->locks_before,
1491 next->acquire_ip, distance);
1376 if (!ret) 1492 if (!ret)
1377 return 0; 1493 return 0;
1378 1494
1379 /* 1495 /*
1380 * Debugging printouts: 1496 * Debugging printouts:
1381 */ 1497 */
1382 if (verbose(prev->class) || verbose(next->class)) { 1498 if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1383 graph_unlock(); 1499 graph_unlock();
1384 printk("\n new dependency: "); 1500 printk("\n new dependency: ");
1385 print_lock_name(prev->class); 1501 print_lock_name(hlock_class(prev));
1386 printk(" => "); 1502 printk(" => ");
1387 print_lock_name(next->class); 1503 print_lock_name(hlock_class(next));
1388 printk("\n"); 1504 printk("\n");
1389 dump_stack(); 1505 dump_stack();
1390 return graph_lock(); 1506 return graph_lock();
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
1481 struct held_lock *hlock, 1597 struct held_lock *hlock,
1482 u64 chain_key) 1598 u64 chain_key)
1483{ 1599{
1484 struct lock_class *class = hlock->class; 1600 struct lock_class *class = hlock_class(hlock);
1485 struct list_head *hash_head = chainhashentry(chain_key); 1601 struct list_head *hash_head = chainhashentry(chain_key);
1486 struct lock_chain *chain; 1602 struct lock_chain *chain;
1487 struct held_lock *hlock_curr, *hlock_next; 1603 struct held_lock *hlock_curr, *hlock_next;
@@ -1554,7 +1670,7 @@ cache_hit:
1554 if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 1670 if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
1555 chain->base = cn; 1671 chain->base = cn;
1556 for (j = 0; j < chain->depth - 1; j++, i++) { 1672 for (j = 0; j < chain->depth - 1; j++, i++) {
1557 int lock_id = curr->held_locks[i].class - lock_classes; 1673 int lock_id = curr->held_locks[i].class_idx - 1;
1558 chain_hlocks[chain->base + j] = lock_id; 1674 chain_hlocks[chain->base + j] = lock_id;
1559 } 1675 }
1560 chain_hlocks[chain->base + j] = class - lock_classes; 1676 chain_hlocks[chain->base + j] = class - lock_classes;
@@ -1650,7 +1766,7 @@ static void check_chain_key(struct task_struct *curr)
1650 WARN_ON(1); 1766 WARN_ON(1);
1651 return; 1767 return;
1652 } 1768 }
1653 id = hlock->class - lock_classes; 1769 id = hlock->class_idx - 1;
1654 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) 1770 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1655 return; 1771 return;
1656 1772
@@ -1695,7 +1811,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
1695 print_lock(this); 1811 print_lock(this);
1696 1812
1697 printk("{%s} state was registered at:\n", usage_str[prev_bit]); 1813 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1698 print_stack_trace(this->class->usage_traces + prev_bit, 1); 1814 print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
1699 1815
1700 print_irqtrace_events(curr); 1816 print_irqtrace_events(curr);
1701 printk("\nother info that might help us debug this:\n"); 1817 printk("\nother info that might help us debug this:\n");
@@ -1714,7 +1830,7 @@ static inline int
1714valid_state(struct task_struct *curr, struct held_lock *this, 1830valid_state(struct task_struct *curr, struct held_lock *this,
1715 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) 1831 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1716{ 1832{
1717 if (unlikely(this->class->usage_mask & (1 << bad_bit))) 1833 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
1718 return print_usage_bug(curr, this, bad_bit, new_bit); 1834 return print_usage_bug(curr, this, bad_bit, new_bit);
1719 return 1; 1835 return 1;
1720} 1836}
@@ -1753,7 +1869,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1753 lockdep_print_held_locks(curr); 1869 lockdep_print_held_locks(curr);
1754 1870
1755 printk("\nthe first lock's dependencies:\n"); 1871 printk("\nthe first lock's dependencies:\n");
1756 print_lock_dependencies(this->class, 0); 1872 print_lock_dependencies(hlock_class(this), 0);
1757 1873
1758 printk("\nthe second lock's dependencies:\n"); 1874 printk("\nthe second lock's dependencies:\n");
1759 print_lock_dependencies(other, 0); 1875 print_lock_dependencies(other, 0);
@@ -1776,7 +1892,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1776 1892
1777 find_usage_bit = bit; 1893 find_usage_bit = bit;
1778 /* fills in <forwards_match> */ 1894 /* fills in <forwards_match> */
1779 ret = find_usage_forwards(this->class, 0); 1895 ret = find_usage_forwards(hlock_class(this), 0);
1780 if (!ret || ret == 1) 1896 if (!ret || ret == 1)
1781 return ret; 1897 return ret;
1782 1898
@@ -1795,7 +1911,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1795 1911
1796 find_usage_bit = bit; 1912 find_usage_bit = bit;
1797 /* fills in <backwards_match> */ 1913 /* fills in <backwards_match> */
1798 ret = find_usage_backwards(this->class, 0); 1914 ret = find_usage_backwards(hlock_class(this), 0);
1799 if (!ret || ret == 1) 1915 if (!ret || ret == 1)
1800 return ret; 1916 return ret;
1801 1917
@@ -1861,7 +1977,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1861 LOCK_ENABLED_HARDIRQS_READ, "hard-read")) 1977 LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1862 return 0; 1978 return 0;
1863#endif 1979#endif
1864 if (hardirq_verbose(this->class)) 1980 if (hardirq_verbose(hlock_class(this)))
1865 ret = 2; 1981 ret = 2;
1866 break; 1982 break;
1867 case LOCK_USED_IN_SOFTIRQ: 1983 case LOCK_USED_IN_SOFTIRQ:
@@ -1886,7 +2002,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1886 LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) 2002 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
1887 return 0; 2003 return 0;
1888#endif 2004#endif
1889 if (softirq_verbose(this->class)) 2005 if (softirq_verbose(hlock_class(this)))
1890 ret = 2; 2006 ret = 2;
1891 break; 2007 break;
1892 case LOCK_USED_IN_HARDIRQ_READ: 2008 case LOCK_USED_IN_HARDIRQ_READ:
@@ -1899,7 +2015,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1899 if (!check_usage_forwards(curr, this, 2015 if (!check_usage_forwards(curr, this,
1900 LOCK_ENABLED_HARDIRQS, "hard")) 2016 LOCK_ENABLED_HARDIRQS, "hard"))
1901 return 0; 2017 return 0;
1902 if (hardirq_verbose(this->class)) 2018 if (hardirq_verbose(hlock_class(this)))
1903 ret = 2; 2019 ret = 2;
1904 break; 2020 break;
1905 case LOCK_USED_IN_SOFTIRQ_READ: 2021 case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1912,7 +2028,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1912 if (!check_usage_forwards(curr, this, 2028 if (!check_usage_forwards(curr, this,
1913 LOCK_ENABLED_SOFTIRQS, "soft")) 2029 LOCK_ENABLED_SOFTIRQS, "soft"))
1914 return 0; 2030 return 0;
1915 if (softirq_verbose(this->class)) 2031 if (softirq_verbose(hlock_class(this)))
1916 ret = 2; 2032 ret = 2;
1917 break; 2033 break;
1918 case LOCK_ENABLED_HARDIRQS: 2034 case LOCK_ENABLED_HARDIRQS:
@@ -1938,7 +2054,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1938 LOCK_USED_IN_HARDIRQ_READ, "hard-read")) 2054 LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
1939 return 0; 2055 return 0;
1940#endif 2056#endif
1941 if (hardirq_verbose(this->class)) 2057 if (hardirq_verbose(hlock_class(this)))
1942 ret = 2; 2058 ret = 2;
1943 break; 2059 break;
1944 case LOCK_ENABLED_SOFTIRQS: 2060 case LOCK_ENABLED_SOFTIRQS:
@@ -1964,7 +2080,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1964 LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) 2080 LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
1965 return 0; 2081 return 0;
1966#endif 2082#endif
1967 if (softirq_verbose(this->class)) 2083 if (softirq_verbose(hlock_class(this)))
1968 ret = 2; 2084 ret = 2;
1969 break; 2085 break;
1970 case LOCK_ENABLED_HARDIRQS_READ: 2086 case LOCK_ENABLED_HARDIRQS_READ:
@@ -1979,7 +2095,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1979 LOCK_USED_IN_HARDIRQ, "hard")) 2095 LOCK_USED_IN_HARDIRQ, "hard"))
1980 return 0; 2096 return 0;
1981#endif 2097#endif
1982 if (hardirq_verbose(this->class)) 2098 if (hardirq_verbose(hlock_class(this)))
1983 ret = 2; 2099 ret = 2;
1984 break; 2100 break;
1985 case LOCK_ENABLED_SOFTIRQS_READ: 2101 case LOCK_ENABLED_SOFTIRQS_READ:
@@ -1994,7 +2110,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1994 LOCK_USED_IN_SOFTIRQ, "soft")) 2110 LOCK_USED_IN_SOFTIRQ, "soft"))
1995 return 0; 2111 return 0;
1996#endif 2112#endif
1997 if (softirq_verbose(this->class)) 2113 if (softirq_verbose(hlock_class(this)))
1998 ret = 2; 2114 ret = 2;
1999 break; 2115 break;
2000 default: 2116 default:
@@ -2310,7 +2426,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
2310 * If already set then do not dirty the cacheline, 2426 * If already set then do not dirty the cacheline,
2311 * nor do any checks: 2427 * nor do any checks:
2312 */ 2428 */
2313 if (likely(this->class->usage_mask & new_mask)) 2429 if (likely(hlock_class(this)->usage_mask & new_mask))
2314 return 1; 2430 return 1;
2315 2431
2316 if (!graph_lock()) 2432 if (!graph_lock())
@@ -2318,14 +2434,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
2318 /* 2434 /*
2319 * Make sure we didnt race: 2435 * Make sure we didnt race:
2320 */ 2436 */
2321 if (unlikely(this->class->usage_mask & new_mask)) { 2437 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2322 graph_unlock(); 2438 graph_unlock();
2323 return 1; 2439 return 1;
2324 } 2440 }
2325 2441
2326 this->class->usage_mask |= new_mask; 2442 hlock_class(this)->usage_mask |= new_mask;
2327 2443
2328 if (!save_trace(this->class->usage_traces + new_bit)) 2444 if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2329 return 0; 2445 return 0;
2330 2446
2331 switch (new_bit) { 2447 switch (new_bit) {
@@ -2405,7 +2521,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
2405 */ 2521 */
2406static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 2522static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2407 int trylock, int read, int check, int hardirqs_off, 2523 int trylock, int read, int check, int hardirqs_off,
2408 unsigned long ip) 2524 struct lockdep_map *nest_lock, unsigned long ip)
2409{ 2525{
2410 struct task_struct *curr = current; 2526 struct task_struct *curr = current;
2411 struct lock_class *class = NULL; 2527 struct lock_class *class = NULL;
@@ -2459,10 +2575,12 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2459 return 0; 2575 return 0;
2460 2576
2461 hlock = curr->held_locks + depth; 2577 hlock = curr->held_locks + depth;
2462 2578 if (DEBUG_LOCKS_WARN_ON(!class))
2463 hlock->class = class; 2579 return 0;
2580 hlock->class_idx = class - lock_classes + 1;
2464 hlock->acquire_ip = ip; 2581 hlock->acquire_ip = ip;
2465 hlock->instance = lock; 2582 hlock->instance = lock;
2583 hlock->nest_lock = nest_lock;
2466 hlock->trylock = trylock; 2584 hlock->trylock = trylock;
2467 hlock->read = read; 2585 hlock->read = read;
2468 hlock->check = check; 2586 hlock->check = check;
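held_lock now carries a small class_idx (index into lock_classes, offset by one) instead of a class pointer; zero keeps meaning "no class", and hlock_class() converts the index back into a pointer wherever the class is needed. A tiny sketch of that encode/decode convention, with a stand-in array size:

#include <assert.h>

#define MAX_CLASSES 2048	/* stand-in for MAX_LOCKDEP_KEYS */

struct lock_class { int dummy; };

static struct lock_class classes[MAX_CLASSES];

struct held {
	unsigned int class_idx;	/* index + 1, 0 == unset */
};

/* Encode: store index + 1 so a zeroed struct means "no class". */
static void set_class(struct held *h, struct lock_class *c)
{
	h->class_idx = (unsigned int)(c - classes) + 1;
}

/* Decode: the counterpart of the kernel's hlock_class() helper. */
static struct lock_class *get_class(const struct held *h)
{
	assert(h->class_idx && h->class_idx <= MAX_CLASSES);
	return classes + h->class_idx - 1;
}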
@@ -2574,6 +2692,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2574 return 1; 2692 return 1;
2575} 2693}
2576 2694
2695static int
2696__lock_set_subclass(struct lockdep_map *lock,
2697 unsigned int subclass, unsigned long ip)
2698{
2699 struct task_struct *curr = current;
2700 struct held_lock *hlock, *prev_hlock;
2701 struct lock_class *class;
2702 unsigned int depth;
2703 int i;
2704
2705 depth = curr->lockdep_depth;
2706 if (DEBUG_LOCKS_WARN_ON(!depth))
2707 return 0;
2708
2709 prev_hlock = NULL;
2710 for (i = depth-1; i >= 0; i--) {
2711 hlock = curr->held_locks + i;
2712 /*
2713 * We must not cross into another context:
2714 */
2715 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2716 break;
2717 if (hlock->instance == lock)
2718 goto found_it;
2719 prev_hlock = hlock;
2720 }
2721 return print_unlock_inbalance_bug(curr, lock, ip);
2722
2723found_it:
2724 class = register_lock_class(lock, subclass, 0);
2725 hlock->class_idx = class - lock_classes + 1;
2726
2727 curr->lockdep_depth = i;
2728 curr->curr_chain_key = hlock->prev_chain_key;
2729
2730 for (; i < depth; i++) {
2731 hlock = curr->held_locks + i;
2732 if (!__lock_acquire(hlock->instance,
2733 hlock_class(hlock)->subclass, hlock->trylock,
2734 hlock->read, hlock->check, hlock->hardirqs_off,
2735 hlock->nest_lock, hlock->acquire_ip))
2736 return 0;
2737 }
2738
2739 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2740 return 0;
2741 return 1;
2742}
2743
2577/* 2744/*
2578 * Remove the lock to the list of currently held locks in a 2745 * Remove the lock to the list of currently held locks in a
2579 * potentially non-nested (out of order) manner. This is a 2746 * potentially non-nested (out of order) manner. This is a
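__lock_set_subclass() above is a rewind-and-replay: it locates the held lock, resets lockdep_depth and the chain key to just before it, registers the class with the new subclass, and then pushes that lock and every later one back through __lock_acquire() so the change is validated like a fresh acquisition. The same pattern on a plain array stack, with hypothetical helper names:

#include <stdbool.h>
#include <stddef.h>

struct entry { int key; };

/* Stand-in for re-running the acquisition checks on one entry. */
static bool revalidate(struct entry *e)
{
	return e->key >= 0;
}

/* Rewind-and-replay: change entry @i in place, then re-push it and
 * every later entry through the normal validation path again. */
static bool update_and_replay(struct entry *stack, size_t *depth,
			      size_t i, int new_key)
{
	size_t depth_saved = *depth, j;

	stack[i].key = new_key;
	*depth = i;			/* pop back to just before @i */

	for (j = i; j < depth_saved; j++) {
		if (!revalidate(&stack[j]))
			return false;
		(*depth)++;		/* the normal push path restores depth */
	}
	return *depth == depth_saved;
}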
@@ -2624,9 +2791,9 @@ found_it:
2624 for (i++; i < depth; i++) { 2791 for (i++; i < depth; i++) {
2625 hlock = curr->held_locks + i; 2792 hlock = curr->held_locks + i;
2626 if (!__lock_acquire(hlock->instance, 2793 if (!__lock_acquire(hlock->instance,
2627 hlock->class->subclass, hlock->trylock, 2794 hlock_class(hlock)->subclass, hlock->trylock,
2628 hlock->read, hlock->check, hlock->hardirqs_off, 2795 hlock->read, hlock->check, hlock->hardirqs_off,
2629 hlock->acquire_ip)) 2796 hlock->nest_lock, hlock->acquire_ip))
2630 return 0; 2797 return 0;
2631 } 2798 }
2632 2799
@@ -2669,7 +2836,7 @@ static int lock_release_nested(struct task_struct *curr,
2669 2836
2670#ifdef CONFIG_DEBUG_LOCKDEP 2837#ifdef CONFIG_DEBUG_LOCKDEP
2671 hlock->prev_chain_key = 0; 2838 hlock->prev_chain_key = 0;
2672 hlock->class = NULL; 2839 hlock->class_idx = 0;
2673 hlock->acquire_ip = 0; 2840 hlock->acquire_ip = 0;
2674 hlock->irq_context = 0; 2841 hlock->irq_context = 0;
2675#endif 2842#endif
@@ -2738,18 +2905,36 @@ static void check_flags(unsigned long flags)
2738#endif 2905#endif
2739} 2906}
2740 2907
2908void
2909lock_set_subclass(struct lockdep_map *lock,
2910 unsigned int subclass, unsigned long ip)
2911{
2912 unsigned long flags;
2913
2914 if (unlikely(current->lockdep_recursion))
2915 return;
2916
2917 raw_local_irq_save(flags);
2918 current->lockdep_recursion = 1;
2919 check_flags(flags);
2920 if (__lock_set_subclass(lock, subclass, ip))
2921 check_chain_key(current);
2922 current->lockdep_recursion = 0;
2923 raw_local_irq_restore(flags);
2924}
2925
2926EXPORT_SYMBOL_GPL(lock_set_subclass);
2927
2741/* 2928/*
2742 * We are not always called with irqs disabled - do that here, 2929 * We are not always called with irqs disabled - do that here,
2743 * and also avoid lockdep recursion: 2930 * and also avoid lockdep recursion:
2744 */ 2931 */
2745void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 2932void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2746 int trylock, int read, int check, unsigned long ip) 2933 int trylock, int read, int check,
2934 struct lockdep_map *nest_lock, unsigned long ip)
2747{ 2935{
2748 unsigned long flags; 2936 unsigned long flags;
2749 2937
2750 if (unlikely(!lock_stat && !prove_locking))
2751 return;
2752
2753 if (unlikely(current->lockdep_recursion)) 2938 if (unlikely(current->lockdep_recursion))
2754 return; 2939 return;
2755 2940
@@ -2758,7 +2943,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2758 2943
2759 current->lockdep_recursion = 1; 2944 current->lockdep_recursion = 1;
2760 __lock_acquire(lock, subclass, trylock, read, check, 2945 __lock_acquire(lock, subclass, trylock, read, check,
2761 irqs_disabled_flags(flags), ip); 2946 irqs_disabled_flags(flags), nest_lock, ip);
2762 current->lockdep_recursion = 0; 2947 current->lockdep_recursion = 0;
2763 raw_local_irq_restore(flags); 2948 raw_local_irq_restore(flags);
2764} 2949}
@@ -2770,9 +2955,6 @@ void lock_release(struct lockdep_map *lock, int nested,
2770{ 2955{
2771 unsigned long flags; 2956 unsigned long flags;
2772 2957
2773 if (unlikely(!lock_stat && !prove_locking))
2774 return;
2775
2776 if (unlikely(current->lockdep_recursion)) 2958 if (unlikely(current->lockdep_recursion))
2777 return; 2959 return;
2778 2960
@@ -2845,9 +3027,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
2845found_it: 3027found_it:
2846 hlock->waittime_stamp = sched_clock(); 3028 hlock->waittime_stamp = sched_clock();
2847 3029
2848 point = lock_contention_point(hlock->class, ip); 3030 point = lock_contention_point(hlock_class(hlock), ip);
2849 3031
2850 stats = get_lock_stats(hlock->class); 3032 stats = get_lock_stats(hlock_class(hlock));
2851 if (point < ARRAY_SIZE(stats->contention_point)) 3033 if (point < ARRAY_SIZE(stats->contention_point))
2852 stats->contention_point[i]++; 3034 stats->contention_point[i]++;
2853 if (lock->cpu != smp_processor_id()) 3035 if (lock->cpu != smp_processor_id())
@@ -2893,7 +3075,7 @@ found_it:
2893 hlock->holdtime_stamp = now; 3075 hlock->holdtime_stamp = now;
2894 } 3076 }
2895 3077
2896 stats = get_lock_stats(hlock->class); 3078 stats = get_lock_stats(hlock_class(hlock));
2897 if (waittime) { 3079 if (waittime) {
2898 if (hlock->read) 3080 if (hlock->read)
2899 lock_time_inc(&stats->read_waittime, waittime); 3081 lock_time_inc(&stats->read_waittime, waittime);
@@ -2988,6 +3170,7 @@ static void zap_class(struct lock_class *class)
2988 list_del_rcu(&class->hash_entry); 3170 list_del_rcu(&class->hash_entry);
2989 list_del_rcu(&class->lock_entry); 3171 list_del_rcu(&class->lock_entry);
2990 3172
3173 class->key = NULL;
2991} 3174}
2992 3175
2993static inline int within(const void *addr, void *start, unsigned long size) 3176static inline int within(const void *addr, void *start, unsigned long size)
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c3600a091a28..55db193d366d 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -17,9 +17,6 @@
17 */ 17 */
18#define MAX_LOCKDEP_ENTRIES 8192UL 18#define MAX_LOCKDEP_ENTRIES 8192UL
19 19
20#define MAX_LOCKDEP_KEYS_BITS 11
21#define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS)
22
23#define MAX_LOCKDEP_CHAINS_BITS 14 20#define MAX_LOCKDEP_CHAINS_BITS 14
24#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) 21#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
25 22
@@ -53,6 +50,9 @@ extern unsigned int nr_process_chains;
53extern unsigned int max_lockdep_depth; 50extern unsigned int max_lockdep_depth;
54extern unsigned int max_recursion_depth; 51extern unsigned int max_recursion_depth;
55 52
53extern unsigned long lockdep_count_forward_deps(struct lock_class *);
54extern unsigned long lockdep_count_backward_deps(struct lock_class *);
55
56#ifdef CONFIG_DEBUG_LOCKDEP 56#ifdef CONFIG_DEBUG_LOCKDEP
57/* 57/*
58 * Various lockdep statistics: 58 * Various lockdep statistics:
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 9b0e940e2545..fa19aee604c2 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
63{ 63{
64} 64}
65 65
66static unsigned long count_forward_deps(struct lock_class *class)
67{
68 struct lock_list *entry;
69 unsigned long ret = 1;
70
71 /*
72 * Recurse this class's dependency list:
73 */
74 list_for_each_entry(entry, &class->locks_after, entry)
75 ret += count_forward_deps(entry->class);
76
77 return ret;
78}
79
80static unsigned long count_backward_deps(struct lock_class *class)
81{
82 struct lock_list *entry;
83 unsigned long ret = 1;
84
85 /*
86 * Recurse this class's dependency list:
87 */
88 list_for_each_entry(entry, &class->locks_before, entry)
89 ret += count_backward_deps(entry->class);
90
91 return ret;
92}
93
94static void print_name(struct seq_file *m, struct lock_class *class) 66static void print_name(struct seq_file *m, struct lock_class *class)
95{ 67{
96 char str[128]; 68 char str[128];
@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v)
124#ifdef CONFIG_DEBUG_LOCKDEP 96#ifdef CONFIG_DEBUG_LOCKDEP
125 seq_printf(m, " OPS:%8ld", class->ops); 97 seq_printf(m, " OPS:%8ld", class->ops);
126#endif 98#endif
127 nr_forward_deps = count_forward_deps(class); 99 nr_forward_deps = lockdep_count_forward_deps(class);
128 seq_printf(m, " FD:%5ld", nr_forward_deps); 100 seq_printf(m, " FD:%5ld", nr_forward_deps);
129 101
130 nr_backward_deps = count_backward_deps(class); 102 nr_backward_deps = lockdep_count_backward_deps(class);
131 seq_printf(m, " BD:%5ld", nr_backward_deps); 103 seq_printf(m, " BD:%5ld", nr_backward_deps);
132 104
133 get_usage_chars(class, &c1, &c2, &c3, &c4); 105 get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -229,6 +201,9 @@ static int lc_show(struct seq_file *m, void *v)
229 201
230 for (i = 0; i < chain->depth; i++) { 202 for (i = 0; i < chain->depth; i++) {
231 class = lock_chain_get_class(chain, i); 203 class = lock_chain_get_class(chain, i);
204 if (!class->key)
205 continue;
206
232 seq_printf(m, "[%p] ", class->key); 207 seq_printf(m, "[%p] ", class->key);
233 print_name(m, class); 208 print_name(m, class);
234 seq_puts(m, "\n"); 209 seq_puts(m, "\n");
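This skip pairs with zap_class() earlier in this diff setting class->key to NULL when a module's lock classes are torn down: a cached chain can still name a dead class, and skipping NULL keys keeps the /proc reader from printing or dereferencing stale entries. The same tombstone idea in a few lines (illustrative types):

#include <stdio.h>
#include <stddef.h>

struct klass {
	const void *key;	/* NULL once the class has been zapped */
	const char *name;
};

static void show_chain(struct klass **chain, size_t depth)
{
	size_t i;

	for (i = 0; i < depth; i++) {
		/* Skip tombstoned classes left behind by a module unload. */
		if (!chain[i]->key)
			continue;
		printf("[%p] %s\n", chain[i]->key, chain[i]->name);
	}
}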
@@ -350,7 +325,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
350 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 325 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
351 nr_hardirq_read_unsafe++; 326 nr_hardirq_read_unsafe++;
352 327
353 sum_forward_deps += count_forward_deps(class); 328 sum_forward_deps += lockdep_count_forward_deps(class);
354 } 329 }
355#ifdef CONFIG_DEBUG_LOCKDEP 330#ifdef CONFIG_DEBUG_LOCKDEP
356 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); 331 DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
diff --git a/kernel/marker.c b/kernel/marker.c
index 1abfb923b761..7d1faecd7a51 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -126,6 +126,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
126 struct marker_probe_closure *multi; 126 struct marker_probe_closure *multi;
127 int i; 127 int i;
128 /* 128 /*
129 * Read mdata->ptype before mdata->multi.
130 */
131 smp_rmb();
132 multi = mdata->multi;
133 /*
129 * multi points to an array, therefore accessing the array 134 * multi points to an array, therefore accessing the array
130 * depends on reading multi. However, even in this case, 135 * depends on reading multi. However, even in this case,
131 * we must insure that the pointer is read _before_ the array 136 * we must insure that the pointer is read _before_ the array
@@ -133,7 +138,6 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
133 * in the fast path, so put the explicit barrier here. 138 * in the fast path, so put the explicit barrier here.
134 */ 139 */
135 smp_read_barrier_depends(); 140 smp_read_barrier_depends();
136 multi = mdata->multi;
137 for (i = 0; multi[i].func; i++) { 141 for (i = 0; multi[i].func; i++) {
138 va_start(args, call_private); 142 va_start(args, call_private);
139 multi[i].func(multi[i].probe_private, call_private, 143 multi[i].func(multi[i].probe_private, call_private,
@@ -175,6 +179,11 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
175 struct marker_probe_closure *multi; 179 struct marker_probe_closure *multi;
176 int i; 180 int i;
177 /* 181 /*
182 * Read mdata->ptype before mdata->multi.
183 */
184 smp_rmb();
185 multi = mdata->multi;
186 /*
178 * multi points to an array, therefore accessing the array 187 * multi points to an array, therefore accessing the array
179 * depends on reading multi. However, even in this case, 188 * depends on reading multi. However, even in this case,
180 * we must insure that the pointer is read _before_ the array 189 * we must insure that the pointer is read _before_ the array
@@ -182,7 +191,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
182 * in the fast path, so put the explicit barrier here. 191 * in the fast path, so put the explicit barrier here.
183 */ 192 */
184 smp_read_barrier_depends(); 193 smp_read_barrier_depends();
185 multi = mdata->multi;
186 for (i = 0; multi[i].func; i++) 194 for (i = 0; multi[i].func; i++)
187 multi[i].func(multi[i].probe_private, call_private, 195 multi[i].func(multi[i].probe_private, call_private,
188 mdata->format, &args); 196 mdata->format, &args);
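The barrier fix in both probe callbacks: the fast path reads mdata->ptype (outside this hunk) to pick single vs. multi mode, so the subsequent load of mdata->multi has to be ordered after it with smp_rmb(); smp_read_barrier_depends() only orders the later array dereference against the pointer load, not the pointer load against the earlier ptype load. A user-space analogue of the pairing, using C11 acquire/release in place of the kernel's smp_wmb()/smp_rmb() (types and names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct closure {
	void (*func)(void *priv, void *call_private);
	void *priv;
};

struct marker_like {
	_Atomic bool multi_mode;	/* plays the role of ptype */
	struct closure *_Atomic multi;	/* array, terminated by func == NULL */
};

/* Publisher: fill the array, then flip the mode with release ordering,
 * the analogue of the smp_wmb() on the update side. */
static void publish(struct marker_like *m, struct closure *arr)
{
	atomic_store_explicit(&m->multi, arr, memory_order_relaxed);
	atomic_store_explicit(&m->multi_mode, true, memory_order_release);
}

/* Consumer: read the mode first, then the array pointer, with acquire
 * ordering standing in for the added smp_rmb() between the two loads. */
static void call_probes(struct marker_like *m, void *call_private)
{
	struct closure *multi;
	int i;

	if (!atomic_load_explicit(&m->multi_mode, memory_order_acquire))
		return;

	multi = atomic_load_explicit(&m->multi, memory_order_acquire);
	for (i = 0; multi[i].func; i++)
		multi[i].func(multi[i].priv, call_private);
}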
@@ -441,7 +449,7 @@ static int remove_marker(const char *name)
441 hlist_del(&e->hlist); 449 hlist_del(&e->hlist);
442 /* Make sure the call_rcu has been executed */ 450 /* Make sure the call_rcu has been executed */
443 if (e->rcu_pending) 451 if (e->rcu_pending)
444 rcu_barrier(); 452 rcu_barrier_sched();
445 kfree(e); 453 kfree(e);
446 return 0; 454 return 0;
447} 455}
@@ -476,7 +484,7 @@ static int marker_set_format(struct marker_entry **entry, const char *format)
476 hlist_del(&(*entry)->hlist); 484 hlist_del(&(*entry)->hlist);
477 /* Make sure the call_rcu has been executed */ 485 /* Make sure the call_rcu has been executed */
478 if ((*entry)->rcu_pending) 486 if ((*entry)->rcu_pending)
479 rcu_barrier(); 487 rcu_barrier_sched();
480 kfree(*entry); 488 kfree(*entry);
481 *entry = e; 489 *entry = e;
482 trace_mark(core_marker_format, "name %s format %s", 490 trace_mark(core_marker_format, "name %s format %s",
@@ -655,7 +663,7 @@ int marker_probe_register(const char *name, const char *format,
655 * make sure it's executed now. 663 * make sure it's executed now.
656 */ 664 */
657 if (entry->rcu_pending) 665 if (entry->rcu_pending)
658 rcu_barrier(); 666 rcu_barrier_sched();
659 old = marker_entry_add_probe(entry, probe, probe_private); 667 old = marker_entry_add_probe(entry, probe, probe_private);
660 if (IS_ERR(old)) { 668 if (IS_ERR(old)) {
661 ret = PTR_ERR(old); 669 ret = PTR_ERR(old);
@@ -670,10 +678,7 @@ int marker_probe_register(const char *name, const char *format,
670 entry->rcu_pending = 1; 678 entry->rcu_pending = 1;
671 /* write rcu_pending before calling the RCU callback */ 679 /* write rcu_pending before calling the RCU callback */
672 smp_wmb(); 680 smp_wmb();
673#ifdef CONFIG_PREEMPT_RCU 681 call_rcu_sched(&entry->rcu, free_old_closure);
674 synchronize_sched(); /* Until we have the call_rcu_sched() */
675#endif
676 call_rcu(&entry->rcu, free_old_closure);
677end: 682end:
678 mutex_unlock(&markers_mutex); 683 mutex_unlock(&markers_mutex);
679 return ret; 684 return ret;
@@ -704,7 +709,7 @@ int marker_probe_unregister(const char *name,
704 if (!entry) 709 if (!entry)
705 goto end; 710 goto end;
706 if (entry->rcu_pending) 711 if (entry->rcu_pending)
707 rcu_barrier(); 712 rcu_barrier_sched();
708 old = marker_entry_remove_probe(entry, probe, probe_private); 713 old = marker_entry_remove_probe(entry, probe, probe_private);
709 mutex_unlock(&markers_mutex); 714 mutex_unlock(&markers_mutex);
710 marker_update_probes(); /* may update entry */ 715 marker_update_probes(); /* may update entry */
@@ -716,10 +721,7 @@ int marker_probe_unregister(const char *name,
716 entry->rcu_pending = 1; 721 entry->rcu_pending = 1;
717 /* write rcu_pending before calling the RCU callback */ 722 /* write rcu_pending before calling the RCU callback */
718 smp_wmb(); 723 smp_wmb();
719#ifdef CONFIG_PREEMPT_RCU 724 call_rcu_sched(&entry->rcu, free_old_closure);
720 synchronize_sched(); /* Until we have the call_rcu_sched() */
721#endif
722 call_rcu(&entry->rcu, free_old_closure);
723 remove_marker(name); /* Ignore busy error message */ 725 remove_marker(name); /* Ignore busy error message */
724 ret = 0; 726 ret = 0;
725end: 727end:
@@ -786,7 +788,7 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
786 goto end; 788 goto end;
787 } 789 }
788 if (entry->rcu_pending) 790 if (entry->rcu_pending)
789 rcu_barrier(); 791 rcu_barrier_sched();
790 old = marker_entry_remove_probe(entry, NULL, probe_private); 792 old = marker_entry_remove_probe(entry, NULL, probe_private);
791 mutex_unlock(&markers_mutex); 793 mutex_unlock(&markers_mutex);
792 marker_update_probes(); /* may update entry */ 794 marker_update_probes(); /* may update entry */
@@ -797,10 +799,7 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
797 entry->rcu_pending = 1; 799 entry->rcu_pending = 1;
798 /* write rcu_pending before calling the RCU callback */ 800 /* write rcu_pending before calling the RCU callback */
799 smp_wmb(); 801 smp_wmb();
800#ifdef CONFIG_PREEMPT_RCU 802 call_rcu_sched(&entry->rcu, free_old_closure);
801 synchronize_sched(); /* Until we have the call_rcu_sched() */
802#endif
803 call_rcu(&entry->rcu, free_old_closure);
804 remove_marker(entry->name); /* Ignore busy error message */ 803 remove_marker(entry->name); /* Ignore busy error message */
805end: 804end:
806 mutex_unlock(&markers_mutex); 805 mutex_unlock(&markers_mutex);
diff --git a/kernel/module.c b/kernel/module.c
index 5f80478b746d..61d212120df4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -70,6 +70,9 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
70 70
71static BLOCKING_NOTIFIER_HEAD(module_notify_list); 71static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72 72
73/* Bounds of module allocation, for speeding __module_text_address */
74static unsigned long module_addr_min = -1UL, module_addr_max = 0;
75
73int register_module_notifier(struct notifier_block * nb) 76int register_module_notifier(struct notifier_block * nb)
74{ 77{
75 return blocking_notifier_chain_register(&module_notify_list, nb); 78 return blocking_notifier_chain_register(&module_notify_list, nb);
@@ -134,17 +137,19 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
134extern const struct kernel_symbol __stop___ksymtab_gpl[]; 137extern const struct kernel_symbol __stop___ksymtab_gpl[];
135extern const struct kernel_symbol __start___ksymtab_gpl_future[]; 138extern const struct kernel_symbol __start___ksymtab_gpl_future[];
136extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; 139extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
137extern const struct kernel_symbol __start___ksymtab_unused[];
138extern const struct kernel_symbol __stop___ksymtab_unused[];
139extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
140extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
141extern const struct kernel_symbol __start___ksymtab_gpl_future[]; 140extern const struct kernel_symbol __start___ksymtab_gpl_future[];
142extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; 141extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
143extern const unsigned long __start___kcrctab[]; 142extern const unsigned long __start___kcrctab[];
144extern const unsigned long __start___kcrctab_gpl[]; 143extern const unsigned long __start___kcrctab_gpl[];
145extern const unsigned long __start___kcrctab_gpl_future[]; 144extern const unsigned long __start___kcrctab_gpl_future[];
145#ifdef CONFIG_UNUSED_SYMBOLS
146extern const struct kernel_symbol __start___ksymtab_unused[];
147extern const struct kernel_symbol __stop___ksymtab_unused[];
148extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
149extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
146extern const unsigned long __start___kcrctab_unused[]; 150extern const unsigned long __start___kcrctab_unused[];
147extern const unsigned long __start___kcrctab_unused_gpl[]; 151extern const unsigned long __start___kcrctab_unused_gpl[];
152#endif
148 153
149#ifndef CONFIG_MODVERSIONS 154#ifndef CONFIG_MODVERSIONS
150#define symversion(base, idx) NULL 155#define symversion(base, idx) NULL
@@ -152,152 +157,170 @@ extern const unsigned long __start___kcrctab_unused_gpl[];
152#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) 157#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
153#endif 158#endif
154 159
155/* lookup symbol in given range of kernel_symbols */
156static const struct kernel_symbol *lookup_symbol(const char *name,
157 const struct kernel_symbol *start,
158 const struct kernel_symbol *stop)
159{
160 const struct kernel_symbol *ks = start;
161 for (; ks < stop; ks++)
162 if (strcmp(ks->name, name) == 0)
163 return ks;
164 return NULL;
165}
166
167static bool always_ok(bool gplok, bool warn, const char *name)
168{
169 return true;
170}
171
172static bool printk_unused_warning(bool gplok, bool warn, const char *name)
173{
174 if (warn) {
175 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
176 "however this module is using it.\n", name);
177 printk(KERN_WARNING
178 "This symbol will go away in the future.\n");
179 printk(KERN_WARNING
180 "Please evalute if this is the right api to use and if "
181 "it really is, submit a report the linux kernel "
182 "mailinglist together with submitting your code for "
183 "inclusion.\n");
184 }
185 return true;
186}
187
188static bool gpl_only_unused_warning(bool gplok, bool warn, const char *name)
189{
190 if (!gplok)
191 return false;
192 return printk_unused_warning(gplok, warn, name);
193}
194
195static bool gpl_only(bool gplok, bool warn, const char *name)
196{
197 return gplok;
198}
199
200static bool warn_if_not_gpl(bool gplok, bool warn, const char *name)
201{
202 if (!gplok && warn) {
203 printk(KERN_WARNING "Symbol %s is being used "
204 "by a non-GPL module, which will not "
205 "be allowed in the future\n", name);
206 printk(KERN_WARNING "Please see the file "
207 "Documentation/feature-removal-schedule.txt "
208 "in the kernel source tree for more details.\n");
209 }
210 return true;
211}
212
213struct symsearch { 160struct symsearch {
214 const struct kernel_symbol *start, *stop; 161 const struct kernel_symbol *start, *stop;
215 const unsigned long *crcs; 162 const unsigned long *crcs;
216 bool (*check)(bool gplok, bool warn, const char *name); 163 enum {
164 NOT_GPL_ONLY,
165 GPL_ONLY,
166 WILL_BE_GPL_ONLY,
167 } licence;
168 bool unused;
217}; 169};
218 170
219/* Look through this array of symbol tables for a symbol match which 171static bool each_symbol_in_section(const struct symsearch *arr,
220 * passes the check function. */ 172 unsigned int arrsize,
221static const struct kernel_symbol *search_symarrays(const struct symsearch *arr, 173 struct module *owner,
222 unsigned int num, 174 bool (*fn)(const struct symsearch *syms,
223 const char *name, 175 struct module *owner,
224 bool gplok, 176 unsigned int symnum, void *data),
225 bool warn, 177 void *data)
226 const unsigned long **crc)
227{ 178{
228 unsigned int i; 179 unsigned int i, j;
229 const struct kernel_symbol *ks;
230
231 for (i = 0; i < num; i++) {
232 ks = lookup_symbol(name, arr[i].start, arr[i].stop);
233 if (!ks || !arr[i].check(gplok, warn, name))
234 continue;
235 180
236 if (crc) 181 for (j = 0; j < arrsize; j++) {
237 *crc = symversion(arr[i].crcs, ks - arr[i].start); 182 for (i = 0; i < arr[j].stop - arr[j].start; i++)
238 return ks; 183 if (fn(&arr[j], owner, i, data))
184 return true;
239 } 185 }
240 return NULL; 186
187 return false;
241} 188}
242 189
243/* Find a symbol, return value, (optional) crc and (optional) module 190/* Returns true as soon as fn returns true, otherwise false. */
244 * which owns it */ 191static bool each_symbol(bool (*fn)(const struct symsearch *arr,
245static unsigned long find_symbol(const char *name, 192 struct module *owner,
246 struct module **owner, 193 unsigned int symnum, void *data),
247 const unsigned long **crc, 194 void *data)
248 bool gplok,
249 bool warn)
250{ 195{
251 struct module *mod; 196 struct module *mod;
252 const struct kernel_symbol *ks;
253 const struct symsearch arr[] = { 197 const struct symsearch arr[] = {
254 { __start___ksymtab, __stop___ksymtab, __start___kcrctab, 198 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
255 always_ok }, 199 NOT_GPL_ONLY, false },
256 { __start___ksymtab_gpl, __stop___ksymtab_gpl, 200 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
257 __start___kcrctab_gpl, gpl_only }, 201 __start___kcrctab_gpl,
202 GPL_ONLY, false },
258 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, 203 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
259 __start___kcrctab_gpl_future, warn_if_not_gpl }, 204 __start___kcrctab_gpl_future,
205 WILL_BE_GPL_ONLY, false },
206#ifdef CONFIG_UNUSED_SYMBOLS
260 { __start___ksymtab_unused, __stop___ksymtab_unused, 207 { __start___ksymtab_unused, __stop___ksymtab_unused,
261 __start___kcrctab_unused, printk_unused_warning }, 208 __start___kcrctab_unused,
209 NOT_GPL_ONLY, true },
262 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, 210 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
263 __start___kcrctab_unused_gpl, gpl_only_unused_warning }, 211 __start___kcrctab_unused_gpl,
212 GPL_ONLY, true },
213#endif
264 }; 214 };
265 215
266 /* Core kernel first. */ 216 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
267 ks = search_symarrays(arr, ARRAY_SIZE(arr), name, gplok, warn, crc); 217 return true;
268 if (ks) {
269 if (owner)
270 *owner = NULL;
271 return ks->value;
272 }
273 218
274 /* Now try modules. */
275 list_for_each_entry(mod, &modules, list) { 219 list_for_each_entry(mod, &modules, list) {
276 struct symsearch arr[] = { 220 struct symsearch arr[] = {
277 { mod->syms, mod->syms + mod->num_syms, mod->crcs, 221 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
278 always_ok }, 222 NOT_GPL_ONLY, false },
279 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, 223 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
280 mod->gpl_crcs, gpl_only }, 224 mod->gpl_crcs,
225 GPL_ONLY, false },
281 { mod->gpl_future_syms, 226 { mod->gpl_future_syms,
282 mod->gpl_future_syms + mod->num_gpl_future_syms, 227 mod->gpl_future_syms + mod->num_gpl_future_syms,
283 mod->gpl_future_crcs, warn_if_not_gpl }, 228 mod->gpl_future_crcs,
229 WILL_BE_GPL_ONLY, false },
230#ifdef CONFIG_UNUSED_SYMBOLS
284 { mod->unused_syms, 231 { mod->unused_syms,
285 mod->unused_syms + mod->num_unused_syms, 232 mod->unused_syms + mod->num_unused_syms,
286 mod->unused_crcs, printk_unused_warning }, 233 mod->unused_crcs,
234 NOT_GPL_ONLY, true },
287 { mod->unused_gpl_syms, 235 { mod->unused_gpl_syms,
288 mod->unused_gpl_syms + mod->num_unused_gpl_syms, 236 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
289 mod->unused_gpl_crcs, gpl_only_unused_warning }, 237 mod->unused_gpl_crcs,
238 GPL_ONLY, true },
239#endif
290 }; 240 };
291 241
292 ks = search_symarrays(arr, ARRAY_SIZE(arr), 242 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
293 name, gplok, warn, crc); 243 return true;
294 if (ks) { 244 }
295 if (owner) 245 return false;
296 *owner = mod; 246}
297 return ks->value; 247
248struct find_symbol_arg {
249 /* Input */
250 const char *name;
251 bool gplok;
252 bool warn;
253
254 /* Output */
255 struct module *owner;
256 const unsigned long *crc;
257 unsigned long value;
258};
259
260static bool find_symbol_in_section(const struct symsearch *syms,
261 struct module *owner,
262 unsigned int symnum, void *data)
263{
264 struct find_symbol_arg *fsa = data;
265
266 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
267 return false;
268
269 if (!fsa->gplok) {
270 if (syms->licence == GPL_ONLY)
271 return false;
272 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
273 printk(KERN_WARNING "Symbol %s is being used "
274 "by a non-GPL module, which will not "
275 "be allowed in the future\n", fsa->name);
276 printk(KERN_WARNING "Please see the file "
277 "Documentation/feature-removal-schedule.txt "
278 "in the kernel source tree for more details.\n");
298 } 279 }
299 } 280 }
300 281
282#ifdef CONFIG_UNUSED_SYMBOLS
283 if (syms->unused && fsa->warn) {
284 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
285 "however this module is using it.\n", fsa->name);
286 printk(KERN_WARNING
287 "This symbol will go away in the future.\n");
288 printk(KERN_WARNING
289 "Please evalute if this is the right api to use and if "
290 "it really is, submit a report the linux kernel "
291 "mailinglist together with submitting your code for "
292 "inclusion.\n");
293 }
294#endif
295
296 fsa->owner = owner;
297 fsa->crc = symversion(syms->crcs, symnum);
298 fsa->value = syms->start[symnum].value;
299 return true;
300}
301
302/* Find a symbol, return value, (optional) crc and (optional) module
303 * which owns it */
304static unsigned long find_symbol(const char *name,
305 struct module **owner,
306 const unsigned long **crc,
307 bool gplok,
308 bool warn)
309{
310 struct find_symbol_arg fsa;
311
312 fsa.name = name;
313 fsa.gplok = gplok;
314 fsa.warn = warn;
315
316 if (each_symbol(find_symbol_in_section, &fsa)) {
317 if (owner)
318 *owner = fsa.owner;
319 if (crc)
320 *crc = fsa.crc;
321 return fsa.value;
322 }
323
301 DEBUGP("Failed to find symbol %s\n", name); 324 DEBUGP("Failed to find symbol %s\n", name);
302 return -ENOENT; 325 return -ENOENT;
303} 326}
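The refactor above replaces the per-table search helpers with one generic iterator: each_symbol() visits every export table (built-in first, then each module's) and hands each symbol to a callback, and find_symbol_in_section() is the callback that applies the licence/unused policy and fills a find_symbol_arg. The shape of that callback iterator, reduced to a standalone sketch with no licence handling (illustrative types):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct sym { const char *name; unsigned long value; };

struct symtab { const struct sym *start, *stop; };

/* Visit every symbol in every table until the callback returns true. */
static bool each_symbol(const struct symtab *tabs, size_t ntabs,
			bool (*fn)(const struct symtab *, size_t, void *),
			void *data)
{
	size_t t, i;

	for (t = 0; t < ntabs; t++)
		for (i = 0; i < (size_t)(tabs[t].stop - tabs[t].start); i++)
			if (fn(&tabs[t], i, data))
				return true;
	return false;
}

struct find_arg {
	const char *name;	/* input */
	unsigned long value;	/* output */
};

static bool find_cb(const struct symtab *tab, size_t idx, void *data)
{
	struct find_arg *fsa = data;

	if (strcmp(tab->start[idx].name, fsa->name) != 0)
		return false;
	fsa->value = tab->start[idx].value;
	return true;	/* stop the walk */
}

static bool find_symbol(const struct symtab *tabs, size_t ntabs,
			const char *name, unsigned long *value)
{
	struct find_arg fsa = { .name = name };

	if (!each_symbol(tabs, ntabs, find_cb, &fsa))
		return false;
	*value = fsa.value;
	return true;
}

Keeping policy in the callback and iteration in one place is what lets the CONFIG_UNUSED_SYMBOLS tables drop out cleanly under #ifdef.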
@@ -639,8 +662,8 @@ static int __try_stop_module(void *_sref)
639{ 662{
640 struct stopref *sref = _sref; 663 struct stopref *sref = _sref;
641 664
642 /* If it's not unused, quit unless we are told to block. */ 665 /* If it's not unused, quit unless we're forcing. */
643 if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) { 666 if (module_refcount(sref->mod) != 0) {
644 if (!(*sref->forced = try_force_unload(sref->flags))) 667 if (!(*sref->forced = try_force_unload(sref->flags)))
645 return -EWOULDBLOCK; 668 return -EWOULDBLOCK;
646 } 669 }
@@ -652,9 +675,16 @@ static int __try_stop_module(void *_sref)
652 675
653static int try_stop_module(struct module *mod, int flags, int *forced) 676static int try_stop_module(struct module *mod, int flags, int *forced)
654{ 677{
655 struct stopref sref = { mod, flags, forced }; 678 if (flags & O_NONBLOCK) {
679 struct stopref sref = { mod, flags, forced };
656 680
657 return stop_machine_run(__try_stop_module, &sref, NR_CPUS); 681 return stop_machine(__try_stop_module, &sref, NULL);
682 } else {
683 /* We don't need to stop the machine for this. */
684 mod->state = MODULE_STATE_GOING;
685 synchronize_sched();
686 return 0;
687 }
658} 688}
659 689
660unsigned int module_refcount(struct module *mod) 690unsigned int module_refcount(struct module *mod)
@@ -1386,7 +1416,7 @@ static int __unlink_module(void *_mod)
1386static void free_module(struct module *mod) 1416static void free_module(struct module *mod)
1387{ 1417{
1388 /* Delete from various lists */ 1418 /* Delete from various lists */
1389 stop_machine_run(__unlink_module, mod, NR_CPUS); 1419 stop_machine(__unlink_module, mod, NULL);
1390 remove_notes_attrs(mod); 1420 remove_notes_attrs(mod);
1391 remove_sect_attrs(mod); 1421 remove_sect_attrs(mod);
1392 mod_kobject_remove(mod); 1422 mod_kobject_remove(mod);
@@ -1445,8 +1475,10 @@ static int verify_export_symbols(struct module *mod)
1445 { mod->syms, mod->num_syms }, 1475 { mod->syms, mod->num_syms },
1446 { mod->gpl_syms, mod->num_gpl_syms }, 1476 { mod->gpl_syms, mod->num_gpl_syms },
1447 { mod->gpl_future_syms, mod->num_gpl_future_syms }, 1477 { mod->gpl_future_syms, mod->num_gpl_future_syms },
1478#ifdef CONFIG_UNUSED_SYMBOLS
1448 { mod->unused_syms, mod->num_unused_syms }, 1479 { mod->unused_syms, mod->num_unused_syms },
1449 { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, 1480 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
1481#endif
1450 }; 1482 };
1451 1483
1452 for (i = 0; i < ARRAY_SIZE(arr); i++) { 1484 for (i = 0; i < ARRAY_SIZE(arr); i++) {
@@ -1526,7 +1558,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
1526} 1558}
1527 1559
1528/* Update size with this section: return offset. */ 1560/* Update size with this section: return offset. */
1529static long get_offset(unsigned long *size, Elf_Shdr *sechdr) 1561static long get_offset(unsigned int *size, Elf_Shdr *sechdr)
1530{ 1562{
1531 long ret; 1563 long ret;
1532 1564
@@ -1659,6 +1691,19 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
1659} 1691}
1660 1692
1661#ifdef CONFIG_KALLSYMS 1693#ifdef CONFIG_KALLSYMS
1694
1695/* lookup symbol in given range of kernel_symbols */
1696static const struct kernel_symbol *lookup_symbol(const char *name,
1697 const struct kernel_symbol *start,
1698 const struct kernel_symbol *stop)
1699{
1700 const struct kernel_symbol *ks = start;
1701 for (; ks < stop; ks++)
1702 if (strcmp(ks->name, name) == 0)
1703 return ks;
1704 return NULL;
1705}
1706
1662static int is_exported(const char *name, const struct module *mod) 1707static int is_exported(const char *name, const struct module *mod)
1663{ 1708{
1664 if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) 1709 if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
@@ -1738,6 +1783,20 @@ static inline void add_kallsyms(struct module *mod,
1738} 1783}
1739#endif /* CONFIG_KALLSYMS */ 1784#endif /* CONFIG_KALLSYMS */
1740 1785
1786static void *module_alloc_update_bounds(unsigned long size)
1787{
1788 void *ret = module_alloc(size);
1789
1790 if (ret) {
1791 /* Update module bounds. */
1792 if ((unsigned long)ret < module_addr_min)
1793 module_addr_min = (unsigned long)ret;
1794 if ((unsigned long)ret + size > module_addr_max)
1795 module_addr_max = (unsigned long)ret + size;
1796 }
1797 return ret;
1798}
1799
1741/* Allocate and load the module: note that size of section 0 is always 1800/* Allocate and load the module: note that size of section 0 is always
1742 zero, and we rely on this for optional sections. */ 1801 zero, and we rely on this for optional sections. */
1743static struct module *load_module(void __user *umod, 1802static struct module *load_module(void __user *umod,
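module_alloc_update_bounds() feeds the module_addr_min/module_addr_max pair declared near the top of the file, so __module_text_address() (at the end of this diff) can reject most addresses with two compares before walking the module list. A compact sketch of the idea, with malloc() standing in for module_alloc():

#include <stdlib.h>
#include <stdint.h>

static uintptr_t addr_min = UINTPTR_MAX;
static uintptr_t addr_max;

/* Wrap the allocator and widen the [addr_min, addr_max] envelope. */
static void *alloc_update_bounds(size_t size)
{
	void *ret = malloc(size);	/* stand-in for module_alloc() */

	if (ret) {
		if ((uintptr_t)ret < addr_min)
			addr_min = (uintptr_t)ret;
		if ((uintptr_t)ret + size > addr_max)
			addr_max = (uintptr_t)ret + size;
	}
	return ret;
}

/* Cheap reject before any expensive per-module lookup. */
static int maybe_module_address(uintptr_t addr)
{
	return addr >= addr_min && addr <= addr_max;
}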
@@ -1764,10 +1823,12 @@ static struct module *load_module(void __user *umod,
1764 unsigned int gplfutureindex; 1823 unsigned int gplfutureindex;
1765 unsigned int gplfuturecrcindex; 1824 unsigned int gplfuturecrcindex;
1766 unsigned int unwindex = 0; 1825 unsigned int unwindex = 0;
1826#ifdef CONFIG_UNUSED_SYMBOLS
1767 unsigned int unusedindex; 1827 unsigned int unusedindex;
1768 unsigned int unusedcrcindex; 1828 unsigned int unusedcrcindex;
1769 unsigned int unusedgplindex; 1829 unsigned int unusedgplindex;
1770 unsigned int unusedgplcrcindex; 1830 unsigned int unusedgplcrcindex;
1831#endif
1771 unsigned int markersindex; 1832 unsigned int markersindex;
1772 unsigned int markersstringsindex; 1833 unsigned int markersstringsindex;
1773 struct module *mod; 1834 struct module *mod;
@@ -1850,13 +1911,15 @@ static struct module *load_module(void __user *umod,
1850 exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab"); 1911 exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
1851 gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl"); 1912 gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
1852 gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future"); 1913 gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future");
1853 unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
1854 unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
1855 crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab"); 1914 crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
1856 gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl"); 1915 gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
1857 gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future"); 1916 gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future");
1917#ifdef CONFIG_UNUSED_SYMBOLS
1918 unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
1919 unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
1858 unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused"); 1920 unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused");
1859 unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl"); 1921 unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl");
1922#endif
1860 setupindex = find_sec(hdr, sechdrs, secstrings, "__param"); 1923 setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
1861 exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table"); 1924 exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
1862 obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm"); 1925 obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
@@ -1935,7 +1998,7 @@ static struct module *load_module(void __user *umod,
1935 layout_sections(mod, hdr, sechdrs, secstrings); 1998 layout_sections(mod, hdr, sechdrs, secstrings);
1936 1999
1937 /* Do the allocs. */ 2000 /* Do the allocs. */
1938 ptr = module_alloc(mod->core_size); 2001 ptr = module_alloc_update_bounds(mod->core_size);
1939 if (!ptr) { 2002 if (!ptr) {
1940 err = -ENOMEM; 2003 err = -ENOMEM;
1941 goto free_percpu; 2004 goto free_percpu;
@@ -1943,7 +2006,7 @@ static struct module *load_module(void __user *umod,
1943 memset(ptr, 0, mod->core_size); 2006 memset(ptr, 0, mod->core_size);
1944 mod->module_core = ptr; 2007 mod->module_core = ptr;
1945 2008
1946 ptr = module_alloc(mod->init_size); 2009 ptr = module_alloc_update_bounds(mod->init_size);
1947 if (!ptr && mod->init_size) { 2010 if (!ptr && mod->init_size) {
1948 err = -ENOMEM; 2011 err = -ENOMEM;
1949 goto free_core; 2012 goto free_core;
@@ -2018,14 +2081,15 @@ static struct module *load_module(void __user *umod,
2018 mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr; 2081 mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr;
2019 mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size / 2082 mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size /
2020 sizeof(*mod->gpl_future_syms); 2083 sizeof(*mod->gpl_future_syms);
2021 mod->num_unused_syms = sechdrs[unusedindex].sh_size /
2022 sizeof(*mod->unused_syms);
2023 mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
2024 sizeof(*mod->unused_gpl_syms);
2025 mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr; 2084 mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr;
2026 if (gplfuturecrcindex) 2085 if (gplfuturecrcindex)
2027 mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr; 2086 mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr;
2028 2087
2088#ifdef CONFIG_UNUSED_SYMBOLS
2089 mod->num_unused_syms = sechdrs[unusedindex].sh_size /
2090 sizeof(*mod->unused_syms);
2091 mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
2092 sizeof(*mod->unused_gpl_syms);
2029 mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr; 2093 mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr;
2030 if (unusedcrcindex) 2094 if (unusedcrcindex)
2031 mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr; 2095 mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr;
@@ -2033,13 +2097,17 @@ static struct module *load_module(void __user *umod,
2033 if (unusedgplcrcindex) 2097 if (unusedgplcrcindex)
2034 mod->unused_gpl_crcs 2098 mod->unused_gpl_crcs
2035 = (void *)sechdrs[unusedgplcrcindex].sh_addr; 2099 = (void *)sechdrs[unusedgplcrcindex].sh_addr;
2100#endif
2036 2101
2037#ifdef CONFIG_MODVERSIONS 2102#ifdef CONFIG_MODVERSIONS
2038 if ((mod->num_syms && !crcindex) || 2103 if ((mod->num_syms && !crcindex)
2039 (mod->num_gpl_syms && !gplcrcindex) || 2104 || (mod->num_gpl_syms && !gplcrcindex)
2040 (mod->num_gpl_future_syms && !gplfuturecrcindex) || 2105 || (mod->num_gpl_future_syms && !gplfuturecrcindex)
2041 (mod->num_unused_syms && !unusedcrcindex) || 2106#ifdef CONFIG_UNUSED_SYMBOLS
2042 (mod->num_unused_gpl_syms && !unusedgplcrcindex)) { 2107 || (mod->num_unused_syms && !unusedcrcindex)
2108 || (mod->num_unused_gpl_syms && !unusedgplcrcindex)
2109#endif
2110 ) {
2043 printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name); 2111 printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name);
2044 err = try_to_force_load(mod, "nocrc"); 2112 err = try_to_force_load(mod, "nocrc");
2045 if (err) 2113 if (err)
@@ -2129,7 +2197,7 @@ static struct module *load_module(void __user *umod,
2129 /* Now sew it into the lists so we can get lockdep and oops 2197 /* Now sew it into the lists so we can get lockdep and oops
2130 * info during argument parsing. Noone should access us, since 2198 * info during argument parsing. Noone should access us, since
2131 * strong_try_module_get() will fail. */ 2199 * strong_try_module_get() will fail. */
2132 stop_machine_run(__link_module, mod, NR_CPUS); 2200 stop_machine(__link_module, mod, NULL);
2133 2201
2134 /* Size of section 0 is 0, so this works well if no params */ 2202 /* Size of section 0 is 0, so this works well if no params */
2135 err = parse_args(mod->name, mod->args, 2203 err = parse_args(mod->name, mod->args,
@@ -2163,7 +2231,7 @@ static struct module *load_module(void __user *umod,
2163 return mod; 2231 return mod;
2164 2232
2165 unlink: 2233 unlink:
2166 stop_machine_run(__unlink_module, mod, NR_CPUS); 2234 stop_machine(__unlink_module, mod, NULL);
2167 module_arch_cleanup(mod); 2235 module_arch_cleanup(mod);
2168 cleanup: 2236 cleanup:
2169 kobject_del(&mod->mkobj.kobj); 2237 kobject_del(&mod->mkobj.kobj);
@@ -2512,7 +2580,7 @@ static int m_show(struct seq_file *m, void *p)
2512 struct module *mod = list_entry(p, struct module, list); 2580 struct module *mod = list_entry(p, struct module, list);
2513 char buf[8]; 2581 char buf[8];
2514 2582
2515 seq_printf(m, "%s %lu", 2583 seq_printf(m, "%s %u",
2516 mod->name, mod->init_size + mod->core_size); 2584 mod->name, mod->init_size + mod->core_size);
2517 print_unload_info(m, mod); 2585 print_unload_info(m, mod);
2518 2586
@@ -2595,6 +2663,9 @@ struct module *__module_text_address(unsigned long addr)
2595{ 2663{
2596 struct module *mod; 2664 struct module *mod;
2597 2665
2666 if (addr < module_addr_min || addr > module_addr_max)
2667 return NULL;
2668
2598 list_for_each_entry(mod, &modules, list) 2669 list_for_each_entry(mod, &modules, list)
2599 if (within(addr, mod->module_init, mod->init_text_size) 2670 if (within(addr, mod->module_init, mod->init_text_size)
2600 || within(addr, mod->module_core, mod->core_text_size)) 2671 || within(addr, mod->module_core, mod->core_text_size))
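The bounds check added to __module_text_address() only pays off if module_addr_min/module_addr_max are kept current by every allocation. A minimal sketch of what module_alloc_update_bounds() presumably does (locking and exact types are assumptions; only the names visible in this hunk come from the patch):

/* Sketch only: wrap module_alloc() and widen the global [min, max]
 * window consulted by the __module_text_address() fast path. */
static void *module_alloc_update_bounds(unsigned long size)
{
	void *ret = module_alloc(size);

	if (ret) {
		if ((unsigned long)ret < module_addr_min)
			module_addr_min = (unsigned long)ret;
		if ((unsigned long)ret + size > module_addr_max)
			module_addr_max = (unsigned long)ret + size;
	}
	return ret;
}
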
diff --git a/kernel/mutex.c b/kernel/mutex.c
index bcdc9ac8ef60..12c779dc65d4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -34,6 +34,7 @@
34/*** 34/***
35 * mutex_init - initialize the mutex 35 * mutex_init - initialize the mutex
36 * @lock: the mutex to be initialized 36 * @lock: the mutex to be initialized
37 * @key: the lock_class_key for the class; used by mutex lock debugging
37 * 38 *
38 * Initialize the mutex to unlocked state. 39 * Initialize the mutex to unlocked state.
39 * 40 *
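The @key parameter documented above is normally supplied behind the scenes by the mutex_init() wrapper macro rather than by callers; a hedged usage sketch (the function and lock names are illustrative):

static DEFINE_MUTEX(example_lock);	/* static init, key handled for you */

static void example_init_lock(struct mutex *m)
{
	/* mutex_init() is a macro: it allocates a static lock_class_key
	 * and hands it to __mutex_init() when lock debugging is enabled */
	mutex_init(m);
}
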
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 48d7ed6fc3a4..43c2111cd54d 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -7,6 +7,7 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/cgroup.h> 8#include <linux/cgroup.h>
9#include <linux/fs.h> 9#include <linux/fs.h>
10#include <linux/proc_fs.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <linux/nsproxy.h> 12#include <linux/nsproxy.h>
12 13
@@ -24,9 +25,12 @@ static inline struct ns_cgroup *cgroup_to_ns(
24 struct ns_cgroup, css); 25 struct ns_cgroup, css);
25} 26}
26 27
27int ns_cgroup_clone(struct task_struct *task) 28int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
28{ 29{
29 return cgroup_clone(task, &ns_subsys); 30 char name[PROC_NUMBUF];
31
32 snprintf(name, PROC_NUMBUF, "%d", pid_vnr(pid));
33 return cgroup_clone(task, &ns_subsys, name);
30} 34}
31 35
32/* 36/*
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index adc785146a1c..21575fc46d05 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -157,12 +157,6 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
157 goto out; 157 goto out;
158 } 158 }
159 159
160 err = ns_cgroup_clone(tsk);
161 if (err) {
162 put_nsproxy(new_ns);
163 goto out;
164 }
165
166 tsk->nsproxy = new_ns; 160 tsk->nsproxy = new_ns;
167 161
168out: 162out:
@@ -209,7 +203,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
209 goto out; 203 goto out;
210 } 204 }
211 205
212 err = ns_cgroup_clone(current); 206 err = ns_cgroup_clone(current, task_pid(current));
213 if (err) 207 if (err)
214 put_nsproxy(*new_nsp); 208 put_nsproxy(*new_nsp);
215 209
diff --git a/kernel/panic.c b/kernel/panic.c
index 425567f45b9f..12c5a0a6c89b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -318,6 +318,28 @@ void warn_on_slowpath(const char *file, int line)
318 add_taint(TAINT_WARN); 318 add_taint(TAINT_WARN);
319} 319}
320EXPORT_SYMBOL(warn_on_slowpath); 320EXPORT_SYMBOL(warn_on_slowpath);
321
322
323void warn_slowpath(const char *file, int line, const char *fmt, ...)
324{
325 va_list args;
326 char function[KSYM_SYMBOL_LEN];
327 unsigned long caller = (unsigned long)__builtin_return_address(0);
328 sprint_symbol(function, caller);
329
330 printk(KERN_WARNING "------------[ cut here ]------------\n");
331 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
332 line, function);
333 va_start(args, fmt);
334 vprintk(fmt, args);
335 va_end(args);
336
337 print_modules();
338 dump_stack();
339 print_oops_end_marker();
340 add_taint(TAINT_WARN);
341}
342EXPORT_SYMBOL(warn_slowpath);
321#endif 343#endif
322 344
323#ifdef CONFIG_CC_STACKPROTECTOR 345#ifdef CONFIG_CC_STACKPROTECTOR
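warn_slowpath() exists so that WARN-style macros can attach a printf-style message to the usual taint, module list and stack dump output. A hedged sketch of a caller (the macro name is illustrative, not part of this patch):

#define EXAMPLE_WARN(condition, fmt...) ({			\
	int __ret_warn = !!(condition);				\
	if (unlikely(__ret_warn))				\
		warn_slowpath(__FILE__, __LINE__, fmt);		\
	unlikely(__ret_warn);					\
})

/* usage: if (EXAMPLE_WARN(len > max, "len %d > max %d\n", len, max)) ... */
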
diff --git a/kernel/pid.c b/kernel/pid.c
index 30bd5d4b2ac7..064e76afa507 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -309,12 +309,6 @@ struct pid *find_vpid(int nr)
309} 309}
310EXPORT_SYMBOL_GPL(find_vpid); 310EXPORT_SYMBOL_GPL(find_vpid);
311 311
312struct pid *find_pid(int nr)
313{
314 return find_pid_ns(nr, &init_pid_ns);
315}
316EXPORT_SYMBOL_GPL(find_pid);
317
318/* 312/*
319 * attach_pid() must be called with the tasklist_lock write-held. 313 * attach_pid() must be called with the tasklist_lock write-held.
320 */ 314 */
@@ -435,6 +429,7 @@ struct pid *find_get_pid(pid_t nr)
435 429
436 return pid; 430 return pid;
437} 431}
432EXPORT_SYMBOL_GPL(find_get_pid);
438 433
439pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) 434pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
440{ 435{
@@ -482,7 +477,7 @@ EXPORT_SYMBOL(task_session_nr_ns);
482/* 477/*
483 * Used by proc to find the first pid that is greater then or equal to nr. 478 * Used by proc to find the first pid that is greater then or equal to nr.
484 * 479 *
485 * If there is a pid at nr this function is exactly the same as find_pid. 480 * If there is a pid at nr this function is exactly the same as find_pid_ns.
486 */ 481 */
487struct pid *find_ge_pid(int nr, struct pid_namespace *ns) 482struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
488{ 483{
@@ -497,7 +492,6 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
497 492
498 return pid; 493 return pid;
499} 494}
500EXPORT_SYMBOL_GPL(find_get_pid);
501 495
502/* 496/*
503 * The pid hash table is scaled according to the amount of memory in the 497 * The pid hash table is scaled according to the amount of memory in the
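With find_pid() removed, lookups either name a pid namespace explicitly or use find_vpid() for the caller's own namespace. A minimal sketch of the equivalent of the dropped helper (the wrapper name is illustrative):

static struct pid *example_find_init_pid(int nr)
{
	/* exactly what the removed find_pid() used to do */
	return find_pid_ns(nr, &init_pid_ns);
}
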
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 98702b4b8851..ea567b78d1aa 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -12,6 +12,7 @@
12#include <linux/pid_namespace.h> 12#include <linux/pid_namespace.h>
13#include <linux/syscalls.h> 13#include <linux/syscalls.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/acct.h>
15 16
16#define BITS_PER_PAGE (PAGE_SIZE*8) 17#define BITS_PER_PAGE (PAGE_SIZE*8)
17 18
@@ -71,7 +72,7 @@ static struct pid_namespace *create_pid_namespace(unsigned int level)
71 struct pid_namespace *ns; 72 struct pid_namespace *ns;
72 int i; 73 int i;
73 74
74 ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL); 75 ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
75 if (ns == NULL) 76 if (ns == NULL)
76 goto out; 77 goto out;
77 78
@@ -84,17 +85,13 @@ static struct pid_namespace *create_pid_namespace(unsigned int level)
84 goto out_free_map; 85 goto out_free_map;
85 86
86 kref_init(&ns->kref); 87 kref_init(&ns->kref);
87 ns->last_pid = 0;
88 ns->child_reaper = NULL;
89 ns->level = level; 88 ns->level = level;
90 89
91 set_bit(0, ns->pidmap[0].page); 90 set_bit(0, ns->pidmap[0].page);
92 atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); 91 atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
93 92
94 for (i = 1; i < PIDMAP_ENTRIES; i++) { 93 for (i = 1; i < PIDMAP_ENTRIES; i++)
95 ns->pidmap[i].page = NULL;
96 atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); 94 atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
97 }
98 95
99 return ns; 96 return ns;
100 97
@@ -185,6 +182,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
185 182
186 /* Child reaper for the pid namespace is going away */ 183 /* Child reaper for the pid namespace is going away */
187 pid_ns->child_reaper = NULL; 184 pid_ns->child_reaper = NULL;
185 acct_exit_ns(pid_ns);
188 return; 186 return;
189} 187}
190 188
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 8cb757026386..da9c2dda6a4e 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -24,7 +24,7 @@
24 * requirement that the application has is cleaned up when closes the file 24 * requirement that the application has is cleaned up when closes the file
25 * pointer or exits the pm_qos_object will get an opportunity to clean up. 25 * pointer or exits the pm_qos_object will get an opportunity to clean up.
26 * 26 *
27 * mark gross mgross@linux.intel.com 27 * Mark Gross <mgross@linux.intel.com>
28 */ 28 */
29 29
30#include <linux/pm_qos_params.h> 30#include <linux/pm_qos_params.h>
@@ -211,8 +211,8 @@ EXPORT_SYMBOL_GPL(pm_qos_requirement);
211 * @value: defines the qos request 211 * @value: defines the qos request
212 * 212 *
213 * This function inserts a new entry in the pm_qos_class list of requested qos 213 * This function inserts a new entry in the pm_qos_class list of requested qos
214 * performance charactoistics. It recomputes the agregate QoS expectations for 214 * performance characteristics. It recomputes the aggregate QoS expectations
215 * the pm_qos_class of parrameters. 215 * for the pm_qos_class of parameters.
216 */ 216 */
217int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value) 217int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
218{ 218{
@@ -250,10 +250,10 @@ EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
250 * @name: identifies the request 250 * @name: identifies the request
251 * @value: defines the qos request 251 * @value: defines the qos request
252 * 252 *
253 * Updates an existing qos requierement for the pm_qos_class of parameters along 253 * Updates an existing qos requirement for the pm_qos_class of parameters along
254 * with updating the target pm_qos_class value. 254 * with updating the target pm_qos_class value.
255 * 255 *
256 * If the named request isn't in the lest then no change is made. 256 * If the named request isn't in the list then no change is made.
257 */ 257 */
258int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value) 258int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
259{ 259{
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
287 * @pm_qos_class: identifies which list of qos request to us 287 * @pm_qos_class: identifies which list of qos request to us
288 * @name: identifies the request 288 * @name: identifies the request
289 * 289 *
290 * Will remove named qos request from pm_qos_class list of parrameters and 290 * Will remove named qos request from pm_qos_class list of parameters and
291 * recompute the current target value for the pm_qos_class. 291 * recompute the current target value for the pm_qos_class.
292 */ 292 */
293void pm_qos_remove_requirement(int pm_qos_class, char *name) 293void pm_qos_remove_requirement(int pm_qos_class, char *name)
@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
319 * @notifier: notifier block managed by caller. 319 * @notifier: notifier block managed by caller.
320 * 320 *
321 * will register the notifier into a notification chain that gets called 321 * will register the notifier into a notification chain that gets called
322 * uppon changes to the pm_qos_class target value. 322 * upon changes to the pm_qos_class target value.
323 */ 323 */
324 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier) 324 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
325{ 325{
@@ -338,7 +338,7 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
338 * @notifier: notifier block to be removed. 338 * @notifier: notifier block to be removed.
339 * 339 *
340 * will remove the notifier from the notification chain that gets called 340 * will remove the notifier from the notification chain that gets called
341 * uppon changes to the pm_qos_class target value. 341 * upon changes to the pm_qos_class target value.
342 */ 342 */
343int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier) 343int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
344{ 344{
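A hedged usage sketch of the requirement API whose kernel-doc is cleaned up above; the driver name, the 50 usec value and the PM_QOS_CPU_DMA_LATENCY class are illustrative, only the two function signatures come from this file:

static char example_name[] = "example_driver";

static int example_start_streaming(void)
{
	/* ask for at most 50 usec of CPU DMA latency while streaming */
	return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
				      example_name, 50);
}

static void example_stop_streaming(void)
{
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, example_name);
}
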
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 0ffaeb075e35..e36d5798cbff 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -458,9 +458,6 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
458 spin_unlock_irqrestore(&idr_lock, flags); 458 spin_unlock_irqrestore(&idr_lock, flags);
459 } 459 }
460 sigqueue_free(tmr->sigq); 460 sigqueue_free(tmr->sigq);
461 if (unlikely(tmr->it_process) &&
462 tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
463 put_task_struct(tmr->it_process);
464 kmem_cache_free(posix_timers_cache, tmr); 461 kmem_cache_free(posix_timers_cache, tmr);
465} 462}
466 463
@@ -865,11 +862,10 @@ retry_delete:
865 * This keeps any tasks waiting on the spin lock from thinking 862 * This keeps any tasks waiting on the spin lock from thinking
866 * they got something (see the lock code above). 863 * they got something (see the lock code above).
867 */ 864 */
868 if (timer->it_process) { 865 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
869 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) 866 put_task_struct(timer->it_process);
870 put_task_struct(timer->it_process); 867 timer->it_process = NULL;
871 timer->it_process = NULL; 868
872 }
873 unlock_timer(timer, flags); 869 unlock_timer(timer, flags);
874 release_posix_timer(timer, IT_ID_SET); 870 release_posix_timer(timer, IT_ID_SET);
875 return 0; 871 return 0;
@@ -894,11 +890,10 @@ retry_delete:
894 * This keeps any tasks waiting on the spin lock from thinking 890 * This keeps any tasks waiting on the spin lock from thinking
895 * they got something (see the lock code above). 891 * they got something (see the lock code above).
896 */ 892 */
897 if (timer->it_process) { 893 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
898 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) 894 put_task_struct(timer->it_process);
899 put_task_struct(timer->it_process); 895 timer->it_process = NULL;
900 timer->it_process = NULL; 896
901 }
902 unlock_timer(timer, flags); 897 unlock_timer(timer, flags);
903 release_posix_timer(timer, IT_ID_SET); 898 release_posix_timer(timer, IT_ID_SET);
904} 899}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index b45da40e8d25..dcd165f92a88 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -82,7 +82,7 @@ config PM_SLEEP_SMP
82 82
83config PM_SLEEP 83config PM_SLEEP
84 bool 84 bool
85 depends on SUSPEND || HIBERNATION 85 depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
86 default y 86 default y
87 87
88config SUSPEND 88config SUSPEND
@@ -94,6 +94,17 @@ config SUSPEND
94 powered and thus its contents are preserved, such as the 94 powered and thus its contents are preserved, such as the
95 suspend-to-RAM state (e.g. the ACPI S3 state). 95 suspend-to-RAM state (e.g. the ACPI S3 state).
96 96
97config PM_TEST_SUSPEND
98 bool "Test suspend/resume and wakealarm during bootup"
99 depends on SUSPEND && PM_DEBUG && RTC_LIB=y
100 ---help---
101 This option will let you suspend your machine during bootup, and
102 make it wake up a few seconds later using an RTC wakeup alarm.
103 Enable this with a kernel parameter like "test_suspend=mem".
104
105 You probably want to have your system's RTC driver statically
106 linked, ensuring that it's available when this test runs.
107
97config SUSPEND_FREEZER 108config SUSPEND_FREEZER
98 bool "Enable freezer for suspend to RAM/standby" \ 109 bool "Enable freezer for suspend to RAM/standby" \
99 if ARCH_WANTS_FREEZER_CONTROL || BROKEN 110 if ARCH_WANTS_FREEZER_CONTROL || BROKEN
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3398f4651aa1..0b7476f5d2a6 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -132,6 +132,61 @@ static inline int suspend_test(int level) { return 0; }
132 132
133#ifdef CONFIG_SUSPEND 133#ifdef CONFIG_SUSPEND
134 134
135#ifdef CONFIG_PM_TEST_SUSPEND
136
137/*
138 * We test the system suspend code by setting an RTC wakealarm a short
139 * time in the future, then suspending. Suspending the devices won't
140 * normally take long ... some systems only need a few milliseconds.
141 *
142 * The time it takes is system-specific though, so when we test this
143 * during system bootup we allow a LOT of time.
144 */
145#define TEST_SUSPEND_SECONDS 5
146
147static unsigned long suspend_test_start_time;
148
149static void suspend_test_start(void)
150{
151 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
152 * What we want is a hardware counter that will work correctly even
153 * during the irqs-are-off stages of the suspend/resume cycle...
154 */
155 suspend_test_start_time = jiffies;
156}
157
158static void suspend_test_finish(const char *label)
159{
160 long nj = jiffies - suspend_test_start_time;
161 unsigned msec;
162
163 msec = jiffies_to_msecs(abs(nj));
164 pr_info("PM: %s took %d.%03d seconds\n", label,
165 msec / 1000, msec % 1000);
166
167 /* Warning on suspend means the RTC alarm period needs to be
168 * larger -- the system was sooo slooowwww to suspend that the
169 * alarm (should have) fired before the system went to sleep!
170 *
171 * Warning on either suspend or resume also means the system
172 * has some performance issues. The stack dump of a WARN_ON
173 * is more likely to get the right attention than a printk...
174 */
175 WARN_ON(msec > (TEST_SUSPEND_SECONDS * 1000));
176}
177
178#else
179
180static void suspend_test_start(void)
181{
182}
183
184static void suspend_test_finish(const char *label)
185{
186}
187
188#endif
189
135/* This is just an arbitrary number */ 190/* This is just an arbitrary number */
136#define FREE_PAGE_NUMBER (100) 191#define FREE_PAGE_NUMBER (100)
137 192
@@ -266,12 +321,13 @@ int suspend_devices_and_enter(suspend_state_t state)
266 goto Close; 321 goto Close;
267 } 322 }
268 suspend_console(); 323 suspend_console();
324 suspend_test_start();
269 error = device_suspend(PMSG_SUSPEND); 325 error = device_suspend(PMSG_SUSPEND);
270 if (error) { 326 if (error) {
271 printk(KERN_ERR "PM: Some devices failed to suspend\n"); 327 printk(KERN_ERR "PM: Some devices failed to suspend\n");
272 goto Recover_platform; 328 goto Recover_platform;
273 } 329 }
274 330 suspend_test_finish("suspend devices");
275 if (suspend_test(TEST_DEVICES)) 331 if (suspend_test(TEST_DEVICES))
276 goto Recover_platform; 332 goto Recover_platform;
277 333
@@ -293,7 +349,9 @@ int suspend_devices_and_enter(suspend_state_t state)
293 if (suspend_ops->finish) 349 if (suspend_ops->finish)
294 suspend_ops->finish(); 350 suspend_ops->finish();
295 Resume_devices: 351 Resume_devices:
352 suspend_test_start();
296 device_resume(PMSG_RESUME); 353 device_resume(PMSG_RESUME);
354 suspend_test_finish("resume devices");
297 resume_console(); 355 resume_console();
298 Close: 356 Close:
299 if (suspend_ops->end) 357 if (suspend_ops->end)
@@ -521,3 +579,144 @@ static int __init pm_init(void)
521} 579}
522 580
523core_initcall(pm_init); 581core_initcall(pm_init);
582
583
584#ifdef CONFIG_PM_TEST_SUSPEND
585
586#include <linux/rtc.h>
587
588/*
589 * To test system suspend, we need a hands-off mechanism to resume the
590 * system. RTCs wake alarms are a common self-contained mechanism.
591 */
592
593static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
594{
595 static char err_readtime[] __initdata =
596 KERN_ERR "PM: can't read %s time, err %d\n";
597 static char err_wakealarm [] __initdata =
598 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
599 static char err_suspend[] __initdata =
600 KERN_ERR "PM: suspend test failed, error %d\n";
601 static char info_test[] __initdata =
602 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
603
604 unsigned long now;
605 struct rtc_wkalrm alm;
606 int status;
607
608 /* this may fail if the RTC hasn't been initialized */
609 status = rtc_read_time(rtc, &alm.time);
610 if (status < 0) {
611 printk(err_readtime, rtc->dev.bus_id, status);
612 return;
613 }
614 rtc_tm_to_time(&alm.time, &now);
615
616 memset(&alm, 0, sizeof alm);
617 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
618 alm.enabled = true;
619
620 status = rtc_set_alarm(rtc, &alm);
621 if (status < 0) {
622 printk(err_wakealarm, rtc->dev.bus_id, status);
623 return;
624 }
625
626 if (state == PM_SUSPEND_MEM) {
627 printk(info_test, pm_states[state]);
628 status = pm_suspend(state);
629 if (status == -ENODEV)
630 state = PM_SUSPEND_STANDBY;
631 }
632 if (state == PM_SUSPEND_STANDBY) {
633 printk(info_test, pm_states[state]);
634 status = pm_suspend(state);
635 }
636 if (status < 0)
637 printk(err_suspend, status);
638
639 /* Some platforms can't detect that the alarm triggered the
640 * wakeup, or (accordingly) disable it afterwards.
641 * It's supposed to give oneshot behavior; cope.
642 */
643 alm.enabled = false;
644 rtc_set_alarm(rtc, &alm);
645}
646
647static int __init has_wakealarm(struct device *dev, void *name_ptr)
648{
649 struct rtc_device *candidate = to_rtc_device(dev);
650
651 if (!candidate->ops->set_alarm)
652 return 0;
653 if (!device_may_wakeup(candidate->dev.parent))
654 return 0;
655
656 *(char **)name_ptr = dev->bus_id;
657 return 1;
658}
659
660/*
661 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
662 * at startup time. They're normally disabled, for faster boot and because
663 * we can't know which states really work on this particular system.
664 */
665static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
666
667static char warn_bad_state[] __initdata =
668 KERN_WARNING "PM: can't test '%s' suspend state\n";
669
670static int __init setup_test_suspend(char *value)
671{
672 unsigned i;
673
674 /* "=mem" ==> "mem" */
675 value++;
676 for (i = 0; i < PM_SUSPEND_MAX; i++) {
677 if (!pm_states[i])
678 continue;
679 if (strcmp(pm_states[i], value) != 0)
680 continue;
681 test_state = (__force suspend_state_t) i;
682 return 0;
683 }
684 printk(warn_bad_state, value);
685 return 0;
686}
687__setup("test_suspend", setup_test_suspend);
688
689static int __init test_suspend(void)
690{
691 static char warn_no_rtc[] __initdata =
692 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
693
694 char *pony = NULL;
695 struct rtc_device *rtc = NULL;
696
697 /* PM is initialized by now; is that state testable? */
698 if (test_state == PM_SUSPEND_ON)
699 goto done;
700 if (!valid_state(test_state)) {
701 printk(warn_bad_state, pm_states[test_state]);
702 goto done;
703 }
704
705 /* RTCs have initialized by now too ... can we use one? */
706 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
707 if (pony)
708 rtc = rtc_class_open(pony);
709 if (!rtc) {
710 printk(warn_no_rtc);
711 goto done;
712 }
713
714 /* go for it */
715 test_wakealarm(rtc, test_state);
716 rtc_class_close(rtc);
717done:
718 return 0;
719}
720late_initcall(test_suspend);
721
722#endif /* CONFIG_PM_TEST_SUSPEND */
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 700f44ec8406..acc0c101dbd5 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -53,8 +53,6 @@ extern int hibernation_platform_enter(void);
53 53
54extern int pfn_is_nosave(unsigned long); 54extern int pfn_is_nosave(unsigned long);
55 55
56extern struct mutex pm_mutex;
57
58#define power_attr(_name) \ 56#define power_attr(_name) \
59static struct kobj_attribute _name##_attr = { \ 57static struct kobj_attribute _name##_attr = { \
60 .attr = { \ 58 .attr = { \
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 678ec736076b..72016f051477 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -10,6 +10,7 @@
10#include <linux/pm.h> 10#include <linux/pm.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/reboot.h> 12#include <linux/reboot.h>
13#include <linux/cpumask.h>
13 14
14/* 15/*
15 * When the user hits Sys-Rq o to power down the machine this is the 16 * When the user hits Sys-Rq o to power down the machine this is the
@@ -25,7 +26,8 @@ static DECLARE_WORK(poweroff_work, do_poweroff);
25 26
26static void handle_poweroff(int key, struct tty_struct *tty) 27static void handle_poweroff(int key, struct tty_struct *tty)
27{ 28{
28 schedule_work(&poweroff_work); 29 /* run sysrq poweroff on boot cpu */
30 schedule_work_on(first_cpu(cpu_online_map), &poweroff_work);
29} 31}
30 32
31static struct sysrq_key_op sysrq_poweroff_op = { 33static struct sysrq_key_op sysrq_poweroff_op = {
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5fb87652f214..278946aecaf0 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -149,7 +149,7 @@ static int try_to_freeze_tasks(bool sig_only)
149 unsigned long end_time; 149 unsigned long end_time;
150 unsigned int todo; 150 unsigned int todo;
151 struct timeval start, end; 151 struct timeval start, end;
152 s64 elapsed_csecs64; 152 u64 elapsed_csecs64;
153 unsigned int elapsed_csecs; 153 unsigned int elapsed_csecs;
154 154
155 do_gettimeofday(&start); 155 do_gettimeofday(&start);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 5f91a07c4eac..5d2ab836e998 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -205,8 +205,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
205 * objects. The main list's elements are of type struct zone_bitmap 205 * objects. The main list's elements are of type struct zone_bitmap
206 * and each of them corresonds to one zone. For each zone bitmap 206 * and each of them corresonds to one zone. For each zone bitmap
207 * object there is a list of objects of type struct bm_block that 207 * object there is a list of objects of type struct bm_block that
208 * represent each blocks of bit chunks in which information is 208 * represent each blocks of bitmap in which information is stored.
209 * stored.
210 * 209 *
211 * struct memory_bitmap contains a pointer to the main list of zone 210 * struct memory_bitmap contains a pointer to the main list of zone
212 * bitmap objects, a struct bm_position used for browsing the bitmap, 211 * bitmap objects, a struct bm_position used for browsing the bitmap,
@@ -224,26 +223,27 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
224 * pfns that correspond to the start and end of the represented zone. 223 * pfns that correspond to the start and end of the represented zone.
225 * 224 *
226 * struct bm_block contains a pointer to the memory page in which 225 * struct bm_block contains a pointer to the memory page in which
227 * information is stored (in the form of a block of bit chunks 226 * information is stored (in the form of a block of bitmap)
228 * of type unsigned long each). It also contains the pfns that 227 * It also contains the pfns that correspond to the start and end of
229 * correspond to the start and end of the represented memory area and 228 * the represented memory area.
230 * the number of bit chunks in the block.
231 */ 229 */
232 230
233#define BM_END_OF_MAP (~0UL) 231#define BM_END_OF_MAP (~0UL)
234 232
235#define BM_CHUNKS_PER_BLOCK (PAGE_SIZE / sizeof(long))
236#define BM_BITS_PER_CHUNK (sizeof(long) << 3)
237#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) 233#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3)
238 234
239struct bm_block { 235struct bm_block {
240 struct bm_block *next; /* next element of the list */ 236 struct bm_block *next; /* next element of the list */
241 unsigned long start_pfn; /* pfn represented by the first bit */ 237 unsigned long start_pfn; /* pfn represented by the first bit */
242 unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ 238 unsigned long end_pfn; /* pfn represented by the last bit plus 1 */
243 unsigned int size; /* number of bit chunks */ 239 unsigned long *data; /* bitmap representing pages */
244 unsigned long *data; /* chunks of bits representing pages */
245}; 240};
246 241
242static inline unsigned long bm_block_bits(struct bm_block *bb)
243{
244 return bb->end_pfn - bb->start_pfn;
245}
246
247struct zone_bitmap { 247struct zone_bitmap {
248 struct zone_bitmap *next; /* next element of the list */ 248 struct zone_bitmap *next; /* next element of the list */
249 unsigned long start_pfn; /* minimal pfn in this zone */ 249 unsigned long start_pfn; /* minimal pfn in this zone */
@@ -257,7 +257,6 @@ struct zone_bitmap {
257struct bm_position { 257struct bm_position {
258 struct zone_bitmap *zone_bm; 258 struct zone_bitmap *zone_bm;
259 struct bm_block *block; 259 struct bm_block *block;
260 int chunk;
261 int bit; 260 int bit;
262}; 261};
263 262
@@ -272,12 +271,6 @@ struct memory_bitmap {
272 271
273/* Functions that operate on memory bitmaps */ 272/* Functions that operate on memory bitmaps */
274 273
275static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
276{
277 bm->cur.chunk = 0;
278 bm->cur.bit = -1;
279}
280
281static void memory_bm_position_reset(struct memory_bitmap *bm) 274static void memory_bm_position_reset(struct memory_bitmap *bm)
282{ 275{
283 struct zone_bitmap *zone_bm; 276 struct zone_bitmap *zone_bm;
@@ -285,7 +278,7 @@ static void memory_bm_position_reset(struct memory_bitmap *bm)
285 zone_bm = bm->zone_bm_list; 278 zone_bm = bm->zone_bm_list;
286 bm->cur.zone_bm = zone_bm; 279 bm->cur.zone_bm = zone_bm;
287 bm->cur.block = zone_bm->bm_blocks; 280 bm->cur.block = zone_bm->bm_blocks;
288 memory_bm_reset_chunk(bm); 281 bm->cur.bit = 0;
289} 282}
290 283
291static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); 284static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
@@ -394,12 +387,10 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
394 bb->start_pfn = pfn; 387 bb->start_pfn = pfn;
395 if (nr >= BM_BITS_PER_BLOCK) { 388 if (nr >= BM_BITS_PER_BLOCK) {
396 pfn += BM_BITS_PER_BLOCK; 389 pfn += BM_BITS_PER_BLOCK;
397 bb->size = BM_CHUNKS_PER_BLOCK;
398 nr -= BM_BITS_PER_BLOCK; 390 nr -= BM_BITS_PER_BLOCK;
399 } else { 391 } else {
400 /* This is executed only once in the loop */ 392 /* This is executed only once in the loop */
401 pfn += nr; 393 pfn += nr;
402 bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
403 } 394 }
404 bb->end_pfn = pfn; 395 bb->end_pfn = pfn;
405 bb = bb->next; 396 bb = bb->next;
@@ -478,8 +469,8 @@ static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
478 } 469 }
479 zone_bm->cur_block = bb; 470 zone_bm->cur_block = bb;
480 pfn -= bb->start_pfn; 471 pfn -= bb->start_pfn;
481 *bit_nr = pfn % BM_BITS_PER_CHUNK; 472 *bit_nr = pfn;
482 *addr = bb->data + pfn / BM_BITS_PER_CHUNK; 473 *addr = bb->data;
483 return 0; 474 return 0;
484} 475}
485 476
@@ -528,36 +519,6 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
528 return test_bit(bit, addr); 519 return test_bit(bit, addr);
529} 520}
530 521
531/* Two auxiliary functions for memory_bm_next_pfn */
532
533/* Find the first set bit in the given chunk, if there is one */
534
535static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
536{
537 bit++;
538 while (bit < BM_BITS_PER_CHUNK) {
539 if (test_bit(bit, chunk_p))
540 return bit;
541
542 bit++;
543 }
544 return -1;
545}
546
547/* Find a chunk containing some bits set in given block of bits */
548
549static inline int next_chunk_in_block(int n, struct bm_block *bb)
550{
551 n++;
552 while (n < bb->size) {
553 if (bb->data[n])
554 return n;
555
556 n++;
557 }
558 return -1;
559}
560
561/** 522/**
562 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit 523 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
563 * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is 524 * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is
@@ -571,40 +532,33 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
571{ 532{
572 struct zone_bitmap *zone_bm; 533 struct zone_bitmap *zone_bm;
573 struct bm_block *bb; 534 struct bm_block *bb;
574 int chunk;
575 int bit; 535 int bit;
576 536
577 do { 537 do {
578 bb = bm->cur.block; 538 bb = bm->cur.block;
579 do { 539 do {
580 chunk = bm->cur.chunk;
581 bit = bm->cur.bit; 540 bit = bm->cur.bit;
582 do { 541 bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
583 bit = next_bit_in_chunk(bit, bb->data + chunk); 542 if (bit < bm_block_bits(bb))
584 if (bit >= 0) 543 goto Return_pfn;
585 goto Return_pfn; 544
586
587 chunk = next_chunk_in_block(chunk, bb);
588 bit = -1;
589 } while (chunk >= 0);
590 bb = bb->next; 545 bb = bb->next;
591 bm->cur.block = bb; 546 bm->cur.block = bb;
592 memory_bm_reset_chunk(bm); 547 bm->cur.bit = 0;
593 } while (bb); 548 } while (bb);
594 zone_bm = bm->cur.zone_bm->next; 549 zone_bm = bm->cur.zone_bm->next;
595 if (zone_bm) { 550 if (zone_bm) {
596 bm->cur.zone_bm = zone_bm; 551 bm->cur.zone_bm = zone_bm;
597 bm->cur.block = zone_bm->bm_blocks; 552 bm->cur.block = zone_bm->bm_blocks;
598 memory_bm_reset_chunk(bm); 553 bm->cur.bit = 0;
599 } 554 }
600 } while (zone_bm); 555 } while (zone_bm);
601 memory_bm_position_reset(bm); 556 memory_bm_position_reset(bm);
602 return BM_END_OF_MAP; 557 return BM_END_OF_MAP;
603 558
604 Return_pfn: 559 Return_pfn:
605 bm->cur.chunk = chunk; 560 bm->cur.bit = bit + 1;
606 bm->cur.bit = bit; 561 return bb->start_pfn + bit;
607 return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
608} 562}
609 563
610/** 564/**
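The rewrite above drops the per-chunk bookkeeping: each bm_block is now scanned as one plain bitmap with find_next_bit(). A hedged sketch of the core of the simplified walk, using only names visible in this diff (the helper name itself is illustrative):

/* Sketch only: next set bit in one block, translated to a pfn;
 * BM_END_OF_MAP means nothing is left in this block. */
static unsigned long example_next_pfn_in_block(struct bm_block *bb,
					       unsigned long start_bit)
{
	unsigned long bit;

	bit = find_next_bit(bb->data, bm_block_bits(bb), start_bit);
	if (bit >= bm_block_bits(bb))
		return BM_END_OF_MAP;

	return bb->start_pfn + bit;	/* pfn = block base + bit offset */
}
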
diff --git a/kernel/printk.c b/kernel/printk.c
index 07ad9e7f7a66..b51b1567bb55 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -933,7 +933,7 @@ void suspend_console(void)
933{ 933{
934 if (!console_suspend_enabled) 934 if (!console_suspend_enabled)
935 return; 935 return;
936 printk("Suspending console(s)\n"); 936 printk("Suspending console(s) (use no_console_suspend to debug)\n");
937 acquire_console_sem(); 937 acquire_console_sem();
938 console_suspended = 1; 938 console_suspended = 1;
939} 939}
@@ -1308,29 +1308,18 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1308} 1308}
1309 1309
1310#if defined CONFIG_PRINTK 1310#if defined CONFIG_PRINTK
1311
1311/* 1312/*
1312 * printk rate limiting, lifted from the networking subsystem. 1313 * printk rate limiting, lifted from the networking subsystem.
1313 * 1314 *
1314 * This enforces a rate limit: not more than one kernel message 1315 * This enforces a rate limit: not more than 10 kernel messages
1315 * every printk_ratelimit_jiffies to make a denial-of-service 1316 * every 5s to make a denial-of-service attack impossible.
1316 * attack impossible.
1317 */ 1317 */
1318int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst) 1318DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
1319{
1320 return __ratelimit(ratelimit_jiffies, ratelimit_burst);
1321}
1322EXPORT_SYMBOL(__printk_ratelimit);
1323
1324/* minimum time in jiffies between messages */
1325int printk_ratelimit_jiffies = 5 * HZ;
1326
1327/* number of messages we send before ratelimiting */
1328int printk_ratelimit_burst = 10;
1329 1319
1330int printk_ratelimit(void) 1320int printk_ratelimit(void)
1331{ 1321{
1332 return __printk_ratelimit(printk_ratelimit_jiffies, 1322 return __ratelimit(&printk_ratelimit_state);
1333 printk_ratelimit_burst);
1334} 1323}
1335EXPORT_SYMBOL(printk_ratelimit); 1324EXPORT_SYMBOL(printk_ratelimit);
1336 1325
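Callers are unchanged by the switch to the shared ratelimit state; a hedged usage sketch (the function name and message are illustrative):

static void example_report_error(int err)
{
	/* at most 10 such messages reach the log every 5 seconds */
	if (printk_ratelimit())
		printk(KERN_WARNING "example: operation failed, err %d\n",
		       err);
}
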
diff --git a/kernel/profile.c b/kernel/profile.c
index 58926411eb2a..cd26bed4cc26 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -112,8 +112,6 @@ void __init profile_init(void)
112 112
113/* Profile event notifications */ 113/* Profile event notifications */
114 114
115#ifdef CONFIG_PROFILING
116
117static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); 115static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
118static ATOMIC_NOTIFIER_HEAD(task_free_notifier); 116static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
119static BLOCKING_NOTIFIER_HEAD(munmap_notifier); 117static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
@@ -203,8 +201,6 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
203} 201}
204EXPORT_SYMBOL_GPL(unregister_timer_hook); 202EXPORT_SYMBOL_GPL(unregister_timer_hook);
205 203
206#endif /* CONFIG_PROFILING */
207
208 204
209#ifdef CONFIG_SMP 205#ifdef CONFIG_SMP
210/* 206/*
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8392a9da6450..082b3fcb32a0 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
107 read_unlock(&tasklist_lock); 107 read_unlock(&tasklist_lock);
108 108
109 if (!ret && !kill) 109 if (!ret && !kill)
110 wait_task_inactive(child); 110 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
111 111
112 /* All systems go.. */ 112 /* All systems go.. */
113 return ret; 113 return ret;
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 16eeeaa9d618..aad93cdc9f68 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -91,8 +91,8 @@ static void force_quiescent_state(struct rcu_data *rdp,
91 * rdp->cpu is the current cpu. 91 * rdp->cpu is the current cpu.
92 * 92 *
93 * cpu_online_map is updated by the _cpu_down() 93 * cpu_online_map is updated by the _cpu_down()
94 * using stop_machine_run(). Since we're in irqs disabled 94 * using __stop_machine(). Since we're in irqs disabled
95 * section, stop_machine_run() is not executing, hence 95 * section, __stop_machine() is not executing, hence
96 * the cpu_online_map is stable. 96 * the cpu_online_map is stable.
97 * 97 *
98 * However, a cpu might have been offlined _just_ before 98 * However, a cpu might have been offlined _just_ before
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
106 */ 106 */
107 cpus_and(cpumask, rcp->cpumask, cpu_online_map); 107 cpus_and(cpumask, rcp->cpumask, cpu_online_map);
108 cpu_clear(rdp->cpu, cpumask); 108 cpu_clear(rdp->cpu, cpumask);
109 for_each_cpu_mask(cpu, cpumask) 109 for_each_cpu_mask_nr(cpu, cpumask)
110 smp_send_reschedule(cpu); 110 smp_send_reschedule(cpu);
111 } 111 }
112} 112}
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 6f62b77d93c4..27827931ca0d 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void)
756 756
757 /* Now ask each CPU for acknowledgement of the flip. */ 757 /* Now ask each CPU for acknowledgement of the flip. */
758 758
759 for_each_cpu_mask(cpu, rcu_cpu_online_map) { 759 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
760 per_cpu(rcu_flip_flag, cpu) = rcu_flipped; 760 per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
761 dyntick_save_progress_counter(cpu); 761 dyntick_save_progress_counter(cpu);
762 } 762 }
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void)
774 int cpu; 774 int cpu;
775 775
776 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); 776 RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
777 for_each_cpu_mask(cpu, rcu_cpu_online_map) 777 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
778 if (rcu_try_flip_waitack_needed(cpu) && 778 if (rcu_try_flip_waitack_needed(cpu) &&
779 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { 779 per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
780 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); 780 RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void)
806 /* Check to see if the sum of the "last" counters is zero. */ 806 /* Check to see if the sum of the "last" counters is zero. */
807 807
808 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); 808 RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
809 for_each_cpu_mask(cpu, rcu_cpu_online_map) 809 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
810 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; 810 sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
811 if (sum != 0) { 811 if (sum != 0) {
812 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); 812 RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void)
821 smp_mb(); /* ^^^^^^^^^^^^ */ 821 smp_mb(); /* ^^^^^^^^^^^^ */
822 822
823 /* Call for a memory barrier from each CPU. */ 823 /* Call for a memory barrier from each CPU. */
824 for_each_cpu_mask(cpu, rcu_cpu_online_map) { 824 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
825 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; 825 per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
826 dyntick_save_progress_counter(cpu); 826 dyntick_save_progress_counter(cpu);
827 } 827 }
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void)
841 int cpu; 841 int cpu;
842 842
843 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); 843 RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
844 for_each_cpu_mask(cpu, rcu_cpu_online_map) 844 for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
845 if (rcu_try_flip_waitmb_needed(cpu) && 845 if (rcu_try_flip_waitmb_needed(cpu) &&
846 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { 846 per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
847 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); 847 RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
diff --git a/kernel/relay.c b/kernel/relay.c
index 7de644cdec43..8d13a7855c08 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -407,6 +407,35 @@ void relay_reset(struct rchan *chan)
407} 407}
408EXPORT_SYMBOL_GPL(relay_reset); 408EXPORT_SYMBOL_GPL(relay_reset);
409 409
410static inline void relay_set_buf_dentry(struct rchan_buf *buf,
411 struct dentry *dentry)
412{
413 buf->dentry = dentry;
414 buf->dentry->d_inode->i_size = buf->early_bytes;
415}
416
417static struct dentry *relay_create_buf_file(struct rchan *chan,
418 struct rchan_buf *buf,
419 unsigned int cpu)
420{
421 struct dentry *dentry;
422 char *tmpname;
423
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 return NULL;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 /* Create file in fs */
430 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
431 S_IRUSR, buf,
432 &chan->is_global);
433
434 kfree(tmpname);
435
436 return dentry;
437}
438
410/* 439/*
411 * relay_open_buf - create a new relay channel buffer 440 * relay_open_buf - create a new relay channel buffer
412 * 441 *
@@ -416,45 +445,34 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
416{ 445{
417 struct rchan_buf *buf = NULL; 446 struct rchan_buf *buf = NULL;
418 struct dentry *dentry; 447 struct dentry *dentry;
419 char *tmpname;
420 448
421 if (chan->is_global) 449 if (chan->is_global)
422 return chan->buf[0]; 450 return chan->buf[0];
423 451
424 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
425 if (!tmpname)
426 goto end;
427 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
428
429 buf = relay_create_buf(chan); 452 buf = relay_create_buf(chan);
430 if (!buf) 453 if (!buf)
431 goto free_name; 454 return NULL;
455
456 if (chan->has_base_filename) {
457 dentry = relay_create_buf_file(chan, buf, cpu);
458 if (!dentry)
459 goto free_buf;
460 relay_set_buf_dentry(buf, dentry);
461 }
432 462
433 buf->cpu = cpu; 463 buf->cpu = cpu;
434 __relay_reset(buf, 1); 464 __relay_reset(buf, 1);
435 465
436 /* Create file in fs */
437 dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
438 buf, &chan->is_global);
439 if (!dentry)
440 goto free_buf;
441
442 buf->dentry = dentry;
443
444 if(chan->is_global) { 466 if(chan->is_global) {
445 chan->buf[0] = buf; 467 chan->buf[0] = buf;
446 buf->cpu = 0; 468 buf->cpu = 0;
447 } 469 }
448 470
449 goto free_name; 471 return buf;
450 472
451free_buf: 473free_buf:
452 relay_destroy_buf(buf); 474 relay_destroy_buf(buf);
453 buf = NULL; 475 return NULL;
454free_name:
455 kfree(tmpname);
456end:
457 return buf;
458} 476}
459 477
460/** 478/**
@@ -537,8 +555,8 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
537 555
538/** 556/**
539 * relay_open - create a new relay channel 557 * relay_open - create a new relay channel
540 * @base_filename: base name of files to create 558 * @base_filename: base name of files to create, %NULL for buffering only
541 * @parent: dentry of parent directory, %NULL for root directory 559 * @parent: dentry of parent directory, %NULL for root directory or buffer
542 * @subbuf_size: size of sub-buffers 560 * @subbuf_size: size of sub-buffers
543 * @n_subbufs: number of sub-buffers 561 * @n_subbufs: number of sub-buffers
544 * @cb: client callback functions 562 * @cb: client callback functions
@@ -560,8 +578,6 @@ struct rchan *relay_open(const char *base_filename,
560{ 578{
561 unsigned int i; 579 unsigned int i;
562 struct rchan *chan; 580 struct rchan *chan;
563 if (!base_filename)
564 return NULL;
565 581
566 if (!(subbuf_size && n_subbufs)) 582 if (!(subbuf_size && n_subbufs))
567 return NULL; 583 return NULL;
@@ -576,7 +592,10 @@ struct rchan *relay_open(const char *base_filename,
576 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); 592 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
577 chan->parent = parent; 593 chan->parent = parent;
578 chan->private_data = private_data; 594 chan->private_data = private_data;
579 strlcpy(chan->base_filename, base_filename, NAME_MAX); 595 if (base_filename) {
596 chan->has_base_filename = 1;
597 strlcpy(chan->base_filename, base_filename, NAME_MAX);
598 }
580 setup_callbacks(chan, cb); 599 setup_callbacks(chan, cb);
581 kref_init(&chan->kref); 600 kref_init(&chan->kref);
582 601
@@ -604,6 +623,94 @@ free_bufs:
604} 623}
605EXPORT_SYMBOL_GPL(relay_open); 624EXPORT_SYMBOL_GPL(relay_open);
606 625
626struct rchan_percpu_buf_dispatcher {
627 struct rchan_buf *buf;
628 struct dentry *dentry;
629};
630
631/* Called in atomic context. */
632static void __relay_set_buf_dentry(void *info)
633{
634 struct rchan_percpu_buf_dispatcher *p = info;
635
636 relay_set_buf_dentry(p->buf, p->dentry);
637}
638
639/**
640 * relay_late_setup_files - triggers file creation
641 * @chan: channel to operate on
642 * @base_filename: base name of files to create
643 * @parent: dentry of parent directory, %NULL for root directory
644 *
645 * Returns 0 if successful, non-zero otherwise.
646 *
647 * Use to setup files for a previously buffer-only channel.
648 * Useful to do early tracing in kernel, before VFS is up, for example.
649 */
650int relay_late_setup_files(struct rchan *chan,
651 const char *base_filename,
652 struct dentry *parent)
653{
654 int err = 0;
655 unsigned int i, curr_cpu;
656 unsigned long flags;
657 struct dentry *dentry;
658 struct rchan_percpu_buf_dispatcher disp;
659
660 if (!chan || !base_filename)
661 return -EINVAL;
662
663 strlcpy(chan->base_filename, base_filename, NAME_MAX);
664
665 mutex_lock(&relay_channels_mutex);
666 /* Is chan already set up? */
667 if (unlikely(chan->has_base_filename))
668 return -EEXIST;
669 chan->has_base_filename = 1;
670 chan->parent = parent;
671 curr_cpu = get_cpu();
672 /*
673 * The CPU hotplug notifier ran before us and created buffers with
674 * no files associated. So it's safe to call relay_setup_buf_file()
675 * on all currently online CPUs.
676 */
677 for_each_online_cpu(i) {
678 if (unlikely(!chan->buf[i])) {
679 printk(KERN_ERR "relay_late_setup_files: CPU %u "
680 "has no buffer, it must have!\n", i);
681 BUG();
682 err = -EINVAL;
683 break;
684 }
685
686 dentry = relay_create_buf_file(chan, chan->buf[i], i);
687 if (unlikely(!dentry)) {
688 err = -EINVAL;
689 break;
690 }
691
692 if (curr_cpu == i) {
693 local_irq_save(flags);
694 relay_set_buf_dentry(chan->buf[i], dentry);
695 local_irq_restore(flags);
696 } else {
697 disp.buf = chan->buf[i];
698 disp.dentry = dentry;
699 smp_mb();
700 /* relay_channels_mutex must be held, so wait. */
701 err = smp_call_function_single(i,
702 __relay_set_buf_dentry,
703 &disp, 1);
704 }
705 if (unlikely(err))
706 break;
707 }
708 put_cpu();
709 mutex_unlock(&relay_channels_mutex);
710
711 return err;
712}
713
607/** 714/**
608 * relay_switch_subbuf - switch to a new sub-buffer 715 * relay_switch_subbuf - switch to a new sub-buffer
609 * @buf: channel buffer 716 * @buf: channel buffer
@@ -627,8 +734,13 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
627 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; 734 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
628 buf->padding[old_subbuf] = buf->prev_padding; 735 buf->padding[old_subbuf] = buf->prev_padding;
629 buf->subbufs_produced++; 736 buf->subbufs_produced++;
630 buf->dentry->d_inode->i_size += buf->chan->subbuf_size - 737 if (buf->dentry)
631 buf->padding[old_subbuf]; 738 buf->dentry->d_inode->i_size +=
739 buf->chan->subbuf_size -
740 buf->padding[old_subbuf];
741 else
742 buf->early_bytes += buf->chan->subbuf_size -
743 buf->padding[old_subbuf];
632 smp_mb(); 744 smp_mb();
633 if (waitqueue_active(&buf->read_wait)) 745 if (waitqueue_active(&buf->read_wait))
634 /* 746 /*
@@ -832,6 +944,10 @@ static void relay_file_read_consume(struct rchan_buf *buf,
832 size_t n_subbufs = buf->chan->n_subbufs; 944 size_t n_subbufs = buf->chan->n_subbufs;
833 size_t read_subbuf; 945 size_t read_subbuf;
834 946
947 if (buf->subbufs_produced == buf->subbufs_consumed &&
948 buf->offset == buf->bytes_consumed)
949 return;
950
835 if (buf->bytes_consumed + bytes_consumed > subbuf_size) { 951 if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
836 relay_subbufs_consumed(buf->chan, buf->cpu, 1); 952 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
837 buf->bytes_consumed = 0; 953 buf->bytes_consumed = 0;
@@ -863,6 +979,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
863 979
864 relay_file_read_consume(buf, read_pos, 0); 980 relay_file_read_consume(buf, read_pos, 0);
865 981
982 consumed = buf->subbufs_consumed;
983
866 if (unlikely(buf->offset > subbuf_size)) { 984 if (unlikely(buf->offset > subbuf_size)) {
867 if (produced == consumed) 985 if (produced == consumed)
868 return 0; 986 return 0;
@@ -881,8 +999,12 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
881 if (consumed > produced) 999 if (consumed > produced)
882 produced += n_subbufs * subbuf_size; 1000 produced += n_subbufs * subbuf_size;
883 1001
884 if (consumed == produced) 1002 if (consumed == produced) {
1003 if (buf->offset == subbuf_size &&
1004 buf->subbufs_produced > buf->subbufs_consumed)
1005 return 1;
885 return 0; 1006 return 0;
1007 }
886 1008
887 return 1; 1009 return 1;
888} 1010}
@@ -1237,4 +1359,4 @@ static __init int relay_init(void)
1237 return 0; 1359 return 0;
1238} 1360}
1239 1361
1240module_init(relay_init); 1362early_initcall(relay_init);
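relay_late_setup_files() targets channels opened before the filesystem exists. A hedged sketch of that two-step flow; the channel pointer, callbacks, buffer sizes and the debugfs parent are illustrative, only relay_open() and relay_late_setup_files() come from the code above:

static struct rchan *example_chan;
static struct rchan_callbacks example_cb;	/* create_buf_file() etc. */

static int __init example_early_trace_init(void)
{
	/* NULL base_filename: buffer-only channel, usable before VFS is up */
	example_chan = relay_open(NULL, NULL, 256 * 1024, 4,
				  &example_cb, NULL);
	return example_chan ? 0 : -ENOMEM;
}

static int example_expose_files(struct dentry *parent)
{
	/* later, once debugfs exists, attach the per-cpu files */
	return relay_late_setup_files(example_chan, "example-cpu", parent);
}
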
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index d3c61b4ebef2..f275c8eca772 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/res_counter.h> 14#include <linux/res_counter.h>
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <linux/mm.h>
16 17
17void res_counter_init(struct res_counter *counter) 18void res_counter_init(struct res_counter *counter)
18{ 19{
@@ -102,44 +103,37 @@ u64 res_counter_read_u64(struct res_counter *counter, int member)
102 return *res_counter_member(counter, member); 103 return *res_counter_member(counter, member);
103} 104}
104 105
105ssize_t res_counter_write(struct res_counter *counter, int member, 106int res_counter_memparse_write_strategy(const char *buf,
106 const char __user *userbuf, size_t nbytes, loff_t *pos, 107 unsigned long long *res)
107 int (*write_strategy)(char *st_buf, unsigned long long *val))
108{ 108{
109 int ret; 109 char *end;
110 char *buf, *end; 110 /* FIXME - make memparse() take const char* args */
111 unsigned long flags; 111 *res = memparse((char *)buf, &end);
112 unsigned long long tmp, *val; 112 if (*end != '\0')
113 113 return -EINVAL;
114 buf = kmalloc(nbytes + 1, GFP_KERNEL);
115 ret = -ENOMEM;
116 if (buf == NULL)
117 goto out;
118 114
119 buf[nbytes] = '\0'; 115 *res = PAGE_ALIGN(*res);
120 ret = -EFAULT; 116 return 0;
121 if (copy_from_user(buf, userbuf, nbytes)) 117}
122 goto out_free;
123 118
124 ret = -EINVAL; 119int res_counter_write(struct res_counter *counter, int member,
120 const char *buf, write_strategy_fn write_strategy)
121{
122 char *end;
123 unsigned long flags;
124 unsigned long long tmp, *val;
125 125
126 strstrip(buf);
127 if (write_strategy) { 126 if (write_strategy) {
128 if (write_strategy(buf, &tmp)) { 127 if (write_strategy(buf, &tmp))
129 goto out_free; 128 return -EINVAL;
130 }
131 } else { 129 } else {
132 tmp = simple_strtoull(buf, &end, 10); 130 tmp = simple_strtoull(buf, &end, 10);
133 if (*end != '\0') 131 if (*end != '\0')
134 goto out_free; 132 return -EINVAL;
135 } 133 }
136 spin_lock_irqsave(&counter->lock, flags); 134 spin_lock_irqsave(&counter->lock, flags);
137 val = res_counter_member(counter, member); 135 val = res_counter_member(counter, member);
138 *val = tmp; 136 *val = tmp;
139 spin_unlock_irqrestore(&counter->lock, flags); 137 spin_unlock_irqrestore(&counter->lock, flags);
140 ret = nbytes; 138 return 0;
141out_free:
142 kfree(buf);
143out:
144 return ret;
145} 139}
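A hedged sketch of how a controller might plug the new parser into the string-based write path; the wrapper name is illustrative and RES_LIMIT is assumed from res_counter.h, only res_counter_write() and res_counter_memparse_write_strategy() appear in this hunk:

static int example_set_limit(struct res_counter *cnt, const char *buf)
{
	/* "4M", "512k" or a plain byte count becomes a page-aligned limit */
	return res_counter_write(cnt, RES_LIMIT, buf,
				 res_counter_memparse_write_strategy);
}
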
diff --git a/kernel/resource.c b/kernel/resource.c
index 74af2d7cb5a1..f5b518eabefe 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -490,7 +490,7 @@ resource_size_t resource_alignment(struct resource *res)
490{ 490{
491 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { 491 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
492 case IORESOURCE_SIZEALIGN: 492 case IORESOURCE_SIZEALIGN:
493 return res->end - res->start + 1; 493 return resource_size(res);
494 case IORESOURCE_STARTALIGN: 494 case IORESOURCE_STARTALIGN:
495 return res->start; 495 return res->start;
496 default: 496 default:
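resource_size() is simply the inclusive-range length the old expression computed by hand; a sketch of the helper for context (the real one lives in <linux/ioport.h>):

static inline resource_size_t example_resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* [start, end] is inclusive */
}
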
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 092e4c620af9..a56f629b057a 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -297,8 +297,8 @@ static int test_func(void *data)
297 * 297 *
298 * opcode:data 298 * opcode:data
299 */ 299 */
300static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, 300static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
301 size_t count) 301 const char *buf, size_t count)
302{ 302{
303 struct sched_param schedpar; 303 struct sched_param schedpar;
304 struct test_thread_data *td; 304 struct test_thread_data *td;
@@ -360,7 +360,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
360 * @dev: thread to query 360 * @dev: thread to query
361 * @buf: char buffer to be filled with thread status info 361 * @buf: char buffer to be filled with thread status info
362 */ 362 */
363static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) 363static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
364 char *buf)
364{ 365{
365 struct test_thread_data *td; 366 struct test_thread_data *td;
366 struct task_struct *tsk; 367 struct task_struct *tsk;
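
The sysdev callbacks above pick up the new struct sysdev_attribute * argument. A minimal attribute written against the updated prototypes might look like the following sketch (names are illustrative, not from this patch):

#include <linux/sysdev.h>

static ssize_t example_show(struct sys_device *dev,
			    struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t example_store(struct sys_device *dev,
			     struct sysdev_attribute *attr,
			     const char *buf, size_t count)
{
	return count;	/* accept and ignore the input in this sketch */
}

static SYSDEV_ATTR(example, 0644, example_show, example_store);
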
diff --git a/kernel/sched.c b/kernel/sched.c
index 99e6d850ecab..ace566bdfc68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -571,8 +571,10 @@ struct rq {
571#endif 571#endif
572 572
573#ifdef CONFIG_SCHED_HRTICK 573#ifdef CONFIG_SCHED_HRTICK
574 unsigned long hrtick_flags; 574#ifdef CONFIG_SMP
575 ktime_t hrtick_expire; 575 int hrtick_csd_pending;
576 struct call_single_data hrtick_csd;
577#endif
576 struct hrtimer hrtick_timer; 578 struct hrtimer hrtick_timer;
577#endif 579#endif
578 580
@@ -598,7 +600,6 @@ struct rq {
598 /* BKL stats */ 600 /* BKL stats */
599 unsigned int bkl_count; 601 unsigned int bkl_count;
600#endif 602#endif
601 struct lock_class_key rq_lock_key;
602}; 603};
603 604
604static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 605static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -983,13 +984,6 @@ static struct rq *this_rq_lock(void)
983 return rq; 984 return rq;
984} 985}
985 986
986static void __resched_task(struct task_struct *p, int tif_bit);
987
988static inline void resched_task(struct task_struct *p)
989{
990 __resched_task(p, TIF_NEED_RESCHED);
991}
992
993#ifdef CONFIG_SCHED_HRTICK 987#ifdef CONFIG_SCHED_HRTICK
994/* 988/*
995 * Use HR-timers to deliver accurate preemption points. 989 * Use HR-timers to deliver accurate preemption points.
@@ -1001,25 +995,6 @@ static inline void resched_task(struct task_struct *p)
1001 * When we get rescheduled we reprogram the hrtick_timer outside of the 995 * When we get rescheduled we reprogram the hrtick_timer outside of the
1002 * rq->lock. 996 * rq->lock.
1003 */ 997 */
1004static inline void resched_hrt(struct task_struct *p)
1005{
1006 __resched_task(p, TIF_HRTICK_RESCHED);
1007}
1008
1009static inline void resched_rq(struct rq *rq)
1010{
1011 unsigned long flags;
1012
1013 spin_lock_irqsave(&rq->lock, flags);
1014 resched_task(rq->curr);
1015 spin_unlock_irqrestore(&rq->lock, flags);
1016}
1017
1018enum {
1019 HRTICK_SET, /* re-programm hrtick_timer */
1020 HRTICK_RESET, /* not a new slice */
1021 HRTICK_BLOCK, /* stop hrtick operations */
1022};
1023 998
1024/* 999/*
1025 * Use hrtick when: 1000 * Use hrtick when:
@@ -1030,40 +1005,11 @@ static inline int hrtick_enabled(struct rq *rq)
1030{ 1005{
1031 if (!sched_feat(HRTICK)) 1006 if (!sched_feat(HRTICK))
1032 return 0; 1007 return 0;
1033 if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags))) 1008 if (!cpu_active(cpu_of(rq)))
1034 return 0; 1009 return 0;
1035 return hrtimer_is_hres_active(&rq->hrtick_timer); 1010 return hrtimer_is_hres_active(&rq->hrtick_timer);
1036} 1011}
1037 1012
1038/*
1039 * Called to set the hrtick timer state.
1040 *
1041 * called with rq->lock held and irqs disabled
1042 */
1043static void hrtick_start(struct rq *rq, u64 delay, int reset)
1044{
1045 assert_spin_locked(&rq->lock);
1046
1047 /*
1048 * preempt at: now + delay
1049 */
1050 rq->hrtick_expire =
1051 ktime_add_ns(rq->hrtick_timer.base->get_time(), delay);
1052 /*
1053 * indicate we need to program the timer
1054 */
1055 __set_bit(HRTICK_SET, &rq->hrtick_flags);
1056 if (reset)
1057 __set_bit(HRTICK_RESET, &rq->hrtick_flags);
1058
1059 /*
1060 * New slices are called from the schedule path and don't need a
1061 * forced reschedule.
1062 */
1063 if (reset)
1064 resched_hrt(rq->curr);
1065}
1066
1067static void hrtick_clear(struct rq *rq) 1013static void hrtick_clear(struct rq *rq)
1068{ 1014{
1069 if (hrtimer_active(&rq->hrtick_timer)) 1015 if (hrtimer_active(&rq->hrtick_timer))
@@ -1071,32 +1017,6 @@ static void hrtick_clear(struct rq *rq)
1071} 1017}
1072 1018
1073/* 1019/*
1074 * Update the timer from the possible pending state.
1075 */
1076static void hrtick_set(struct rq *rq)
1077{
1078 ktime_t time;
1079 int set, reset;
1080 unsigned long flags;
1081
1082 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
1083
1084 spin_lock_irqsave(&rq->lock, flags);
1085 set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags);
1086 reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags);
1087 time = rq->hrtick_expire;
1088 clear_thread_flag(TIF_HRTICK_RESCHED);
1089 spin_unlock_irqrestore(&rq->lock, flags);
1090
1091 if (set) {
1092 hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS);
1093 if (reset && !hrtimer_active(&rq->hrtick_timer))
1094 resched_rq(rq);
1095 } else
1096 hrtick_clear(rq);
1097}
1098
1099/*
1100 * High-resolution timer tick. 1020 * High-resolution timer tick.
1101 * Runs from hardirq context with interrupts disabled. 1021 * Runs from hardirq context with interrupts disabled.
1102 */ 1022 */
@@ -1115,27 +1035,37 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
1115} 1035}
1116 1036
1117#ifdef CONFIG_SMP 1037#ifdef CONFIG_SMP
1118static void hotplug_hrtick_disable(int cpu) 1038/*
1039 * called from hardirq (IPI) context
1040 */
1041static void __hrtick_start(void *arg)
1119{ 1042{
1120 struct rq *rq = cpu_rq(cpu); 1043 struct rq *rq = arg;
1121 unsigned long flags;
1122 1044
1123 spin_lock_irqsave(&rq->lock, flags); 1045 spin_lock(&rq->lock);
1124 rq->hrtick_flags = 0; 1046 hrtimer_restart(&rq->hrtick_timer);
1125 __set_bit(HRTICK_BLOCK, &rq->hrtick_flags); 1047 rq->hrtick_csd_pending = 0;
1126 spin_unlock_irqrestore(&rq->lock, flags); 1048 spin_unlock(&rq->lock);
1127
1128 hrtick_clear(rq);
1129} 1049}
1130 1050
1131static void hotplug_hrtick_enable(int cpu) 1051/*
1052 * Called to set the hrtick timer state.
1053 *
1054 * called with rq->lock held and irqs disabled
1055 */
1056static void hrtick_start(struct rq *rq, u64 delay)
1132{ 1057{
1133 struct rq *rq = cpu_rq(cpu); 1058 struct hrtimer *timer = &rq->hrtick_timer;
1134 unsigned long flags; 1059 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
1135 1060
1136 spin_lock_irqsave(&rq->lock, flags); 1061 timer->expires = time;
1137 __clear_bit(HRTICK_BLOCK, &rq->hrtick_flags); 1062
1138 spin_unlock_irqrestore(&rq->lock, flags); 1063 if (rq == this_rq()) {
1064 hrtimer_restart(timer);
1065 } else if (!rq->hrtick_csd_pending) {
1066 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
1067 rq->hrtick_csd_pending = 1;
1068 }
1139} 1069}
1140 1070
1141static int 1071static int
@@ -1150,16 +1080,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
1150 case CPU_DOWN_PREPARE_FROZEN: 1080 case CPU_DOWN_PREPARE_FROZEN:
1151 case CPU_DEAD: 1081 case CPU_DEAD:
1152 case CPU_DEAD_FROZEN: 1082 case CPU_DEAD_FROZEN:
1153 hotplug_hrtick_disable(cpu); 1083 hrtick_clear(cpu_rq(cpu));
1154 return NOTIFY_OK;
1155
1156 case CPU_UP_PREPARE:
1157 case CPU_UP_PREPARE_FROZEN:
1158 case CPU_DOWN_FAILED:
1159 case CPU_DOWN_FAILED_FROZEN:
1160 case CPU_ONLINE:
1161 case CPU_ONLINE_FROZEN:
1162 hotplug_hrtick_enable(cpu);
1163 return NOTIFY_OK; 1084 return NOTIFY_OK;
1164 } 1085 }
1165 1086
@@ -1170,46 +1091,45 @@ static void init_hrtick(void)
1170{ 1091{
1171 hotcpu_notifier(hotplug_hrtick, 0); 1092 hotcpu_notifier(hotplug_hrtick, 0);
1172} 1093}
1173#endif /* CONFIG_SMP */ 1094#else
1095/*
1096 * Called to set the hrtick timer state.
1097 *
1098 * called with rq->lock held and irqs disabled
1099 */
1100static void hrtick_start(struct rq *rq, u64 delay)
1101{
1102 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
1103}
1174 1104
1175static void init_rq_hrtick(struct rq *rq) 1105static void init_hrtick(void)
1176{ 1106{
1177 rq->hrtick_flags = 0;
1178 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1179 rq->hrtick_timer.function = hrtick;
1180 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
1181} 1107}
1108#endif /* CONFIG_SMP */
1182 1109
1183void hrtick_resched(void) 1110static void init_rq_hrtick(struct rq *rq)
1184{ 1111{
1185 struct rq *rq; 1112#ifdef CONFIG_SMP
1186 unsigned long flags; 1113 rq->hrtick_csd_pending = 0;
1187 1114
1188 if (!test_thread_flag(TIF_HRTICK_RESCHED)) 1115 rq->hrtick_csd.flags = 0;
1189 return; 1116 rq->hrtick_csd.func = __hrtick_start;
1117 rq->hrtick_csd.info = rq;
1118#endif
1190 1119
1191 local_irq_save(flags); 1120 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1192 rq = cpu_rq(smp_processor_id()); 1121 rq->hrtick_timer.function = hrtick;
1193 hrtick_set(rq); 1122 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
1194 local_irq_restore(flags);
1195} 1123}
1196#else 1124#else
1197static inline void hrtick_clear(struct rq *rq) 1125static inline void hrtick_clear(struct rq *rq)
1198{ 1126{
1199} 1127}
1200 1128
1201static inline void hrtick_set(struct rq *rq)
1202{
1203}
1204
1205static inline void init_rq_hrtick(struct rq *rq) 1129static inline void init_rq_hrtick(struct rq *rq)
1206{ 1130{
1207} 1131}
1208 1132
1209void hrtick_resched(void)
1210{
1211}
1212
1213static inline void init_hrtick(void) 1133static inline void init_hrtick(void)
1214{ 1134{
1215} 1135}
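
The new per-runqueue call_single_data follows the generic SMP helpers' pattern for running a function on another CPU from IPI context. A rough sketch of that pattern (the names and the helper below are illustrative; the exact rules for re-using a pending csd should be checked against kernel/smp.c, which is why the code above guards with hrtick_csd_pending):

static void my_ipi_handler(void *info)
{
	/* runs in hardirq context on the CPU the IPI was sent to */
}

static struct call_single_data my_csd = {
	.func	= my_ipi_handler,
	.info	= NULL,
	.flags	= 0,
};

static void kick_cpu(int target_cpu)
{
	/* queue my_ipi_handler() on target_cpu and send the IPI;
	 * the csd must not be re-submitted while still pending */
	__smp_call_function_single(target_cpu, &my_csd);
}
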
@@ -1228,16 +1148,16 @@ static inline void init_hrtick(void)
1228#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 1148#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
1229#endif 1149#endif
1230 1150
1231static void __resched_task(struct task_struct *p, int tif_bit) 1151static void resched_task(struct task_struct *p)
1232{ 1152{
1233 int cpu; 1153 int cpu;
1234 1154
1235 assert_spin_locked(&task_rq(p)->lock); 1155 assert_spin_locked(&task_rq(p)->lock);
1236 1156
1237 if (unlikely(test_tsk_thread_flag(p, tif_bit))) 1157 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
1238 return; 1158 return;
1239 1159
1240 set_tsk_thread_flag(p, tif_bit); 1160 set_tsk_thread_flag(p, TIF_NEED_RESCHED);
1241 1161
1242 cpu = task_cpu(p); 1162 cpu = task_cpu(p);
1243 if (cpu == smp_processor_id()) 1163 if (cpu == smp_processor_id())
@@ -1303,10 +1223,10 @@ void wake_up_idle_cpu(int cpu)
1303#endif /* CONFIG_NO_HZ */ 1223#endif /* CONFIG_NO_HZ */
1304 1224
1305#else /* !CONFIG_SMP */ 1225#else /* !CONFIG_SMP */
1306static void __resched_task(struct task_struct *p, int tif_bit) 1226static void resched_task(struct task_struct *p)
1307{ 1227{
1308 assert_spin_locked(&task_rq(p)->lock); 1228 assert_spin_locked(&task_rq(p)->lock);
1309 set_tsk_thread_flag(p, tif_bit); 1229 set_tsk_need_resched(p);
1310} 1230}
1311#endif /* CONFIG_SMP */ 1231#endif /* CONFIG_SMP */
1312 1232
@@ -1946,16 +1866,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
1946/* 1866/*
1947 * wait_task_inactive - wait for a thread to unschedule. 1867 * wait_task_inactive - wait for a thread to unschedule.
1948 * 1868 *
1869 * If @match_state is nonzero, it's the @p->state value just checked and
1870 * not expected to change. If it changes, i.e. @p might have woken up,
1871 * then return zero. When we succeed in waiting for @p to be off its CPU,
1872 * we return a positive number (its total switch count). If a second call
1873 * a short while later returns the same number, the caller can be sure that
1874 * @p has remained unscheduled the whole time.
1875 *
1949 * The caller must ensure that the task *will* unschedule sometime soon, 1876 * The caller must ensure that the task *will* unschedule sometime soon,
1950 * else this function might spin for a *long* time. This function can't 1877 * else this function might spin for a *long* time. This function can't
1951 * be called with interrupts off, or it may introduce deadlock with 1878 * be called with interrupts off, or it may introduce deadlock with
1952 * smp_call_function() if an IPI is sent by the same process we are 1879 * smp_call_function() if an IPI is sent by the same process we are
1953 * waiting to become inactive. 1880 * waiting to become inactive.
1954 */ 1881 */
1955void wait_task_inactive(struct task_struct *p) 1882unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1956{ 1883{
1957 unsigned long flags; 1884 unsigned long flags;
1958 int running, on_rq; 1885 int running, on_rq;
1886 unsigned long ncsw;
1959 struct rq *rq; 1887 struct rq *rq;
1960 1888
1961 for (;;) { 1889 for (;;) {
@@ -1978,8 +1906,11 @@ void wait_task_inactive(struct task_struct *p)
1978 * return false if the runqueue has changed and p 1906 * return false if the runqueue has changed and p
1979 * is actually now running somewhere else! 1907 * is actually now running somewhere else!
1980 */ 1908 */
1981 while (task_running(rq, p)) 1909 while (task_running(rq, p)) {
1910 if (match_state && unlikely(p->state != match_state))
1911 return 0;
1982 cpu_relax(); 1912 cpu_relax();
1913 }
1983 1914
1984 /* 1915 /*
1985 * Ok, time to look more closely! We need the rq 1916 * Ok, time to look more closely! We need the rq
@@ -1989,9 +1920,21 @@ void wait_task_inactive(struct task_struct *p)
1989 rq = task_rq_lock(p, &flags); 1920 rq = task_rq_lock(p, &flags);
1990 running = task_running(rq, p); 1921 running = task_running(rq, p);
1991 on_rq = p->se.on_rq; 1922 on_rq = p->se.on_rq;
1923 ncsw = 0;
1924 if (!match_state || p->state == match_state) {
1925 ncsw = p->nivcsw + p->nvcsw;
1926 if (unlikely(!ncsw))
1927 ncsw = 1;
1928 }
1992 task_rq_unlock(rq, &flags); 1929 task_rq_unlock(rq, &flags);
1993 1930
1994 /* 1931 /*
1932 * If it changed from the expected state, bail out now.
1933 */
1934 if (unlikely(!ncsw))
1935 break;
1936
1937 /*
1995 * Was it really running after all now that we 1938 * Was it really running after all now that we
1996 * checked with the proper locks actually held? 1939 * checked with the proper locks actually held?
1997 * 1940 *
@@ -2023,6 +1966,8 @@ void wait_task_inactive(struct task_struct *p)
2023 */ 1966 */
2024 break; 1967 break;
2025 } 1968 }
1969
1970 return ncsw;
2026} 1971}
2027 1972
2028/*** 1973/***
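
A sketch of how a caller can use the reworked wait_task_inactive() interface (not from this patch; the ptrace-style scenario and the examine_child() helper are illustrative): pass the state the task is expected to stay in, and compare the returned switch count across two calls to be sure the task never ran in between.

static int example_check_stopped(struct task_struct *child)
{
	unsigned long ncsw;

	ncsw = wait_task_inactive(child, TASK_TRACED);
	if (!ncsw)
		return -ESRCH;	/* it left TASK_TRACED, e.g. it was killed */

	examine_child(child);	/* placeholder for the real work */

	if (wait_task_inactive(child, TASK_TRACED) != ncsw)
		return -ESRCH;	/* it was scheduled in the meantime */

	return 0;
}
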
@@ -2108,7 +2053,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
2108 /* Tally up the load of all CPUs in the group */ 2053 /* Tally up the load of all CPUs in the group */
2109 avg_load = 0; 2054 avg_load = 0;
2110 2055
2111 for_each_cpu_mask(i, group->cpumask) { 2056 for_each_cpu_mask_nr(i, group->cpumask) {
2112 /* Bias balancing toward cpus of our domain */ 2057 /* Bias balancing toward cpus of our domain */
2113 if (local_group) 2058 if (local_group)
2114 load = source_load(i, load_idx); 2059 load = source_load(i, load_idx);
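
All of the for_each_cpu_mask() to for_each_cpu_mask_nr() conversions in this patch share one intent: the _nr variant stops scanning at nr_cpu_ids (one past the highest CPU that can ever be present) instead of the compile-time NR_CPUS, so large statically sized masks are not walked past the possible CPUs. A rough open-coded model, assuming the _nr cpumask helpers of this era:

	int i;

	for (i = first_cpu(mask); i < nr_cpu_ids; i = next_cpu_nr(i, mask))
		handle_cpu(i);	/* handle_cpu() is a placeholder */
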
@@ -2150,7 +2095,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
2150 /* Traverse only the allowed CPUs */ 2095 /* Traverse only the allowed CPUs */
2151 cpus_and(*tmp, group->cpumask, p->cpus_allowed); 2096 cpus_and(*tmp, group->cpumask, p->cpus_allowed);
2152 2097
2153 for_each_cpu_mask(i, *tmp) { 2098 for_each_cpu_mask_nr(i, *tmp) {
2154 load = weighted_cpuload(i); 2099 load = weighted_cpuload(i);
2155 2100
2156 if (load < min_load || (load == min_load && i == this_cpu)) { 2101 if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2813,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
2813 } else { 2758 } else {
2814 if (rq1 < rq2) { 2759 if (rq1 < rq2) {
2815 spin_lock(&rq1->lock); 2760 spin_lock(&rq1->lock);
2816 spin_lock(&rq2->lock); 2761 spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
2817 } else { 2762 } else {
2818 spin_lock(&rq2->lock); 2763 spin_lock(&rq2->lock);
2819 spin_lock(&rq1->lock); 2764 spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
2820 } 2765 }
2821 } 2766 }
2822 update_rq_clock(rq1); 2767 update_rq_clock(rq1);
@@ -2859,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2859 if (busiest < this_rq) { 2804 if (busiest < this_rq) {
2860 spin_unlock(&this_rq->lock); 2805 spin_unlock(&this_rq->lock);
2861 spin_lock(&busiest->lock); 2806 spin_lock(&busiest->lock);
2862 spin_lock(&this_rq->lock); 2807 spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
2863 ret = 1; 2808 ret = 1;
2864 } else 2809 } else
2865 spin_lock(&busiest->lock); 2810 spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
2866 } 2811 }
2867 return ret; 2812 return ret;
2868} 2813}
2869 2814
2815static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2816 __releases(busiest->lock)
2817{
2818 spin_unlock(&busiest->lock);
2819 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
2820}
2821
2870/* 2822/*
2871 * If dest_cpu is allowed for this process, migrate the task to it. 2823 * If dest_cpu is allowed for this process, migrate the task to it.
2872 * This is accomplished by forcing the cpu_allowed mask to only 2824 * This is accomplished by forcing the cpu_allowed mask to only
@@ -2881,7 +2833,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2881 2833
2882 rq = task_rq_lock(p, &flags); 2834 rq = task_rq_lock(p, &flags);
2883 if (!cpu_isset(dest_cpu, p->cpus_allowed) 2835 if (!cpu_isset(dest_cpu, p->cpus_allowed)
2884 || unlikely(cpu_is_offline(dest_cpu))) 2836 || unlikely(!cpu_active(dest_cpu)))
2885 goto out; 2837 goto out;
2886 2838
2887 /* force the process onto the specified CPU */ 2839 /* force the process onto the specified CPU */
@@ -3168,7 +3120,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
3168 max_cpu_load = 0; 3120 max_cpu_load = 0;
3169 min_cpu_load = ~0UL; 3121 min_cpu_load = ~0UL;
3170 3122
3171 for_each_cpu_mask(i, group->cpumask) { 3123 for_each_cpu_mask_nr(i, group->cpumask) {
3172 struct rq *rq; 3124 struct rq *rq;
3173 3125
3174 if (!cpu_isset(i, *cpus)) 3126 if (!cpu_isset(i, *cpus))
@@ -3447,7 +3399,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
3447 unsigned long max_load = 0; 3399 unsigned long max_load = 0;
3448 int i; 3400 int i;
3449 3401
3450 for_each_cpu_mask(i, group->cpumask) { 3402 for_each_cpu_mask_nr(i, group->cpumask) {
3451 unsigned long wl; 3403 unsigned long wl;
3452 3404
3453 if (!cpu_isset(i, *cpus)) 3405 if (!cpu_isset(i, *cpus))
@@ -3691,7 +3643,7 @@ redo:
3691 ld_moved = move_tasks(this_rq, this_cpu, busiest, 3643 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3692 imbalance, sd, CPU_NEWLY_IDLE, 3644 imbalance, sd, CPU_NEWLY_IDLE,
3693 &all_pinned); 3645 &all_pinned);
3694 spin_unlock(&busiest->lock); 3646 double_unlock_balance(this_rq, busiest);
3695 3647
3696 if (unlikely(all_pinned)) { 3648 if (unlikely(all_pinned)) {
3697 cpu_clear(cpu_of(busiest), *cpus); 3649 cpu_clear(cpu_of(busiest), *cpus);
@@ -3806,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3806 else 3758 else
3807 schedstat_inc(sd, alb_failed); 3759 schedstat_inc(sd, alb_failed);
3808 } 3760 }
3809 spin_unlock(&target_rq->lock); 3761 double_unlock_balance(busiest_rq, target_rq);
3810} 3762}
3811 3763
3812#ifdef CONFIG_NO_HZ 3764#ifdef CONFIG_NO_HZ
@@ -3849,7 +3801,7 @@ int select_nohz_load_balancer(int stop_tick)
3849 /* 3801 /*
3850 * If we are going offline and still the leader, give up! 3802 * If we are going offline and still the leader, give up!
3851 */ 3803 */
3852 if (cpu_is_offline(cpu) && 3804 if (!cpu_active(cpu) &&
3853 atomic_read(&nohz.load_balancer) == cpu) { 3805 atomic_read(&nohz.load_balancer) == cpu) {
3854 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) 3806 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3855 BUG(); 3807 BUG();
@@ -3989,7 +3941,7 @@ static void run_rebalance_domains(struct softirq_action *h)
3989 int balance_cpu; 3941 int balance_cpu;
3990 3942
3991 cpu_clear(this_cpu, cpus); 3943 cpu_clear(this_cpu, cpus);
3992 for_each_cpu_mask(balance_cpu, cpus) { 3944 for_each_cpu_mask_nr(balance_cpu, cpus) {
3993 /* 3945 /*
3994 * If this cpu gets work to do, stop the load balancing 3946 * If this cpu gets work to do, stop the load balancing
3995 * work being done for other cpus. Next load 3947 * work being done for other cpus. Next load
@@ -4125,6 +4077,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
4125 cpustat->nice = cputime64_add(cpustat->nice, tmp); 4077 cpustat->nice = cputime64_add(cpustat->nice, tmp);
4126 else 4078 else
4127 cpustat->user = cputime64_add(cpustat->user, tmp); 4079 cpustat->user = cputime64_add(cpustat->user, tmp);
4080 /* Account for user time used */
4081 acct_update_integrals(p);
4128} 4082}
4129 4083
4130/* 4084/*
@@ -4395,7 +4349,7 @@ asmlinkage void __sched schedule(void)
4395 struct task_struct *prev, *next; 4349 struct task_struct *prev, *next;
4396 unsigned long *switch_count; 4350 unsigned long *switch_count;
4397 struct rq *rq; 4351 struct rq *rq;
4398 int cpu, hrtick = sched_feat(HRTICK); 4352 int cpu;
4399 4353
4400need_resched: 4354need_resched:
4401 preempt_disable(); 4355 preempt_disable();
@@ -4410,7 +4364,7 @@ need_resched_nonpreemptible:
4410 4364
4411 schedule_debug(prev); 4365 schedule_debug(prev);
4412 4366
4413 if (hrtick) 4367 if (sched_feat(HRTICK))
4414 hrtick_clear(rq); 4368 hrtick_clear(rq);
4415 4369
4416 /* 4370 /*
@@ -4457,9 +4411,6 @@ need_resched_nonpreemptible:
4457 } else 4411 } else
4458 spin_unlock_irq(&rq->lock); 4412 spin_unlock_irq(&rq->lock);
4459 4413
4460 if (hrtick)
4461 hrtick_set(rq);
4462
4463 if (unlikely(reacquire_kernel_lock(current) < 0)) 4414 if (unlikely(reacquire_kernel_lock(current) < 0))
4464 goto need_resched_nonpreemptible; 4415 goto need_resched_nonpreemptible;
4465 4416
@@ -5059,19 +5010,21 @@ recheck:
5059 return -EPERM; 5010 return -EPERM;
5060 } 5011 }
5061 5012
5013 if (user) {
5062#ifdef CONFIG_RT_GROUP_SCHED 5014#ifdef CONFIG_RT_GROUP_SCHED
5063 /* 5015 /*
5064 * Do not allow realtime tasks into groups that have no runtime 5016 * Do not allow realtime tasks into groups that have no runtime
5065 * assigned. 5017 * assigned.
5066 */ 5018 */
5067 if (user 5019 if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
5068 && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) 5020 return -EPERM;
5069 return -EPERM;
5070#endif 5021#endif
5071 5022
5072 retval = security_task_setscheduler(p, policy, param); 5023 retval = security_task_setscheduler(p, policy, param);
5073 if (retval) 5024 if (retval)
5074 return retval; 5025 return retval;
5026 }
5027
5075 /* 5028 /*
5076 * make sure no PI-waiters arrive (or leave) while we are 5029 * make sure no PI-waiters arrive (or leave) while we are
5077 * changing the priority of the task: 5030 * changing the priority of the task:
@@ -5876,7 +5829,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5876 struct rq *rq_dest, *rq_src; 5829 struct rq *rq_dest, *rq_src;
5877 int ret = 0, on_rq; 5830 int ret = 0, on_rq;
5878 5831
5879 if (unlikely(cpu_is_offline(dest_cpu))) 5832 if (unlikely(!cpu_active(dest_cpu)))
5880 return ret; 5833 return ret;
5881 5834
5882 rq_src = cpu_rq(src_cpu); 5835 rq_src = cpu_rq(src_cpu);
@@ -6469,7 +6422,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
6469 .priority = 10 6422 .priority = 10
6470}; 6423};
6471 6424
6472void __init migration_init(void) 6425static int __init migration_init(void)
6473{ 6426{
6474 void *cpu = (void *)(long)smp_processor_id(); 6427 void *cpu = (void *)(long)smp_processor_id();
6475 int err; 6428 int err;
@@ -6479,7 +6432,10 @@ void __init migration_init(void)
6479 BUG_ON(err == NOTIFY_BAD); 6432 BUG_ON(err == NOTIFY_BAD);
6480 migration_call(&migration_notifier, CPU_ONLINE, cpu); 6433 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6481 register_cpu_notifier(&migration_notifier); 6434 register_cpu_notifier(&migration_notifier);
6435
6436 return err;
6482} 6437}
6438early_initcall(migration_init);
6483#endif 6439#endif
6484 6440
6485#ifdef CONFIG_SMP 6441#ifdef CONFIG_SMP
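
With migration_init() made static and registered via early_initcall(), it no longer has to be called explicitly from init code. Converting a setup function this way follows the usual pattern (sketch with an illustrative name):

static int __init my_setup(void)
{
	/* runs early during boot, before normal initcalls */
	return 0;
}
early_initcall(my_setup);
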
@@ -6768,7 +6724,8 @@ static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
6768/* Setup the mask of cpus configured for isolated domains */ 6724/* Setup the mask of cpus configured for isolated domains */
6769static int __init isolated_cpu_setup(char *str) 6725static int __init isolated_cpu_setup(char *str)
6770{ 6726{
6771 int ints[NR_CPUS], i; 6727 static int __initdata ints[NR_CPUS];
6728 int i;
6772 6729
6773 str = get_options(str, ARRAY_SIZE(ints), ints); 6730 str = get_options(str, ARRAY_SIZE(ints), ints);
6774 cpus_clear(cpu_isolated_map); 6731 cpus_clear(cpu_isolated_map);
@@ -6802,7 +6759,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6802 6759
6803 cpus_clear(*covered); 6760 cpus_clear(*covered);
6804 6761
6805 for_each_cpu_mask(i, *span) { 6762 for_each_cpu_mask_nr(i, *span) {
6806 struct sched_group *sg; 6763 struct sched_group *sg;
6807 int group = group_fn(i, cpu_map, &sg, tmpmask); 6764 int group = group_fn(i, cpu_map, &sg, tmpmask);
6808 int j; 6765 int j;
@@ -6813,7 +6770,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
6813 cpus_clear(sg->cpumask); 6770 cpus_clear(sg->cpumask);
6814 sg->__cpu_power = 0; 6771 sg->__cpu_power = 0;
6815 6772
6816 for_each_cpu_mask(j, *span) { 6773 for_each_cpu_mask_nr(j, *span) {
6817 if (group_fn(j, cpu_map, NULL, tmpmask) != group) 6774 if (group_fn(j, cpu_map, NULL, tmpmask) != group)
6818 continue; 6775 continue;
6819 6776
@@ -7013,7 +6970,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
7013 if (!sg) 6970 if (!sg)
7014 return; 6971 return;
7015 do { 6972 do {
7016 for_each_cpu_mask(j, sg->cpumask) { 6973 for_each_cpu_mask_nr(j, sg->cpumask) {
7017 struct sched_domain *sd; 6974 struct sched_domain *sd;
7018 6975
7019 sd = &per_cpu(phys_domains, j); 6976 sd = &per_cpu(phys_domains, j);
@@ -7038,7 +6995,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
7038{ 6995{
7039 int cpu, i; 6996 int cpu, i;
7040 6997
7041 for_each_cpu_mask(cpu, *cpu_map) { 6998 for_each_cpu_mask_nr(cpu, *cpu_map) {
7042 struct sched_group **sched_group_nodes 6999 struct sched_group **sched_group_nodes
7043 = sched_group_nodes_bycpu[cpu]; 7000 = sched_group_nodes_bycpu[cpu];
7044 7001
@@ -7277,7 +7234,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7277 /* 7234 /*
7278 * Set up domains for cpus specified by the cpu_map. 7235 * Set up domains for cpus specified by the cpu_map.
7279 */ 7236 */
7280 for_each_cpu_mask(i, *cpu_map) { 7237 for_each_cpu_mask_nr(i, *cpu_map) {
7281 struct sched_domain *sd = NULL, *p; 7238 struct sched_domain *sd = NULL, *p;
7282 SCHED_CPUMASK_VAR(nodemask, allmasks); 7239 SCHED_CPUMASK_VAR(nodemask, allmasks);
7283 7240
@@ -7344,7 +7301,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7344 7301
7345#ifdef CONFIG_SCHED_SMT 7302#ifdef CONFIG_SCHED_SMT
7346 /* Set up CPU (sibling) groups */ 7303 /* Set up CPU (sibling) groups */
7347 for_each_cpu_mask(i, *cpu_map) { 7304 for_each_cpu_mask_nr(i, *cpu_map) {
7348 SCHED_CPUMASK_VAR(this_sibling_map, allmasks); 7305 SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
7349 SCHED_CPUMASK_VAR(send_covered, allmasks); 7306 SCHED_CPUMASK_VAR(send_covered, allmasks);
7350 7307
@@ -7361,7 +7318,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7361 7318
7362#ifdef CONFIG_SCHED_MC 7319#ifdef CONFIG_SCHED_MC
7363 /* Set up multi-core groups */ 7320 /* Set up multi-core groups */
7364 for_each_cpu_mask(i, *cpu_map) { 7321 for_each_cpu_mask_nr(i, *cpu_map) {
7365 SCHED_CPUMASK_VAR(this_core_map, allmasks); 7322 SCHED_CPUMASK_VAR(this_core_map, allmasks);
7366 SCHED_CPUMASK_VAR(send_covered, allmasks); 7323 SCHED_CPUMASK_VAR(send_covered, allmasks);
7367 7324
@@ -7428,7 +7385,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7428 goto error; 7385 goto error;
7429 } 7386 }
7430 sched_group_nodes[i] = sg; 7387 sched_group_nodes[i] = sg;
7431 for_each_cpu_mask(j, *nodemask) { 7388 for_each_cpu_mask_nr(j, *nodemask) {
7432 struct sched_domain *sd; 7389 struct sched_domain *sd;
7433 7390
7434 sd = &per_cpu(node_domains, j); 7391 sd = &per_cpu(node_domains, j);
@@ -7474,21 +7431,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7474 7431
7475 /* Calculate CPU power for physical packages and nodes */ 7432 /* Calculate CPU power for physical packages and nodes */
7476#ifdef CONFIG_SCHED_SMT 7433#ifdef CONFIG_SCHED_SMT
7477 for_each_cpu_mask(i, *cpu_map) { 7434 for_each_cpu_mask_nr(i, *cpu_map) {
7478 struct sched_domain *sd = &per_cpu(cpu_domains, i); 7435 struct sched_domain *sd = &per_cpu(cpu_domains, i);
7479 7436
7480 init_sched_groups_power(i, sd); 7437 init_sched_groups_power(i, sd);
7481 } 7438 }
7482#endif 7439#endif
7483#ifdef CONFIG_SCHED_MC 7440#ifdef CONFIG_SCHED_MC
7484 for_each_cpu_mask(i, *cpu_map) { 7441 for_each_cpu_mask_nr(i, *cpu_map) {
7485 struct sched_domain *sd = &per_cpu(core_domains, i); 7442 struct sched_domain *sd = &per_cpu(core_domains, i);
7486 7443
7487 init_sched_groups_power(i, sd); 7444 init_sched_groups_power(i, sd);
7488 } 7445 }
7489#endif 7446#endif
7490 7447
7491 for_each_cpu_mask(i, *cpu_map) { 7448 for_each_cpu_mask_nr(i, *cpu_map) {
7492 struct sched_domain *sd = &per_cpu(phys_domains, i); 7449 struct sched_domain *sd = &per_cpu(phys_domains, i);
7493 7450
7494 init_sched_groups_power(i, sd); 7451 init_sched_groups_power(i, sd);
@@ -7508,7 +7465,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
7508#endif 7465#endif
7509 7466
7510 /* Attach the domains */ 7467 /* Attach the domains */
7511 for_each_cpu_mask(i, *cpu_map) { 7468 for_each_cpu_mask_nr(i, *cpu_map) {
7512 struct sched_domain *sd; 7469 struct sched_domain *sd;
7513#ifdef CONFIG_SCHED_SMT 7470#ifdef CONFIG_SCHED_SMT
7514 sd = &per_cpu(cpu_domains, i); 7471 sd = &per_cpu(cpu_domains, i);
@@ -7553,18 +7510,6 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
7553} 7510}
7554 7511
7555/* 7512/*
7556 * Free current domain masks.
7557 * Called after all cpus are attached to NULL domain.
7558 */
7559static void free_sched_domains(void)
7560{
7561 ndoms_cur = 0;
7562 if (doms_cur != &fallback_doms)
7563 kfree(doms_cur);
7564 doms_cur = &fallback_doms;
7565}
7566
7567/*
7568 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 7513 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
7569 * For now this just excludes isolated cpus, but could be used to 7514 * For now this just excludes isolated cpus, but could be used to
7570 * exclude other special cases in the future. 7515 * exclude other special cases in the future.
@@ -7603,7 +7548,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
7603 7548
7604 unregister_sched_domain_sysctl(); 7549 unregister_sched_domain_sysctl();
7605 7550
7606 for_each_cpu_mask(i, *cpu_map) 7551 for_each_cpu_mask_nr(i, *cpu_map)
7607 cpu_attach_domain(NULL, &def_root_domain, i); 7552 cpu_attach_domain(NULL, &def_root_domain, i);
7608 synchronize_sched(); 7553 synchronize_sched();
7609 arch_destroy_sched_domains(cpu_map, &tmpmask); 7554 arch_destroy_sched_domains(cpu_map, &tmpmask);
@@ -7642,7 +7587,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7642 * ownership of it and will kfree it when done with it. If the caller 7587 * ownership of it and will kfree it when done with it. If the caller
7643 * failed the kmalloc call, then it can pass in doms_new == NULL, 7588 * failed the kmalloc call, then it can pass in doms_new == NULL,
7644 * and partition_sched_domains() will fallback to the single partition 7589 * and partition_sched_domains() will fallback to the single partition
7645 * 'fallback_doms'. 7590 * 'fallback_doms', it also forces the domains to be rebuilt.
7646 * 7591 *
7647 * Call with hotplug lock held 7592 * Call with hotplug lock held
7648 */ 7593 */
@@ -7656,12 +7601,8 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
7656 /* always unregister in case we don't destroy any domains */ 7601 /* always unregister in case we don't destroy any domains */
7657 unregister_sched_domain_sysctl(); 7602 unregister_sched_domain_sysctl();
7658 7603
7659 if (doms_new == NULL) { 7604 if (doms_new == NULL)
7660 ndoms_new = 1; 7605 ndoms_new = 0;
7661 doms_new = &fallback_doms;
7662 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
7663 dattr_new = NULL;
7664 }
7665 7606
7666 /* Destroy deleted domains */ 7607 /* Destroy deleted domains */
7667 for (i = 0; i < ndoms_cur; i++) { 7608 for (i = 0; i < ndoms_cur; i++) {
@@ -7676,6 +7617,14 @@ match1:
7676 ; 7617 ;
7677 } 7618 }
7678 7619
7620 if (doms_new == NULL) {
7621 ndoms_cur = 0;
7622 ndoms_new = 1;
7623 doms_new = &fallback_doms;
7624 cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
7625 dattr_new = NULL;
7626 }
7627
7679 /* Build new domains */ 7628 /* Build new domains */
7680 for (i = 0; i < ndoms_new; i++) { 7629 for (i = 0; i < ndoms_new; i++) {
7681 for (j = 0; j < ndoms_cur; j++) { 7630 for (j = 0; j < ndoms_cur; j++) {
@@ -7706,17 +7655,10 @@ match2:
7706#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 7655#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
7707int arch_reinit_sched_domains(void) 7656int arch_reinit_sched_domains(void)
7708{ 7657{
7709 int err;
7710
7711 get_online_cpus(); 7658 get_online_cpus();
7712 mutex_lock(&sched_domains_mutex); 7659 rebuild_sched_domains();
7713 detach_destroy_domains(&cpu_online_map);
7714 free_sched_domains();
7715 err = arch_init_sched_domains(&cpu_online_map);
7716 mutex_unlock(&sched_domains_mutex);
7717 put_online_cpus(); 7660 put_online_cpus();
7718 7661 return 0;
7719 return err;
7720} 7662}
7721 7663
7722static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) 7664static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
@@ -7737,30 +7679,34 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7737} 7679}
7738 7680
7739#ifdef CONFIG_SCHED_MC 7681#ifdef CONFIG_SCHED_MC
7740static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) 7682static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
7683 char *page)
7741{ 7684{
7742 return sprintf(page, "%u\n", sched_mc_power_savings); 7685 return sprintf(page, "%u\n", sched_mc_power_savings);
7743} 7686}
7744static ssize_t sched_mc_power_savings_store(struct sys_device *dev, 7687static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
7745 const char *buf, size_t count) 7688 const char *buf, size_t count)
7746{ 7689{
7747 return sched_power_savings_store(buf, count, 0); 7690 return sched_power_savings_store(buf, count, 0);
7748} 7691}
7749static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, 7692static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7750 sched_mc_power_savings_store); 7693 sched_mc_power_savings_show,
7694 sched_mc_power_savings_store);
7751#endif 7695#endif
7752 7696
7753#ifdef CONFIG_SCHED_SMT 7697#ifdef CONFIG_SCHED_SMT
7754static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) 7698static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
7699 char *page)
7755{ 7700{
7756 return sprintf(page, "%u\n", sched_smt_power_savings); 7701 return sprintf(page, "%u\n", sched_smt_power_savings);
7757} 7702}
7758static ssize_t sched_smt_power_savings_store(struct sys_device *dev, 7703static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
7759 const char *buf, size_t count) 7704 const char *buf, size_t count)
7760{ 7705{
7761 return sched_power_savings_store(buf, count, 1); 7706 return sched_power_savings_store(buf, count, 1);
7762} 7707}
7763static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, 7708static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7709 sched_smt_power_savings_show,
7764 sched_smt_power_savings_store); 7710 sched_smt_power_savings_store);
7765#endif 7711#endif
7766 7712
@@ -7782,59 +7728,49 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
7782} 7728}
7783#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ 7729#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
7784 7730
7731#ifndef CONFIG_CPUSETS
7785/* 7732/*
7786 * Force a reinitialization of the sched domains hierarchy. The domains 7733 * Add online and remove offline CPUs from the scheduler domains.
7787 * and groups cannot be updated in place without racing with the balancing 7734 * When cpusets are enabled they take over this function.
7788 * code, so we temporarily attach all running cpus to the NULL domain
7789 * which will prevent rebalancing while the sched domains are recalculated.
7790 */ 7735 */
7791static int update_sched_domains(struct notifier_block *nfb, 7736static int update_sched_domains(struct notifier_block *nfb,
7792 unsigned long action, void *hcpu) 7737 unsigned long action, void *hcpu)
7793{ 7738{
7739 switch (action) {
7740 case CPU_ONLINE:
7741 case CPU_ONLINE_FROZEN:
7742 case CPU_DEAD:
7743 case CPU_DEAD_FROZEN:
7744 partition_sched_domains(0, NULL, NULL);
7745 return NOTIFY_OK;
7746
7747 default:
7748 return NOTIFY_DONE;
7749 }
7750}
7751#endif
7752
7753static int update_runtime(struct notifier_block *nfb,
7754 unsigned long action, void *hcpu)
7755{
7794 int cpu = (int)(long)hcpu; 7756 int cpu = (int)(long)hcpu;
7795 7757
7796 switch (action) { 7758 switch (action) {
7797 case CPU_DOWN_PREPARE: 7759 case CPU_DOWN_PREPARE:
7798 case CPU_DOWN_PREPARE_FROZEN: 7760 case CPU_DOWN_PREPARE_FROZEN:
7799 disable_runtime(cpu_rq(cpu)); 7761 disable_runtime(cpu_rq(cpu));
7800 /* fall-through */
7801 case CPU_UP_PREPARE:
7802 case CPU_UP_PREPARE_FROZEN:
7803 detach_destroy_domains(&cpu_online_map);
7804 free_sched_domains();
7805 return NOTIFY_OK; 7762 return NOTIFY_OK;
7806 7763
7807
7808 case CPU_DOWN_FAILED: 7764 case CPU_DOWN_FAILED:
7809 case CPU_DOWN_FAILED_FROZEN: 7765 case CPU_DOWN_FAILED_FROZEN:
7810 case CPU_ONLINE: 7766 case CPU_ONLINE:
7811 case CPU_ONLINE_FROZEN: 7767 case CPU_ONLINE_FROZEN:
7812 enable_runtime(cpu_rq(cpu)); 7768 enable_runtime(cpu_rq(cpu));
7813 /* fall-through */ 7769 return NOTIFY_OK;
7814 case CPU_UP_CANCELED: 7770
7815 case CPU_UP_CANCELED_FROZEN:
7816 case CPU_DEAD:
7817 case CPU_DEAD_FROZEN:
7818 /*
7819 * Fall through and re-initialise the domains.
7820 */
7821 break;
7822 default: 7771 default:
7823 return NOTIFY_DONE; 7772 return NOTIFY_DONE;
7824 } 7773 }
7825
7826#ifndef CONFIG_CPUSETS
7827 /*
7828 * Create default domain partitioning if cpusets are disabled.
7829 * Otherwise we let cpusets rebuild the domains based on the
7830 * current setup.
7831 */
7832
7833 /* The hotplug lock is already held by cpu_up/cpu_down */
7834 arch_init_sched_domains(&cpu_online_map);
7835#endif
7836
7837 return NOTIFY_OK;
7838} 7774}
7839 7775
7840void __init sched_init_smp(void) 7776void __init sched_init_smp(void)
@@ -7854,8 +7790,15 @@ void __init sched_init_smp(void)
7854 cpu_set(smp_processor_id(), non_isolated_cpus); 7790 cpu_set(smp_processor_id(), non_isolated_cpus);
7855 mutex_unlock(&sched_domains_mutex); 7791 mutex_unlock(&sched_domains_mutex);
7856 put_online_cpus(); 7792 put_online_cpus();
7793
7794#ifndef CONFIG_CPUSETS
7857 /* XXX: Theoretical race here - CPU may be hotplugged now */ 7795 /* XXX: Theoretical race here - CPU may be hotplugged now */
7858 hotcpu_notifier(update_sched_domains, 0); 7796 hotcpu_notifier(update_sched_domains, 0);
7797#endif
7798
7799 /* RT runtime code needs to handle some hotplug events */
7800 hotcpu_notifier(update_runtime, 0);
7801
7859 init_hrtick(); 7802 init_hrtick();
7860 7803
7861 /* Move init over to a non-isolated CPU */ 7804 /* Move init over to a non-isolated CPU */
@@ -8063,7 +8006,6 @@ void __init sched_init(void)
8063 8006
8064 rq = cpu_rq(i); 8007 rq = cpu_rq(i);
8065 spin_lock_init(&rq->lock); 8008 spin_lock_init(&rq->lock);
8066 lockdep_set_class(&rq->lock, &rq->rq_lock_key);
8067 rq->nr_running = 0; 8009 rq->nr_running = 0;
8068 init_cfs_rq(&rq->cfs, rq); 8010 init_cfs_rq(&rq->cfs, rq);
8069 init_rt_rq(&rq->rt, rq); 8011 init_rt_rq(&rq->rt, rq);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f2aa987027d6..cf2cd6ce4cb2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
878#ifdef CONFIG_SCHED_HRTICK 878#ifdef CONFIG_SCHED_HRTICK
879static void hrtick_start_fair(struct rq *rq, struct task_struct *p) 879static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
880{ 880{
881 int requeue = rq->curr == p;
882 struct sched_entity *se = &p->se; 881 struct sched_entity *se = &p->se;
883 struct cfs_rq *cfs_rq = cfs_rq_of(se); 882 struct cfs_rq *cfs_rq = cfs_rq_of(se);
884 883
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
899 * Don't schedule slices shorter than 10000ns, that just 898 * Don't schedule slices shorter than 10000ns, that just
900 * doesn't make sense. Rely on vruntime for fairness. 899 * doesn't make sense. Rely on vruntime for fairness.
901 */ 900 */
902 if (!requeue) 901 if (rq->curr != p)
903 delta = max(10000LL, delta); 902 delta = max(10000LL, delta);
904 903
905 hrtick_start(rq, delta, requeue); 904 hrtick_start(rq, delta);
906 } 905 }
907} 906}
908#else /* !CONFIG_SCHED_HRTICK */ 907#else /* !CONFIG_SCHED_HRTICK */
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq)
1004 * not idle and an idle cpu is available. The span of cpus to 1003 * not idle and an idle cpu is available. The span of cpus to
1005 * search starts with cpus closest then further out as needed, 1004 * search starts with cpus closest then further out as needed,
1006 * so we always favor a closer, idle cpu. 1005 * so we always favor a closer, idle cpu.
1006 * Domains may include CPUs that are not usable for migration,
1007 * hence we need to mask them out (cpu_active_map)
1007 * 1008 *
1008 * Returns the CPU we should wake onto. 1009 * Returns the CPU we should wake onto.
1009 */ 1010 */
@@ -1031,7 +1032,8 @@ static int wake_idle(int cpu, struct task_struct *p)
1031 || ((sd->flags & SD_WAKE_IDLE_FAR) 1032 || ((sd->flags & SD_WAKE_IDLE_FAR)
1032 && !task_hot(p, task_rq(p)->clock, sd))) { 1033 && !task_hot(p, task_rq(p)->clock, sd))) {
1033 cpus_and(tmp, sd->span, p->cpus_allowed); 1034 cpus_and(tmp, sd->span, p->cpus_allowed);
1034 for_each_cpu_mask(i, tmp) { 1035 cpus_and(tmp, tmp, cpu_active_map);
1036 for_each_cpu_mask_nr(i, tmp) {
1035 if (idle_cpu(i)) { 1037 if (idle_cpu(i)) {
1036 if (i != task_cpu(p)) { 1038 if (i != task_cpu(p)) {
1037 schedstat_inc(p, 1039 schedstat_inc(p,
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 47ceac9e8552..6163e4cf885b 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -240,7 +240,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
240 240
241 spin_lock(&rt_b->rt_runtime_lock); 241 spin_lock(&rt_b->rt_runtime_lock);
242 rt_period = ktime_to_ns(rt_b->rt_period); 242 rt_period = ktime_to_ns(rt_b->rt_period);
243 for_each_cpu_mask(i, rd->span) { 243 for_each_cpu_mask_nr(i, rd->span) {
244 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 244 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
245 s64 diff; 245 s64 diff;
246 246
@@ -253,7 +253,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
253 253
254 diff = iter->rt_runtime - iter->rt_time; 254 diff = iter->rt_runtime - iter->rt_time;
255 if (diff > 0) { 255 if (diff > 0) {
256 do_div(diff, weight); 256 diff = div_u64((u64)diff, weight);
257 if (rt_rq->rt_runtime + diff > rt_period) 257 if (rt_rq->rt_runtime + diff > rt_period)
258 diff = rt_period - rt_rq->rt_runtime; 258 diff = rt_period - rt_rq->rt_runtime;
259 iter->rt_runtime -= diff; 259 iter->rt_runtime -= diff;
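
The do_div() to div_u64() switch above sidesteps do_div()'s in-place semantics: do_div(x, base) expects an unsigned 64-bit lvalue, overwrites it with the quotient and returns the remainder, which is awkward for the signed s64 diff here. div_u64() simply returns the quotient, so the result can be assigned back with an explicit cast. A small worked example:

	u64 n = 1000;
	u32 rem;

	rem = do_div(n, 7);	/* now n == 142 (quotient), rem == 6 */
	n = div_u64(1000, 7);	/* n == 142, nothing modified in place */
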
@@ -505,7 +505,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
505 rt_rq->rt_nr_running++; 505 rt_rq->rt_nr_running++;
506#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 506#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
507 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { 507 if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
508#ifdef CONFIG_SMP
508 struct rq *rq = rq_of_rt_rq(rt_rq); 509 struct rq *rq = rq_of_rt_rq(rt_rq);
510#endif
509 511
510 rt_rq->highest_prio = rt_se_prio(rt_se); 512 rt_rq->highest_prio = rt_se_prio(rt_se);
511#ifdef CONFIG_SMP 513#ifdef CONFIG_SMP
@@ -599,11 +601,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
599 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) 601 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
600 return; 602 return;
601 603
602 if (rt_se->nr_cpus_allowed == 1) 604 list_add_tail(&rt_se->run_list, queue);
603 list_add(&rt_se->run_list, queue);
604 else
605 list_add_tail(&rt_se->run_list, queue);
606
607 __set_bit(rt_se_prio(rt_se), array->bitmap); 605 __set_bit(rt_se_prio(rt_se), array->bitmap);
608 606
609 inc_rt_tasks(rt_se, rt_rq); 607 inc_rt_tasks(rt_se, rt_rq);
@@ -688,32 +686,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
688 * Put task to the end of the run list without the overhead of dequeue 686 * Put task to the end of the run list without the overhead of dequeue
689 * followed by enqueue. 687 * followed by enqueue.
690 */ 688 */
691static 689static void
692void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 690requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
693{ 691{
694 struct rt_prio_array *array = &rt_rq->active;
695
696 if (on_rt_rq(rt_se)) { 692 if (on_rt_rq(rt_se)) {
697 list_del_init(&rt_se->run_list); 693 struct rt_prio_array *array = &rt_rq->active;
698 list_add_tail(&rt_se->run_list, 694 struct list_head *queue = array->queue + rt_se_prio(rt_se);
699 array->queue + rt_se_prio(rt_se)); 695
696 if (head)
697 list_move(&rt_se->run_list, queue);
698 else
699 list_move_tail(&rt_se->run_list, queue);
700 } 700 }
701} 701}
702 702
703static void requeue_task_rt(struct rq *rq, struct task_struct *p) 703static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
704{ 704{
705 struct sched_rt_entity *rt_se = &p->rt; 705 struct sched_rt_entity *rt_se = &p->rt;
706 struct rt_rq *rt_rq; 706 struct rt_rq *rt_rq;
707 707
708 for_each_sched_rt_entity(rt_se) { 708 for_each_sched_rt_entity(rt_se) {
709 rt_rq = rt_rq_of_se(rt_se); 709 rt_rq = rt_rq_of_se(rt_se);
710 requeue_rt_entity(rt_rq, rt_se); 710 requeue_rt_entity(rt_rq, rt_se, head);
711 } 711 }
712} 712}
713 713
714static void yield_task_rt(struct rq *rq) 714static void yield_task_rt(struct rq *rq)
715{ 715{
716 requeue_task_rt(rq, rq->curr); 716 requeue_task_rt(rq, rq->curr, 0);
717} 717}
718 718
719#ifdef CONFIG_SMP 719#ifdef CONFIG_SMP
@@ -753,6 +753,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
753 */ 753 */
754 return task_cpu(p); 754 return task_cpu(p);
755} 755}
756
757static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
758{
759 cpumask_t mask;
760
761 if (rq->curr->rt.nr_cpus_allowed == 1)
762 return;
763
764 if (p->rt.nr_cpus_allowed != 1
765 && cpupri_find(&rq->rd->cpupri, p, &mask))
766 return;
767
768 if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
769 return;
770
771 /*
772 * There appears to be other cpus that can accept
773 * current and none to run 'p', so lets reschedule
774 * to try and push current away:
775 */
776 requeue_task_rt(rq, p, 1);
777 resched_task(rq->curr);
778}
779
756#endif /* CONFIG_SMP */ 780#endif /* CONFIG_SMP */
757 781
758/* 782/*
@@ -778,18 +802,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
778 * to move current somewhere else, making room for our non-migratable 802 * to move current somewhere else, making room for our non-migratable
779 * task. 803 * task.
780 */ 804 */
781 if((p->prio == rq->curr->prio) 805 if (p->prio == rq->curr->prio && !need_resched())
782 && p->rt.nr_cpus_allowed == 1 806 check_preempt_equal_prio(rq, p);
783 && rq->curr->rt.nr_cpus_allowed != 1) {
784 cpumask_t mask;
785
786 if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
787 /*
788 * There appears to be other cpus that can accept
789 * current, so lets reschedule to try and push it away
790 */
791 resched_task(rq->curr);
792 }
793#endif 807#endif
794} 808}
795 809
@@ -847,6 +861,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
847#define RT_MAX_TRIES 3 861#define RT_MAX_TRIES 3
848 862
849static int double_lock_balance(struct rq *this_rq, struct rq *busiest); 863static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
864static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
865
850static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); 866static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
851 867
852static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 868static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -922,6 +938,13 @@ static int find_lowest_rq(struct task_struct *task)
922 return -1; /* No targets found */ 938 return -1; /* No targets found */
923 939
924 /* 940 /*
941 * Only consider CPUs that are usable for migration.
942 * I guess we might want to change cpupri_find() to ignore those
943 * in the first place.
944 */
945 cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
946
947 /*
925 * At this point we have built a mask of cpus representing the 948 * At this point we have built a mask of cpus representing the
926 * lowest priority tasks in the system. Now we want to elect 949 * lowest priority tasks in the system. Now we want to elect
927 * the best one based on our affinity and topology. 950 * the best one based on our affinity and topology.
@@ -1001,7 +1024,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1001 break; 1024 break;
1002 1025
1003 /* try again */ 1026 /* try again */
1004 spin_unlock(&lowest_rq->lock); 1027 double_unlock_balance(rq, lowest_rq);
1005 lowest_rq = NULL; 1028 lowest_rq = NULL;
1006 } 1029 }
1007 1030
@@ -1070,7 +1093,7 @@ static int push_rt_task(struct rq *rq)
1070 1093
1071 resched_task(lowest_rq->curr); 1094 resched_task(lowest_rq->curr);
1072 1095
1073 spin_unlock(&lowest_rq->lock); 1096 double_unlock_balance(rq, lowest_rq);
1074 1097
1075 ret = 1; 1098 ret = 1;
1076out: 1099out:
@@ -1107,7 +1130,7 @@ static int pull_rt_task(struct rq *this_rq)
1107 1130
1108 next = pick_next_task_rt(this_rq); 1131 next = pick_next_task_rt(this_rq);
1109 1132
1110 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { 1133 for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
1111 if (this_cpu == cpu) 1134 if (this_cpu == cpu)
1112 continue; 1135 continue;
1113 1136
@@ -1176,7 +1199,7 @@ static int pull_rt_task(struct rq *this_rq)
1176 1199
1177 } 1200 }
1178 skip: 1201 skip:
1179 spin_unlock(&src_rq->lock); 1202 double_unlock_balance(this_rq, src_rq);
1180 } 1203 }
1181 1204
1182 return ret; 1205 return ret;
@@ -1415,7 +1438,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1415 * on the queue: 1438 * on the queue:
1416 */ 1439 */
1417 if (p->rt.run_list.prev != p->rt.run_list.next) { 1440 if (p->rt.run_list.prev != p->rt.run_list.next) {
1418 requeue_task_rt(rq, p); 1441 requeue_task_rt(rq, p, 0);
1419 set_tsk_need_resched(p); 1442 set_tsk_need_resched(p);
1420 } 1443 }
1421} 1444}
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index aaaeae8244e7..94a62c0d4ade 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -212,9 +212,7 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
212 waiter.up = 0; 212 waiter.up = 0;
213 213
214 for (;;) { 214 for (;;) {
215 if (state == TASK_INTERRUPTIBLE && signal_pending(task)) 215 if (signal_pending_state(state, task))
216 goto interrupted;
217 if (state == TASK_KILLABLE && fatal_signal_pending(task))
218 goto interrupted; 216 goto interrupted;
219 if (timeout <= 0) 217 if (timeout <= 0)
220 goto timed_out; 218 goto timed_out;
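
The two state-specific checks collapse into signal_pending_state(), which keeps the TASK_INTERRUPTIBLE / TASK_KILLABLE logic in one place. A sketch of a helper with these semantics (roughly what <linux/sched.h> provides; TASK_KILLABLE includes TASK_WAKEKILL):

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
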
diff --git a/kernel/signal.c b/kernel/signal.c
index c5bf0c0df658..c539f60c6f41 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,6 +22,7 @@
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/signalfd.h> 24#include <linux/signalfd.h>
25#include <linux/tracehook.h>
25#include <linux/capability.h> 26#include <linux/capability.h>
26#include <linux/freezer.h> 27#include <linux/freezer.h>
27#include <linux/pid_namespace.h> 28#include <linux/pid_namespace.h>
@@ -39,24 +40,21 @@
39 40
40static struct kmem_cache *sigqueue_cachep; 41static struct kmem_cache *sigqueue_cachep;
41 42
42static int __sig_ignored(struct task_struct *t, int sig) 43static void __user *sig_handler(struct task_struct *t, int sig)
43{ 44{
44 void __user *handler; 45 return t->sighand->action[sig - 1].sa.sa_handler;
46}
45 47
48static int sig_handler_ignored(void __user *handler, int sig)
49{
46 /* Is it explicitly or implicitly ignored? */ 50 /* Is it explicitly or implicitly ignored? */
47
48 handler = t->sighand->action[sig - 1].sa.sa_handler;
49 return handler == SIG_IGN || 51 return handler == SIG_IGN ||
50 (handler == SIG_DFL && sig_kernel_ignore(sig)); 52 (handler == SIG_DFL && sig_kernel_ignore(sig));
51} 53}
52 54
53static int sig_ignored(struct task_struct *t, int sig) 55static int sig_ignored(struct task_struct *t, int sig)
54{ 56{
55 /* 57 void __user *handler;
56 * Tracers always want to know about signals..
57 */
58 if (t->ptrace & PT_PTRACED)
59 return 0;
60 58
61 /* 59 /*
62 * Blocked signals are never ignored, since the 60 * Blocked signals are never ignored, since the
@@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig)
66 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 64 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
67 return 0; 65 return 0;
68 66
69 return __sig_ignored(t, sig); 67 handler = sig_handler(t, sig);
68 if (!sig_handler_ignored(handler, sig))
69 return 0;
70
71 /*
72 * Tracers may want to know about even ignored signals.
73 */
74 return !tracehook_consider_ignored_signal(t, sig, handler);
70} 75}
71 76
72/* 77/*
@@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t)
129 134
130void recalc_sigpending(void) 135void recalc_sigpending(void)
131{ 136{
132 if (!recalc_sigpending_tsk(current) && !freezing(current)) 137 if (unlikely(tracehook_force_sigpending()))
138 set_thread_flag(TIF_SIGPENDING);
139 else if (!recalc_sigpending_tsk(current) && !freezing(current))
133 clear_thread_flag(TIF_SIGPENDING); 140 clear_thread_flag(TIF_SIGPENDING);
134 141
135} 142}
@@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default)
295 302
296int unhandled_signal(struct task_struct *tsk, int sig) 303int unhandled_signal(struct task_struct *tsk, int sig)
297{ 304{
305 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
298 if (is_global_init(tsk)) 306 if (is_global_init(tsk))
299 return 1; 307 return 1;
300 if (tsk->ptrace & PT_PTRACED) 308 if (handler != SIG_IGN && handler != SIG_DFL)
301 return 0; 309 return 0;
302 return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) || 310 return !tracehook_consider_fatal_signal(tsk, sig, handler);
303 (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
304} 311}
305 312
306 313
@@ -338,13 +345,9 @@ unblock_all_signals(void)
338 spin_unlock_irqrestore(&current->sighand->siglock, flags); 345 spin_unlock_irqrestore(&current->sighand->siglock, flags);
339} 346}
340 347
341static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) 348static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
342{ 349{
343 struct sigqueue *q, *first = NULL; 350 struct sigqueue *q, *first = NULL;
344 int still_pending = 0;
345
346 if (unlikely(!sigismember(&list->signal, sig)))
347 return 0;
348 351
349 /* 352 /*
350 * Collect the siginfo appropriate to this signal. Check if 353 * Collect the siginfo appropriate to this signal. Check if
@@ -352,33 +355,30 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
352 */ 355 */
353 list_for_each_entry(q, &list->list, list) { 356 list_for_each_entry(q, &list->list, list) {
354 if (q->info.si_signo == sig) { 357 if (q->info.si_signo == sig) {
355 if (first) { 358 if (first)
356 still_pending = 1; 359 goto still_pending;
357 break;
358 }
359 first = q; 360 first = q;
360 } 361 }
361 } 362 }
363
364 sigdelset(&list->signal, sig);
365
362 if (first) { 366 if (first) {
367still_pending:
363 list_del_init(&first->list); 368 list_del_init(&first->list);
364 copy_siginfo(info, &first->info); 369 copy_siginfo(info, &first->info);
365 __sigqueue_free(first); 370 __sigqueue_free(first);
366 if (!still_pending)
367 sigdelset(&list->signal, sig);
368 } else { 371 } else {
369
370 /* Ok, it wasn't in the queue. This must be 372 /* Ok, it wasn't in the queue. This must be
371 a fast-pathed signal or we must have been 373 a fast-pathed signal or we must have been
372 out of queue space. So zero out the info. 374 out of queue space. So zero out the info.
373 */ 375 */
374 sigdelset(&list->signal, sig);
375 info->si_signo = sig; 376 info->si_signo = sig;
376 info->si_errno = 0; 377 info->si_errno = 0;
377 info->si_code = 0; 378 info->si_code = 0;
378 info->si_pid = 0; 379 info->si_pid = 0;
379 info->si_uid = 0; 380 info->si_uid = 0;
380 } 381 }
381 return 1;
382} 382}
383 383
384static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 384static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
@@ -396,8 +396,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
396 } 396 }
397 } 397 }
398 398
399 if (!collect_signal(sig, pending, info)) 399 collect_signal(sig, pending, info);
400 sig = 0;
401 } 400 }
402 401
403 return sig; 402 return sig;
@@ -462,8 +461,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
462 * is to alert stop-signal processing code when another 461 * is to alert stop-signal processing code when another
463 * processor has come along and cleared the flag. 462 * processor has come along and cleared the flag.
464 */ 463 */
465 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) 464 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
466 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
467 } 465 }
468 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 466 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
469 /* 467 /*
@@ -600,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info,
600 return security_task_kill(t, info, sig, 0); 598 return security_task_kill(t, info, sig, 0);
601} 599}
602 600
603/* forward decl */
604static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
605
606/* 601/*
607 * Handle magic process-wide effects of stop/continue signals. Unlike 602 * Handle magic process-wide effects of stop/continue signals. Unlike
608 * the signal actions, these happen immediately at signal-generation 603 * the signal actions, these happen immediately at signal-generation
@@ -765,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group)
765 if (sig_fatal(p, sig) && 760 if (sig_fatal(p, sig) &&
766 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 761 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
767 !sigismember(&t->real_blocked, sig) && 762 !sigismember(&t->real_blocked, sig) &&
768 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { 763 (sig == SIGKILL ||
764 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
769 /* 765 /*
770 * This signal will be fatal to the whole group. 766 * This signal will be fatal to the whole group.
771 */ 767 */
@@ -1125,7 +1121,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1125 * is probably wrong. Should make it like BSD or SYSV. 1121 * is probably wrong. Should make it like BSD or SYSV.
1126 */ 1122 */
1127 1123
1128static int kill_something_info(int sig, struct siginfo *info, int pid) 1124static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1129{ 1125{
1130 int ret; 1126 int ret;
1131 1127
@@ -1237,17 +1233,6 @@ int kill_pid(struct pid *pid, int sig, int priv)
1237} 1233}
1238EXPORT_SYMBOL(kill_pid); 1234EXPORT_SYMBOL(kill_pid);
1239 1235
1240int
1241kill_proc(pid_t pid, int sig, int priv)
1242{
1243 int ret;
1244
1245 rcu_read_lock();
1246 ret = kill_pid_info(sig, __si_special(priv), find_pid(pid));
1247 rcu_read_unlock();
1248 return ret;
1249}
1250
1251/* 1236/*
1252 * These functions support sending signals using preallocated sigqueue 1237 * These functions support sending signals using preallocated sigqueue
1253 * structures. This is needed "because realtime applications cannot 1238 * structures. This is needed "because realtime applications cannot
@@ -1344,9 +1329,11 @@ static inline void __wake_up_parent(struct task_struct *p,
1344/* 1329/*
1345 * Let a parent know about the death of a child. 1330 * Let a parent know about the death of a child.
1346 * For a stopped/continued status change, use do_notify_parent_cldstop instead. 1331 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1332 *
1333 * Returns -1 if our parent ignored us and so we've switched to
1334 * self-reaping, or else @sig.
1347 */ 1335 */
1348 1336int do_notify_parent(struct task_struct *tsk, int sig)
1349void do_notify_parent(struct task_struct *tsk, int sig)
1350{ 1337{
1351 struct siginfo info; 1338 struct siginfo info;
1352 unsigned long flags; 1339 unsigned long flags;
@@ -1380,10 +1367,9 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1380 1367
1381 info.si_uid = tsk->uid; 1368 info.si_uid = tsk->uid;
1382 1369
1383 /* FIXME: find out whether or not this is supposed to be c*time. */ 1370 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1384 info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1385 tsk->signal->utime)); 1371 tsk->signal->utime));
1386 info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, 1372 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1387 tsk->signal->stime)); 1373 tsk->signal->stime));
1388 1374
1389 info.si_status = tsk->exit_code & 0x7f; 1375 info.si_status = tsk->exit_code & 0x7f;
@@ -1418,12 +1404,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1418 */ 1404 */
1419 tsk->exit_signal = -1; 1405 tsk->exit_signal = -1;
1420 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) 1406 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1421 sig = 0; 1407 sig = -1;
1422 } 1408 }
1423 if (valid_signal(sig) && sig > 0) 1409 if (valid_signal(sig) && sig > 0)
1424 __group_send_sig_info(sig, &info, tsk->parent); 1410 __group_send_sig_info(sig, &info, tsk->parent);
1425 __wake_up_parent(tsk, tsk->parent); 1411 __wake_up_parent(tsk, tsk->parent);
1426 spin_unlock_irqrestore(&psig->siglock, flags); 1412 spin_unlock_irqrestore(&psig->siglock, flags);
1413
1414 return sig;
1427} 1415}
1428 1416
1429static void do_notify_parent_cldstop(struct task_struct *tsk, int why) 1417static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
@@ -1451,9 +1439,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1451 1439
1452 info.si_uid = tsk->uid; 1440 info.si_uid = tsk->uid;
1453 1441
1454 /* FIXME: find out whether or not this is supposed to be c*time. */ 1442 info.si_utime = cputime_to_clock_t(tsk->utime);
1455 info.si_utime = cputime_to_jiffies(tsk->utime); 1443 info.si_stime = cputime_to_clock_t(tsk->stime);
1456 info.si_stime = cputime_to_jiffies(tsk->stime);
1457 1444
1458 info.si_code = why; 1445 info.si_code = why;
1459 switch (why) { 1446 switch (why) {
@@ -1492,10 +1479,10 @@ static inline int may_ptrace_stop(void)
1492 * is a deadlock situation, and pointless because our tracer 1479 * is a deadlock situation, and pointless because our tracer
1493 * is dead so don't allow us to stop. 1480 * is dead so don't allow us to stop.
1494 * If SIGKILL was already sent before the caller unlocked 1481 * If SIGKILL was already sent before the caller unlocked
1495 * ->siglock we must see ->core_waiters != 0. Otherwise it 1482 * ->siglock we must see ->core_state != NULL. Otherwise it
1496 * is safe to enter schedule(). 1483 * is safe to enter schedule().
1497 */ 1484 */
1498 if (unlikely(current->mm->core_waiters) && 1485 if (unlikely(current->mm->core_state) &&
1499 unlikely(current->mm == current->parent->mm)) 1486 unlikely(current->mm == current->parent->mm))
1500 return 0; 1487 return 0;
1501 1488
@@ -1508,9 +1495,8 @@ static inline int may_ptrace_stop(void)
1508 */ 1495 */
1509static int sigkill_pending(struct task_struct *tsk) 1496static int sigkill_pending(struct task_struct *tsk)
1510{ 1497{
1511 return ((sigismember(&tsk->pending.signal, SIGKILL) || 1498 return sigismember(&tsk->pending.signal, SIGKILL) ||
1512 sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) && 1499 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1513 !unlikely(sigismember(&tsk->blocked, SIGKILL)));
1514} 1500}
1515 1501
1516/* 1502/*
@@ -1526,8 +1512,6 @@ static int sigkill_pending(struct task_struct *tsk)
1526 */ 1512 */
1527static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) 1513static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1528{ 1514{
1529 int killed = 0;
1530
1531 if (arch_ptrace_stop_needed(exit_code, info)) { 1515 if (arch_ptrace_stop_needed(exit_code, info)) {
1532 /* 1516 /*
1533 * The arch code has something special to do before a 1517 * The arch code has something special to do before a
@@ -1543,7 +1527,8 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1543 spin_unlock_irq(&current->sighand->siglock); 1527 spin_unlock_irq(&current->sighand->siglock);
1544 arch_ptrace_stop(exit_code, info); 1528 arch_ptrace_stop(exit_code, info);
1545 spin_lock_irq(&current->sighand->siglock); 1529 spin_lock_irq(&current->sighand->siglock);
1546 killed = sigkill_pending(current); 1530 if (sigkill_pending(current))
1531 return;
1547 } 1532 }
1548 1533
1549 /* 1534 /*
@@ -1560,7 +1545,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1560 __set_current_state(TASK_TRACED); 1545 __set_current_state(TASK_TRACED);
1561 spin_unlock_irq(&current->sighand->siglock); 1546 spin_unlock_irq(&current->sighand->siglock);
1562 read_lock(&tasklist_lock); 1547 read_lock(&tasklist_lock);
1563 if (!unlikely(killed) && may_ptrace_stop()) { 1548 if (may_ptrace_stop()) {
1564 do_notify_parent_cldstop(current, CLD_TRAPPED); 1549 do_notify_parent_cldstop(current, CLD_TRAPPED);
1565 read_unlock(&tasklist_lock); 1550 read_unlock(&tasklist_lock);
1566 schedule(); 1551 schedule();
@@ -1624,7 +1609,7 @@ finish_stop(int stop_count)
1624 * a group stop in progress and we are the last to stop, 1609 * a group stop in progress and we are the last to stop,
1625 * report to the parent. When ptraced, every thread reports itself. 1610 * report to the parent. When ptraced, every thread reports itself.
1626 */ 1611 */
1627 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { 1612 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1628 read_lock(&tasklist_lock); 1613 read_lock(&tasklist_lock);
1629 do_notify_parent_cldstop(current, CLD_STOPPED); 1614 do_notify_parent_cldstop(current, CLD_STOPPED);
1630 read_unlock(&tasklist_lock); 1615 read_unlock(&tasklist_lock);
@@ -1659,8 +1644,7 @@ static int do_signal_stop(int signr)
1659 } else { 1644 } else {
1660 struct task_struct *t; 1645 struct task_struct *t;
1661 1646
1662 if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE)) 1647 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1663 != SIGNAL_STOP_DEQUEUED) ||
1664 unlikely(signal_group_exit(sig))) 1648 unlikely(signal_group_exit(sig)))
1665 return 0; 1649 return 0;
1666 /* 1650 /*
@@ -1761,6 +1745,9 @@ relock:
1761 signal->flags &= ~SIGNAL_CLD_MASK; 1745 signal->flags &= ~SIGNAL_CLD_MASK;
1762 spin_unlock_irq(&sighand->siglock); 1746 spin_unlock_irq(&sighand->siglock);
1763 1747
1748 if (unlikely(!tracehook_notify_jctl(1, why)))
1749 goto relock;
1750
1764 read_lock(&tasklist_lock); 1751 read_lock(&tasklist_lock);
1765 do_notify_parent_cldstop(current->group_leader, why); 1752 do_notify_parent_cldstop(current->group_leader, why);
1766 read_unlock(&tasklist_lock); 1753 read_unlock(&tasklist_lock);
@@ -1774,17 +1761,33 @@ relock:
1774 do_signal_stop(0)) 1761 do_signal_stop(0))
1775 goto relock; 1762 goto relock;
1776 1763
1777 signr = dequeue_signal(current, &current->blocked, info); 1764 /*
1778 if (!signr) 1765 * Tracing can induce an artificial signal and choose sigaction.
1779 break; /* will return 0 */ 1766 * The return value in @signr determines the default action,
1767 * but @info->si_signo is the signal number we will report.
1768 */
1769 signr = tracehook_get_signal(current, regs, info, return_ka);
1770 if (unlikely(signr < 0))
1771 goto relock;
1772 if (unlikely(signr != 0))
1773 ka = return_ka;
1774 else {
1775 signr = dequeue_signal(current, &current->blocked,
1776 info);
1780 1777
1781 if (signr != SIGKILL) {
1782 signr = ptrace_signal(signr, info, regs, cookie);
1783 if (!signr) 1778 if (!signr)
1784 continue; 1779 break; /* will return 0 */
1780
1781 if (signr != SIGKILL) {
1782 signr = ptrace_signal(signr, info,
1783 regs, cookie);
1784 if (!signr)
1785 continue;
1786 }
1787
1788 ka = &sighand->action[signr-1];
1785 } 1789 }
1786 1790
1787 ka = &sighand->action[signr-1];
1788 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1791 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1789 continue; 1792 continue;
1790 if (ka->sa.sa_handler != SIG_DFL) { 1793 if (ka->sa.sa_handler != SIG_DFL) {
@@ -1832,7 +1835,7 @@ relock:
1832 spin_lock_irq(&sighand->siglock); 1835 spin_lock_irq(&sighand->siglock);
1833 } 1836 }
1834 1837
1835 if (likely(do_signal_stop(signr))) { 1838 if (likely(do_signal_stop(info->si_signo))) {
1836 /* It released the siglock. */ 1839 /* It released the siglock. */
1837 goto relock; 1840 goto relock;
1838 } 1841 }
@@ -1853,7 +1856,7 @@ relock:
1853 1856
1854 if (sig_kernel_coredump(signr)) { 1857 if (sig_kernel_coredump(signr)) {
1855 if (print_fatal_signals) 1858 if (print_fatal_signals)
1856 print_fatal_signal(regs, signr); 1859 print_fatal_signal(regs, info->si_signo);
1857 /* 1860 /*
1858 * If it was able to dump core, this kills all 1861 * If it was able to dump core, this kills all
1859 * other threads in the group and synchronizes with 1862 * other threads in the group and synchronizes with
@@ -1862,13 +1865,13 @@ relock:
1862 * first and our do_group_exit call below will use 1865 * first and our do_group_exit call below will use
1863 * that value and ignore the one we pass it. 1866 * that value and ignore the one we pass it.
1864 */ 1867 */
1865 do_coredump((long)signr, signr, regs); 1868 do_coredump(info->si_signo, info->si_signo, regs);
1866 } 1869 }
1867 1870
1868 /* 1871 /*
1869 * Death signals, no core dump. 1872 * Death signals, no core dump.
1870 */ 1873 */
1871 do_group_exit(signr); 1874 do_group_exit(info->si_signo);
1872 /* NOTREACHED */ 1875 /* NOTREACHED */
1873 } 1876 }
1874 spin_unlock_irq(&sighand->siglock); 1877 spin_unlock_irq(&sighand->siglock);
@@ -1910,7 +1913,7 @@ void exit_signals(struct task_struct *tsk)
1910out: 1913out:
1911 spin_unlock_irq(&tsk->sighand->siglock); 1914 spin_unlock_irq(&tsk->sighand->siglock);
1912 1915
1913 if (unlikely(group_stop)) { 1916 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1914 read_lock(&tasklist_lock); 1917 read_lock(&tasklist_lock);
1915 do_notify_parent_cldstop(tsk, CLD_STOPPED); 1918 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1916 read_unlock(&tasklist_lock); 1919 read_unlock(&tasklist_lock);
@@ -1921,8 +1924,6 @@ EXPORT_SYMBOL(recalc_sigpending);
1921EXPORT_SYMBOL_GPL(dequeue_signal); 1924EXPORT_SYMBOL_GPL(dequeue_signal);
1922EXPORT_SYMBOL(flush_signals); 1925EXPORT_SYMBOL(flush_signals);
1923EXPORT_SYMBOL(force_sig); 1926EXPORT_SYMBOL(force_sig);
1924EXPORT_SYMBOL(kill_proc);
1925EXPORT_SYMBOL(ptrace_notify);
1926EXPORT_SYMBOL(send_sig); 1927EXPORT_SYMBOL(send_sig);
1927EXPORT_SYMBOL(send_sig_info); 1928EXPORT_SYMBOL(send_sig_info);
1928EXPORT_SYMBOL(sigprocmask); 1929EXPORT_SYMBOL(sigprocmask);
@@ -2197,7 +2198,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
2197} 2198}
2198 2199
2199asmlinkage long 2200asmlinkage long
2200sys_kill(int pid, int sig) 2201sys_kill(pid_t pid, int sig)
2201{ 2202{
2202 struct siginfo info; 2203 struct siginfo info;
2203 2204
@@ -2210,7 +2211,7 @@ sys_kill(int pid, int sig)
2210 return kill_something_info(sig, &info, pid); 2211 return kill_something_info(sig, &info, pid);
2211} 2212}
2212 2213
2213static int do_tkill(int tgid, int pid, int sig) 2214static int do_tkill(pid_t tgid, pid_t pid, int sig)
2214{ 2215{
2215 int error; 2216 int error;
2216 struct siginfo info; 2217 struct siginfo info;
@@ -2256,7 +2257,7 @@ static int do_tkill(int tgid, int pid, int sig)
2256 * exists but it's not belonging to the target process anymore. This 2257 * exists but it's not belonging to the target process anymore. This
2257 * method solves the problem of threads exiting and PIDs getting reused. 2258 * method solves the problem of threads exiting and PIDs getting reused.
2258 */ 2259 */
2259asmlinkage long sys_tgkill(int tgid, int pid, int sig) 2260asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2260{ 2261{
2261 /* This is only valid for single tasks */ 2262 /* This is only valid for single tasks */
2262 if (pid <= 0 || tgid <= 0) 2263 if (pid <= 0 || tgid <= 0)
@@ -2269,7 +2270,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2269 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2270 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2270 */ 2271 */
2271asmlinkage long 2272asmlinkage long
2272sys_tkill(int pid, int sig) 2273sys_tkill(pid_t pid, int sig)
2273{ 2274{
2274 /* This is only valid for single tasks */ 2275 /* This is only valid for single tasks */
2275 if (pid <= 0) 2276 if (pid <= 0)
@@ -2279,7 +2280,7 @@ sys_tkill(int pid, int sig)
2279} 2280}
2280 2281
2281asmlinkage long 2282asmlinkage long
2282sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) 2283sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2283{ 2284{
2284 siginfo_t info; 2285 siginfo_t info;
2285 2286
@@ -2326,7 +2327,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2326 * (for example, SIGCHLD), shall cause the pending signal to 2327 * (for example, SIGCHLD), shall cause the pending signal to
2327 * be discarded, whether or not it is blocked" 2328 * be discarded, whether or not it is blocked"
2328 */ 2329 */
2329 if (__sig_ignored(t, sig)) { 2330 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2330 sigemptyset(&mask); 2331 sigemptyset(&mask);
2331 sigaddset(&mask, sig); 2332 sigaddset(&mask, sig);
2332 rm_from_queue_full(&mask, &t->signal->shared_pending); 2333 rm_from_queue_full(&mask, &t->signal->shared_pending);
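Note on the kill_proc() removal above: callers are expected to move to the struct pid based interface instead. A minimal sketch of such a conversion, assuming find_vpid() and the exported kill_pid() visible in this hunk are the intended replacements; the helper name is made up:

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rcupdate.h>

/* hypothetical helper: what a former kill_proc(nr, sig, 1) call might become */
static int example_signal_task(pid_t nr, int sig)
{
	int ret;

	rcu_read_lock();
	ret = kill_pid(find_vpid(nr), sig, 1);	/* 1 selects SEND_SIG_PRIV, as kill_proc did */
	rcu_read_unlock();
	return ret;
}
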
diff --git a/kernel/smp.c b/kernel/smp.c
index 462c785ca1ee..e6084f6efb4d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,7 +33,7 @@ struct call_single_queue {
33 spinlock_t lock; 33 spinlock_t lock;
34}; 34};
35 35
36void __cpuinit init_call_single_data(void) 36static int __cpuinit init_call_single_data(void)
37{ 37{
38 int i; 38 int i;
39 39
@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void)
43 spin_lock_init(&q->lock); 43 spin_lock_init(&q->lock);
44 INIT_LIST_HEAD(&q->list); 44 INIT_LIST_HEAD(&q->list);
45 } 45 }
46 return 0;
46} 47}
48early_initcall(init_call_single_data);
47 49
48static void csd_flag_wait(struct call_single_data *data) 50static void csd_flag_wait(struct call_single_data *data)
49{ 51{
@@ -258,6 +260,41 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
258 generic_exec_single(cpu, data); 260 generic_exec_single(cpu, data);
259} 261}
260 262
263/* Dummy function */
264static void quiesce_dummy(void *unused)
265{
266}
267
268/*
269 * Ensure stack based data used in call function mask is safe to free.
270 *
271 * This is needed by smp_call_function_mask when using on-stack data, because
272 * a single call function queue is shared by all CPUs, and any CPU may pick up
273 * the data item on the queue at any time before it is deleted. So we need to
274 * ensure that all CPUs have transitioned through a quiescent state after
275 * this call.
276 *
277 * This is a very slow function, implemented by sending synchronous IPIs to
278 * all possible CPUs. For this reason, we have to alloc data rather than use
279 * stack based data even in the case of synchronous calls. The stack based
280 * data is then just used for deadlock/oom fallback which will be very rare.
281 *
282 * If a faster scheme can be made, we could go back to preferring stack based
283 * data -- the data allocation/free is non-zero cost.
284 */
285static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
286{
287 struct call_single_data data;
288 int cpu;
289
290 data.func = quiesce_dummy;
291 data.info = NULL;
292 data.flags = CSD_FLAG_WAIT;
293
294 for_each_cpu_mask(cpu, mask)
295 generic_exec_single(cpu, &data);
296}
297
261/** 298/**
262 * smp_call_function_mask(): Run a function on a set of other CPUs. 299 * smp_call_function_mask(): Run a function on a set of other CPUs.
263 * @mask: The set of cpus to run on. 300 * @mask: The set of cpus to run on.
@@ -283,6 +320,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
283 cpumask_t allbutself; 320 cpumask_t allbutself;
284 unsigned long flags; 321 unsigned long flags;
285 int cpu, num_cpus; 322 int cpu, num_cpus;
323 int slowpath = 0;
286 324
287 /* Can deadlock when called with interrupts disabled */ 325 /* Can deadlock when called with interrupts disabled */
288 WARN_ON(irqs_disabled()); 326 WARN_ON(irqs_disabled());
@@ -304,15 +342,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
304 return smp_call_function_single(cpu, func, info, wait); 342 return smp_call_function_single(cpu, func, info, wait);
305 } 343 }
306 344
307 if (!wait) { 345 data = kmalloc(sizeof(*data), GFP_ATOMIC);
308 data = kmalloc(sizeof(*data), GFP_ATOMIC); 346 if (data) {
309 if (data) 347 data->csd.flags = CSD_FLAG_ALLOC;
310 data->csd.flags = CSD_FLAG_ALLOC; 348 if (wait)
311 } 349 data->csd.flags |= CSD_FLAG_WAIT;
312 if (!data) { 350 } else {
313 data = &d; 351 data = &d;
314 data->csd.flags = CSD_FLAG_WAIT; 352 data->csd.flags = CSD_FLAG_WAIT;
315 wait = 1; 353 wait = 1;
354 slowpath = 1;
316 } 355 }
317 356
318 spin_lock_init(&data->lock); 357 spin_lock_init(&data->lock);
@@ -329,8 +368,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
329 arch_send_call_function_ipi(mask); 368 arch_send_call_function_ipi(mask);
330 369
331 /* optionally wait for the CPUs to complete */ 370 /* optionally wait for the CPUs to complete */
332 if (wait) 371 if (wait) {
333 csd_flag_wait(&data->csd); 372 csd_flag_wait(&data->csd);
373 if (unlikely(slowpath))
374 smp_call_function_mask_quiesce_stack(allbutself);
375 }
334 376
335 return 0; 377 return 0;
336} 378}
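A short sketch of how a caller sees the behaviour described in the quiesce comment above: on-stack data is safe in the wait == 1 case, at the cost of the slow-path quiesce when the allocation fails. Function names are illustrative; only the smp_call_function_mask() signature shown in this hunk is assumed:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/atomic.h>

static void example_bump(void *info)
{
	atomic_inc((atomic_t *)info);		/* runs on every CPU in the mask */
}

static int example_poke_other_cpus(void)
{
	atomic_t hits = ATOMIC_INIT(0);
	cpumask_t mask;
	int cpu = get_cpu();			/* stay pinned while the IPIs run */

	mask = cpu_online_map;
	cpu_clear(cpu, mask);
	/* wait == 1: the on-stack &hits stays valid until all callees finish */
	smp_call_function_mask(mask, example_bump, &hits, 1);
	put_cpu();

	return atomic_read(&hits);
}
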
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f6b03d56c2bf..c506f266a6b9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -630,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
630 .notifier_call = cpu_callback 630 .notifier_call = cpu_callback
631}; 631};
632 632
633__init int spawn_ksoftirqd(void) 633static __init int spawn_ksoftirqd(void)
634{ 634{
635 void *cpu = (void *)(long)smp_processor_id(); 635 void *cpu = (void *)(long)smp_processor_id();
636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 636 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
@@ -640,6 +640,7 @@ __init int spawn_ksoftirqd(void)
640 register_cpu_notifier(&cpu_nfb); 640 register_cpu_notifier(&cpu_nfb);
641 return 0; 641 return 0;
642} 642}
643early_initcall(spawn_ksoftirqd);
643 644
644#ifdef CONFIG_SMP 645#ifdef CONFIG_SMP
645/* 646/*
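Both this hunk and the kernel/smp.c one above switch hand-called setup functions over to early_initcall(). A minimal sketch of the pattern, with an invented function name:

#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_early_setup(void)
{
	printk(KERN_INFO "example: early init ran\n");
	return 0;	/* nonzero is reported as an initcall failure */
}
early_initcall(example_early_setup);	/* runs before the normal initcall levels */
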
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index a272d78185eb..b75b492fbfcf 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/freezer.h> 14#include <linux/freezer.h>
15#include <linux/kthread.h> 15#include <linux/kthread.h>
16#include <linux/lockdep.h>
16#include <linux/notifier.h> 17#include <linux/notifier.h>
17#include <linux/module.h> 18#include <linux/module.h>
18 19
@@ -25,7 +26,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp);
25static DEFINE_PER_CPU(struct task_struct *, watchdog_task); 26static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
26 27
27static int __read_mostly did_panic; 28static int __read_mostly did_panic;
28unsigned long __read_mostly softlockup_thresh = 60; 29int __read_mostly softlockup_thresh = 60;
30
31/*
32 * Should we panic (and reboot, if panic_timeout= is set) when a
33 * soft-lockup occurs:
34 */
35unsigned int __read_mostly softlockup_panic =
36 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
37
38static int __init softlockup_panic_setup(char *str)
39{
40 softlockup_panic = simple_strtoul(str, NULL, 0);
41
42 return 1;
43}
44__setup("softlockup_panic=", softlockup_panic_setup);
29 45
30static int 46static int
31softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) 47softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
@@ -84,6 +100,14 @@ void softlockup_tick(void)
84 struct pt_regs *regs = get_irq_regs(); 100 struct pt_regs *regs = get_irq_regs();
85 unsigned long now; 101 unsigned long now;
86 102
103 /* Is detection switched off? */
104 if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
105 /* Be sure we don't false trigger if switched back on */
106 if (touch_timestamp)
107 per_cpu(touch_timestamp, this_cpu) = 0;
108 return;
109 }
110
87 if (touch_timestamp == 0) { 111 if (touch_timestamp == 0) {
88 __touch_softlockup_watchdog(); 112 __touch_softlockup_watchdog();
89 return; 113 return;
@@ -92,11 +116,8 @@ void softlockup_tick(void)
92 print_timestamp = per_cpu(print_timestamp, this_cpu); 116 print_timestamp = per_cpu(print_timestamp, this_cpu);
93 117
94 /* report at most once a second */ 118 /* report at most once a second */
95 if ((print_timestamp >= touch_timestamp && 119 if (print_timestamp == touch_timestamp || did_panic)
96 print_timestamp < (touch_timestamp + 1)) ||
97 did_panic || !per_cpu(watchdog_task, this_cpu)) {
98 return; 120 return;
99 }
100 121
101 /* do not print during early bootup: */ 122 /* do not print during early bootup: */
102 if (unlikely(system_state != SYSTEM_RUNNING)) { 123 if (unlikely(system_state != SYSTEM_RUNNING)) {
@@ -106,8 +127,11 @@ void softlockup_tick(void)
106 127
107 now = get_timestamp(this_cpu); 128 now = get_timestamp(this_cpu);
108 129
109 /* Wake up the high-prio watchdog task every second: */ 130 /*
110 if (now > (touch_timestamp + 1)) 131 * Wake up the high-prio watchdog task twice per
132 * threshold timespan.
133 */
134 if (now > touch_timestamp + softlockup_thresh/2)
111 wake_up_process(per_cpu(watchdog_task, this_cpu)); 135 wake_up_process(per_cpu(watchdog_task, this_cpu));
112 136
113 /* Warn about unreasonable delays: */ 137 /* Warn about unreasonable delays: */
@@ -121,11 +145,15 @@ void softlockup_tick(void)
121 this_cpu, now - touch_timestamp, 145 this_cpu, now - touch_timestamp,
122 current->comm, task_pid_nr(current)); 146 current->comm, task_pid_nr(current));
123 print_modules(); 147 print_modules();
148 print_irqtrace_events(current);
124 if (regs) 149 if (regs)
125 show_regs(regs); 150 show_regs(regs);
126 else 151 else
127 dump_stack(); 152 dump_stack();
128 spin_unlock(&print_lock); 153 spin_unlock(&print_lock);
154
155 if (softlockup_panic)
156 panic("softlockup: hung tasks");
129} 157}
130 158
131/* 159/*
@@ -178,6 +206,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now)
178 206
179 t->last_switch_timestamp = now; 207 t->last_switch_timestamp = now;
180 touch_nmi_watchdog(); 208 touch_nmi_watchdog();
209
210 if (softlockup_panic)
211 panic("softlockup: blocked tasks");
181} 212}
182 213
183/* 214/*
@@ -307,14 +338,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
307 .notifier_call = cpu_callback 338 .notifier_call = cpu_callback
308}; 339};
309 340
310__init void spawn_softlockup_task(void) 341static int __initdata nosoftlockup;
342
343static int __init nosoftlockup_setup(char *str)
344{
345 nosoftlockup = 1;
346 return 1;
347}
348__setup("nosoftlockup", nosoftlockup_setup);
349
350static int __init spawn_softlockup_task(void)
311{ 351{
312 void *cpu = (void *)(long)smp_processor_id(); 352 void *cpu = (void *)(long)smp_processor_id();
313 int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 353 int err;
354
355 if (nosoftlockup)
356 return 0;
314 357
315 BUG_ON(err == NOTIFY_BAD); 358 err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
359 if (err == NOTIFY_BAD) {
360 BUG();
361 return 1;
362 }
316 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); 363 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
317 register_cpu_notifier(&cpu_nfb); 364 register_cpu_notifier(&cpu_nfb);
318 365
319 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 366 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
367
368 return 0;
320} 369}
370early_initcall(spawn_softlockup_task);
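The knobs added here become reachable from userspace through the matching sysctl entries further down in kernel/sysctl.c. A small illustrative userspace snippet, assuming the /proc/sys paths follow the procnames in that table:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/softlockup_panic", "w");

	if (f) {
		fprintf(f, "1\n");	/* panic (and reboot if panic_timeout is set) on a soft lockup */
		fclose(f);
	}

	f = fopen("/proc/sys/kernel/softlockup_thresh", "w");
	if (f) {
		fprintf(f, "-1\n");	/* a value <= 0 now disables detection entirely */
		fclose(f);
	}
	return 0;
}
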
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index a1fb54c93cdd..44baeea94ab9 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -292,6 +292,7 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
292} 292}
293 293
294EXPORT_SYMBOL(_spin_lock_nested); 294EXPORT_SYMBOL(_spin_lock_nested);
295
295unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) 296unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
296{ 297{
297 unsigned long flags; 298 unsigned long flags;
@@ -314,6 +315,16 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
314 315
315EXPORT_SYMBOL(_spin_lock_irqsave_nested); 316EXPORT_SYMBOL(_spin_lock_irqsave_nested);
316 317
318void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
319 struct lockdep_map *nest_lock)
320{
321 preempt_disable();
322 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
323 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
324}
325
326EXPORT_SYMBOL(_spin_lock_nest_lock);
327
317#endif 328#endif
318 329
319void __lockfunc _spin_unlock(spinlock_t *lock) 330void __lockfunc _spin_unlock(spinlock_t *lock)
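The new _spin_lock_nest_lock() export covers the case where many spinlocks of one class are taken under a single outer lock, so lockdep validates the outer lock rather than the individual inner ones. A sketch, assuming the usual spin_lock_nest_lock() wrapper macro in <linux/spinlock.h> accompanies this export; all type and function names below are illustrative:

#include <linux/spinlock.h>
#include <linux/mutex.h>

struct example_bucket {
	spinlock_t lock;
};

static DEFINE_MUTEX(example_all_lock);

static void example_freeze_all(struct example_bucket *b, int n)
{
	int i;

	mutex_lock(&example_all_lock);
	for (i = 0; i < n; i++)		/* same lock class; nest them under the mutex */
		spin_lock_nest_lock(&b[i].lock, &example_all_lock);
	/* ... every bucket is now held ... */
	for (i = n - 1; i >= 0; i--)
		spin_unlock(&b[i].lock);
	mutex_unlock(&example_all_lock);
}
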
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ba9b2054ecbd..e446c7c7d6a9 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,4 +1,4 @@
1/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation. 1/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
2 * GPL v2 and any later version. 2 * GPL v2 and any later version.
3 */ 3 */
4#include <linux/cpu.h> 4#include <linux/cpu.h>
@@ -13,203 +13,178 @@
13#include <asm/atomic.h> 13#include <asm/atomic.h>
14#include <asm/uaccess.h> 14#include <asm/uaccess.h>
15 15
16/* Since we effect priority and affinity (both of which are visible 16/* This controls the threads on each CPU. */
17 * to, and settable by outside processes) we do indirection via a
18 * kthread. */
19
20/* Thread to stop each CPU in user context. */
21enum stopmachine_state { 17enum stopmachine_state {
22 STOPMACHINE_WAIT, 18 /* Dummy starting state for thread. */
19 STOPMACHINE_NONE,
20 /* Awaiting everyone to be scheduled. */
23 STOPMACHINE_PREPARE, 21 STOPMACHINE_PREPARE,
22 /* Disable interrupts. */
24 STOPMACHINE_DISABLE_IRQ, 23 STOPMACHINE_DISABLE_IRQ,
24 /* Run the function */
25 STOPMACHINE_RUN,
26 /* Exit */
25 STOPMACHINE_EXIT, 27 STOPMACHINE_EXIT,
26}; 28};
29static enum stopmachine_state state;
27 30
28static enum stopmachine_state stopmachine_state; 31struct stop_machine_data {
29static unsigned int stopmachine_num_threads; 32 int (*fn)(void *);
30static atomic_t stopmachine_thread_ack; 33 void *data;
31 34 int fnret;
32static int stopmachine(void *cpu) 35};
33{
34 int irqs_disabled = 0;
35 int prepared = 0;
36
37 set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
38
39 /* Ack: we are alive */
40 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
41 atomic_inc(&stopmachine_thread_ack);
42
43 /* Simple state machine */
44 while (stopmachine_state != STOPMACHINE_EXIT) {
45 if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
46 && !irqs_disabled) {
47 local_irq_disable();
48 hard_irq_disable();
49 irqs_disabled = 1;
50 /* Ack: irqs disabled. */
51 smp_mb(); /* Must read state first. */
52 atomic_inc(&stopmachine_thread_ack);
53 } else if (stopmachine_state == STOPMACHINE_PREPARE
54 && !prepared) {
55 /* Everyone is in place, hold CPU. */
56 preempt_disable();
57 prepared = 1;
58 smp_mb(); /* Must read state first. */
59 atomic_inc(&stopmachine_thread_ack);
60 }
61 /* Yield in first stage: migration threads need to
62 * help our sisters onto their CPUs. */
63 if (!prepared && !irqs_disabled)
64 yield();
65 cpu_relax();
66 }
67
68 /* Ack: we are exiting. */
69 smp_mb(); /* Must read state first. */
70 atomic_inc(&stopmachine_thread_ack);
71
72 if (irqs_disabled)
73 local_irq_enable();
74 if (prepared)
75 preempt_enable();
76 36
77 return 0; 37/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
78} 38static unsigned int num_threads;
39static atomic_t thread_ack;
40static struct completion finished;
41static DEFINE_MUTEX(lock);
79 42
80/* Change the thread state */ 43static void set_state(enum stopmachine_state newstate)
81static void stopmachine_set_state(enum stopmachine_state state)
82{ 44{
83 atomic_set(&stopmachine_thread_ack, 0); 45 /* Reset ack counter. */
46 atomic_set(&thread_ack, num_threads);
84 smp_wmb(); 47 smp_wmb();
85 stopmachine_state = state; 48 state = newstate;
86 while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
87 cpu_relax();
88} 49}
89 50
90static int stop_machine(void) 51/* Last one to ack a state moves to the next state. */
52static void ack_state(void)
91{ 53{
92 int i, ret = 0; 54 if (atomic_dec_and_test(&thread_ack)) {
93 55 /* If we're the last one to ack the EXIT, we're finished. */
94 atomic_set(&stopmachine_thread_ack, 0); 56 if (state == STOPMACHINE_EXIT)
95 stopmachine_num_threads = 0; 57 complete(&finished);
96 stopmachine_state = STOPMACHINE_WAIT; 58 else
97 59 set_state(state + 1);
98 for_each_online_cpu(i) {
99 if (i == raw_smp_processor_id())
100 continue;
101 ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
102 if (ret < 0)
103 break;
104 stopmachine_num_threads++;
105 }
106
107 /* Wait for them all to come to life. */
108 while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
109 yield();
110 cpu_relax();
111 } 60 }
61}
112 62
113 /* If some failed, kill them all. */ 63/* This is the actual thread which stops the CPU. It exits by itself rather
114 if (ret < 0) { 64 * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
115 stopmachine_set_state(STOPMACHINE_EXIT); 65static int stop_cpu(struct stop_machine_data *smdata)
116 return ret; 66{
117 } 67 enum stopmachine_state curstate = STOPMACHINE_NONE;
68 int uninitialized_var(ret);
118 69
119 /* Now they are all started, make them hold the CPUs, ready. */ 70 /* Simple state machine */
120 preempt_disable(); 71 do {
121 stopmachine_set_state(STOPMACHINE_PREPARE); 72 /* Chill out and ensure we re-read stopmachine_state. */
73 cpu_relax();
74 if (state != curstate) {
75 curstate = state;
76 switch (curstate) {
77 case STOPMACHINE_DISABLE_IRQ:
78 local_irq_disable();
79 hard_irq_disable();
80 break;
81 case STOPMACHINE_RUN:
82 /* |= allows error detection if functions on
83 * multiple CPUs. */
84 smdata->fnret |= smdata->fn(smdata->data);
85 break;
86 default:
87 break;
88 }
89 ack_state();
90 }
91 } while (curstate != STOPMACHINE_EXIT);
122 92
123 /* Make them disable irqs. */ 93 local_irq_enable();
124 local_irq_disable(); 94 do_exit(0);
125 hard_irq_disable(); 95}
126 stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
127 96
97/* Callback for CPUs which aren't supposed to do anything. */
98static int chill(void *unused)
99{
128 return 0; 100 return 0;
129} 101}
130 102
131static void restart_machine(void) 103int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
132{ 104{
133 stopmachine_set_state(STOPMACHINE_EXIT); 105 int i, err;
134 local_irq_enable(); 106 struct stop_machine_data active, idle;
135 preempt_enable_no_resched(); 107 struct task_struct **threads;
136} 108
109 active.fn = fn;
110 active.data = data;
111 active.fnret = 0;
112 idle.fn = chill;
113 idle.data = NULL;
114
115 /* This could be too big for stack on large machines. */
116 threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
117 if (!threads)
118 return -ENOMEM;
119
120 /* Set up initial state. */
121 mutex_lock(&lock);
122 init_completion(&finished);
123 num_threads = num_online_cpus();
124 set_state(STOPMACHINE_PREPARE);
137 125
138struct stop_machine_data { 126 for_each_online_cpu(i) {
139 int (*fn)(void *); 127 struct stop_machine_data *smdata = &idle;
140 void *data; 128 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
141 struct completion done;
142};
143 129
144static int do_stop(void *_smdata) 130 if (!cpus) {
145{ 131 if (i == first_cpu(cpu_online_map))
146 struct stop_machine_data *smdata = _smdata; 132 smdata = &active;
147 int ret; 133 } else {
134 if (cpu_isset(i, *cpus))
135 smdata = &active;
136 }
148 137
149 ret = stop_machine(); 138 threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
150 if (ret == 0) { 139 i);
151 ret = smdata->fn(smdata->data); 140 if (IS_ERR(threads[i])) {
152 restart_machine(); 141 err = PTR_ERR(threads[i]);
153 } 142 threads[i] = NULL;
143 goto kill_threads;
144 }
154 145
155 /* We're done: you can kthread_stop us now */ 146 /* Place it onto correct cpu. */
156 complete(&smdata->done); 147 kthread_bind(threads[i], i);
157 148
158 /* Wait for kthread_stop */ 149 /* Make it highest prio. */
159 set_current_state(TASK_INTERRUPTIBLE); 150 if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
160 while (!kthread_should_stop()) { 151 BUG();
161 schedule();
162 set_current_state(TASK_INTERRUPTIBLE);
163 } 152 }
164 __set_current_state(TASK_RUNNING);
165 return ret;
166}
167 153
168struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, 154 /* We've created all the threads. Wake them all: hold this CPU so one
169 unsigned int cpu) 155 * doesn't hit this CPU until we're ready. */
170{ 156 get_cpu();
171 static DEFINE_MUTEX(stopmachine_mutex); 157 for_each_online_cpu(i)
172 struct stop_machine_data smdata; 158 wake_up_process(threads[i]);
173 struct task_struct *p;
174 159
175 smdata.fn = fn; 160 /* This will release the thread on our CPU. */
176 smdata.data = data; 161 put_cpu();
177 init_completion(&smdata.done); 162 wait_for_completion(&finished);
163 mutex_unlock(&lock);
178 164
179 mutex_lock(&stopmachine_mutex); 165 kfree(threads);
180 166
181 /* If they don't care which CPU fn runs on, bind to any online one. */ 167 return active.fnret;
182 if (cpu == NR_CPUS)
183 cpu = raw_smp_processor_id();
184 168
185 p = kthread_create(do_stop, &smdata, "kstopmachine"); 169kill_threads:
186 if (!IS_ERR(p)) { 170 for_each_online_cpu(i)
187 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 171 if (threads[i])
172 kthread_stop(threads[i]);
173 mutex_unlock(&lock);
188 174
189 /* One high-prio thread per cpu. We'll do this one. */ 175 kfree(threads);
190 sched_setscheduler_nocheck(p, SCHED_FIFO, &param); 176 return err;
191 kthread_bind(p, cpu);
192 wake_up_process(p);
193 wait_for_completion(&smdata.done);
194 }
195 mutex_unlock(&stopmachine_mutex);
196 return p;
197} 177}
198 178
199int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) 179int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
200{ 180{
201 struct task_struct *p;
202 int ret; 181 int ret;
203 182
204 /* No CPUs can come up or down during this. */ 183 /* No CPUs can come up or down during this. */
205 get_online_cpus(); 184 get_online_cpus();
206 p = __stop_machine_run(fn, data, cpu); 185 ret = __stop_machine(fn, data, cpus);
207 if (!IS_ERR(p))
208 ret = kthread_stop(p);
209 else
210 ret = PTR_ERR(p);
211 put_online_cpus(); 186 put_online_cpus();
212 187
213 return ret; 188 return ret;
214} 189}
215EXPORT_SYMBOL_GPL(stop_machine_run); 190EXPORT_SYMBOL_GPL(stop_machine);
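For callers, the visible change is the third argument: a cpumask selecting where fn runs (NULL means any one CPU), replacing the old single-cpu stop_machine_run(). A hedged sketch of a converted call site, with invented function names and only the stop_machine() signature from this hunk assumed:

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

static int example_patch_text(void *arg)
{
	/* runs while every other online CPU spins with IRQs disabled */
	return 0;
}

static int example_do_patch(void)
{
	cpumask_t mask;

	cpus_clear(mask);
	cpu_set(0, mask);	/* run example_patch_text() on CPU 0 only */
	return stop_machine(example_patch_text, NULL, &mask);
	/* stop_machine(example_patch_text, NULL, NULL) would pick any one CPU */
}
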
diff --git a/kernel/sys.c b/kernel/sys.c
index 14e97282eb6c..c01858090a98 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -301,26 +301,6 @@ void kernel_restart(char *cmd)
301} 301}
302EXPORT_SYMBOL_GPL(kernel_restart); 302EXPORT_SYMBOL_GPL(kernel_restart);
303 303
304/**
305 * kernel_kexec - reboot the system
306 *
307 * Move into place and start executing a preloaded standalone
308 * executable. If nothing was preloaded return an error.
309 */
310static void kernel_kexec(void)
311{
312#ifdef CONFIG_KEXEC
313 struct kimage *image;
314 image = xchg(&kexec_image, NULL);
315 if (!image)
316 return;
317 kernel_restart_prepare(NULL);
318 printk(KERN_EMERG "Starting new kernel\n");
319 machine_shutdown();
320 machine_kexec(image);
321#endif
322}
323
324static void kernel_shutdown_prepare(enum system_states state) 304static void kernel_shutdown_prepare(enum system_states state)
325{ 305{
326 blocking_notifier_call_chain(&reboot_notifier_list, 306 blocking_notifier_call_chain(&reboot_notifier_list,
@@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
425 kernel_restart(buffer); 405 kernel_restart(buffer);
426 break; 406 break;
427 407
408#ifdef CONFIG_KEXEC
428 case LINUX_REBOOT_CMD_KEXEC: 409 case LINUX_REBOOT_CMD_KEXEC:
429 kernel_kexec(); 410 {
430 unlock_kernel(); 411 int ret;
431 return -EINVAL; 412 ret = kernel_kexec();
413 unlock_kernel();
414 return ret;
415 }
416#endif
432 417
433#ifdef CONFIG_HIBERNATION 418#ifdef CONFIG_HIBERNATION
434 case LINUX_REBOOT_CMD_SW_SUSPEND: 419 case LINUX_REBOOT_CMD_SW_SUSPEND:
@@ -1343,8 +1328,6 @@ EXPORT_SYMBOL(in_egroup_p);
1343 1328
1344DECLARE_RWSEM(uts_sem); 1329DECLARE_RWSEM(uts_sem);
1345 1330
1346EXPORT_SYMBOL(uts_sem);
1347
1348asmlinkage long sys_newuname(struct new_utsname __user * name) 1331asmlinkage long sys_newuname(struct new_utsname __user * name)
1349{ 1332{
1350 int errno = 0; 1333 int errno = 0;
@@ -1795,7 +1778,7 @@ int orderly_poweroff(bool force)
1795 goto out; 1778 goto out;
1796 } 1779 }
1797 1780
1798 info = call_usermodehelper_setup(argv[0], argv, envp); 1781 info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC);
1799 if (info == NULL) { 1782 if (info == NULL) {
1800 argv_free(argv); 1783 argv_free(argv);
1801 goto out; 1784 goto out;
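From userspace the kexec change is visible through reboot(2): instead of always falling through to -EINVAL, the call can now return the result of kernel_kexec(). A small illustrative program, using the raw syscall to avoid assuming a particular libc wrapper:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	if (syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		    LINUX_REBOOT_CMD_KEXEC, NULL) < 0)
		perror("reboot(LINUX_REBOOT_CMD_KEXEC)");	/* e.g. EINVAL when no image is loaded */
	return 0;
}
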
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 5b9b467de070..08d6e1bb99ac 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -31,6 +31,7 @@ cond_syscall(sys_socketpair);
31cond_syscall(sys_bind); 31cond_syscall(sys_bind);
32cond_syscall(sys_listen); 32cond_syscall(sys_listen);
33cond_syscall(sys_accept); 33cond_syscall(sys_accept);
34cond_syscall(sys_paccept);
34cond_syscall(sys_connect); 35cond_syscall(sys_connect);
35cond_syscall(sys_getsockname); 36cond_syscall(sys_getsockname);
36cond_syscall(sys_getpeername); 37cond_syscall(sys_getpeername);
@@ -56,9 +57,11 @@ cond_syscall(compat_sys_set_robust_list);
56cond_syscall(sys_get_robust_list); 57cond_syscall(sys_get_robust_list);
57cond_syscall(compat_sys_get_robust_list); 58cond_syscall(compat_sys_get_robust_list);
58cond_syscall(sys_epoll_create); 59cond_syscall(sys_epoll_create);
60cond_syscall(sys_epoll_create1);
59cond_syscall(sys_epoll_ctl); 61cond_syscall(sys_epoll_ctl);
60cond_syscall(sys_epoll_wait); 62cond_syscall(sys_epoll_wait);
61cond_syscall(sys_epoll_pwait); 63cond_syscall(sys_epoll_pwait);
64cond_syscall(compat_sys_epoll_pwait);
62cond_syscall(sys_semget); 65cond_syscall(sys_semget);
63cond_syscall(sys_semop); 66cond_syscall(sys_semop);
64cond_syscall(sys_semtimedop); 67cond_syscall(sys_semtimedop);
@@ -94,6 +97,7 @@ cond_syscall(sys_keyctl);
94cond_syscall(compat_sys_keyctl); 97cond_syscall(compat_sys_keyctl);
95cond_syscall(compat_sys_socketcall); 98cond_syscall(compat_sys_socketcall);
96cond_syscall(sys_inotify_init); 99cond_syscall(sys_inotify_init);
100cond_syscall(sys_inotify_init1);
97cond_syscall(sys_inotify_add_watch); 101cond_syscall(sys_inotify_add_watch);
98cond_syscall(sys_inotify_rm_watch); 102cond_syscall(sys_inotify_rm_watch);
99cond_syscall(sys_migrate_pages); 103cond_syscall(sys_migrate_pages);
@@ -154,10 +158,13 @@ cond_syscall(sys_ioprio_get);
154 158
155/* New file descriptors */ 159/* New file descriptors */
156cond_syscall(sys_signalfd); 160cond_syscall(sys_signalfd);
161cond_syscall(sys_signalfd4);
157cond_syscall(compat_sys_signalfd); 162cond_syscall(compat_sys_signalfd);
163cond_syscall(compat_sys_signalfd4);
158cond_syscall(sys_timerfd_create); 164cond_syscall(sys_timerfd_create);
159cond_syscall(sys_timerfd_settime); 165cond_syscall(sys_timerfd_settime);
160cond_syscall(sys_timerfd_gettime); 166cond_syscall(sys_timerfd_gettime);
161cond_syscall(compat_sys_timerfd_settime); 167cond_syscall(compat_sys_timerfd_settime);
162cond_syscall(compat_sys_timerfd_gettime); 168cond_syscall(compat_sys_timerfd_gettime);
163cond_syscall(sys_eventfd); 169cond_syscall(sys_eventfd);
170cond_syscall(sys_eventfd2);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6b16e16428d8..fe4713347275 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -43,6 +43,7 @@
43#include <linux/limits.h> 43#include <linux/limits.h>
44#include <linux/dcache.h> 44#include <linux/dcache.h>
45#include <linux/syscalls.h> 45#include <linux/syscalls.h>
46#include <linux/vmstat.h>
46#include <linux/nfs_fs.h> 47#include <linux/nfs_fs.h>
47#include <linux/acpi.h> 48#include <linux/acpi.h>
48#include <linux/reboot.h> 49#include <linux/reboot.h>
@@ -80,7 +81,6 @@ extern int sysctl_drop_caches;
80extern int percpu_pagelist_fraction; 81extern int percpu_pagelist_fraction;
81extern int compat_log; 82extern int compat_log;
82extern int maps_protect; 83extern int maps_protect;
83extern int sysctl_stat_interval;
84extern int latencytop_enabled; 84extern int latencytop_enabled;
85extern int sysctl_nr_open_min, sysctl_nr_open_max; 85extern int sysctl_nr_open_min, sysctl_nr_open_max;
86#ifdef CONFIG_RCU_TORTURE_TEST 86#ifdef CONFIG_RCU_TORTURE_TEST
@@ -88,12 +88,13 @@ extern int rcutorture_runnable;
88#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ 88#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
89 89
90/* Constants used for minimum and maximum */ 90/* Constants used for minimum and maximum */
91#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) 91#if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP)
92static int one = 1; 92static int one = 1;
93#endif 93#endif
94 94
95#ifdef CONFIG_DETECT_SOFTLOCKUP 95#ifdef CONFIG_DETECT_SOFTLOCKUP
96static int sixty = 60; 96static int sixty = 60;
97static int neg_one = -1;
97#endif 98#endif
98 99
99#ifdef CONFIG_MMU 100#ifdef CONFIG_MMU
@@ -110,7 +111,7 @@ static int min_percpu_pagelist_fract = 8;
110 111
111static int ngroups_max = NGROUPS_MAX; 112static int ngroups_max = NGROUPS_MAX;
112 113
113#ifdef CONFIG_KMOD 114#ifdef CONFIG_MODULES
114extern char modprobe_path[]; 115extern char modprobe_path[];
115#endif 116#endif
116#ifdef CONFIG_CHR_DEV_SG 117#ifdef CONFIG_CHR_DEV_SG
@@ -159,12 +160,13 @@ static struct ctl_table root_table[];
159static struct ctl_table_root sysctl_table_root; 160static struct ctl_table_root sysctl_table_root;
160static struct ctl_table_header root_table_header = { 161static struct ctl_table_header root_table_header = {
161 .ctl_table = root_table, 162 .ctl_table = root_table,
162 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.header_list), 163 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
163 .root = &sysctl_table_root, 164 .root = &sysctl_table_root,
165 .set = &sysctl_table_root.default_set,
164}; 166};
165static struct ctl_table_root sysctl_table_root = { 167static struct ctl_table_root sysctl_table_root = {
166 .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), 168 .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list),
167 .header_list = LIST_HEAD_INIT(root_table_header.ctl_entry), 169 .default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry),
168}; 170};
169 171
170static struct ctl_table kern_table[]; 172static struct ctl_table kern_table[];
@@ -475,7 +477,7 @@ static struct ctl_table kern_table[] = {
475 .proc_handler = &ftrace_enable_sysctl, 477 .proc_handler = &ftrace_enable_sysctl,
476 }, 478 },
477#endif 479#endif
478#ifdef CONFIG_KMOD 480#ifdef CONFIG_MODULES
479 { 481 {
480 .ctl_name = KERN_MODPROBE, 482 .ctl_name = KERN_MODPROBE,
481 .procname = "modprobe", 483 .procname = "modprobe",
@@ -623,7 +625,7 @@ static struct ctl_table kern_table[] = {
623 { 625 {
624 .ctl_name = KERN_PRINTK_RATELIMIT, 626 .ctl_name = KERN_PRINTK_RATELIMIT,
625 .procname = "printk_ratelimit", 627 .procname = "printk_ratelimit",
626 .data = &printk_ratelimit_jiffies, 628 .data = &printk_ratelimit_state.interval,
627 .maxlen = sizeof(int), 629 .maxlen = sizeof(int),
628 .mode = 0644, 630 .mode = 0644,
629 .proc_handler = &proc_dointvec_jiffies, 631 .proc_handler = &proc_dointvec_jiffies,
@@ -632,7 +634,7 @@ static struct ctl_table kern_table[] = {
632 { 634 {
633 .ctl_name = KERN_PRINTK_RATELIMIT_BURST, 635 .ctl_name = KERN_PRINTK_RATELIMIT_BURST,
634 .procname = "printk_ratelimit_burst", 636 .procname = "printk_ratelimit_burst",
635 .data = &printk_ratelimit_burst, 637 .data = &printk_ratelimit_state.burst,
636 .maxlen = sizeof(int), 638 .maxlen = sizeof(int),
637 .mode = 0644, 639 .mode = 0644,
638 .proc_handler = &proc_dointvec, 640 .proc_handler = &proc_dointvec,
@@ -739,13 +741,24 @@ static struct ctl_table kern_table[] = {
739#ifdef CONFIG_DETECT_SOFTLOCKUP 741#ifdef CONFIG_DETECT_SOFTLOCKUP
740 { 742 {
741 .ctl_name = CTL_UNNUMBERED, 743 .ctl_name = CTL_UNNUMBERED,
744 .procname = "softlockup_panic",
745 .data = &softlockup_panic,
746 .maxlen = sizeof(int),
747 .mode = 0644,
748 .proc_handler = &proc_dointvec_minmax,
749 .strategy = &sysctl_intvec,
750 .extra1 = &zero,
751 .extra2 = &one,
752 },
753 {
754 .ctl_name = CTL_UNNUMBERED,
742 .procname = "softlockup_thresh", 755 .procname = "softlockup_thresh",
743 .data = &softlockup_thresh, 756 .data = &softlockup_thresh,
744 .maxlen = sizeof(unsigned long), 757 .maxlen = sizeof(int),
745 .mode = 0644, 758 .mode = 0644,
746 .proc_handler = &proc_doulongvec_minmax, 759 .proc_handler = &proc_dointvec_minmax,
747 .strategy = &sysctl_intvec, 760 .strategy = &sysctl_intvec,
748 .extra1 = &one, 761 .extra1 = &neg_one,
749 .extra2 = &sixty, 762 .extra2 = &sixty,
750 }, 763 },
751 { 764 {
@@ -947,7 +960,7 @@ static struct ctl_table vm_table[] = {
947#ifdef CONFIG_HUGETLB_PAGE 960#ifdef CONFIG_HUGETLB_PAGE
948 { 961 {
949 .procname = "nr_hugepages", 962 .procname = "nr_hugepages",
950 .data = &max_huge_pages, 963 .data = NULL,
951 .maxlen = sizeof(unsigned long), 964 .maxlen = sizeof(unsigned long),
952 .mode = 0644, 965 .mode = 0644,
953 .proc_handler = &hugetlb_sysctl_handler, 966 .proc_handler = &hugetlb_sysctl_handler,
@@ -973,10 +986,12 @@ static struct ctl_table vm_table[] = {
973 { 986 {
974 .ctl_name = CTL_UNNUMBERED, 987 .ctl_name = CTL_UNNUMBERED,
975 .procname = "nr_overcommit_hugepages", 988 .procname = "nr_overcommit_hugepages",
976 .data = &sysctl_overcommit_huge_pages, 989 .data = NULL,
977 .maxlen = sizeof(sysctl_overcommit_huge_pages), 990 .maxlen = sizeof(unsigned long),
978 .mode = 0644, 991 .mode = 0644,
979 .proc_handler = &hugetlb_overcommit_handler, 992 .proc_handler = &hugetlb_overcommit_handler,
993 .extra1 = (void *)&hugetlb_zero,
994 .extra2 = (void *)&hugetlb_infinity,
980 }, 995 },
981#endif 996#endif
982 { 997 {
@@ -1372,6 +1387,9 @@ static void start_unregistering(struct ctl_table_header *p)
1372 spin_unlock(&sysctl_lock); 1387 spin_unlock(&sysctl_lock);
1373 wait_for_completion(&wait); 1388 wait_for_completion(&wait);
1374 spin_lock(&sysctl_lock); 1389 spin_lock(&sysctl_lock);
1390 } else {
1391 /* anything non-NULL; we'll never dereference it */
1392 p->unregistering = ERR_PTR(-EINVAL);
1375 } 1393 }
1376 /* 1394 /*
1377 * do not remove from the list until nobody holds it; walking the 1395 * do not remove from the list until nobody holds it; walking the
@@ -1380,6 +1398,32 @@ static void start_unregistering(struct ctl_table_header *p)
1380 list_del_init(&p->ctl_entry); 1398 list_del_init(&p->ctl_entry);
1381} 1399}
1382 1400
1401void sysctl_head_get(struct ctl_table_header *head)
1402{
1403 spin_lock(&sysctl_lock);
1404 head->count++;
1405 spin_unlock(&sysctl_lock);
1406}
1407
1408void sysctl_head_put(struct ctl_table_header *head)
1409{
1410 spin_lock(&sysctl_lock);
1411 if (!--head->count)
1412 kfree(head);
1413 spin_unlock(&sysctl_lock);
1414}
1415
1416struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head)
1417{
1418 if (!head)
1419 BUG();
1420 spin_lock(&sysctl_lock);
1421 if (!use_table(head))
1422 head = ERR_PTR(-ENOENT);
1423 spin_unlock(&sysctl_lock);
1424 return head;
1425}
1426
1383void sysctl_head_finish(struct ctl_table_header *head) 1427void sysctl_head_finish(struct ctl_table_header *head)
1384{ 1428{
1385 if (!head) 1429 if (!head)
@@ -1389,14 +1433,20 @@ void sysctl_head_finish(struct ctl_table_header *head)
1389 spin_unlock(&sysctl_lock); 1433 spin_unlock(&sysctl_lock);
1390} 1434}
1391 1435
1436static struct ctl_table_set *
1437lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces)
1438{
1439 struct ctl_table_set *set = &root->default_set;
1440 if (root->lookup)
1441 set = root->lookup(root, namespaces);
1442 return set;
1443}
1444
1392static struct list_head * 1445static struct list_head *
1393lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) 1446lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces)
1394{ 1447{
1395 struct list_head *header_list; 1448 struct ctl_table_set *set = lookup_header_set(root, namespaces);
1396 header_list = &root->header_list; 1449 return &set->list;
1397 if (root->lookup)
1398 header_list = root->lookup(root, namespaces);
1399 return header_list;
1400} 1450}
1401 1451
1402struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, 1452struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
@@ -1466,9 +1516,9 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
1466 int op = 0, rc; 1516 int op = 0, rc;
1467 1517
1468 if (oldval) 1518 if (oldval)
1469 op |= 004; 1519 op |= MAY_READ;
1470 if (newval) 1520 if (newval)
1471 op |= 002; 1521 op |= MAY_WRITE;
1472 if (sysctl_perm(root, table, op)) 1522 if (sysctl_perm(root, table, op))
1473 return -EPERM; 1523 return -EPERM;
1474 1524
@@ -1510,7 +1560,7 @@ repeat:
1510 if (n == table->ctl_name) { 1560 if (n == table->ctl_name) {
1511 int error; 1561 int error;
1512 if (table->child) { 1562 if (table->child) {
1513 if (sysctl_perm(root, table, 001)) 1563 if (sysctl_perm(root, table, MAY_EXEC))
1514 return -EPERM; 1564 return -EPERM;
1515 name++; 1565 name++;
1516 nlen--; 1566 nlen--;
@@ -1585,7 +1635,7 @@ static int test_perm(int mode, int op)
1585 mode >>= 6; 1635 mode >>= 6;
1586 else if (in_egroup_p(0)) 1636 else if (in_egroup_p(0))
1587 mode >>= 3; 1637 mode >>= 3;
1588 if ((mode & op & 0007) == op) 1638 if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0)
1589 return 0; 1639 return 0;
1590 return -EACCES; 1640 return -EACCES;
1591} 1641}
@@ -1595,7 +1645,7 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
1595 int error; 1645 int error;
1596 int mode; 1646 int mode;
1597 1647
1598 error = security_sysctl(table, op); 1648 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
1599 if (error) 1649 if (error)
1600 return error; 1650 return error;
1601 1651
@@ -1630,6 +1680,54 @@ static __init int sysctl_init(void)
1630 1680
1631core_initcall(sysctl_init); 1681core_initcall(sysctl_init);
1632 1682
1683static struct ctl_table *is_branch_in(struct ctl_table *branch,
1684 struct ctl_table *table)
1685{
1686 struct ctl_table *p;
1687 const char *s = branch->procname;
1688
1689 /* branch should have named subdirectory as its first element */
1690 if (!s || !branch->child)
1691 return NULL;
1692
1693 /* ... and nothing else */
1694 if (branch[1].procname || branch[1].ctl_name)
1695 return NULL;
1696
1697 /* table should contain subdirectory with the same name */
1698 for (p = table; p->procname || p->ctl_name; p++) {
1699 if (!p->child)
1700 continue;
1701 if (p->procname && strcmp(p->procname, s) == 0)
1702 return p;
1703 }
1704 return NULL;
1705}
1706
1707/* see if attaching q to p would be an improvement */
1708static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
1709{
1710 struct ctl_table *to = p->ctl_table, *by = q->ctl_table;
1711 struct ctl_table *next;
1712 int is_better = 0;
1713 int not_in_parent = !p->attached_by;
1714
1715 while ((next = is_branch_in(by, to)) != NULL) {
1716 if (by == q->attached_by)
1717 is_better = 1;
1718 if (to == p->attached_by)
1719 not_in_parent = 1;
1720 by = by->child;
1721 to = next->child;
1722 }
1723
1724 if (is_better && not_in_parent) {
1725 q->attached_by = by;
1726 q->attached_to = to;
1727 q->parent = p;
1728 }
1729}
1730
1633/** 1731/**
1634 * __register_sysctl_paths - register a sysctl hierarchy 1732 * __register_sysctl_paths - register a sysctl hierarchy
1635 * @root: List of sysctl headers to register on 1733 * @root: List of sysctl headers to register on
@@ -1706,10 +1804,10 @@ struct ctl_table_header *__register_sysctl_paths(
1706 struct nsproxy *namespaces, 1804 struct nsproxy *namespaces,
1707 const struct ctl_path *path, struct ctl_table *table) 1805 const struct ctl_path *path, struct ctl_table *table)
1708{ 1806{
1709 struct list_head *header_list;
1710 struct ctl_table_header *header; 1807 struct ctl_table_header *header;
1711 struct ctl_table *new, **prevp; 1808 struct ctl_table *new, **prevp;
1712 unsigned int n, npath; 1809 unsigned int n, npath;
1810 struct ctl_table_set *set;
1713 1811
1714 /* Count the path components */ 1812 /* Count the path components */
1715 for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath) 1813 for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath)
@@ -1751,6 +1849,7 @@ struct ctl_table_header *__register_sysctl_paths(
1751 header->unregistering = NULL; 1849 header->unregistering = NULL;
1752 header->root = root; 1850 header->root = root;
1753 sysctl_set_parent(NULL, header->ctl_table); 1851 sysctl_set_parent(NULL, header->ctl_table);
1852 header->count = 1;
1754#ifdef CONFIG_SYSCTL_SYSCALL_CHECK 1853#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
1755 if (sysctl_check_table(namespaces, header->ctl_table)) { 1854 if (sysctl_check_table(namespaces, header->ctl_table)) {
1756 kfree(header); 1855 kfree(header);
@@ -1758,8 +1857,20 @@ struct ctl_table_header *__register_sysctl_paths(
1758 } 1857 }
1759#endif 1858#endif
1760 spin_lock(&sysctl_lock); 1859 spin_lock(&sysctl_lock);
1761 header_list = lookup_header_list(root, namespaces); 1860 header->set = lookup_header_set(root, namespaces);
1762 list_add_tail(&header->ctl_entry, header_list); 1861 header->attached_by = header->ctl_table;
1862 header->attached_to = root_table;
1863 header->parent = &root_table_header;
1864 for (set = header->set; set; set = set->parent) {
1865 struct ctl_table_header *p;
1866 list_for_each_entry(p, &set->list, ctl_entry) {
1867 if (p->unregistering)
1868 continue;
1869 try_attach(p, header);
1870 }
1871 }
1872 header->parent->count++;
1873 list_add_tail(&header->ctl_entry, &header->set->list);
1763 spin_unlock(&sysctl_lock); 1874 spin_unlock(&sysctl_lock);
1764 1875
1765 return header; 1876 return header;
@@ -1814,8 +1925,37 @@ void unregister_sysctl_table(struct ctl_table_header * header)
1814 1925
1815 spin_lock(&sysctl_lock); 1926 spin_lock(&sysctl_lock);
1816 start_unregistering(header); 1927 start_unregistering(header);
1928 if (!--header->parent->count) {
1929 WARN_ON(1);
1930 kfree(header->parent);
1931 }
1932 if (!--header->count)
1933 kfree(header);
1934 spin_unlock(&sysctl_lock);
1935}
1936
1937int sysctl_is_seen(struct ctl_table_header *p)
1938{
1939 struct ctl_table_set *set = p->set;
1940 int res;
1941 spin_lock(&sysctl_lock);
1942 if (p->unregistering)
1943 res = 0;
1944 else if (!set->is_seen)
1945 res = 1;
1946 else
1947 res = set->is_seen(set);
1817 spin_unlock(&sysctl_lock); 1948 spin_unlock(&sysctl_lock);
1818 kfree(header); 1949 return res;
1950}
1951
1952void setup_sysctl_set(struct ctl_table_set *p,
1953 struct ctl_table_set *parent,
1954 int (*is_seen)(struct ctl_table_set *))
1955{
1956 INIT_LIST_HEAD(&p->list);
1957 p->parent = parent ? parent : &sysctl_table_root.default_set;
1958 p->is_seen = is_seen;
1819} 1959}
1820 1960
1821#else /* !CONFIG_SYSCTL */ 1961#else /* !CONFIG_SYSCTL */
@@ -1834,6 +1974,16 @@ void unregister_sysctl_table(struct ctl_table_header * table)
1834{ 1974{
1835} 1975}
1836 1976
1977void setup_sysctl_set(struct ctl_table_set *p,
1978 struct ctl_table_set *parent,
1979 int (*is_seen)(struct ctl_table_set *))
1980{
1981}
1982
1983void sysctl_head_put(struct ctl_table_header *head)
1984{
1985}
1986
1837#endif /* CONFIG_SYSCTL */ 1987#endif /* CONFIG_SYSCTL */
1838 1988
1839/* 1989/*
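The sysctl.c hunks above replace the bare octal constants 004/002/001 with MAY_READ/MAY_WRITE/MAY_EXEC and rework the permission test so access is granted only when no requested bit is missing from the rwx triplet selected for the caller. A minimal standalone sketch of that bit test follows; the MAY_* values mirror the kernel's <linux/fs.h> definitions, while test_perm_sketch() and main() are illustrative only, not code from the patch:

#include <stdio.h>

#define MAY_EXEC  0x01
#define MAY_WRITE 0x02
#define MAY_READ  0x04

/* 'mode' is the rwx triplet already shifted for the caller's class */
static int test_perm_sketch(int mode, int op)
{
        /* grant access only if no requested bit is missing from 'mode' */
        if ((op & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
                return 0;
        return -1;      /* stands in for -EACCES */
}

int main(void)
{
        /* a 0644 sysctl entry seen by "other" contributes mode 04 */
        printf("read : %d\n", test_perm_sketch(04, MAY_READ));             /* 0  */
        printf("write: %d\n", test_perm_sketch(04, MAY_READ | MAY_WRITE)); /* -1 */
        return 0;
}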
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c09350d564f2..c35da23ab8fb 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -1532,6 +1532,8 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1532 sysctl_check_leaf(namespaces, table, &fail); 1532 sysctl_check_leaf(namespaces, table, &fail);
1533 } 1533 }
1534 sysctl_check_bin_path(table, &fail); 1534 sysctl_check_bin_path(table, &fail);
1535 if (table->mode > 0777)
1536 set_fail(&fail, table, "bogus .mode");
1535 if (fail) { 1537 if (fail) {
1536 set_fail(&fail, table, NULL); 1538 set_fail(&fail, table, NULL);
1537 error = -EINVAL; 1539 error = -EINVAL;
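The added sysctl_check_table() test rejects any ctl_table whose .mode carries bits outside the rwx triplets. A hedged sketch of what still passes and what is now flagged; example_value and the example_* entries are made up, only the .mode values matter:

#include <linux/sysctl.h>

static int example_value;

static struct ctl_table example_table[] = {
        {
                .procname     = "example_ok",
                .data         = &example_value,
                .maxlen       = sizeof(int),
                .mode         = 0644,   /* plain rw-r--r--: accepted */
                .proc_handler = proc_dointvec,
        },
        {
                .procname     = "example_bogus",
                .data         = &example_value,
                .maxlen       = sizeof(int),
                .mode         = 04644,  /* stray setuid bit, > 0777: "bogus .mode" */
                .proc_handler = proc_dointvec,
        },
        { }
};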
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 4a23517169a6..bd6be76303cf 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -35,7 +35,7 @@
35 */ 35 */
36#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS) 36#define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS)
37 37
38static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; 38static DEFINE_PER_CPU(__u32, taskstats_seqnum);
39static int family_registered; 39static int family_registered;
40struct kmem_cache *taskstats_cache; 40struct kmem_cache *taskstats_cache;
41 41
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
301 return -EINVAL; 301 return -EINVAL;
302 302
303 if (isadd == REGISTER) { 303 if (isadd == REGISTER) {
304 for_each_cpu_mask(cpu, mask) { 304 for_each_cpu_mask_nr(cpu, mask) {
305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, 305 s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
306 cpu_to_node(cpu)); 306 cpu_to_node(cpu));
307 if (!s) 307 if (!s)
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
320 320
321 /* Deregister or cleanup */ 321 /* Deregister or cleanup */
322cleanup: 322cleanup:
323 for_each_cpu_mask(cpu, mask) { 323 for_each_cpu_mask_nr(cpu, mask) {
324 listeners = &per_cpu(listener_array, cpu); 324 listeners = &per_cpu(listener_array, cpu);
325 down_write(&listeners->sem); 325 down_write(&listeners->sem);
326 list_for_each_entry_safe(s, tmp, &listeners->list, list) { 326 list_for_each_entry_safe(s, tmp, &listeners->list, list) {
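The for_each_cpu_mask() to for_each_cpu_mask_nr() swap here (and in several files below) bounds the walk by nr_cpu_ids, the highest possible CPU id on this boot, instead of the compile-time NR_CPUS. A small illustrative helper; example_dump() and its printk are made up for the sketch:

#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_dump(const cpumask_t *mask)
{
        int cpu;

        /* visits only set bits below nr_cpu_ids */
        for_each_cpu_mask_nr(cpu, *mask)
                printk(KERN_DEBUG "cpu %d set (nr_cpu_ids = %d)\n",
                       cpu, nr_cpu_ids);
}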
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index dadde5361f32..093d4acf993b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data)
145 * Cycle through CPUs to check if the CPUs stay 145 * Cycle through CPUs to check if the CPUs stay
146 * synchronized to each other. 146 * synchronized to each other.
147 */ 147 */
148 int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); 148 int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map);
149 149
150 if (next_cpu >= NR_CPUS) 150 if (next_cpu >= nr_cpu_ids)
151 next_cpu = first_cpu(cpu_online_map); 151 next_cpu = first_cpu(cpu_online_map);
152 watchdog_timer.expires += WATCHDOG_INTERVAL; 152 watchdog_timer.expires += WATCHDOG_INTERVAL;
153 add_timer_on(&watchdog_timer, next_cpu); 153 add_timer_on(&watchdog_timer, next_cpu);
@@ -376,7 +376,8 @@ void clocksource_unregister(struct clocksource *cs)
376 * Provides sysfs interface for listing current clocksource. 376 * Provides sysfs interface for listing current clocksource.
377 */ 377 */
378static ssize_t 378static ssize_t
379sysfs_show_current_clocksources(struct sys_device *dev, char *buf) 379sysfs_show_current_clocksources(struct sys_device *dev,
380 struct sysdev_attribute *attr, char *buf)
380{ 381{
381 ssize_t count = 0; 382 ssize_t count = 0;
382 383
@@ -397,6 +398,7 @@ sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
397 * clocksource selction. 398 * clocksource selction.
398 */ 399 */
399static ssize_t sysfs_override_clocksource(struct sys_device *dev, 400static ssize_t sysfs_override_clocksource(struct sys_device *dev,
401 struct sysdev_attribute *attr,
400 const char *buf, size_t count) 402 const char *buf, size_t count)
401{ 403{
402 struct clocksource *ovr = NULL; 404 struct clocksource *ovr = NULL;
@@ -449,7 +451,9 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
449 * Provides sysfs interface for listing registered clocksources 451 * Provides sysfs interface for listing registered clocksources
450 */ 452 */
451static ssize_t 453static ssize_t
452sysfs_show_available_clocksources(struct sys_device *dev, char *buf) 454sysfs_show_available_clocksources(struct sys_device *dev,
455 struct sysdev_attribute *attr,
456 char *buf)
453{ 457{
454 struct clocksource *src; 458 struct clocksource *src;
455 ssize_t count = 0; 459 ssize_t count = 0;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f48d0f09d32f..31463d370b94 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -399,8 +399,7 @@ again:
399 mask = CPU_MASK_NONE; 399 mask = CPU_MASK_NONE;
400 now = ktime_get(); 400 now = ktime_get();
401 /* Find all expired events */ 401 /* Find all expired events */
402 for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; 402 for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
403 cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
404 td = &per_cpu(tick_cpu_device, cpu); 403 td = &per_cpu(tick_cpu_device, cpu);
405 if (td->evtdev->next_event.tv64 <= now.tv64) 404 if (td->evtdev->next_event.tv64 <= now.tv64)
406 cpu_set(cpu, mask); 405 cpu_set(cpu, mask);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 4f3886562b8c..80c4336f4188 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -135,7 +135,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
135 */ 135 */
136static void tick_setup_device(struct tick_device *td, 136static void tick_setup_device(struct tick_device *td,
137 struct clock_event_device *newdev, int cpu, 137 struct clock_event_device *newdev, int cpu,
138 cpumask_t cpumask) 138 const cpumask_t *cpumask)
139{ 139{
140 ktime_t next_event; 140 ktime_t next_event;
141 void (*handler)(struct clock_event_device *) = NULL; 141 void (*handler)(struct clock_event_device *) = NULL;
@@ -169,8 +169,8 @@ static void tick_setup_device(struct tick_device *td,
169 * When the device is not per cpu, pin the interrupt to the 169 * When the device is not per cpu, pin the interrupt to the
170 * current cpu: 170 * current cpu:
171 */ 171 */
172 if (!cpus_equal(newdev->cpumask, cpumask)) 172 if (!cpus_equal(newdev->cpumask, *cpumask))
173 irq_set_affinity(newdev->irq, cpumask); 173 irq_set_affinity(newdev->irq, *cpumask);
174 174
175 /* 175 /*
176 * When global broadcasting is active, check if the current 176 * When global broadcasting is active, check if the current
@@ -196,7 +196,6 @@ static int tick_check_new_device(struct clock_event_device *newdev)
196 struct tick_device *td; 196 struct tick_device *td;
197 int cpu, ret = NOTIFY_OK; 197 int cpu, ret = NOTIFY_OK;
198 unsigned long flags; 198 unsigned long flags;
199 cpumask_t cpumask;
200 199
201 spin_lock_irqsave(&tick_device_lock, flags); 200 spin_lock_irqsave(&tick_device_lock, flags);
202 201
@@ -206,10 +205,9 @@ static int tick_check_new_device(struct clock_event_device *newdev)
206 205
207 td = &per_cpu(tick_cpu_device, cpu); 206 td = &per_cpu(tick_cpu_device, cpu);
208 curdev = td->evtdev; 207 curdev = td->evtdev;
209 cpumask = cpumask_of_cpu(cpu);
210 208
211 /* cpu local device ? */ 209 /* cpu local device ? */
212 if (!cpus_equal(newdev->cpumask, cpumask)) { 210 if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
213 211
214 /* 212 /*
215 * If the cpu affinity of the device interrupt can not 213 * If the cpu affinity of the device interrupt can not
@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
222 * If we have a cpu local device already, do not replace it 220 * If we have a cpu local device already, do not replace it
223 * by a non cpu local device 221 * by a non cpu local device
224 */ 222 */
225 if (curdev && cpus_equal(curdev->cpumask, cpumask)) 223 if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
226 goto out_bc; 224 goto out_bc;
227 } 225 }
228 226
@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
254 curdev = NULL; 252 curdev = NULL;
255 } 253 }
256 clockevents_exchange_device(curdev, newdev); 254 clockevents_exchange_device(curdev, newdev);
257 tick_setup_device(td, newdev, cpu, cpumask); 255 tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
258 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 256 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
259 tick_oneshot_notify(); 257 tick_oneshot_notify();
260 258
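tick_setup_device() now takes the CPU mask as const cpumask_t * rather than by value; with large NR_CPUS a cpumask_t is hundreds of bytes, so copying it on the stack for every call is wasteful. A hedged sketch of the same convention; example_pin() is made up, the helpers it calls are the ones used in the hunk above:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void example_pin(unsigned int irq, const cpumask_t *mask,
                        const cpumask_t *devmask)
{
        /* the masks are only read through the pointers; nothing is copied */
        if (!cpus_equal(*devmask, *mask))
                irq_set_affinity(irq, *mask);
}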
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a5c26d2b1323..825b4c00fe44 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -140,8 +140,6 @@ void tick_nohz_update_jiffies(void)
140 if (!ts->tick_stopped) 140 if (!ts->tick_stopped)
141 return; 141 return;
142 142
143 touch_softlockup_watchdog();
144
145 cpu_clear(cpu, nohz_cpu_mask); 143 cpu_clear(cpu, nohz_cpu_mask);
146 now = ktime_get(); 144 now = ktime_get();
147 ts->idle_waketime = now; 145 ts->idle_waketime = now;
@@ -149,6 +147,8 @@ void tick_nohz_update_jiffies(void)
149 local_irq_save(flags); 147 local_irq_save(flags);
150 tick_do_update_jiffies64(now); 148 tick_do_update_jiffies64(now);
151 local_irq_restore(flags); 149 local_irq_restore(flags);
150
151 touch_softlockup_watchdog();
152} 152}
153 153
154void tick_nohz_stop_idle(int cpu) 154void tick_nohz_stop_idle(int cpu)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4231a3dc224a..f6e3af31b403 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -587,7 +587,7 @@ static int __ftrace_modify_code(void *data)
587 587
588static void ftrace_run_update_code(int command) 588static void ftrace_run_update_code(int command)
589{ 589{
590 stop_machine_run(__ftrace_modify_code, &command, NR_CPUS); 590 stop_machine(__ftrace_modify_code, &command, NULL);
591} 591}
592 592
593void ftrace_disable_daemon(void) 593void ftrace_disable_daemon(void)
@@ -787,7 +787,7 @@ static int ftrace_update_code(void)
787 !ftrace_enabled || !ftraced_trigger) 787 !ftrace_enabled || !ftraced_trigger)
788 return 0; 788 return 0;
789 789
790 stop_machine_run(__ftrace_update_code, NULL, NR_CPUS); 790 stop_machine(__ftrace_update_code, NULL, NULL);
791 791
792 return 1; 792 return 1;
793} 793}
@@ -1564,7 +1564,7 @@ static int __init ftrace_dynamic_init(void)
1564 1564
1565 addr = (unsigned long)ftrace_record_ip; 1565 addr = (unsigned long)ftrace_record_ip;
1566 1566
1567 stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS); 1567 stop_machine(ftrace_dyn_arch_init, &addr, NULL);
1568 1568
1569 /* ftrace_dyn_arch_init places the return code in addr */ 1569 /* ftrace_dyn_arch_init places the return code in addr */
1570 if (addr) { 1570 if (addr) {
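ftrace now calls the reworked stop_machine() interface (see kernel/stop_machine.c in the diffstat): the old max_cpus argument is gone and the third parameter selects which CPUs may run the callback, with NULL meaning any online CPU. A hedged sketch of a caller; example_patch(), example_modify() and the opcode variable are made up:

#include <linux/stop_machine.h>

static int example_patch(void *data)
{
        int *example_opcode = data;

        /* runs with all other online CPUs spinning, interrupts off */
        *example_opcode = 0x90;
        return 0;
}

static int example_modify(void)
{
        int example_opcode = 0;

        /* NULL: let stop_machine() pick any online CPU for example_patch() */
        return stop_machine(example_patch, &example_opcode, NULL);
}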
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 868e121c8e38..8f3fb3db61c3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1183,7 +1183,6 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
1183static void *s_next(struct seq_file *m, void *v, loff_t *pos) 1183static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1184{ 1184{
1185 struct trace_iterator *iter = m->private; 1185 struct trace_iterator *iter = m->private;
1186 void *last_ent = iter->ent;
1187 int i = (int)*pos; 1186 int i = (int)*pos;
1188 void *ent; 1187 void *ent;
1189 1188
@@ -1203,9 +1202,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1203 1202
1204 iter->pos = *pos; 1203 iter->pos = *pos;
1205 1204
1206 if (last_ent && !ent)
1207 seq_puts(m, "\n\nvim:ft=help\n");
1208
1209 return ent; 1205 return ent;
1210} 1206}
1211 1207
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 421d6fe3650e..ece6cfb649fa 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -253,12 +253,14 @@ void start_critical_timings(void)
253 if (preempt_trace() || irq_trace()) 253 if (preempt_trace() || irq_trace())
254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 254 start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
255} 255}
256EXPORT_SYMBOL_GPL(start_critical_timings);
256 257
257void stop_critical_timings(void) 258void stop_critical_timings(void)
258{ 259{
259 if (preempt_trace() || irq_trace()) 260 if (preempt_trace() || irq_trace())
260 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); 261 stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
261} 262}
263EXPORT_SYMBOL_GPL(stop_critical_timings);
262 264
263#ifdef CONFIG_IRQSOFF_TRACER 265#ifdef CONFIG_IRQSOFF_TRACER
264#ifdef CONFIG_PROVE_LOCKING 266#ifdef CONFIG_PROVE_LOCKING
@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
337#ifdef CONFIG_PREEMPT_TRACER 339#ifdef CONFIG_PREEMPT_TRACER
338void trace_preempt_on(unsigned long a0, unsigned long a1) 340void trace_preempt_on(unsigned long a0, unsigned long a1)
339{ 341{
340 stop_critical_timing(a0, a1); 342 if (preempt_trace())
343 stop_critical_timing(a0, a1);
341} 344}
342 345
343void trace_preempt_off(unsigned long a0, unsigned long a1) 346void trace_preempt_off(unsigned long a0, unsigned long a1)
344{ 347{
345 start_critical_timing(a0, a1); 348 if (preempt_trace())
349 start_critical_timing(a0, a1);
346} 350}
347#endif /* CONFIG_PREEMPT_TRACER */ 351#endif /* CONFIG_PREEMPT_TRACER */
348 352
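Exporting start_critical_timings()/stop_critical_timings() lets modular code, for instance an idle driver, exclude a deliberately long interrupts-off wait from the irqsoff/preemptoff latency measurement. A hedged sketch, assuming the declarations live in <linux/ftrace.h> as in this era; example_wait_for_hw() is a made-up stand-in for the long wait:

#include <linux/ftrace.h>

static void example_wait_for_hw(void)
{
        /* hypothetical long, expected interrupts-off hardware wait */
}

static void example_idle_enter(void)
{
        stop_critical_timings();        /* pause irqsoff/preemptoff timing */
        example_wait_for_hw();
        start_critical_timings();       /* resume timing afterwards */
}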
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3c8d61df4474..e303ccb62cdf 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
26static int wakeup_cpu; 26static int wakeup_cpu;
27static unsigned wakeup_prio = -1; 27static unsigned wakeup_prio = -1;
28 28
29static DEFINE_SPINLOCK(wakeup_lock); 29static raw_spinlock_t wakeup_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
30 31
31static void __wakeup_reset(struct trace_array *tr); 32static void __wakeup_reset(struct trace_array *tr);
32 33
@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
56 if (unlikely(disabled != 1)) 57 if (unlikely(disabled != 1))
57 goto out; 58 goto out;
58 59
59 spin_lock_irqsave(&wakeup_lock, flags); 60 local_irq_save(flags);
61 __raw_spin_lock(&wakeup_lock);
60 62
61 if (unlikely(!wakeup_task)) 63 if (unlikely(!wakeup_task))
62 goto unlock; 64 goto unlock;
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
71 trace_function(tr, data, ip, parent_ip, flags); 73 trace_function(tr, data, ip, parent_ip, flags);
72 74
73 unlock: 75 unlock:
74 spin_unlock_irqrestore(&wakeup_lock, flags); 76 __raw_spin_unlock(&wakeup_lock);
77 local_irq_restore(flags);
75 78
76 out: 79 out:
77 atomic_dec(&data->disabled); 80 atomic_dec(&data->disabled);
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
145 if (likely(disabled != 1)) 148 if (likely(disabled != 1))
146 goto out; 149 goto out;
147 150
148 spin_lock_irqsave(&wakeup_lock, flags); 151 local_irq_save(flags);
152 __raw_spin_lock(&wakeup_lock);
149 153
150 /* We could race with grabbing wakeup_lock */ 154 /* We could race with grabbing wakeup_lock */
151 if (unlikely(!tracer_enabled || next != wakeup_task)) 155 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
174 178
175out_unlock: 179out_unlock:
176 __wakeup_reset(tr); 180 __wakeup_reset(tr);
177 spin_unlock_irqrestore(&wakeup_lock, flags); 181 __raw_spin_unlock(&wakeup_lock);
182 local_irq_restore(flags);
178out: 183out:
179 atomic_dec(&tr->data[cpu]->disabled); 184 atomic_dec(&tr->data[cpu]->disabled);
180} 185}
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
209 struct trace_array_cpu *data; 214 struct trace_array_cpu *data;
210 int cpu; 215 int cpu;
211 216
212 assert_spin_locked(&wakeup_lock);
213
214 for_each_possible_cpu(cpu) { 217 for_each_possible_cpu(cpu) {
215 data = tr->data[cpu]; 218 data = tr->data[cpu];
216 tracing_reset(data); 219 tracing_reset(data);
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
229{ 232{
230 unsigned long flags; 233 unsigned long flags;
231 234
232 spin_lock_irqsave(&wakeup_lock, flags); 235 local_irq_save(flags);
236 __raw_spin_lock(&wakeup_lock);
233 __wakeup_reset(tr); 237 __wakeup_reset(tr);
234 spin_unlock_irqrestore(&wakeup_lock, flags); 238 __raw_spin_unlock(&wakeup_lock);
239 local_irq_restore(flags);
235} 240}
236 241
237static void 242static void
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
252 goto out; 257 goto out;
253 258
254 /* interrupts should be off from try_to_wake_up */ 259 /* interrupts should be off from try_to_wake_up */
255 spin_lock(&wakeup_lock); 260 __raw_spin_lock(&wakeup_lock);
256 261
257 /* check for races. */ 262 /* check for races. */
258 if (!tracer_enabled || p->prio >= wakeup_prio) 263 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
274 CALLER_ADDR1, CALLER_ADDR2, flags); 279 CALLER_ADDR1, CALLER_ADDR2, flags);
275 280
276out_locked: 281out_locked:
277 spin_unlock(&wakeup_lock); 282 __raw_spin_unlock(&wakeup_lock);
278out: 283out:
279 atomic_dec(&tr->data[cpu]->disabled); 284 atomic_dec(&tr->data[cpu]->disabled);
280} 285}
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 2301e1e7c606..bb948e52ce20 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -161,7 +161,7 @@ static void timer_notify(struct pt_regs *regs, int cpu)
161 __trace_special(tr, data, 2, regs->ip, 0); 161 __trace_special(tr, data, 2, regs->ip, 0);
162 162
163 while (i < sample_max_depth) { 163 while (i < sample_max_depth) {
164 frame.next_fp = 0; 164 frame.next_fp = NULL;
165 frame.return_address = 0; 165 frame.return_address = 0;
166 if (!copy_stack_frame(fp, &frame)) 166 if (!copy_stack_frame(fp, &frame))
167 break; 167 break;
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 4ab1b584961b..8ebcd8532dfb 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -28,14 +28,14 @@
28void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) 28void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
29{ 29{
30 struct timespec uptime, ts; 30 struct timespec uptime, ts;
31 s64 ac_etime; 31 u64 ac_etime;
32 32
33 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); 33 BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
34 34
35 /* calculate task elapsed time in timespec */ 35 /* calculate task elapsed time in timespec */
36 do_posix_clock_monotonic_gettime(&uptime); 36 do_posix_clock_monotonic_gettime(&uptime);
37 ts = timespec_sub(uptime, tsk->start_time); 37 ts = timespec_sub(uptime, tsk->start_time);
38 /* rebase elapsed time to usec */ 38 /* rebase elapsed time to usec (should never be negative) */
39 ac_etime = timespec_to_ns(&ts); 39 ac_etime = timespec_to_ns(&ts);
40 do_div(ac_etime, NSEC_PER_USEC); 40 do_div(ac_etime, NSEC_PER_USEC);
41 stats->ac_etime = ac_etime; 41 stats->ac_etime = ac_etime;
@@ -84,9 +84,9 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
84{ 84{
85 struct mm_struct *mm; 85 struct mm_struct *mm;
86 86
87 /* convert pages-jiffies to Mbyte-usec */ 87 /* convert pages-usec to Mbyte-usec */
88 stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; 88 stats->coremem = p->acct_rss_mem1 * PAGE_SIZE / MB;
89 stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; 89 stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE / MB;
90 mm = get_task_mm(p); 90 mm = get_task_mm(p);
91 if (mm) { 91 if (mm) {
92 /* adjust to KB unit */ 92 /* adjust to KB unit */
@@ -94,10 +94,10 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
94 stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; 94 stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
95 mmput(mm); 95 mmput(mm);
96 } 96 }
97 stats->read_char = p->rchar; 97 stats->read_char = p->ioac.rchar;
98 stats->write_char = p->wchar; 98 stats->write_char = p->ioac.wchar;
99 stats->read_syscalls = p->syscr; 99 stats->read_syscalls = p->ioac.syscr;
100 stats->write_syscalls = p->syscw; 100 stats->write_syscalls = p->ioac.syscw;
101#ifdef CONFIG_TASK_IO_ACCOUNTING 101#ifdef CONFIG_TASK_IO_ACCOUNTING
102 stats->read_bytes = p->ioac.read_bytes; 102 stats->read_bytes = p->ioac.read_bytes;
103 stats->write_bytes = p->ioac.write_bytes; 103 stats->write_bytes = p->ioac.write_bytes;
@@ -118,12 +118,19 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
118void acct_update_integrals(struct task_struct *tsk) 118void acct_update_integrals(struct task_struct *tsk)
119{ 119{
120 if (likely(tsk->mm)) { 120 if (likely(tsk->mm)) {
121 long delta = cputime_to_jiffies( 121 cputime_t time, dtime;
122 cputime_sub(tsk->stime, tsk->acct_stimexpd)); 122 struct timeval value;
123 u64 delta;
124
125 time = tsk->stime + tsk->utime;
126 dtime = cputime_sub(time, tsk->acct_timexpd);
127 jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
128 delta = value.tv_sec;
129 delta = delta * USEC_PER_SEC + value.tv_usec;
123 130
124 if (delta == 0) 131 if (delta == 0)
125 return; 132 return;
126 tsk->acct_stimexpd = tsk->stime; 133 tsk->acct_timexpd = time;
127 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); 134 tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
128 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; 135 tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
129 } 136 }
@@ -135,7 +142,7 @@ void acct_update_integrals(struct task_struct *tsk)
135 */ 142 */
136void acct_clear_integrals(struct task_struct *tsk) 143void acct_clear_integrals(struct task_struct *tsk)
137{ 144{
138 tsk->acct_stimexpd = 0; 145 tsk->acct_timexpd = 0;
139 tsk->acct_rss_mem1 = 0; 146 tsk->acct_rss_mem1 = 0;
140 tsk->acct_vm_mem1 = 0; 147 tsk->acct_vm_mem1 = 0;
141} 148}
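acct_update_integrals() now accumulates the rss/vm integrals in microseconds of combined utime+stime rather than in jiffies of stime alone. A standalone sketch of the conversion the new code performs, assuming HZ=1000 and a made-up sample delta:

#include <stdio.h>

#define HZ           1000
#define USEC_PER_SEC 1000000ULL

int main(void)
{
        unsigned long dtime = 1250;     /* jiffies of utime+stime since last update */
        unsigned long long sec   = dtime / HZ;
        unsigned long long usec  = (dtime % HZ) * (USEC_PER_SEC / HZ);
        unsigned long long delta = sec * USEC_PER_SEC + usec;

        /* 1250 jiffies at HZ=1000 -> 1 s + 250000 us -> 1250000 us */
        printf("%llu usec\n", delta);
        return 0;
}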
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ce7799540c91..4048e92aa04f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,7 +125,7 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
125} 125}
126 126
127static void insert_work(struct cpu_workqueue_struct *cwq, 127static void insert_work(struct cpu_workqueue_struct *cwq,
128 struct work_struct *work, int tail) 128 struct work_struct *work, struct list_head *head)
129{ 129{
130 set_wq_data(work, cwq); 130 set_wq_data(work, cwq);
131 /* 131 /*
@@ -133,21 +133,17 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
133 * result of list_add() below, see try_to_grab_pending(). 133 * result of list_add() below, see try_to_grab_pending().
134 */ 134 */
135 smp_wmb(); 135 smp_wmb();
136 if (tail) 136 list_add_tail(&work->entry, head);
137 list_add_tail(&work->entry, &cwq->worklist);
138 else
139 list_add(&work->entry, &cwq->worklist);
140 wake_up(&cwq->more_work); 137 wake_up(&cwq->more_work);
141} 138}
142 139
143/* Preempt must be disabled. */
144static void __queue_work(struct cpu_workqueue_struct *cwq, 140static void __queue_work(struct cpu_workqueue_struct *cwq,
145 struct work_struct *work) 141 struct work_struct *work)
146{ 142{
147 unsigned long flags; 143 unsigned long flags;
148 144
149 spin_lock_irqsave(&cwq->lock, flags); 145 spin_lock_irqsave(&cwq->lock, flags);
150 insert_work(cwq, work, 1); 146 insert_work(cwq, work, &cwq->worklist);
151 spin_unlock_irqrestore(&cwq->lock, flags); 147 spin_unlock_irqrestore(&cwq->lock, flags);
152} 148}
153 149
@@ -163,17 +159,39 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
163 */ 159 */
164int queue_work(struct workqueue_struct *wq, struct work_struct *work) 160int queue_work(struct workqueue_struct *wq, struct work_struct *work)
165{ 161{
162 int ret;
163
164 ret = queue_work_on(get_cpu(), wq, work);
165 put_cpu();
166
167 return ret;
168}
169EXPORT_SYMBOL_GPL(queue_work);
170
171/**
172 * queue_work_on - queue work on specific cpu
173 * @cpu: CPU number to execute work on
174 * @wq: workqueue to use
175 * @work: work to queue
176 *
177 * Returns 0 if @work was already on a queue, non-zero otherwise.
178 *
179 * We queue the work to a specific CPU, the caller must ensure it
180 * can't go away.
181 */
182int
183queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
184{
166 int ret = 0; 185 int ret = 0;
167 186
168 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { 187 if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
169 BUG_ON(!list_empty(&work->entry)); 188 BUG_ON(!list_empty(&work->entry));
170 __queue_work(wq_per_cpu(wq, get_cpu()), work); 189 __queue_work(wq_per_cpu(wq, cpu), work);
171 put_cpu();
172 ret = 1; 190 ret = 1;
173 } 191 }
174 return ret; 192 return ret;
175} 193}
176EXPORT_SYMBOL_GPL(queue_work); 194EXPORT_SYMBOL_GPL(queue_work_on);
177 195
178static void delayed_work_timer_fn(unsigned long __data) 196static void delayed_work_timer_fn(unsigned long __data)
179{ 197{
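queue_work_on(), introduced in the hunk above, is the CPU-pinned counterpart of queue_work(); as its comment says, the caller has to keep the target CPU from going away. A hedged usage sketch with made-up example_* names, assuming example_wq was created elsewhere with create_workqueue():

#include <linux/workqueue.h>
#include <linux/cpu.h>

static struct workqueue_struct *example_wq;     /* assumed created elsewhere */

static void example_fn(struct work_struct *work)
{
        /* executes on the CPU the work item was queued to */
}

static DECLARE_WORK(example_work, example_fn);

static int example_kick(int cpu)
{
        int ret = 0;

        get_online_cpus();              /* keep 'cpu' from going offline */
        if (cpu_online(cpu))
                ret = queue_work_on(cpu, example_wq, &example_work);
        put_online_cpus();
        return ret;
}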
@@ -272,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
272 290
273 BUG_ON(get_wq_data(work) != cwq); 291 BUG_ON(get_wq_data(work) != cwq);
274 work_clear_pending(work); 292 work_clear_pending(work);
275 lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); 293 lock_map_acquire(&cwq->wq->lockdep_map);
276 lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_); 294 lock_map_acquire(&lockdep_map);
277 f(work); 295 f(work);
278 lock_release(&lockdep_map, 1, _THIS_IP_); 296 lock_map_release(&lockdep_map);
279 lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); 297 lock_map_release(&cwq->wq->lockdep_map);
280 298
281 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 299 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
282 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 300 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ -337,14 +355,14 @@ static void wq_barrier_func(struct work_struct *work)
337} 355}
338 356
339static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, 357static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
340 struct wq_barrier *barr, int tail) 358 struct wq_barrier *barr, struct list_head *head)
341{ 359{
342 INIT_WORK(&barr->work, wq_barrier_func); 360 INIT_WORK(&barr->work, wq_barrier_func);
343 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); 361 __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
344 362
345 init_completion(&barr->done); 363 init_completion(&barr->done);
346 364
347 insert_work(cwq, &barr->work, tail); 365 insert_work(cwq, &barr->work, head);
348} 366}
349 367
350static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) 368static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -364,7 +382,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
364 active = 0; 382 active = 0;
365 spin_lock_irq(&cwq->lock); 383 spin_lock_irq(&cwq->lock);
366 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { 384 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
367 insert_wq_barrier(cwq, &barr, 1); 385 insert_wq_barrier(cwq, &barr, &cwq->worklist);
368 active = 1; 386 active = 1;
369 } 387 }
370 spin_unlock_irq(&cwq->lock); 388 spin_unlock_irq(&cwq->lock);
@@ -395,13 +413,64 @@ void flush_workqueue(struct workqueue_struct *wq)
395 int cpu; 413 int cpu;
396 414
397 might_sleep(); 415 might_sleep();
398 lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); 416 lock_map_acquire(&wq->lockdep_map);
399 lock_release(&wq->lockdep_map, 1, _THIS_IP_); 417 lock_map_release(&wq->lockdep_map);
400 for_each_cpu_mask(cpu, *cpu_map) 418 for_each_cpu_mask_nr(cpu, *cpu_map)
401 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 419 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
402} 420}
403EXPORT_SYMBOL_GPL(flush_workqueue); 421EXPORT_SYMBOL_GPL(flush_workqueue);
404 422
423/**
424 * flush_work - block until a work_struct's callback has terminated
425 * @work: the work which is to be flushed
426 *
427 * Returns false if @work has already terminated.
428 *
429 * It is expected that, prior to calling flush_work(), the caller has
430 * arranged for the work to not be requeued, otherwise it doesn't make
431 * sense to use this function.
432 */
433int flush_work(struct work_struct *work)
434{
435 struct cpu_workqueue_struct *cwq;
436 struct list_head *prev;
437 struct wq_barrier barr;
438
439 might_sleep();
440 cwq = get_wq_data(work);
441 if (!cwq)
442 return 0;
443
444 lock_map_acquire(&cwq->wq->lockdep_map);
445 lock_map_release(&cwq->wq->lockdep_map);
446
447 prev = NULL;
448 spin_lock_irq(&cwq->lock);
449 if (!list_empty(&work->entry)) {
450 /*
451 * See the comment near try_to_grab_pending()->smp_rmb().
452 * If it was re-queued under us we are not going to wait.
453 */
454 smp_rmb();
455 if (unlikely(cwq != get_wq_data(work)))
456 goto out;
457 prev = &work->entry;
458 } else {
459 if (cwq->current_work != work)
460 goto out;
461 prev = &cwq->worklist;
462 }
463 insert_wq_barrier(cwq, &barr, prev->next);
464out:
465 spin_unlock_irq(&cwq->lock);
466 if (!prev)
467 return 0;
468
469 wait_for_completion(&barr.done);
470 return 1;
471}
472EXPORT_SYMBOL_GPL(flush_work);
473
405/* 474/*
406 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, 475 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
407 * so this work can't be re-armed in any way. 476 * so this work can't be re-armed in any way.
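flush_work(), added in the hunk above, waits for one specific work item instead of draining the whole queue the way flush_workqueue() does; per its comment the caller should first make sure nothing re-queues the item. A hedged sketch with made-up example_* names:

#include <linux/workqueue.h>

static int example_enabled = 1;

static void example_fn(struct work_struct *work)
{
        /* only queued while example_enabled is set */
}

static DECLARE_WORK(example_work, example_fn);

static void example_shutdown(void)
{
        example_enabled = 0;            /* stop new queueing first ... */
        flush_work(&example_work);      /* ... then wait for any pending run */
}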
@@ -449,7 +518,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
449 518
450 spin_lock_irq(&cwq->lock); 519 spin_lock_irq(&cwq->lock);
451 if (unlikely(cwq->current_work == work)) { 520 if (unlikely(cwq->current_work == work)) {
452 insert_wq_barrier(cwq, &barr, 0); 521 insert_wq_barrier(cwq, &barr, cwq->worklist.next);
453 running = 1; 522 running = 1;
454 } 523 }
455 spin_unlock_irq(&cwq->lock); 524 spin_unlock_irq(&cwq->lock);
@@ -467,8 +536,8 @@ static void wait_on_work(struct work_struct *work)
467 536
468 might_sleep(); 537 might_sleep();
469 538
470 lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_); 539 lock_map_acquire(&work->lockdep_map);
471 lock_release(&work->lockdep_map, 1, _THIS_IP_); 540 lock_map_release(&work->lockdep_map);
472 541
473 cwq = get_wq_data(work); 542 cwq = get_wq_data(work);
474 if (!cwq) 543 if (!cwq)
@@ -477,7 +546,7 @@ static void wait_on_work(struct work_struct *work)
477 wq = cwq->wq; 546 wq = cwq->wq;
478 cpu_map = wq_cpu_map(wq); 547 cpu_map = wq_cpu_map(wq);
479 548
480 for_each_cpu_mask(cpu, *cpu_map) 549 for_each_cpu_mask_nr(cpu, *cpu_map)
481 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 550 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
482} 551}
483 552
@@ -553,6 +622,19 @@ int schedule_work(struct work_struct *work)
553} 622}
554EXPORT_SYMBOL(schedule_work); 623EXPORT_SYMBOL(schedule_work);
555 624
625/*
626 * schedule_work_on - put work task on a specific cpu
627 * @cpu: cpu to put the work task on
628 * @work: job to be done
629 *
630 * This puts a job on a specific cpu
631 */
632int schedule_work_on(int cpu, struct work_struct *work)
633{
634 return queue_work_on(cpu, keventd_wq, work);
635}
636EXPORT_SYMBOL(schedule_work_on);
637
556/** 638/**
557 * schedule_delayed_work - put work task in global workqueue after delay 639 * schedule_delayed_work - put work task in global workqueue after delay
558 * @dwork: job to be done 640 * @dwork: job to be done
@@ -607,10 +689,10 @@ int schedule_on_each_cpu(work_func_t func)
607 struct work_struct *work = per_cpu_ptr(works, cpu); 689 struct work_struct *work = per_cpu_ptr(works, cpu);
608 690
609 INIT_WORK(work, func); 691 INIT_WORK(work, func);
610 set_bit(WORK_STRUCT_PENDING, work_data_bits(work)); 692 schedule_work_on(cpu, work);
611 __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
612 } 693 }
613 flush_workqueue(keventd_wq); 694 for_each_online_cpu(cpu)
695 flush_work(per_cpu_ptr(works, cpu));
614 put_online_cpus(); 696 put_online_cpus();
615 free_percpu(works); 697 free_percpu(works);
616 return 0; 698 return 0;
@@ -747,11 +829,22 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
747 err = create_workqueue_thread(cwq, singlethread_cpu); 829 err = create_workqueue_thread(cwq, singlethread_cpu);
748 start_workqueue_thread(cwq, -1); 830 start_workqueue_thread(cwq, -1);
749 } else { 831 } else {
750 get_online_cpus(); 832 cpu_maps_update_begin();
833 /*
834 * We must place this wq on list even if the code below fails.
835 * cpu_down(cpu) can remove cpu from cpu_populated_map before
836 * destroy_workqueue() takes the lock, in that case we leak
837 * cwq[cpu]->thread.
838 */
751 spin_lock(&workqueue_lock); 839 spin_lock(&workqueue_lock);
752 list_add(&wq->list, &workqueues); 840 list_add(&wq->list, &workqueues);
753 spin_unlock(&workqueue_lock); 841 spin_unlock(&workqueue_lock);
754 842 /*
843 * We must initialize cwqs for each possible cpu even if we
844 * are going to call destroy_workqueue() finally. Otherwise
845 * cpu_up() can hit the uninitialized cwq once we drop the
846 * lock.
847 */
755 for_each_possible_cpu(cpu) { 848 for_each_possible_cpu(cpu) {
756 cwq = init_cpu_workqueue(wq, cpu); 849 cwq = init_cpu_workqueue(wq, cpu);
757 if (err || !cpu_online(cpu)) 850 if (err || !cpu_online(cpu))
@@ -759,7 +852,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
759 err = create_workqueue_thread(cwq, cpu); 852 err = create_workqueue_thread(cwq, cpu);
760 start_workqueue_thread(cwq, cpu); 853 start_workqueue_thread(cwq, cpu);
761 } 854 }
762 put_online_cpus(); 855 cpu_maps_update_done();
763 } 856 }
764 857
765 if (err) { 858 if (err) {
@@ -773,18 +866,18 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key);
773static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) 866static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
774{ 867{
775 /* 868 /*
776 * Our caller is either destroy_workqueue() or CPU_DEAD, 869 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
777 * get_online_cpus() protects cwq->thread. 870 * cpu_add_remove_lock protects cwq->thread.
778 */ 871 */
779 if (cwq->thread == NULL) 872 if (cwq->thread == NULL)
780 return; 873 return;
781 874
782 lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); 875 lock_map_acquire(&cwq->wq->lockdep_map);
783 lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); 876 lock_map_release(&cwq->wq->lockdep_map);
784 877
785 flush_cpu_workqueue(cwq); 878 flush_cpu_workqueue(cwq);
786 /* 879 /*
787 * If the caller is CPU_DEAD and cwq->worklist was not empty, 880 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
788 * a concurrent flush_workqueue() can insert a barrier after us. 881 * a concurrent flush_workqueue() can insert a barrier after us.
789 * However, in that case run_workqueue() won't return and check 882 * However, in that case run_workqueue() won't return and check
790 * kthread_should_stop() until it flushes all work_struct's. 883 * kthread_should_stop() until it flushes all work_struct's.
@@ -808,14 +901,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
808 const cpumask_t *cpu_map = wq_cpu_map(wq); 901 const cpumask_t *cpu_map = wq_cpu_map(wq);
809 int cpu; 902 int cpu;
810 903
811 get_online_cpus(); 904 cpu_maps_update_begin();
812 spin_lock(&workqueue_lock); 905 spin_lock(&workqueue_lock);
813 list_del(&wq->list); 906 list_del(&wq->list);
814 spin_unlock(&workqueue_lock); 907 spin_unlock(&workqueue_lock);
815 908
816 for_each_cpu_mask(cpu, *cpu_map) 909 for_each_cpu_mask_nr(cpu, *cpu_map)
817 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 910 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
818 put_online_cpus(); 911 cpu_maps_update_done();
819 912
820 free_percpu(wq->cpu_wq); 913 free_percpu(wq->cpu_wq);
821 kfree(wq); 914 kfree(wq);
@@ -829,6 +922,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
829 unsigned int cpu = (unsigned long)hcpu; 922 unsigned int cpu = (unsigned long)hcpu;
830 struct cpu_workqueue_struct *cwq; 923 struct cpu_workqueue_struct *cwq;
831 struct workqueue_struct *wq; 924 struct workqueue_struct *wq;
925 int ret = NOTIFY_OK;
832 926
833 action &= ~CPU_TASKS_FROZEN; 927 action &= ~CPU_TASKS_FROZEN;
834 928
@@ -836,7 +930,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
836 case CPU_UP_PREPARE: 930 case CPU_UP_PREPARE:
837 cpu_set(cpu, cpu_populated_map); 931 cpu_set(cpu, cpu_populated_map);
838 } 932 }
839 933undo:
840 list_for_each_entry(wq, &workqueues, list) { 934 list_for_each_entry(wq, &workqueues, list) {
841 cwq = per_cpu_ptr(wq->cpu_wq, cpu); 935 cwq = per_cpu_ptr(wq->cpu_wq, cpu);
842 936
@@ -846,7 +940,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
846 break; 940 break;
847 printk(KERN_ERR "workqueue [%s] for %i failed\n", 941 printk(KERN_ERR "workqueue [%s] for %i failed\n",
848 wq->name, cpu); 942 wq->name, cpu);
849 return NOTIFY_BAD; 943 action = CPU_UP_CANCELED;
944 ret = NOTIFY_BAD;
945 goto undo;
850 946
851 case CPU_ONLINE: 947 case CPU_ONLINE:
852 start_workqueue_thread(cwq, cpu); 948 start_workqueue_thread(cwq, cpu);
@@ -854,7 +950,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
854 950
855 case CPU_UP_CANCELED: 951 case CPU_UP_CANCELED:
856 start_workqueue_thread(cwq, -1); 952 start_workqueue_thread(cwq, -1);
857 case CPU_DEAD: 953 case CPU_POST_DEAD:
858 cleanup_workqueue_thread(cwq); 954 cleanup_workqueue_thread(cwq);
859 break; 955 break;
860 } 956 }
@@ -862,11 +958,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
862 958
863 switch (action) { 959 switch (action) {
864 case CPU_UP_CANCELED: 960 case CPU_UP_CANCELED:
865 case CPU_DEAD: 961 case CPU_POST_DEAD:
866 cpu_clear(cpu, cpu_populated_map); 962 cpu_clear(cpu, cpu_populated_map);
867 } 963 }
868 964
869 return NOTIFY_OK; 965 return ret;
870} 966}
871 967
872void __init init_workqueues(void) 968void __init init_workqueues(void)