author      Ingo Molnar <mingo@elte.hu>    2008-10-02 04:21:26 -0400
committer   Ingo Molnar <mingo@elte.hu>    2008-10-02 04:21:26 -0400
commit      d6d5aeb661fc14655c417f3582ae7ec52985d2a8 (patch)
tree        5e168da05cb28d10b5accc74718428cfd5527201 /kernel
parent      7e6e178ab1548c8d894a77593e757acf4510b8ba (diff)
parent      94aca1dac6f6d21f4b07e4864baf7768cabcc6e7 (diff)
Merge commit 'v2.6.27-rc8' into genirq
Diffstat (limited to 'kernel')
90 files changed, 4255 insertions, 2568 deletions
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 526128a2e622..94fabd534b03 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -55,4 +55,4 @@ config HZ | |||
55 | default 1000 if HZ_1000 | 55 | default 1000 if HZ_1000 |
56 | 56 | ||
57 | config SCHED_HRTICK | 57 | config SCHED_HRTICK |
58 | def_bool HIGH_RES_TIMERS && X86 | 58 | def_bool HIGH_RES_TIMERS && (!SMP || USE_GENERIC_SMP_HELPERS) |
diff --git a/kernel/Makefile b/kernel/Makefile
index 985ddb7da4d0..4e1d7df7c3e2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | 5 | obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ |
6 | cpu.o exit.o itimer.o time.o softirq.o resource.o \ | 6 | cpu.o exit.o itimer.o time.o softirq.o resource.o \ |
7 | sysctl.o capability.o ptrace.o timer.o user.o \ | 7 | sysctl.o capability.o ptrace.o timer.o user.o \ |
8 | signal.o sys.o kmod.o workqueue.o pid.o \ | 8 | signal.o sys.o kmod.o workqueue.o pid.o \ |
@@ -11,6 +11,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o | 12 | notifier.o ksysfs.o pm_qos_params.o sched_clock.o |
13 | 13 | ||
14 | CFLAGS_REMOVE_sched.o = -mno-spe | ||
15 | |||
14 | ifdef CONFIG_FTRACE | 16 | ifdef CONFIG_FTRACE |
15 | # Do not trace debug files and internal ftrace files | 17 | # Do not trace debug files and internal ftrace files |
16 | CFLAGS_REMOVE_lockdep.o = -pg | 18 | CFLAGS_REMOVE_lockdep.o = -pg |
@@ -22,6 +24,7 @@ CFLAGS_REMOVE_sched_clock.o = -pg | |||
22 | CFLAGS_REMOVE_sched.o = -mno-spe -pg | 24 | CFLAGS_REMOVE_sched.o = -mno-spe -pg |
23 | endif | 25 | endif |
24 | 26 | ||
27 | obj-$(CONFIG_PROFILING) += profile.o | ||
25 | obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o | 28 | obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o |
26 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 29 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
27 | obj-y += time/ | 30 | obj-y += time/ |
@@ -81,6 +84,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o | |||
81 | obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o | 84 | obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o |
82 | obj-$(CONFIG_MARKERS) += marker.o | 85 | obj-$(CONFIG_MARKERS) += marker.o |
83 | obj-$(CONFIG_LATENCYTOP) += latencytop.o | 86 | obj-$(CONFIG_LATENCYTOP) += latencytop.o |
87 | obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o | ||
84 | obj-$(CONFIG_FTRACE) += trace/ | 88 | obj-$(CONFIG_FTRACE) += trace/ |
85 | obj-$(CONFIG_TRACING) += trace/ | 89 | obj-$(CONFIG_TRACING) += trace/ |
86 | obj-$(CONFIG_SMP) += sched_cpupri.o | 90 | obj-$(CONFIG_SMP) += sched_cpupri.o |
diff --git a/kernel/acct.c b/kernel/acct.c
index 91e1cfd734d2..dd68b9059418 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -75,37 +75,39 @@ int acct_parm[3] = {4, 2, 30}; | |||
75 | /* | 75 | /* |
76 | * External references and all of the globals. | 76 | * External references and all of the globals. |
77 | */ | 77 | */ |
78 | static void do_acct_process(struct pid_namespace *ns, struct file *); | 78 | static void do_acct_process(struct bsd_acct_struct *acct, |
79 | struct pid_namespace *ns, struct file *); | ||
79 | 80 | ||
80 | /* | 81 | /* |
81 | * This structure is used so that all the data protected by lock | 82 | * This structure is used so that all the data protected by lock |
82 | * can be placed in the same cache line as the lock. This primes | 83 | * can be placed in the same cache line as the lock. This primes |
83 | * the cache line to have the data after getting the lock. | 84 | * the cache line to have the data after getting the lock. |
84 | */ | 85 | */ |
85 | struct acct_glbs { | 86 | struct bsd_acct_struct { |
86 | spinlock_t lock; | ||
87 | volatile int active; | 87 | volatile int active; |
88 | volatile int needcheck; | 88 | volatile int needcheck; |
89 | struct file *file; | 89 | struct file *file; |
90 | struct pid_namespace *ns; | 90 | struct pid_namespace *ns; |
91 | struct timer_list timer; | 91 | struct timer_list timer; |
92 | struct list_head list; | ||
92 | }; | 93 | }; |
93 | 94 | ||
94 | static struct acct_glbs acct_globals __cacheline_aligned = | 95 | static DEFINE_SPINLOCK(acct_lock); |
95 | {__SPIN_LOCK_UNLOCKED(acct_globals.lock)}; | 96 | static LIST_HEAD(acct_list); |
96 | 97 | ||
97 | /* | 98 | /* |
98 | * Called whenever the timer says to check the free space. | 99 | * Called whenever the timer says to check the free space. |
99 | */ | 100 | */ |
100 | static void acct_timeout(unsigned long unused) | 101 | static void acct_timeout(unsigned long x) |
101 | { | 102 | { |
102 | acct_globals.needcheck = 1; | 103 | struct bsd_acct_struct *acct = (struct bsd_acct_struct *)x; |
104 | acct->needcheck = 1; | ||
103 | } | 105 | } |
104 | 106 | ||
105 | /* | 107 | /* |
106 | * Check the amount of free space and suspend/resume accordingly. | 108 | * Check the amount of free space and suspend/resume accordingly. |
107 | */ | 109 | */ |
108 | static int check_free_space(struct file *file) | 110 | static int check_free_space(struct bsd_acct_struct *acct, struct file *file) |
109 | { | 111 | { |
110 | struct kstatfs sbuf; | 112 | struct kstatfs sbuf; |
111 | int res; | 113 | int res; |
@@ -113,11 +115,11 @@ static int check_free_space(struct file *file) | |||
113 | sector_t resume; | 115 | sector_t resume; |
114 | sector_t suspend; | 116 | sector_t suspend; |
115 | 117 | ||
116 | spin_lock(&acct_globals.lock); | 118 | spin_lock(&acct_lock); |
117 | res = acct_globals.active; | 119 | res = acct->active; |
118 | if (!file || !acct_globals.needcheck) | 120 | if (!file || !acct->needcheck) |
119 | goto out; | 121 | goto out; |
120 | spin_unlock(&acct_globals.lock); | 122 | spin_unlock(&acct_lock); |
121 | 123 | ||
122 | /* May block */ | 124 | /* May block */ |
123 | if (vfs_statfs(file->f_path.dentry, &sbuf)) | 125 | if (vfs_statfs(file->f_path.dentry, &sbuf)) |
@@ -136,35 +138,35 @@ static int check_free_space(struct file *file) | |||
136 | act = 0; | 138 | act = 0; |
137 | 139 | ||
138 | /* | 140 | /* |
139 | * If some joker switched acct_globals.file under us we'ld better be | 141 | * If some joker switched acct->file under us we'ld better be |
140 | * silent and _not_ touch anything. | 142 | * silent and _not_ touch anything. |
141 | */ | 143 | */ |
142 | spin_lock(&acct_globals.lock); | 144 | spin_lock(&acct_lock); |
143 | if (file != acct_globals.file) { | 145 | if (file != acct->file) { |
144 | if (act) | 146 | if (act) |
145 | res = act>0; | 147 | res = act>0; |
146 | goto out; | 148 | goto out; |
147 | } | 149 | } |
148 | 150 | ||
149 | if (acct_globals.active) { | 151 | if (acct->active) { |
150 | if (act < 0) { | 152 | if (act < 0) { |
151 | acct_globals.active = 0; | 153 | acct->active = 0; |
152 | printk(KERN_INFO "Process accounting paused\n"); | 154 | printk(KERN_INFO "Process accounting paused\n"); |
153 | } | 155 | } |
154 | } else { | 156 | } else { |
155 | if (act > 0) { | 157 | if (act > 0) { |
156 | acct_globals.active = 1; | 158 | acct->active = 1; |
157 | printk(KERN_INFO "Process accounting resumed\n"); | 159 | printk(KERN_INFO "Process accounting resumed\n"); |
158 | } | 160 | } |
159 | } | 161 | } |
160 | 162 | ||
161 | del_timer(&acct_globals.timer); | 163 | del_timer(&acct->timer); |
162 | acct_globals.needcheck = 0; | 164 | acct->needcheck = 0; |
163 | acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ; | 165 | acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ; |
164 | add_timer(&acct_globals.timer); | 166 | add_timer(&acct->timer); |
165 | res = acct_globals.active; | 167 | res = acct->active; |
166 | out: | 168 | out: |
167 | spin_unlock(&acct_globals.lock); | 169 | spin_unlock(&acct_lock); |
168 | return res; | 170 | return res; |
169 | } | 171 | } |
170 | 172 | ||
@@ -172,39 +174,41 @@ out: | |||
172 | * Close the old accounting file (if currently open) and then replace | 174 | * Close the old accounting file (if currently open) and then replace |
173 | * it with file (if non-NULL). | 175 | * it with file (if non-NULL). |
174 | * | 176 | * |
175 | * NOTE: acct_globals.lock MUST be held on entry and exit. | 177 | * NOTE: acct_lock MUST be held on entry and exit. |
176 | */ | 178 | */ |
177 | static void acct_file_reopen(struct file *file) | 179 | static void acct_file_reopen(struct bsd_acct_struct *acct, struct file *file, |
180 | struct pid_namespace *ns) | ||
178 | { | 181 | { |
179 | struct file *old_acct = NULL; | 182 | struct file *old_acct = NULL; |
180 | struct pid_namespace *old_ns = NULL; | 183 | struct pid_namespace *old_ns = NULL; |
181 | 184 | ||
182 | if (acct_globals.file) { | 185 | if (acct->file) { |
183 | old_acct = acct_globals.file; | 186 | old_acct = acct->file; |
184 | old_ns = acct_globals.ns; | 187 | old_ns = acct->ns; |
185 | del_timer(&acct_globals.timer); | 188 | del_timer(&acct->timer); |
186 | acct_globals.active = 0; | 189 | acct->active = 0; |
187 | acct_globals.needcheck = 0; | 190 | acct->needcheck = 0; |
188 | acct_globals.file = NULL; | 191 | acct->file = NULL; |
192 | acct->ns = NULL; | ||
193 | list_del(&acct->list); | ||
189 | } | 194 | } |
190 | if (file) { | 195 | if (file) { |
191 | acct_globals.file = file; | 196 | acct->file = file; |
192 | acct_globals.ns = get_pid_ns(task_active_pid_ns(current)); | 197 | acct->ns = ns; |
193 | acct_globals.needcheck = 0; | 198 | acct->needcheck = 0; |
194 | acct_globals.active = 1; | 199 | acct->active = 1; |
200 | list_add(&acct->list, &acct_list); | ||
195 | /* It's been deleted if it was used before so this is safe */ | 201 | /* It's been deleted if it was used before so this is safe */ |
196 | init_timer(&acct_globals.timer); | 202 | setup_timer(&acct->timer, acct_timeout, (unsigned long)acct); |
197 | acct_globals.timer.function = acct_timeout; | 203 | acct->timer.expires = jiffies + ACCT_TIMEOUT*HZ; |
198 | acct_globals.timer.expires = jiffies + ACCT_TIMEOUT*HZ; | 204 | add_timer(&acct->timer); |
199 | add_timer(&acct_globals.timer); | ||
200 | } | 205 | } |
201 | if (old_acct) { | 206 | if (old_acct) { |
202 | mnt_unpin(old_acct->f_path.mnt); | 207 | mnt_unpin(old_acct->f_path.mnt); |
203 | spin_unlock(&acct_globals.lock); | 208 | spin_unlock(&acct_lock); |
204 | do_acct_process(old_ns, old_acct); | 209 | do_acct_process(acct, old_ns, old_acct); |
205 | filp_close(old_acct, NULL); | 210 | filp_close(old_acct, NULL); |
206 | put_pid_ns(old_ns); | 211 | spin_lock(&acct_lock); |
207 | spin_lock(&acct_globals.lock); | ||
208 | } | 212 | } |
209 | } | 213 | } |
210 | 214 | ||
@@ -212,6 +216,8 @@ static int acct_on(char *name) | |||
212 | { | 216 | { |
213 | struct file *file; | 217 | struct file *file; |
214 | int error; | 218 | int error; |
219 | struct pid_namespace *ns; | ||
220 | struct bsd_acct_struct *acct = NULL; | ||
215 | 221 | ||
216 | /* Difference from BSD - they don't do O_APPEND */ | 222 | /* Difference from BSD - they don't do O_APPEND */ |
217 | file = filp_open(name, O_WRONLY|O_APPEND|O_LARGEFILE, 0); | 223 | file = filp_open(name, O_WRONLY|O_APPEND|O_LARGEFILE, 0); |
@@ -228,18 +234,34 @@ static int acct_on(char *name) | |||
228 | return -EIO; | 234 | return -EIO; |
229 | } | 235 | } |
230 | 236 | ||
237 | ns = task_active_pid_ns(current); | ||
238 | if (ns->bacct == NULL) { | ||
239 | acct = kzalloc(sizeof(struct bsd_acct_struct), GFP_KERNEL); | ||
240 | if (acct == NULL) { | ||
241 | filp_close(file, NULL); | ||
242 | return -ENOMEM; | ||
243 | } | ||
244 | } | ||
245 | |||
231 | error = security_acct(file); | 246 | error = security_acct(file); |
232 | if (error) { | 247 | if (error) { |
248 | kfree(acct); | ||
233 | filp_close(file, NULL); | 249 | filp_close(file, NULL); |
234 | return error; | 250 | return error; |
235 | } | 251 | } |
236 | 252 | ||
237 | spin_lock(&acct_globals.lock); | 253 | spin_lock(&acct_lock); |
254 | if (ns->bacct == NULL) { | ||
255 | ns->bacct = acct; | ||
256 | acct = NULL; | ||
257 | } | ||
258 | |||
238 | mnt_pin(file->f_path.mnt); | 259 | mnt_pin(file->f_path.mnt); |
239 | acct_file_reopen(file); | 260 | acct_file_reopen(ns->bacct, file, ns); |
240 | spin_unlock(&acct_globals.lock); | 261 | spin_unlock(&acct_lock); |
241 | 262 | ||
242 | mntput(file->f_path.mnt); /* it's pinned, now give up active reference */ | 263 | mntput(file->f_path.mnt); /* it's pinned, now give up active reference */ |
264 | kfree(acct); | ||
243 | 265 | ||
244 | return 0; | 266 | return 0; |
245 | } | 267 | } |
@@ -269,11 +291,17 @@ asmlinkage long sys_acct(const char __user *name) | |||
269 | error = acct_on(tmp); | 291 | error = acct_on(tmp); |
270 | putname(tmp); | 292 | putname(tmp); |
271 | } else { | 293 | } else { |
294 | struct bsd_acct_struct *acct; | ||
295 | |||
296 | acct = task_active_pid_ns(current)->bacct; | ||
297 | if (acct == NULL) | ||
298 | return 0; | ||
299 | |||
272 | error = security_acct(NULL); | 300 | error = security_acct(NULL); |
273 | if (!error) { | 301 | if (!error) { |
274 | spin_lock(&acct_globals.lock); | 302 | spin_lock(&acct_lock); |
275 | acct_file_reopen(NULL); | 303 | acct_file_reopen(acct, NULL, NULL); |
276 | spin_unlock(&acct_globals.lock); | 304 | spin_unlock(&acct_lock); |
277 | } | 305 | } |
278 | } | 306 | } |
279 | return error; | 307 | return error; |
@@ -288,10 +316,16 @@ asmlinkage long sys_acct(const char __user *name) | |||
288 | */ | 316 | */ |
289 | void acct_auto_close_mnt(struct vfsmount *m) | 317 | void acct_auto_close_mnt(struct vfsmount *m) |
290 | { | 318 | { |
291 | spin_lock(&acct_globals.lock); | 319 | struct bsd_acct_struct *acct; |
292 | if (acct_globals.file && acct_globals.file->f_path.mnt == m) | 320 | |
293 | acct_file_reopen(NULL); | 321 | spin_lock(&acct_lock); |
294 | spin_unlock(&acct_globals.lock); | 322 | restart: |
323 | list_for_each_entry(acct, &acct_list, list) | ||
324 | if (acct->file && acct->file->f_path.mnt == m) { | ||
325 | acct_file_reopen(acct, NULL, NULL); | ||
326 | goto restart; | ||
327 | } | ||
328 | spin_unlock(&acct_lock); | ||
295 | } | 329 | } |
296 | 330 | ||
297 | /** | 331 | /** |
@@ -303,12 +337,31 @@ void acct_auto_close_mnt(struct vfsmount *m) | |||
303 | */ | 337 | */ |
304 | void acct_auto_close(struct super_block *sb) | 338 | void acct_auto_close(struct super_block *sb) |
305 | { | 339 | { |
306 | spin_lock(&acct_globals.lock); | 340 | struct bsd_acct_struct *acct; |
307 | if (acct_globals.file && | 341 | |
308 | acct_globals.file->f_path.mnt->mnt_sb == sb) { | 342 | spin_lock(&acct_lock); |
309 | acct_file_reopen(NULL); | 343 | restart: |
344 | list_for_each_entry(acct, &acct_list, list) | ||
345 | if (acct->file && acct->file->f_path.mnt->mnt_sb == sb) { | ||
346 | acct_file_reopen(acct, NULL, NULL); | ||
347 | goto restart; | ||
348 | } | ||
349 | spin_unlock(&acct_lock); | ||
350 | } | ||
351 | |||
352 | void acct_exit_ns(struct pid_namespace *ns) | ||
353 | { | ||
354 | struct bsd_acct_struct *acct; | ||
355 | |||
356 | spin_lock(&acct_lock); | ||
357 | acct = ns->bacct; | ||
358 | if (acct != NULL) { | ||
359 | if (acct->file != NULL) | ||
360 | acct_file_reopen(acct, NULL, NULL); | ||
361 | |||
362 | kfree(acct); | ||
310 | } | 363 | } |
311 | spin_unlock(&acct_globals.lock); | 364 | spin_unlock(&acct_lock); |
312 | } | 365 | } |
313 | 366 | ||
314 | /* | 367 | /* |
@@ -425,7 +478,8 @@ static u32 encode_float(u64 value) | |||
425 | /* | 478 | /* |
426 | * do_acct_process does all actual work. Caller holds the reference to file. | 479 | * do_acct_process does all actual work. Caller holds the reference to file. |
427 | */ | 480 | */ |
428 | static void do_acct_process(struct pid_namespace *ns, struct file *file) | 481 | static void do_acct_process(struct bsd_acct_struct *acct, |
482 | struct pid_namespace *ns, struct file *file) | ||
429 | { | 483 | { |
430 | struct pacct_struct *pacct = ¤t->signal->pacct; | 484 | struct pacct_struct *pacct = ¤t->signal->pacct; |
431 | acct_t ac; | 485 | acct_t ac; |
@@ -440,7 +494,7 @@ static void do_acct_process(struct pid_namespace *ns, struct file *file) | |||
440 | * First check to see if there is enough free_space to continue | 494 | * First check to see if there is enough free_space to continue |
441 | * the process accounting system. | 495 | * the process accounting system. |
442 | */ | 496 | */ |
443 | if (!check_free_space(file)) | 497 | if (!check_free_space(acct, file)) |
444 | return; | 498 | return; |
445 | 499 | ||
446 | /* | 500 | /* |
@@ -577,34 +631,46 @@ void acct_collect(long exitcode, int group_dead) | |||
577 | spin_unlock_irq(¤t->sighand->siglock); | 631 | spin_unlock_irq(¤t->sighand->siglock); |
578 | } | 632 | } |
579 | 633 | ||
580 | /** | 634 | static void acct_process_in_ns(struct pid_namespace *ns) |
581 | * acct_process - now just a wrapper around do_acct_process | ||
582 | * @exitcode: task exit code | ||
583 | * | ||
584 | * handles process accounting for an exiting task | ||
585 | */ | ||
586 | void acct_process(void) | ||
587 | { | 635 | { |
588 | struct file *file = NULL; | 636 | struct file *file = NULL; |
589 | struct pid_namespace *ns; | 637 | struct bsd_acct_struct *acct; |
590 | 638 | ||
639 | acct = ns->bacct; | ||
591 | /* | 640 | /* |
592 | * accelerate the common fastpath: | 641 | * accelerate the common fastpath: |
593 | */ | 642 | */ |
594 | if (!acct_globals.file) | 643 | if (!acct || !acct->file) |
595 | return; | 644 | return; |
596 | 645 | ||
597 | spin_lock(&acct_globals.lock); | 646 | spin_lock(&acct_lock); |
598 | file = acct_globals.file; | 647 | file = acct->file; |
599 | if (unlikely(!file)) { | 648 | if (unlikely(!file)) { |
600 | spin_unlock(&acct_globals.lock); | 649 | spin_unlock(&acct_lock); |
601 | return; | 650 | return; |
602 | } | 651 | } |
603 | get_file(file); | 652 | get_file(file); |
604 | ns = get_pid_ns(acct_globals.ns); | 653 | spin_unlock(&acct_lock); |
605 | spin_unlock(&acct_globals.lock); | ||
606 | 654 | ||
607 | do_acct_process(ns, file); | 655 | do_acct_process(acct, ns, file); |
608 | fput(file); | 656 | fput(file); |
609 | put_pid_ns(ns); | 657 | } |
658 | |||
659 | /** | ||
660 | * acct_process - now just a wrapper around acct_process_in_ns, | ||
661 | * which in turn is a wrapper around do_acct_process. | ||
662 | * | ||
663 | * handles process accounting for an exiting task | ||
664 | */ | ||
665 | void acct_process(void) | ||
666 | { | ||
667 | struct pid_namespace *ns; | ||
668 | |||
669 | /* | ||
670 | * This loop is safe lockless, since current is still | ||
671 | * alive and holds its namespace, which in turn holds | ||
672 | * its parent. | ||
673 | */ | ||
674 | for (ns = task_active_pid_ns(current); ns != NULL; ns = ns->parent) | ||
675 | acct_process_in_ns(ns); | ||
610 | } | 676 | } |
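The acct.c changes above convert the single acct_globals object into per-pid-namespace bsd_acct_struct instances kept on acct_list under acct_lock, and the free-space timer is now armed with setup_timer() so the callback can recover its own structure from the data cookie instead of poking a global. A minimal sketch of that timer idiom, with hypothetical names (my_ctx, my_timeout, my_arm_timer) standing in for the patch's types:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_ctx {                        /* hypothetical, stands in for bsd_acct_struct */
	struct timer_list timer;
	volatile int needcheck;
};

static void my_timeout(unsigned long x)        /* timer callbacks take an unsigned long cookie */
{
	struct my_ctx *ctx = (struct my_ctx *)x;

	ctx->needcheck = 1;
}

static void my_arm_timer(struct my_ctx *ctx)
{
	/* one call sets both .function and .data; compare with the old
	 * init_timer() plus two open-coded assignments that the patch removes */
	setup_timer(&ctx->timer, my_timeout, (unsigned long)ctx);
	ctx->timer.expires = jiffies + 30 * HZ;
	add_timer(&ctx->timer);
}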
diff --git a/kernel/audit.c b/kernel/audit.c
index e092f1c0ce30..4414e93d8750 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -707,12 +707,14 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
707 | if (status_get->mask & AUDIT_STATUS_ENABLED) { | 707 | if (status_get->mask & AUDIT_STATUS_ENABLED) { |
708 | err = audit_set_enabled(status_get->enabled, | 708 | err = audit_set_enabled(status_get->enabled, |
709 | loginuid, sessionid, sid); | 709 | loginuid, sessionid, sid); |
710 | if (err < 0) return err; | 710 | if (err < 0) |
711 | return err; | ||
711 | } | 712 | } |
712 | if (status_get->mask & AUDIT_STATUS_FAILURE) { | 713 | if (status_get->mask & AUDIT_STATUS_FAILURE) { |
713 | err = audit_set_failure(status_get->failure, | 714 | err = audit_set_failure(status_get->failure, |
714 | loginuid, sessionid, sid); | 715 | loginuid, sessionid, sid); |
715 | if (err < 0) return err; | 716 | if (err < 0) |
717 | return err; | ||
716 | } | 718 | } |
717 | if (status_get->mask & AUDIT_STATUS_PID) { | 719 | if (status_get->mask & AUDIT_STATUS_PID) { |
718 | int new_pid = status_get->pid; | 720 | int new_pid = status_get->pid; |
@@ -725,9 +727,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
725 | audit_pid = new_pid; | 727 | audit_pid = new_pid; |
726 | audit_nlk_pid = NETLINK_CB(skb).pid; | 728 | audit_nlk_pid = NETLINK_CB(skb).pid; |
727 | } | 729 | } |
728 | if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) | 730 | if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) { |
729 | err = audit_set_rate_limit(status_get->rate_limit, | 731 | err = audit_set_rate_limit(status_get->rate_limit, |
730 | loginuid, sessionid, sid); | 732 | loginuid, sessionid, sid); |
733 | if (err < 0) | ||
734 | return err; | ||
735 | } | ||
731 | if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) | 736 | if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) |
732 | err = audit_set_backlog_limit(status_get->backlog_limit, | 737 | err = audit_set_backlog_limit(status_get->backlog_limit, |
733 | loginuid, sessionid, sid); | 738 | loginuid, sessionid, sid); |
@@ -1366,7 +1371,7 @@ int audit_string_contains_control(const char *string, size_t len) | |||
1366 | { | 1371 | { |
1367 | const unsigned char *p; | 1372 | const unsigned char *p; |
1368 | for (p = string; p < (const unsigned char *)string + len && *p; p++) { | 1373 | for (p = string; p < (const unsigned char *)string + len && *p; p++) { |
1369 | if (*p == '"' || *p < 0x21 || *p > 0x7f) | 1374 | if (*p == '"' || *p < 0x21 || *p > 0x7e) |
1370 | return 1; | 1375 | return 1; |
1371 | } | 1376 | } |
1372 | return 0; | 1377 | return 0; |
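The last audit.c hunk narrows the printable range: 0x7e ('~') is the last printable ASCII character, so 0x7f (DEL) is now reported as a control character and gets escaped by the untrusted-string logger. A standalone sketch of the same test, written as a hypothetical helper rather than the kernel function itself:

#include <stddef.h>

/* Return 1 if the string contains a byte the audit code would escape:
 * '"', anything below 0x21 (space and control characters), or anything
 * above 0x7e (DEL and non-ASCII bytes). Mirrors the post-patch boundary. */
static int contains_control(const char *string, size_t len)
{
	const unsigned char *p;

	for (p = (const unsigned char *)string;
	     p < (const unsigned char *)string + len && *p; p++) {
		if (*p == '"' || *p < 0x21 || *p > 0x7e)
			return 1;
	}
	return 0;
}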
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 98c50cc671bb..b7d354e2b0ef 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1022,8 +1022,11 @@ static void audit_update_watch(struct audit_parent *parent, | |||
1022 | struct audit_buffer *ab; | 1022 | struct audit_buffer *ab; |
1023 | ab = audit_log_start(NULL, GFP_KERNEL, | 1023 | ab = audit_log_start(NULL, GFP_KERNEL, |
1024 | AUDIT_CONFIG_CHANGE); | 1024 | AUDIT_CONFIG_CHANGE); |
1025 | audit_log_format(ab, "auid=%u ses=%u", | ||
1026 | audit_get_loginuid(current), | ||
1027 | audit_get_sessionid(current)); | ||
1025 | audit_log_format(ab, | 1028 | audit_log_format(ab, |
1026 | "op=updated rules specifying path="); | 1029 | " op=updated rules specifying path="); |
1027 | audit_log_untrustedstring(ab, owatch->path); | 1030 | audit_log_untrustedstring(ab, owatch->path); |
1028 | audit_log_format(ab, " with dev=%u ino=%lu\n", | 1031 | audit_log_format(ab, " with dev=%u ino=%lu\n", |
1029 | dev, ino); | 1032 | dev, ino); |
@@ -1058,7 +1061,10 @@ static void audit_remove_parent_watches(struct audit_parent *parent) | |||
1058 | struct audit_buffer *ab; | 1061 | struct audit_buffer *ab; |
1059 | ab = audit_log_start(NULL, GFP_KERNEL, | 1062 | ab = audit_log_start(NULL, GFP_KERNEL, |
1060 | AUDIT_CONFIG_CHANGE); | 1063 | AUDIT_CONFIG_CHANGE); |
1061 | audit_log_format(ab, "op=remove rule path="); | 1064 | audit_log_format(ab, "auid=%u ses=%u", |
1065 | audit_get_loginuid(current), | ||
1066 | audit_get_sessionid(current)); | ||
1067 | audit_log_format(ab, " op=remove rule path="); | ||
1062 | audit_log_untrustedstring(ab, w->path); | 1068 | audit_log_untrustedstring(ab, w->path); |
1063 | if (r->filterkey) { | 1069 | if (r->filterkey) { |
1064 | audit_log_format(ab, " key="); | 1070 | audit_log_format(ab, " key="); |
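Both auditfilter.c hunks prefix the configuration-change records with the acting user's auid and session id. A sketch of the surrounding logging idiom, assuming the usual audit_log_start()/audit_log_format()/audit_log_end() sequence; the path string here is only a placeholder:

#include <linux/audit.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static void log_config_change_example(void)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (ab) {
		audit_log_format(ab, "auid=%u ses=%u",
				 audit_get_loginuid(current),
				 audit_get_sessionid(current));
		audit_log_format(ab, " op=updated rules specifying path=");
		audit_log_untrustedstring(ab, "/some/watched/path");  /* placeholder path */
		audit_log_end(ab);
	}
}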
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c10e7aae04d7..59cedfb040e7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -243,7 +243,11 @@ static inline int open_arg(int flags, int mask) | |||
243 | 243 | ||
244 | static int audit_match_perm(struct audit_context *ctx, int mask) | 244 | static int audit_match_perm(struct audit_context *ctx, int mask) |
245 | { | 245 | { |
246 | unsigned n = ctx->major; | 246 | unsigned n; |
247 | if (unlikely(!ctx)) | ||
248 | return 0; | ||
249 | |||
250 | n = ctx->major; | ||
247 | switch (audit_classify_syscall(ctx->arch, n)) { | 251 | switch (audit_classify_syscall(ctx->arch, n)) { |
248 | case 0: /* native */ | 252 | case 0: /* native */ |
249 | if ((mask & AUDIT_PERM_WRITE) && | 253 | if ((mask & AUDIT_PERM_WRITE) && |
@@ -284,6 +288,10 @@ static int audit_match_filetype(struct audit_context *ctx, int which) | |||
284 | { | 288 | { |
285 | unsigned index = which & ~S_IFMT; | 289 | unsigned index = which & ~S_IFMT; |
286 | mode_t mode = which & S_IFMT; | 290 | mode_t mode = which & S_IFMT; |
291 | |||
292 | if (unlikely(!ctx)) | ||
293 | return 0; | ||
294 | |||
287 | if (index >= ctx->name_count) | 295 | if (index >= ctx->name_count) |
288 | return 0; | 296 | return 0; |
289 | if (ctx->names[index].ino == -1) | 297 | if (ctx->names[index].ino == -1) |
@@ -610,7 +618,7 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
610 | if (!result) | 618 | if (!result) |
611 | return 0; | 619 | return 0; |
612 | } | 620 | } |
613 | if (rule->filterkey) | 621 | if (rule->filterkey && ctx) |
614 | ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); | 622 | ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); |
615 | switch (rule->action) { | 623 | switch (rule->action) { |
616 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; | 624 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; |
@@ -1476,7 +1484,8 @@ void audit_syscall_entry(int arch, int major, | |||
1476 | struct audit_context *context = tsk->audit_context; | 1484 | struct audit_context *context = tsk->audit_context; |
1477 | enum audit_state state; | 1485 | enum audit_state state; |
1478 | 1486 | ||
1479 | BUG_ON(!context); | 1487 | if (unlikely(!context)) |
1488 | return; | ||
1480 | 1489 | ||
1481 | /* | 1490 | /* |
1482 | * This happens only on certain architectures that make system | 1491 | * This happens only on certain architectures that make system |
@@ -2374,7 +2383,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
2374 | struct audit_context *ctx = tsk->audit_context; | 2383 | struct audit_context *ctx = tsk->audit_context; |
2375 | 2384 | ||
2376 | if (audit_pid && t->tgid == audit_pid) { | 2385 | if (audit_pid && t->tgid == audit_pid) { |
2377 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) { | 2386 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { |
2378 | audit_sig_pid = tsk->pid; | 2387 | audit_sig_pid = tsk->pid; |
2379 | if (tsk->loginuid != -1) | 2388 | if (tsk->loginuid != -1) |
2380 | audit_sig_uid = tsk->loginuid; | 2389 | audit_sig_uid = tsk->loginuid; |
diff --git a/kernel/capability.c b/kernel/capability.c
index 901e0fdc3fff..33e51e78c2d8 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -115,11 +115,208 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy) | |||
115 | return 0; | 115 | return 0; |
116 | } | 116 | } |
117 | 117 | ||
118 | #ifndef CONFIG_SECURITY_FILE_CAPABILITIES | ||
119 | |||
120 | /* | ||
121 | * Without filesystem capability support, we nominally support one process | ||
122 | * setting the capabilities of another | ||
123 | */ | ||
124 | static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, | ||
125 | kernel_cap_t *pIp, kernel_cap_t *pPp) | ||
126 | { | ||
127 | struct task_struct *target; | ||
128 | int ret; | ||
129 | |||
130 | spin_lock(&task_capability_lock); | ||
131 | read_lock(&tasklist_lock); | ||
132 | |||
133 | if (pid && pid != task_pid_vnr(current)) { | ||
134 | target = find_task_by_vpid(pid); | ||
135 | if (!target) { | ||
136 | ret = -ESRCH; | ||
137 | goto out; | ||
138 | } | ||
139 | } else | ||
140 | target = current; | ||
141 | |||
142 | ret = security_capget(target, pEp, pIp, pPp); | ||
143 | |||
144 | out: | ||
145 | read_unlock(&tasklist_lock); | ||
146 | spin_unlock(&task_capability_lock); | ||
147 | |||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * cap_set_pg - set capabilities for all processes in a given process | ||
153 | * group. We call this holding task_capability_lock and tasklist_lock. | ||
154 | */ | ||
155 | static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective, | ||
156 | kernel_cap_t *inheritable, | ||
157 | kernel_cap_t *permitted) | ||
158 | { | ||
159 | struct task_struct *g, *target; | ||
160 | int ret = -EPERM; | ||
161 | int found = 0; | ||
162 | struct pid *pgrp; | ||
163 | |||
164 | spin_lock(&task_capability_lock); | ||
165 | read_lock(&tasklist_lock); | ||
166 | |||
167 | pgrp = find_vpid(pgrp_nr); | ||
168 | do_each_pid_task(pgrp, PIDTYPE_PGID, g) { | ||
169 | target = g; | ||
170 | while_each_thread(g, target) { | ||
171 | if (!security_capset_check(target, effective, | ||
172 | inheritable, permitted)) { | ||
173 | security_capset_set(target, effective, | ||
174 | inheritable, permitted); | ||
175 | ret = 0; | ||
176 | } | ||
177 | found = 1; | ||
178 | } | ||
179 | } while_each_pid_task(pgrp, PIDTYPE_PGID, g); | ||
180 | |||
181 | read_unlock(&tasklist_lock); | ||
182 | spin_unlock(&task_capability_lock); | ||
183 | |||
184 | if (!found) | ||
185 | ret = 0; | ||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * cap_set_all - set capabilities for all processes other than init | ||
191 | * and self. We call this holding task_capability_lock and tasklist_lock. | ||
192 | */ | ||
193 | static inline int cap_set_all(kernel_cap_t *effective, | ||
194 | kernel_cap_t *inheritable, | ||
195 | kernel_cap_t *permitted) | ||
196 | { | ||
197 | struct task_struct *g, *target; | ||
198 | int ret = -EPERM; | ||
199 | int found = 0; | ||
200 | |||
201 | spin_lock(&task_capability_lock); | ||
202 | read_lock(&tasklist_lock); | ||
203 | |||
204 | do_each_thread(g, target) { | ||
205 | if (target == current | ||
206 | || is_container_init(target->group_leader)) | ||
207 | continue; | ||
208 | found = 1; | ||
209 | if (security_capset_check(target, effective, inheritable, | ||
210 | permitted)) | ||
211 | continue; | ||
212 | ret = 0; | ||
213 | security_capset_set(target, effective, inheritable, permitted); | ||
214 | } while_each_thread(g, target); | ||
215 | |||
216 | read_unlock(&tasklist_lock); | ||
217 | spin_unlock(&task_capability_lock); | ||
218 | |||
219 | if (!found) | ||
220 | ret = 0; | ||
221 | |||
222 | return ret; | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * Given the target pid does not refer to the current process we | ||
227 | * need more elaborate support... (This support is not present when | ||
228 | * filesystem capabilities are configured.) | ||
229 | */ | ||
230 | static inline int do_sys_capset_other_tasks(pid_t pid, kernel_cap_t *effective, | ||
231 | kernel_cap_t *inheritable, | ||
232 | kernel_cap_t *permitted) | ||
233 | { | ||
234 | struct task_struct *target; | ||
235 | int ret; | ||
236 | |||
237 | if (!capable(CAP_SETPCAP)) | ||
238 | return -EPERM; | ||
239 | |||
240 | if (pid == -1) /* all procs other than current and init */ | ||
241 | return cap_set_all(effective, inheritable, permitted); | ||
242 | |||
243 | else if (pid < 0) /* all procs in process group */ | ||
244 | return cap_set_pg(-pid, effective, inheritable, permitted); | ||
245 | |||
246 | /* target != current */ | ||
247 | spin_lock(&task_capability_lock); | ||
248 | read_lock(&tasklist_lock); | ||
249 | |||
250 | target = find_task_by_vpid(pid); | ||
251 | if (!target) | ||
252 | ret = -ESRCH; | ||
253 | else { | ||
254 | ret = security_capset_check(target, effective, inheritable, | ||
255 | permitted); | ||
256 | |||
257 | /* having verified that the proposed changes are legal, | ||
258 | we now put them into effect. */ | ||
259 | if (!ret) | ||
260 | security_capset_set(target, effective, inheritable, | ||
261 | permitted); | ||
262 | } | ||
263 | |||
264 | read_unlock(&tasklist_lock); | ||
265 | spin_unlock(&task_capability_lock); | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | #else /* ie., def CONFIG_SECURITY_FILE_CAPABILITIES */ | ||
271 | |||
118 | /* | 272 | /* |
119 | * For sys_getproccap() and sys_setproccap(), any of the three | 273 | * If we have configured with filesystem capability support, then the |
120 | * capability set pointers may be NULL -- indicating that that set is | 274 | * only thing that can change the capabilities of the current process |
121 | * uninteresting and/or not to be changed. | 275 | * is the current process. As such, we can't be in this code at the |
276 | * same time as we are in the process of setting capabilities in this | ||
277 | * process. The net result is that we can limit our use of locks to | ||
278 | * when we are reading the caps of another process. | ||
122 | */ | 279 | */ |
280 | static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, | ||
281 | kernel_cap_t *pIp, kernel_cap_t *pPp) | ||
282 | { | ||
283 | int ret; | ||
284 | |||
285 | if (pid && (pid != task_pid_vnr(current))) { | ||
286 | struct task_struct *target; | ||
287 | |||
288 | spin_lock(&task_capability_lock); | ||
289 | read_lock(&tasklist_lock); | ||
290 | |||
291 | target = find_task_by_vpid(pid); | ||
292 | if (!target) | ||
293 | ret = -ESRCH; | ||
294 | else | ||
295 | ret = security_capget(target, pEp, pIp, pPp); | ||
296 | |||
297 | read_unlock(&tasklist_lock); | ||
298 | spin_unlock(&task_capability_lock); | ||
299 | } else | ||
300 | ret = security_capget(current, pEp, pIp, pPp); | ||
301 | |||
302 | return ret; | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * With filesystem capability support configured, the kernel does not | ||
307 | * permit the changing of capabilities in one process by another | ||
308 | * process. (CAP_SETPCAP has much less broad semantics when configured | ||
309 | * this way.) | ||
310 | */ | ||
311 | static inline int do_sys_capset_other_tasks(pid_t pid, | ||
312 | kernel_cap_t *effective, | ||
313 | kernel_cap_t *inheritable, | ||
314 | kernel_cap_t *permitted) | ||
315 | { | ||
316 | return -EPERM; | ||
317 | } | ||
318 | |||
319 | #endif /* ie., ndef CONFIG_SECURITY_FILE_CAPABILITIES */ | ||
123 | 320 | ||
124 | /* | 321 | /* |
125 | * Atomically modify the effective capabilities returning the original | 322 | * Atomically modify the effective capabilities returning the original |
@@ -155,7 +352,6 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) | |||
155 | { | 352 | { |
156 | int ret = 0; | 353 | int ret = 0; |
157 | pid_t pid; | 354 | pid_t pid; |
158 | struct task_struct *target; | ||
159 | unsigned tocopy; | 355 | unsigned tocopy; |
160 | kernel_cap_t pE, pI, pP; | 356 | kernel_cap_t pE, pI, pP; |
161 | 357 | ||
@@ -169,23 +365,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) | |||
169 | if (pid < 0) | 365 | if (pid < 0) |
170 | return -EINVAL; | 366 | return -EINVAL; |
171 | 367 | ||
172 | spin_lock(&task_capability_lock); | 368 | ret = cap_get_target_pid(pid, &pE, &pI, &pP); |
173 | read_lock(&tasklist_lock); | ||
174 | |||
175 | if (pid && pid != task_pid_vnr(current)) { | ||
176 | target = find_task_by_vpid(pid); | ||
177 | if (!target) { | ||
178 | ret = -ESRCH; | ||
179 | goto out; | ||
180 | } | ||
181 | } else | ||
182 | target = current; | ||
183 | |||
184 | ret = security_capget(target, &pE, &pI, &pP); | ||
185 | |||
186 | out: | ||
187 | read_unlock(&tasklist_lock); | ||
188 | spin_unlock(&task_capability_lock); | ||
189 | 369 | ||
190 | if (!ret) { | 370 | if (!ret) { |
191 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; | 371 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; |
@@ -216,7 +396,6 @@ out: | |||
216 | * before modification is attempted and the application | 396 | * before modification is attempted and the application |
217 | * fails. | 397 | * fails. |
218 | */ | 398 | */ |
219 | |||
220 | if (copy_to_user(dataptr, kdata, tocopy | 399 | if (copy_to_user(dataptr, kdata, tocopy |
221 | * sizeof(struct __user_cap_data_struct))) { | 400 | * sizeof(struct __user_cap_data_struct))) { |
222 | return -EFAULT; | 401 | return -EFAULT; |
@@ -226,70 +405,8 @@ out: | |||
226 | return ret; | 405 | return ret; |
227 | } | 406 | } |
228 | 407 | ||
229 | /* | ||
230 | * cap_set_pg - set capabilities for all processes in a given process | ||
231 | * group. We call this holding task_capability_lock and tasklist_lock. | ||
232 | */ | ||
233 | static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective, | ||
234 | kernel_cap_t *inheritable, | ||
235 | kernel_cap_t *permitted) | ||
236 | { | ||
237 | struct task_struct *g, *target; | ||
238 | int ret = -EPERM; | ||
239 | int found = 0; | ||
240 | struct pid *pgrp; | ||
241 | |||
242 | pgrp = find_vpid(pgrp_nr); | ||
243 | do_each_pid_task(pgrp, PIDTYPE_PGID, g) { | ||
244 | target = g; | ||
245 | while_each_thread(g, target) { | ||
246 | if (!security_capset_check(target, effective, | ||
247 | inheritable, | ||
248 | permitted)) { | ||
249 | security_capset_set(target, effective, | ||
250 | inheritable, | ||
251 | permitted); | ||
252 | ret = 0; | ||
253 | } | ||
254 | found = 1; | ||
255 | } | ||
256 | } while_each_pid_task(pgrp, PIDTYPE_PGID, g); | ||
257 | |||
258 | if (!found) | ||
259 | ret = 0; | ||
260 | return ret; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * cap_set_all - set capabilities for all processes other than init | ||
265 | * and self. We call this holding task_capability_lock and tasklist_lock. | ||
266 | */ | ||
267 | static inline int cap_set_all(kernel_cap_t *effective, | ||
268 | kernel_cap_t *inheritable, | ||
269 | kernel_cap_t *permitted) | ||
270 | { | ||
271 | struct task_struct *g, *target; | ||
272 | int ret = -EPERM; | ||
273 | int found = 0; | ||
274 | |||
275 | do_each_thread(g, target) { | ||
276 | if (target == current || is_container_init(target->group_leader)) | ||
277 | continue; | ||
278 | found = 1; | ||
279 | if (security_capset_check(target, effective, inheritable, | ||
280 | permitted)) | ||
281 | continue; | ||
282 | ret = 0; | ||
283 | security_capset_set(target, effective, inheritable, permitted); | ||
284 | } while_each_thread(g, target); | ||
285 | |||
286 | if (!found) | ||
287 | ret = 0; | ||
288 | return ret; | ||
289 | } | ||
290 | |||
291 | /** | 408 | /** |
292 | * sys_capset - set capabilities for a process or a group of processes | 409 | * sys_capset - set capabilities for a process or (*) a group of processes |
293 | * @header: pointer to struct that contains capability version and | 410 | * @header: pointer to struct that contains capability version and |
294 | * target pid data | 411 | * target pid data |
295 | * @data: pointer to struct that contains the effective, permitted, | 412 | * @data: pointer to struct that contains the effective, permitted, |
@@ -313,7 +430,6 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
313 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; | 430 | struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; |
314 | unsigned i, tocopy; | 431 | unsigned i, tocopy; |
315 | kernel_cap_t inheritable, permitted, effective; | 432 | kernel_cap_t inheritable, permitted, effective; |
316 | struct task_struct *target; | ||
317 | int ret; | 433 | int ret; |
318 | pid_t pid; | 434 | pid_t pid; |
319 | 435 | ||
@@ -324,9 +440,6 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
324 | if (get_user(pid, &header->pid)) | 440 | if (get_user(pid, &header->pid)) |
325 | return -EFAULT; | 441 | return -EFAULT; |
326 | 442 | ||
327 | if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP)) | ||
328 | return -EPERM; | ||
329 | |||
330 | if (copy_from_user(&kdata, data, tocopy | 443 | if (copy_from_user(&kdata, data, tocopy |
331 | * sizeof(struct __user_cap_data_struct))) { | 444 | * sizeof(struct __user_cap_data_struct))) { |
332 | return -EFAULT; | 445 | return -EFAULT; |
@@ -344,55 +457,51 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) | |||
344 | i++; | 457 | i++; |
345 | } | 458 | } |
346 | 459 | ||
347 | spin_lock(&task_capability_lock); | 460 | if (pid && (pid != task_pid_vnr(current))) |
348 | read_lock(&tasklist_lock); | 461 | ret = do_sys_capset_other_tasks(pid, &effective, &inheritable, |
349 | 462 | &permitted); | |
350 | if (pid > 0 && pid != task_pid_vnr(current)) { | 463 | else { |
351 | target = find_task_by_vpid(pid); | 464 | /* |
352 | if (!target) { | 465 | * This lock is required even when filesystem |
353 | ret = -ESRCH; | 466 | * capability support is configured - it protects the |
354 | goto out; | 467 | * sys_capget() call from returning incorrect data in |
355 | } | 468 | * the case that the targeted process is not the |
356 | } else | 469 | * current one. |
357 | target = current; | 470 | */ |
358 | 471 | spin_lock(&task_capability_lock); | |
359 | ret = 0; | ||
360 | |||
361 | /* having verified that the proposed changes are legal, | ||
362 | we now put them into effect. */ | ||
363 | if (pid < 0) { | ||
364 | if (pid == -1) /* all procs other than current and init */ | ||
365 | ret = cap_set_all(&effective, &inheritable, &permitted); | ||
366 | 472 | ||
367 | else /* all procs in process group */ | 473 | ret = security_capset_check(current, &effective, &inheritable, |
368 | ret = cap_set_pg(-pid, &effective, &inheritable, | ||
369 | &permitted); | ||
370 | } else { | ||
371 | ret = security_capset_check(target, &effective, &inheritable, | ||
372 | &permitted); | 474 | &permitted); |
475 | /* | ||
476 | * Having verified that the proposed changes are | ||
477 | * legal, we now put them into effect. | ||
478 | */ | ||
373 | if (!ret) | 479 | if (!ret) |
374 | security_capset_set(target, &effective, &inheritable, | 480 | security_capset_set(current, &effective, &inheritable, |
375 | &permitted); | 481 | &permitted); |
482 | spin_unlock(&task_capability_lock); | ||
376 | } | 483 | } |
377 | 484 | ||
378 | out: | ||
379 | read_unlock(&tasklist_lock); | ||
380 | spin_unlock(&task_capability_lock); | ||
381 | 485 | ||
382 | return ret; | 486 | return ret; |
383 | } | 487 | } |
384 | 488 | ||
385 | int __capable(struct task_struct *t, int cap) | 489 | /** |
490 | * capable - Determine if the current task has a superior capability in effect | ||
491 | * @cap: The capability to be tested for | ||
492 | * | ||
493 | * Return true if the current task has the given superior capability currently | ||
494 | * available for use, false if not. | ||
495 | * | ||
496 | * This sets PF_SUPERPRIV on the task if the capability is available on the | ||
497 | * assumption that it's about to be used. | ||
498 | */ | ||
499 | int capable(int cap) | ||
386 | { | 500 | { |
387 | if (security_capable(t, cap) == 0) { | 501 | if (has_capability(current, cap)) { |
388 | t->flags |= PF_SUPERPRIV; | 502 | current->flags |= PF_SUPERPRIV; |
389 | return 1; | 503 | return 1; |
390 | } | 504 | } |
391 | return 0; | 505 | return 0; |
392 | } | 506 | } |
393 | |||
394 | int capable(int cap) | ||
395 | { | ||
396 | return __capable(current, cap); | ||
397 | } | ||
398 | EXPORT_SYMBOL(capable); | 507 | EXPORT_SYMBOL(capable); |
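With this rewrite capable() simply asks has_capability(current, cap) and sets PF_SUPERPRIV on success; call sites keep their familiar shape. A minimal sketch of that pattern (the function and the choice of CAP_SYS_ADMIN are hypothetical examples, not taken from the patch):

#include <linux/capability.h>
#include <linux/errno.h>

/* Hypothetical privileged operation guard. */
static int do_admin_op(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;          /* caller lacks the capability */

	/* ... privileged work; PF_SUPERPRIV has been set on current ... */
	return 0;
}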
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 15ac0e1e4f4d..a0123d75ec9a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/delayacct.h> | 45 | #include <linux/delayacct.h> |
46 | #include <linux/cgroupstats.h> | 46 | #include <linux/cgroupstats.h> |
47 | #include <linux/hash.h> | 47 | #include <linux/hash.h> |
48 | #include <linux/namei.h> | ||
48 | 49 | ||
49 | #include <asm/atomic.h> | 50 | #include <asm/atomic.h> |
50 | 51 | ||
@@ -89,11 +90,7 @@ struct cgroupfs_root { | |||
89 | /* Hierarchy-specific flags */ | 90 | /* Hierarchy-specific flags */ |
90 | unsigned long flags; | 91 | unsigned long flags; |
91 | 92 | ||
92 | /* The path to use for release notifications. No locking | 93 | /* The path to use for release notifications. */ |
93 | * between setting and use - so if userspace updates this | ||
94 | * while child cgroups exist, you could miss a | ||
95 | * notification. We ensure that it's always a valid | ||
96 | * NUL-terminated string */ | ||
97 | char release_agent_path[PATH_MAX]; | 94 | char release_agent_path[PATH_MAX]; |
98 | }; | 95 | }; |
99 | 96 | ||
@@ -118,7 +115,7 @@ static int root_count; | |||
118 | * extra work in the fork/exit path if none of the subsystems need to | 115 | * extra work in the fork/exit path if none of the subsystems need to |
119 | * be called. | 116 | * be called. |
120 | */ | 117 | */ |
121 | static int need_forkexit_callback; | 118 | static int need_forkexit_callback __read_mostly; |
122 | static int need_mm_owner_callback __read_mostly; | 119 | static int need_mm_owner_callback __read_mostly; |
123 | 120 | ||
124 | /* convenient tests for these bits */ | 121 | /* convenient tests for these bits */ |
@@ -220,7 +217,7 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[]) | |||
220 | * task until after the first call to cgroup_iter_start(). This | 217 | * task until after the first call to cgroup_iter_start(). This |
221 | * reduces the fork()/exit() overhead for people who have cgroups | 218 | * reduces the fork()/exit() overhead for people who have cgroups |
222 | * compiled into their kernel but not actually in use */ | 219 | * compiled into their kernel but not actually in use */ |
223 | static int use_task_css_set_links; | 220 | static int use_task_css_set_links __read_mostly; |
224 | 221 | ||
225 | /* When we create or destroy a css_set, the operation simply | 222 | /* When we create or destroy a css_set, the operation simply |
226 | * takes/releases a reference count on all the cgroups referenced | 223 | * takes/releases a reference count on all the cgroups referenced |
@@ -241,17 +238,20 @@ static int use_task_css_set_links; | |||
241 | */ | 238 | */ |
242 | static void unlink_css_set(struct css_set *cg) | 239 | static void unlink_css_set(struct css_set *cg) |
243 | { | 240 | { |
241 | struct cg_cgroup_link *link; | ||
242 | struct cg_cgroup_link *saved_link; | ||
243 | |||
244 | write_lock(&css_set_lock); | 244 | write_lock(&css_set_lock); |
245 | hlist_del(&cg->hlist); | 245 | hlist_del(&cg->hlist); |
246 | css_set_count--; | 246 | css_set_count--; |
247 | while (!list_empty(&cg->cg_links)) { | 247 | |
248 | struct cg_cgroup_link *link; | 248 | list_for_each_entry_safe(link, saved_link, &cg->cg_links, |
249 | link = list_entry(cg->cg_links.next, | 249 | cg_link_list) { |
250 | struct cg_cgroup_link, cg_link_list); | ||
251 | list_del(&link->cg_link_list); | 250 | list_del(&link->cg_link_list); |
252 | list_del(&link->cgrp_link_list); | 251 | list_del(&link->cgrp_link_list); |
253 | kfree(link); | 252 | kfree(link); |
254 | } | 253 | } |
254 | |||
255 | write_unlock(&css_set_lock); | 255 | write_unlock(&css_set_lock); |
256 | } | 256 | } |
257 | 257 | ||
@@ -355,6 +355,17 @@ static struct css_set *find_existing_css_set( | |||
355 | return NULL; | 355 | return NULL; |
356 | } | 356 | } |
357 | 357 | ||
358 | static void free_cg_links(struct list_head *tmp) | ||
359 | { | ||
360 | struct cg_cgroup_link *link; | ||
361 | struct cg_cgroup_link *saved_link; | ||
362 | |||
363 | list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) { | ||
364 | list_del(&link->cgrp_link_list); | ||
365 | kfree(link); | ||
366 | } | ||
367 | } | ||
368 | |||
358 | /* | 369 | /* |
359 | * allocate_cg_links() allocates "count" cg_cgroup_link structures | 370 | * allocate_cg_links() allocates "count" cg_cgroup_link structures |
360 | * and chains them on tmp through their cgrp_link_list fields. Returns 0 on | 371 | * and chains them on tmp through their cgrp_link_list fields. Returns 0 on |
@@ -368,13 +379,7 @@ static int allocate_cg_links(int count, struct list_head *tmp) | |||
368 | for (i = 0; i < count; i++) { | 379 | for (i = 0; i < count; i++) { |
369 | link = kmalloc(sizeof(*link), GFP_KERNEL); | 380 | link = kmalloc(sizeof(*link), GFP_KERNEL); |
370 | if (!link) { | 381 | if (!link) { |
371 | while (!list_empty(tmp)) { | 382 | free_cg_links(tmp); |
372 | link = list_entry(tmp->next, | ||
373 | struct cg_cgroup_link, | ||
374 | cgrp_link_list); | ||
375 | list_del(&link->cgrp_link_list); | ||
376 | kfree(link); | ||
377 | } | ||
378 | return -ENOMEM; | 383 | return -ENOMEM; |
379 | } | 384 | } |
380 | list_add(&link->cgrp_link_list, tmp); | 385 | list_add(&link->cgrp_link_list, tmp); |
@@ -382,18 +387,6 @@ static int allocate_cg_links(int count, struct list_head *tmp) | |||
382 | return 0; | 387 | return 0; |
383 | } | 388 | } |
384 | 389 | ||
385 | static void free_cg_links(struct list_head *tmp) | ||
386 | { | ||
387 | while (!list_empty(tmp)) { | ||
388 | struct cg_cgroup_link *link; | ||
389 | link = list_entry(tmp->next, | ||
390 | struct cg_cgroup_link, | ||
391 | cgrp_link_list); | ||
392 | list_del(&link->cgrp_link_list); | ||
393 | kfree(link); | ||
394 | } | ||
395 | } | ||
396 | |||
397 | /* | 390 | /* |
398 | * find_css_set() takes an existing cgroup group and a | 391 | * find_css_set() takes an existing cgroup group and a |
399 | * cgroup object, and returns a css_set object that's | 392 | * cgroup object, and returns a css_set object that's |
@@ -415,11 +408,11 @@ static struct css_set *find_css_set( | |||
415 | 408 | ||
416 | /* First see if we already have a cgroup group that matches | 409 | /* First see if we already have a cgroup group that matches |
417 | * the desired set */ | 410 | * the desired set */ |
418 | write_lock(&css_set_lock); | 411 | read_lock(&css_set_lock); |
419 | res = find_existing_css_set(oldcg, cgrp, template); | 412 | res = find_existing_css_set(oldcg, cgrp, template); |
420 | if (res) | 413 | if (res) |
421 | get_css_set(res); | 414 | get_css_set(res); |
422 | write_unlock(&css_set_lock); | 415 | read_unlock(&css_set_lock); |
423 | 416 | ||
424 | if (res) | 417 | if (res) |
425 | return res; | 418 | return res; |
@@ -507,10 +500,6 @@ static struct css_set *find_css_set( | |||
507 | * knows that the cgroup won't be removed, as cgroup_rmdir() | 500 | * knows that the cgroup won't be removed, as cgroup_rmdir() |
508 | * needs that mutex. | 501 | * needs that mutex. |
509 | * | 502 | * |
510 | * The cgroup_common_file_write handler for operations that modify | ||
511 | * the cgroup hierarchy holds cgroup_mutex across the entire operation, | ||
512 | * single threading all such cgroup modifications across the system. | ||
513 | * | ||
514 | * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't | 503 | * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't |
515 | * (usually) take cgroup_mutex. These are the two most performance | 504 | * (usually) take cgroup_mutex. These are the two most performance |
516 | * critical pieces of code here. The exception occurs on cgroup_exit(), | 505 | * critical pieces of code here. The exception occurs on cgroup_exit(), |
@@ -962,7 +951,6 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
962 | struct super_block *sb; | 951 | struct super_block *sb; |
963 | struct cgroupfs_root *root; | 952 | struct cgroupfs_root *root; |
964 | struct list_head tmp_cg_links; | 953 | struct list_head tmp_cg_links; |
965 | INIT_LIST_HEAD(&tmp_cg_links); | ||
966 | 954 | ||
967 | /* First find the desired set of subsystems */ | 955 | /* First find the desired set of subsystems */ |
968 | ret = parse_cgroupfs_options(data, &opts); | 956 | ret = parse_cgroupfs_options(data, &opts); |
@@ -1093,6 +1081,8 @@ static void cgroup_kill_sb(struct super_block *sb) { | |||
1093 | struct cgroupfs_root *root = sb->s_fs_info; | 1081 | struct cgroupfs_root *root = sb->s_fs_info; |
1094 | struct cgroup *cgrp = &root->top_cgroup; | 1082 | struct cgroup *cgrp = &root->top_cgroup; |
1095 | int ret; | 1083 | int ret; |
1084 | struct cg_cgroup_link *link; | ||
1085 | struct cg_cgroup_link *saved_link; | ||
1096 | 1086 | ||
1097 | BUG_ON(!root); | 1087 | BUG_ON(!root); |
1098 | 1088 | ||
@@ -1112,10 +1102,9 @@ static void cgroup_kill_sb(struct super_block *sb) { | |||
1112 | * root cgroup | 1102 | * root cgroup |
1113 | */ | 1103 | */ |
1114 | write_lock(&css_set_lock); | 1104 | write_lock(&css_set_lock); |
1115 | while (!list_empty(&cgrp->css_sets)) { | 1105 | |
1116 | struct cg_cgroup_link *link; | 1106 | list_for_each_entry_safe(link, saved_link, &cgrp->css_sets, |
1117 | link = list_entry(cgrp->css_sets.next, | 1107 | cgrp_link_list) { |
1118 | struct cg_cgroup_link, cgrp_link_list); | ||
1119 | list_del(&link->cg_link_list); | 1108 | list_del(&link->cg_link_list); |
1120 | list_del(&link->cgrp_link_list); | 1109 | list_del(&link->cgrp_link_list); |
1121 | kfree(link); | 1110 | kfree(link); |
@@ -1281,18 +1270,14 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
1281 | } | 1270 | } |
1282 | 1271 | ||
1283 | /* | 1272 | /* |
1284 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with | 1273 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex |
1285 | * cgroup_mutex, may take task_lock of task | 1274 | * held. May take task_lock of task |
1286 | */ | 1275 | */ |
1287 | static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) | 1276 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) |
1288 | { | 1277 | { |
1289 | pid_t pid; | ||
1290 | struct task_struct *tsk; | 1278 | struct task_struct *tsk; |
1291 | int ret; | 1279 | int ret; |
1292 | 1280 | ||
1293 | if (sscanf(pidbuf, "%d", &pid) != 1) | ||
1294 | return -EIO; | ||
1295 | |||
1296 | if (pid) { | 1281 | if (pid) { |
1297 | rcu_read_lock(); | 1282 | rcu_read_lock(); |
1298 | tsk = find_task_by_vpid(pid); | 1283 | tsk = find_task_by_vpid(pid); |
@@ -1318,6 +1303,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) | |||
1318 | return ret; | 1303 | return ret; |
1319 | } | 1304 | } |
1320 | 1305 | ||
1306 | static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid) | ||
1307 | { | ||
1308 | int ret; | ||
1309 | if (!cgroup_lock_live_group(cgrp)) | ||
1310 | return -ENODEV; | ||
1311 | ret = attach_task_by_pid(cgrp, pid); | ||
1312 | cgroup_unlock(); | ||
1313 | return ret; | ||
1314 | } | ||
1315 | |||
1321 | /* The various types of files and directories in a cgroup file system */ | 1316 | /* The various types of files and directories in a cgroup file system */ |
1322 | enum cgroup_filetype { | 1317 | enum cgroup_filetype { |
1323 | FILE_ROOT, | 1318 | FILE_ROOT, |
@@ -1327,12 +1322,54 @@ enum cgroup_filetype { | |||
1327 | FILE_RELEASE_AGENT, | 1322 | FILE_RELEASE_AGENT, |
1328 | }; | 1323 | }; |
1329 | 1324 | ||
1325 | /** | ||
1326 | * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. | ||
1327 | * @cgrp: the cgroup to be checked for liveness | ||
1328 | * | ||
1329 | * On success, returns true; the lock should be later released with | ||
1330 | * cgroup_unlock(). On failure returns false with no lock held. | ||
1331 | */ | ||
1332 | bool cgroup_lock_live_group(struct cgroup *cgrp) | ||
1333 | { | ||
1334 | mutex_lock(&cgroup_mutex); | ||
1335 | if (cgroup_is_removed(cgrp)) { | ||
1336 | mutex_unlock(&cgroup_mutex); | ||
1337 | return false; | ||
1338 | } | ||
1339 | return true; | ||
1340 | } | ||
1341 | |||
1342 | static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft, | ||
1343 | const char *buffer) | ||
1344 | { | ||
1345 | BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); | ||
1346 | if (!cgroup_lock_live_group(cgrp)) | ||
1347 | return -ENODEV; | ||
1348 | strcpy(cgrp->root->release_agent_path, buffer); | ||
1349 | cgroup_unlock(); | ||
1350 | return 0; | ||
1351 | } | ||
1352 | |||
1353 | static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft, | ||
1354 | struct seq_file *seq) | ||
1355 | { | ||
1356 | if (!cgroup_lock_live_group(cgrp)) | ||
1357 | return -ENODEV; | ||
1358 | seq_puts(seq, cgrp->root->release_agent_path); | ||
1359 | seq_putc(seq, '\n'); | ||
1360 | cgroup_unlock(); | ||
1361 | return 0; | ||
1362 | } | ||
1363 | |||
1364 | /* A buffer size big enough for numbers or short strings */ | ||
1365 | #define CGROUP_LOCAL_BUFFER_SIZE 64 | ||
1366 | |||
1330 | static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft, | 1367 | static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft, |
1331 | struct file *file, | 1368 | struct file *file, |
1332 | const char __user *userbuf, | 1369 | const char __user *userbuf, |
1333 | size_t nbytes, loff_t *unused_ppos) | 1370 | size_t nbytes, loff_t *unused_ppos) |
1334 | { | 1371 | { |
1335 | char buffer[64]; | 1372 | char buffer[CGROUP_LOCAL_BUFFER_SIZE]; |
1336 | int retval = 0; | 1373 | int retval = 0; |
1337 | char *end; | 1374 | char *end; |
1338 | 1375 | ||
@@ -1361,68 +1398,39 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft, | |||
1361 | return retval; | 1398 | return retval; |
1362 | } | 1399 | } |
1363 | 1400 | ||
1364 | static ssize_t cgroup_common_file_write(struct cgroup *cgrp, | 1401 | static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft, |
1365 | struct cftype *cft, | 1402 | struct file *file, |
1366 | struct file *file, | 1403 | const char __user *userbuf, |
1367 | const char __user *userbuf, | 1404 | size_t nbytes, loff_t *unused_ppos) |
1368 | size_t nbytes, loff_t *unused_ppos) | ||
1369 | { | 1405 | { |
1370 | enum cgroup_filetype type = cft->private; | 1406 | char local_buffer[CGROUP_LOCAL_BUFFER_SIZE]; |
1371 | char *buffer; | ||
1372 | int retval = 0; | 1407 | int retval = 0; |
1408 | size_t max_bytes = cft->max_write_len; | ||
1409 | char *buffer = local_buffer; | ||
1373 | 1410 | ||
1374 | if (nbytes >= PATH_MAX) | 1411 | if (!max_bytes) |
1412 | max_bytes = sizeof(local_buffer) - 1; | ||
1413 | if (nbytes >= max_bytes) | ||
1375 | return -E2BIG; | 1414 | return -E2BIG; |
1376 | 1415 | /* Allocate a dynamic buffer if we need one */ | |
1377 | /* +1 for nul-terminator */ | 1416 | if (nbytes >= sizeof(local_buffer)) { |
1378 | buffer = kmalloc(nbytes + 1, GFP_KERNEL); | 1417 | buffer = kmalloc(nbytes + 1, GFP_KERNEL); |
1379 | if (buffer == NULL) | 1418 | if (buffer == NULL) |
1380 | return -ENOMEM; | 1419 | return -ENOMEM; |
1381 | 1420 | } | |
1382 | if (copy_from_user(buffer, userbuf, nbytes)) { | 1421 | if (nbytes && copy_from_user(buffer, userbuf, nbytes)) { |
1383 | retval = -EFAULT; | 1422 | retval = -EFAULT; |
1384 | goto out1; | 1423 | goto out; |
1385 | } | 1424 | } |
1386 | buffer[nbytes] = 0; /* nul-terminate */ | ||
1387 | strstrip(buffer); /* strip -just- trailing whitespace */ | ||
1388 | 1425 | ||
1389 | mutex_lock(&cgroup_mutex); | 1426 | buffer[nbytes] = 0; /* nul-terminate */ |
1390 | 1427 | strstrip(buffer); | |
1391 | /* | 1428 | retval = cft->write_string(cgrp, cft, buffer); |
1392 | * This was already checked for in cgroup_file_write(), but | 1429 | if (!retval) |
1393 | * check again now we're holding cgroup_mutex. | ||
1394 | */ | ||
1395 | if (cgroup_is_removed(cgrp)) { | ||
1396 | retval = -ENODEV; | ||
1397 | goto out2; | ||
1398 | } | ||
1399 | |||
1400 | switch (type) { | ||
1401 | case FILE_TASKLIST: | ||
1402 | retval = attach_task_by_pid(cgrp, buffer); | ||
1403 | break; | ||
1404 | case FILE_NOTIFY_ON_RELEASE: | ||
1405 | clear_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
1406 | if (simple_strtoul(buffer, NULL, 10) != 0) | ||
1407 | set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
1408 | else | ||
1409 | clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
1410 | break; | ||
1411 | case FILE_RELEASE_AGENT: | ||
1412 | BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); | ||
1413 | strcpy(cgrp->root->release_agent_path, buffer); | ||
1414 | break; | ||
1415 | default: | ||
1416 | retval = -EINVAL; | ||
1417 | goto out2; | ||
1418 | } | ||
1419 | |||
1420 | if (retval == 0) | ||
1421 | retval = nbytes; | 1430 | retval = nbytes; |
1422 | out2: | 1431 | out: |
1423 | mutex_unlock(&cgroup_mutex); | 1432 | if (buffer != local_buffer) |
1424 | out1: | 1433 | kfree(buffer); |
1425 | kfree(buffer); | ||
1426 | return retval; | 1434 | return retval; |
1427 | } | 1435 | } |
1428 | 1436 | ||
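cgroup_write_string() above keeps small writes entirely on the stack and only falls back to kmalloc() when the value does not fit in CGROUP_LOCAL_BUFFER_SIZE, so the common case (short numbers and flags) costs no allocation. The same local-buffer-with-heap-fallback idiom in isolation, as a userspace sketch (the names are illustrative, not kernel API):

#include <stdlib.h>
#include <string.h>

/* Copy an n-byte value, using a small on-stack buffer in the common case
 * and heap memory only when the input is too large for it. */
static int handle_value(const char *src, size_t n)
{
	char local[64];
	char *buf = local;

	if (n >= sizeof(local)) {	/* does not fit: allocate dynamically */
		buf = malloc(n + 1);
		if (!buf)
			return -1;
	}
	memcpy(buf, src, n);
	buf[n] = '\0';			/* always nul-terminate before parsing */

	/* ... parse or act on buf ... */

	if (buf != local)		/* free only what we allocated */
		free(buf);
	return 0;
}
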
@@ -1438,6 +1446,8 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf, | |||
1438 | return cft->write(cgrp, cft, file, buf, nbytes, ppos); | 1446 | return cft->write(cgrp, cft, file, buf, nbytes, ppos); |
1439 | if (cft->write_u64 || cft->write_s64) | 1447 | if (cft->write_u64 || cft->write_s64) |
1440 | return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos); | 1448 | return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos); |
1449 | if (cft->write_string) | ||
1450 | return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos); | ||
1441 | if (cft->trigger) { | 1451 | if (cft->trigger) { |
1442 | int ret = cft->trigger(cgrp, (unsigned int)cft->private); | 1452 | int ret = cft->trigger(cgrp, (unsigned int)cft->private); |
1443 | return ret ? ret : nbytes; | 1453 | return ret ? ret : nbytes; |
@@ -1450,7 +1460,7 @@ static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft, | |||
1450 | char __user *buf, size_t nbytes, | 1460 | char __user *buf, size_t nbytes, |
1451 | loff_t *ppos) | 1461 | loff_t *ppos) |
1452 | { | 1462 | { |
1453 | char tmp[64]; | 1463 | char tmp[CGROUP_LOCAL_BUFFER_SIZE]; |
1454 | u64 val = cft->read_u64(cgrp, cft); | 1464 | u64 val = cft->read_u64(cgrp, cft); |
1455 | int len = sprintf(tmp, "%llu\n", (unsigned long long) val); | 1465 | int len = sprintf(tmp, "%llu\n", (unsigned long long) val); |
1456 | 1466 | ||
@@ -1462,56 +1472,13 @@ static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft, | |||
1462 | char __user *buf, size_t nbytes, | 1472 | char __user *buf, size_t nbytes, |
1463 | loff_t *ppos) | 1473 | loff_t *ppos) |
1464 | { | 1474 | { |
1465 | char tmp[64]; | 1475 | char tmp[CGROUP_LOCAL_BUFFER_SIZE]; |
1466 | s64 val = cft->read_s64(cgrp, cft); | 1476 | s64 val = cft->read_s64(cgrp, cft); |
1467 | int len = sprintf(tmp, "%lld\n", (long long) val); | 1477 | int len = sprintf(tmp, "%lld\n", (long long) val); |
1468 | 1478 | ||
1469 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | 1479 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); |
1470 | } | 1480 | } |
1471 | 1481 | ||
1472 | static ssize_t cgroup_common_file_read(struct cgroup *cgrp, | ||
1473 | struct cftype *cft, | ||
1474 | struct file *file, | ||
1475 | char __user *buf, | ||
1476 | size_t nbytes, loff_t *ppos) | ||
1477 | { | ||
1478 | enum cgroup_filetype type = cft->private; | ||
1479 | char *page; | ||
1480 | ssize_t retval = 0; | ||
1481 | char *s; | ||
1482 | |||
1483 | if (!(page = (char *)__get_free_page(GFP_KERNEL))) | ||
1484 | return -ENOMEM; | ||
1485 | |||
1486 | s = page; | ||
1487 | |||
1488 | switch (type) { | ||
1489 | case FILE_RELEASE_AGENT: | ||
1490 | { | ||
1491 | struct cgroupfs_root *root; | ||
1492 | size_t n; | ||
1493 | mutex_lock(&cgroup_mutex); | ||
1494 | root = cgrp->root; | ||
1495 | n = strnlen(root->release_agent_path, | ||
1496 | sizeof(root->release_agent_path)); | ||
1497 | n = min(n, (size_t) PAGE_SIZE); | ||
1498 | strncpy(s, root->release_agent_path, n); | ||
1499 | mutex_unlock(&cgroup_mutex); | ||
1500 | s += n; | ||
1501 | break; | ||
1502 | } | ||
1503 | default: | ||
1504 | retval = -EINVAL; | ||
1505 | goto out; | ||
1506 | } | ||
1507 | *s++ = '\n'; | ||
1508 | |||
1509 | retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); | ||
1510 | out: | ||
1511 | free_page((unsigned long)page); | ||
1512 | return retval; | ||
1513 | } | ||
1514 | |||
1515 | static ssize_t cgroup_file_read(struct file *file, char __user *buf, | 1482 | static ssize_t cgroup_file_read(struct file *file, char __user *buf, |
1516 | size_t nbytes, loff_t *ppos) | 1483 | size_t nbytes, loff_t *ppos) |
1517 | { | 1484 | { |
@@ -1560,7 +1527,7 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg) | |||
1560 | return cft->read_seq_string(state->cgroup, cft, m); | 1527 | return cft->read_seq_string(state->cgroup, cft, m); |
1561 | } | 1528 | } |
1562 | 1529 | ||
1563 | int cgroup_seqfile_release(struct inode *inode, struct file *file) | 1530 | static int cgroup_seqfile_release(struct inode *inode, struct file *file) |
1564 | { | 1531 | { |
1565 | struct seq_file *seq = file->private_data; | 1532 | struct seq_file *seq = file->private_data; |
1566 | kfree(seq->private); | 1533 | kfree(seq->private); |
@@ -1569,6 +1536,7 @@ int cgroup_seqfile_release(struct inode *inode, struct file *file) | |||
1569 | 1536 | ||
1570 | static struct file_operations cgroup_seqfile_operations = { | 1537 | static struct file_operations cgroup_seqfile_operations = { |
1571 | .read = seq_read, | 1538 | .read = seq_read, |
1539 | .write = cgroup_file_write, | ||
1572 | .llseek = seq_lseek, | 1540 | .llseek = seq_lseek, |
1573 | .release = cgroup_seqfile_release, | 1541 | .release = cgroup_seqfile_release, |
1574 | }; | 1542 | }; |
@@ -1756,15 +1724,11 @@ int cgroup_add_files(struct cgroup *cgrp, | |||
1756 | int cgroup_task_count(const struct cgroup *cgrp) | 1724 | int cgroup_task_count(const struct cgroup *cgrp) |
1757 | { | 1725 | { |
1758 | int count = 0; | 1726 | int count = 0; |
1759 | struct list_head *l; | 1727 | struct cg_cgroup_link *link; |
1760 | 1728 | ||
1761 | read_lock(&css_set_lock); | 1729 | read_lock(&css_set_lock); |
1762 | l = cgrp->css_sets.next; | 1730 | list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) { |
1763 | while (l != &cgrp->css_sets) { | ||
1764 | struct cg_cgroup_link *link = | ||
1765 | list_entry(l, struct cg_cgroup_link, cgrp_link_list); | ||
1766 | count += atomic_read(&link->cg->ref.refcount); | 1731 | count += atomic_read(&link->cg->ref.refcount); |
1767 | l = l->next; | ||
1768 | } | 1732 | } |
1769 | read_unlock(&css_set_lock); | 1733 | read_unlock(&css_set_lock); |
1770 | return count; | 1734 | return count; |
@@ -2227,6 +2191,18 @@ static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, | |||
2227 | return notify_on_release(cgrp); | 2191 | return notify_on_release(cgrp); |
2228 | } | 2192 | } |
2229 | 2193 | ||
2194 | static int cgroup_write_notify_on_release(struct cgroup *cgrp, | ||
2195 | struct cftype *cft, | ||
2196 | u64 val) | ||
2197 | { | ||
2198 | clear_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
2199 | if (val) | ||
2200 | set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
2201 | else | ||
2202 | clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); | ||
2203 | return 0; | ||
2204 | } | ||
2205 | |||
2230 | /* | 2206 | /* |
2231 | * for the common functions, 'private' gives the type of file | 2207 | * for the common functions, 'private' gives the type of file |
2232 | */ | 2208 | */ |
@@ -2235,7 +2211,7 @@ static struct cftype files[] = { | |||
2235 | .name = "tasks", | 2211 | .name = "tasks", |
2236 | .open = cgroup_tasks_open, | 2212 | .open = cgroup_tasks_open, |
2237 | .read = cgroup_tasks_read, | 2213 | .read = cgroup_tasks_read, |
2238 | .write = cgroup_common_file_write, | 2214 | .write_u64 = cgroup_tasks_write, |
2239 | .release = cgroup_tasks_release, | 2215 | .release = cgroup_tasks_release, |
2240 | .private = FILE_TASKLIST, | 2216 | .private = FILE_TASKLIST, |
2241 | }, | 2217 | }, |
@@ -2243,15 +2219,16 @@ static struct cftype files[] = { | |||
2243 | { | 2219 | { |
2244 | .name = "notify_on_release", | 2220 | .name = "notify_on_release", |
2245 | .read_u64 = cgroup_read_notify_on_release, | 2221 | .read_u64 = cgroup_read_notify_on_release, |
2246 | .write = cgroup_common_file_write, | 2222 | .write_u64 = cgroup_write_notify_on_release, |
2247 | .private = FILE_NOTIFY_ON_RELEASE, | 2223 | .private = FILE_NOTIFY_ON_RELEASE, |
2248 | }, | 2224 | }, |
2249 | }; | 2225 | }; |
2250 | 2226 | ||
2251 | static struct cftype cft_release_agent = { | 2227 | static struct cftype cft_release_agent = { |
2252 | .name = "release_agent", | 2228 | .name = "release_agent", |
2253 | .read = cgroup_common_file_read, | 2229 | .read_seq_string = cgroup_release_agent_show, |
2254 | .write = cgroup_common_file_write, | 2230 | .write_string = cgroup_release_agent_write, |
2231 | .max_write_len = PATH_MAX, | ||
2255 | .private = FILE_RELEASE_AGENT, | 2232 | .private = FILE_RELEASE_AGENT, |
2256 | }; | 2233 | }; |
2257 | 2234 | ||
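release_agent is the first user of the new string interface: write_string gets a stripped, nul-terminated buffer, max_write_len lifts the size limit above the 64-byte default so a PATH_MAX path fits, and read_seq_string replaces the hand-rolled page buffer of the old common read handler. A hypothetical subsystem attribute using the same hooks might look like this (sketch only; the callback signatures match the ones introduced above):

static int example_label_write(struct cgroup *cgrp, struct cftype *cft,
			       const char *buffer)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	/* buffer has already been copied, nul-terminated and stripped */
	/* ... store the string in per-cgroup state ... */
	cgroup_unlock();
	return 0;
}

static int example_label_show(struct cgroup *cgrp, struct cftype *cft,
			      struct seq_file *seq)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, "stored-value\n");	/* placeholder output */
	cgroup_unlock();
	return 0;
}

static struct cftype example_label_file = {
	.name = "example_label",
	.read_seq_string = example_label_show,
	.write_string = example_label_write,
	.max_write_len = 256,	/* allow writes longer than the local buffer */
};
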
@@ -2391,7 +2368,7 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
2391 | return cgroup_create(c_parent, dentry, mode | S_IFDIR); | 2368 | return cgroup_create(c_parent, dentry, mode | S_IFDIR); |
2392 | } | 2369 | } |
2393 | 2370 | ||
2394 | static inline int cgroup_has_css_refs(struct cgroup *cgrp) | 2371 | static int cgroup_has_css_refs(struct cgroup *cgrp) |
2395 | { | 2372 | { |
2396 | /* Check the reference count on each subsystem. Since we | 2373 | /* Check the reference count on each subsystem. Since we |
2397 | * already established that there are no tasks in the | 2374 | * already established that there are no tasks in the |
@@ -2761,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child) | |||
2761 | */ | 2738 | */ |
2762 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) | 2739 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) |
2763 | { | 2740 | { |
2764 | struct cgroup *oldcgrp, *newcgrp; | 2741 | struct cgroup *oldcgrp, *newcgrp = NULL; |
2765 | 2742 | ||
2766 | if (need_mm_owner_callback) { | 2743 | if (need_mm_owner_callback) { |
2767 | int i; | 2744 | int i; |
2768 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 2745 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
2769 | struct cgroup_subsys *ss = subsys[i]; | 2746 | struct cgroup_subsys *ss = subsys[i]; |
2770 | oldcgrp = task_cgroup(old, ss->subsys_id); | 2747 | oldcgrp = task_cgroup(old, ss->subsys_id); |
2771 | newcgrp = task_cgroup(new, ss->subsys_id); | 2748 | if (new) |
2749 | newcgrp = task_cgroup(new, ss->subsys_id); | ||
2772 | if (oldcgrp == newcgrp) | 2750 | if (oldcgrp == newcgrp) |
2773 | continue; | 2751 | continue; |
2774 | if (ss->mm_owner_changed) | 2752 | if (ss->mm_owner_changed) |
@@ -2869,16 +2847,17 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) | |||
2869 | * cgroup_clone - clone the cgroup the given subsystem is attached to | 2847 | * cgroup_clone - clone the cgroup the given subsystem is attached to |
2870 | * @tsk: the task to be moved | 2848 | * @tsk: the task to be moved |
2871 | * @subsys: the given subsystem | 2849 | * @subsys: the given subsystem |
2850 | * @nodename: the name for the new cgroup | ||
2872 | * | 2851 | * |
2873 | * Duplicate the current cgroup in the hierarchy that the given | 2852 | * Duplicate the current cgroup in the hierarchy that the given |
2874 | * subsystem is attached to, and move this task into the new | 2853 | * subsystem is attached to, and move this task into the new |
2875 | * child. | 2854 | * child. |
2876 | */ | 2855 | */ |
2877 | int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) | 2856 | int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, |
2857 | char *nodename) | ||
2878 | { | 2858 | { |
2879 | struct dentry *dentry; | 2859 | struct dentry *dentry; |
2880 | int ret = 0; | 2860 | int ret = 0; |
2881 | char nodename[MAX_CGROUP_TYPE_NAMELEN]; | ||
2882 | struct cgroup *parent, *child; | 2861 | struct cgroup *parent, *child; |
2883 | struct inode *inode; | 2862 | struct inode *inode; |
2884 | struct css_set *cg; | 2863 | struct css_set *cg; |
@@ -2903,8 +2882,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) | |||
2903 | cg = tsk->cgroups; | 2882 | cg = tsk->cgroups; |
2904 | parent = task_cgroup(tsk, subsys->subsys_id); | 2883 | parent = task_cgroup(tsk, subsys->subsys_id); |
2905 | 2884 | ||
2906 | snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid); | ||
2907 | |||
2908 | /* Pin the hierarchy */ | 2885 | /* Pin the hierarchy */ |
2909 | atomic_inc(&parent->root->sb->s_active); | 2886 | atomic_inc(&parent->root->sb->s_active); |
2910 | 2887 | ||
@@ -3078,27 +3055,24 @@ static void cgroup_release_agent(struct work_struct *work) | |||
3078 | while (!list_empty(&release_list)) { | 3055 | while (!list_empty(&release_list)) { |
3079 | char *argv[3], *envp[3]; | 3056 | char *argv[3], *envp[3]; |
3080 | int i; | 3057 | int i; |
3081 | char *pathbuf; | 3058 | char *pathbuf = NULL, *agentbuf = NULL; |
3082 | struct cgroup *cgrp = list_entry(release_list.next, | 3059 | struct cgroup *cgrp = list_entry(release_list.next, |
3083 | struct cgroup, | 3060 | struct cgroup, |
3084 | release_list); | 3061 | release_list); |
3085 | list_del_init(&cgrp->release_list); | 3062 | list_del_init(&cgrp->release_list); |
3086 | spin_unlock(&release_list_lock); | 3063 | spin_unlock(&release_list_lock); |
3087 | pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 3064 | pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
3088 | if (!pathbuf) { | 3065 | if (!pathbuf) |
3089 | spin_lock(&release_list_lock); | 3066 | goto continue_free; |
3090 | continue; | 3067 | if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) |
3091 | } | 3068 | goto continue_free; |
3092 | 3069 | agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); | |
3093 | if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) { | 3070 | if (!agentbuf) |
3094 | kfree(pathbuf); | 3071 | goto continue_free; |
3095 | spin_lock(&release_list_lock); | ||
3096 | continue; | ||
3097 | } | ||
3098 | 3072 | ||
3099 | i = 0; | 3073 | i = 0; |
3100 | argv[i++] = cgrp->root->release_agent_path; | 3074 | argv[i++] = agentbuf; |
3101 | argv[i++] = (char *)pathbuf; | 3075 | argv[i++] = pathbuf; |
3102 | argv[i] = NULL; | 3076 | argv[i] = NULL; |
3103 | 3077 | ||
3104 | i = 0; | 3078 | i = 0; |
@@ -3112,8 +3086,10 @@ static void cgroup_release_agent(struct work_struct *work) | |||
3112 | * be a slow process */ | 3086 | * be a slow process */ |
3113 | mutex_unlock(&cgroup_mutex); | 3087 | mutex_unlock(&cgroup_mutex); |
3114 | call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); | 3088 | call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); |
3115 | kfree(pathbuf); | ||
3116 | mutex_lock(&cgroup_mutex); | 3089 | mutex_lock(&cgroup_mutex); |
3090 | continue_free: | ||
3091 | kfree(pathbuf); | ||
3092 | kfree(agentbuf); | ||
3117 | spin_lock(&release_list_lock); | 3093 | spin_lock(&release_list_lock); |
3118 | } | 3094 | } |
3119 | spin_unlock(&release_list_lock); | 3095 | spin_unlock(&release_list_lock); |
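The rewritten loop funnels every exit through continue_free and, more importantly, snapshots the agent path with kstrdup() so the argv[0] string cannot change or be freed while cgroup_mutex is dropped around call_usermodehelper(). On the other side of that call, the release agent is an ordinary executable that receives the released cgroup's path as argv[1]; a minimal agent (the log path is just an example) could be:

#include <stdio.h>

/* Minimal cgroup release agent: invoked by the kernel with the path of the
 * newly-empty cgroup as its first argument. */
int main(int argc, char *argv[])
{
	FILE *log;

	if (argc < 2)
		return 1;
	log = fopen("/tmp/cgroup-release.log", "a");
	if (!log)
		return 1;
	fprintf(log, "released cgroup: %s\n", argv[1]);
	fclose(log);
	return 0;
}
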
diff --git a/kernel/cpu.c b/kernel/cpu.c index cfb1d43ab801..f17e9854c246 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -64,6 +64,8 @@ void __init cpu_hotplug_init(void) | |||
64 | cpu_hotplug.refcount = 0; | 64 | cpu_hotplug.refcount = 0; |
65 | } | 65 | } |
66 | 66 | ||
67 | cpumask_t cpu_active_map; | ||
68 | |||
67 | #ifdef CONFIG_HOTPLUG_CPU | 69 | #ifdef CONFIG_HOTPLUG_CPU |
68 | 70 | ||
69 | void get_online_cpus(void) | 71 | void get_online_cpus(void) |
@@ -214,7 +216,6 @@ static int __ref take_cpu_down(void *_param) | |||
214 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | 216 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) |
215 | { | 217 | { |
216 | int err, nr_calls = 0; | 218 | int err, nr_calls = 0; |
217 | struct task_struct *p; | ||
218 | cpumask_t old_allowed, tmp; | 219 | cpumask_t old_allowed, tmp; |
219 | void *hcpu = (void *)(long)cpu; | 220 | void *hcpu = (void *)(long)cpu; |
220 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; | 221 | unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; |
@@ -247,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | |||
247 | cpus_setall(tmp); | 248 | cpus_setall(tmp); |
248 | cpu_clear(cpu, tmp); | 249 | cpu_clear(cpu, tmp); |
249 | set_cpus_allowed_ptr(current, &tmp); | 250 | set_cpus_allowed_ptr(current, &tmp); |
251 | tmp = cpumask_of_cpu(cpu); | ||
250 | 252 | ||
251 | p = __stop_machine_run(take_cpu_down, &tcd_param, cpu); | 253 | err = __stop_machine(take_cpu_down, &tcd_param, &tmp); |
252 | 254 | if (err) { | |
253 | if (IS_ERR(p) || cpu_online(cpu)) { | ||
254 | /* CPU didn't die: tell everyone. Can't complain. */ | 255 | /* CPU didn't die: tell everyone. Can't complain. */ |
255 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, | 256 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, |
256 | hcpu) == NOTIFY_BAD) | 257 | hcpu) == NOTIFY_BAD) |
257 | BUG(); | 258 | BUG(); |
258 | 259 | ||
259 | if (IS_ERR(p)) { | 260 | goto out_allowed; |
260 | err = PTR_ERR(p); | ||
261 | goto out_allowed; | ||
262 | } | ||
263 | goto out_thread; | ||
264 | } | 261 | } |
262 | BUG_ON(cpu_online(cpu)); | ||
265 | 263 | ||
266 | /* Wait for it to sleep (leaving idle task). */ | 264 | /* Wait for it to sleep (leaving idle task). */ |
267 | while (!idle_cpu(cpu)) | 265 | while (!idle_cpu(cpu)) |
@@ -277,12 +275,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) | |||
277 | 275 | ||
278 | check_for_tasks(cpu); | 276 | check_for_tasks(cpu); |
279 | 277 | ||
280 | out_thread: | ||
281 | err = kthread_stop(p); | ||
282 | out_allowed: | 278 | out_allowed: |
283 | set_cpus_allowed_ptr(current, &old_allowed); | 279 | set_cpus_allowed_ptr(current, &old_allowed); |
284 | out_release: | 280 | out_release: |
285 | cpu_hotplug_done(); | 281 | cpu_hotplug_done(); |
282 | if (!err) { | ||
283 | if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod, | ||
284 | hcpu) == NOTIFY_BAD) | ||
285 | BUG(); | ||
286 | } | ||
286 | return err; | 287 | return err; |
287 | } | 288 | } |
288 | 289 | ||
@@ -291,11 +292,30 @@ int __ref cpu_down(unsigned int cpu) | |||
291 | int err = 0; | 292 | int err = 0; |
292 | 293 | ||
293 | cpu_maps_update_begin(); | 294 | cpu_maps_update_begin(); |
294 | if (cpu_hotplug_disabled) | 295 | |
296 | if (cpu_hotplug_disabled) { | ||
295 | err = -EBUSY; | 297 | err = -EBUSY; |
296 | else | 298 | goto out; |
297 | err = _cpu_down(cpu, 0); | 299 | } |
300 | |||
301 | cpu_clear(cpu, cpu_active_map); | ||
302 | |||
303 | /* | ||
304 | * Make sure all cpus did the reschedule and are not | ||
305 | * using a stale version of the cpu_active_map. | ||
306 | * This is not strictly necessary because stop_machine() | ||
307 | * that we run down the line already provides the required | ||
308 | * synchronization. But it's really a side effect and we do not | ||
309 | * want to depend on the innards of the stop_machine here. | ||
310 | */ | ||
311 | synchronize_sched(); | ||
312 | |||
313 | err = _cpu_down(cpu, 0); | ||
314 | |||
315 | if (cpu_online(cpu)) | ||
316 | cpu_set(cpu, cpu_active_map); | ||
298 | 317 | ||
318 | out: | ||
299 | cpu_maps_update_done(); | 319 | cpu_maps_update_done(); |
300 | return err; | 320 | return err; |
301 | } | 321 | } |
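cpu_down() now clears the CPU from cpu_active_map before starting the teardown, and synchronize_sched() guarantees that anything reading the mask inside a preempt-disabled (RCU-sched) section has finished before _cpu_down() runs. A reader on the other side of that handshake would look roughly like the sketch below; the intended consumers are the scheduler's balancing and migration paths, which must stop picking a CPU as a target once it is no longer active:

/* Sketch of a reader that must not act on a stale cpu_active_map entry.
 * Disabling preemption makes this an RCU-sched read-side section, which is
 * exactly what the synchronize_sched() call in cpu_down() waits out. */
static int cpu_usable_as_target(int cpu)
{
	int active;

	preempt_disable();
	active = cpu_isset(cpu, cpu_active_map);
	/* ... if active, the CPU may still be chosen as a target ... */
	preempt_enable();

	return active;
}
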
@@ -329,6 +349,8 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) | |||
329 | goto out_notify; | 349 | goto out_notify; |
330 | BUG_ON(!cpu_online(cpu)); | 350 | BUG_ON(!cpu_online(cpu)); |
331 | 351 | ||
352 | cpu_set(cpu, cpu_active_map); | ||
353 | |||
332 | /* Now call notifier in preparation. */ | 354 | /* Now call notifier in preparation. */ |
333 | raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); | 355 | raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); |
334 | 356 | ||
@@ -347,7 +369,7 @@ int __cpuinit cpu_up(unsigned int cpu) | |||
347 | if (!cpu_isset(cpu, cpu_possible_map)) { | 369 | if (!cpu_isset(cpu, cpu_possible_map)) { |
348 | printk(KERN_ERR "can't online cpu %d because it is not " | 370 | printk(KERN_ERR "can't online cpu %d because it is not " |
349 | "configured as may-hotadd at boot time\n", cpu); | 371 | "configured as may-hotadd at boot time\n", cpu); |
350 | #if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390) | 372 | #if defined(CONFIG_IA64) || defined(CONFIG_X86_64) |
351 | printk(KERN_ERR "please check additional_cpus= boot " | 373 | printk(KERN_ERR "please check additional_cpus= boot " |
352 | "parameter\n"); | 374 | "parameter\n"); |
353 | #endif | 375 | #endif |
@@ -355,11 +377,15 @@ int __cpuinit cpu_up(unsigned int cpu) | |||
355 | } | 377 | } |
356 | 378 | ||
357 | cpu_maps_update_begin(); | 379 | cpu_maps_update_begin(); |
358 | if (cpu_hotplug_disabled) | 380 | |
381 | if (cpu_hotplug_disabled) { | ||
359 | err = -EBUSY; | 382 | err = -EBUSY; |
360 | else | 383 | goto out; |
361 | err = _cpu_up(cpu, 0); | 384 | } |
385 | |||
386 | err = _cpu_up(cpu, 0); | ||
362 | 387 | ||
388 | out: | ||
363 | cpu_maps_update_done(); | 389 | cpu_maps_update_done(); |
364 | return err; | 390 | return err; |
365 | } | 391 | } |
@@ -413,7 +439,7 @@ void __ref enable_nonboot_cpus(void) | |||
413 | goto out; | 439 | goto out; |
414 | 440 | ||
415 | printk("Enabling non-boot CPUs ...\n"); | 441 | printk("Enabling non-boot CPUs ...\n"); |
416 | for_each_cpu_mask(cpu, frozen_cpus) { | 442 | for_each_cpu_mask_nr(cpu, frozen_cpus) { |
417 | error = _cpu_up(cpu, 1); | 443 | error = _cpu_up(cpu, 1); |
418 | if (!error) { | 444 | if (!error) { |
419 | printk("CPU%d is up\n", cpu); | 445 | printk("CPU%d is up\n", cpu); |
@@ -428,3 +454,28 @@ out: | |||
428 | #endif /* CONFIG_PM_SLEEP_SMP */ | 454 | #endif /* CONFIG_PM_SLEEP_SMP */ |
429 | 455 | ||
430 | #endif /* CONFIG_SMP */ | 456 | #endif /* CONFIG_SMP */ |
457 | |||
458 | /* | ||
459 | * cpu_bit_bitmap[] is a special, "compressed" data structure that | ||
460 | * represents all of the NR_CPUS-bit binary values of 1<<nr. | ||
461 | * | ||
462 | * It is used by cpumask_of_cpu() to get a constant address to a CPU | ||
463 | * mask value that has a single bit set only. | ||
464 | */ | ||
465 | |||
466 | /* cpu_bit_bitmap[0] is empty - so we can back into it */ | ||
467 | #define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x) | ||
468 | #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) | ||
469 | #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) | ||
470 | #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) | ||
471 | |||
472 | const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { | ||
473 | |||
474 | MASK_DECLARE_8(0), MASK_DECLARE_8(8), | ||
475 | MASK_DECLARE_8(16), MASK_DECLARE_8(24), | ||
476 | #if BITS_PER_LONG > 32 | ||
477 | MASK_DECLARE_8(32), MASK_DECLARE_8(40), | ||
478 | MASK_DECLARE_8(48), MASK_DECLARE_8(56), | ||
479 | #endif | ||
480 | }; | ||
481 | EXPORT_SYMBOL_GPL(cpu_bit_bitmap); | ||
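The table trades a little memory for the ability to hand out a const pointer to a single-bit cpumask without composing one on the stack. The consumer, cpumask_of_cpu(), picks row 1 + cpu % BITS_PER_LONG (whose first word has exactly that bit set) and then steps the pointer back by cpu / BITS_PER_LONG words so the set bit lands in the right word of the returned mask; the words it backs into are the zero tail of the preceding row, row 0 in the worst case, which is why row 0 is left empty. Roughly, the include/linux/cpumask.h side looks like this (a sketch, not part of this file):

static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
{
	/* Row 1 + (cpu % BITS_PER_LONG): word 0 has bit (cpu % BITS_PER_LONG) set. */
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	/* Back up by the word index of 'cpu': the single set bit now sits at
	 * word (cpu / BITS_PER_LONG) of the mask we return, i.e. at bit 'cpu'. */
	p -= cpu / BITS_PER_LONG;

	return (const cpumask_t *)p;
}

#define cpumask_of_cpu(cpu)	(*get_cpu_mask(cpu))
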
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 459d601947a8..827cd9adccb2 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -14,6 +14,8 @@ | |||
14 | * 2003-10-22 Updates by Stephen Hemminger. | 14 | * 2003-10-22 Updates by Stephen Hemminger. |
15 | * 2004 May-July Rework by Paul Jackson. | 15 | * 2004 May-July Rework by Paul Jackson. |
16 | * 2006 Rework by Paul Menage to use generic cgroups | 16 | * 2006 Rework by Paul Menage to use generic cgroups |
17 | * 2008 Rework of the scheduler domains and CPU hotplug handling | ||
18 | * by Max Krasnyansky | ||
17 | * | 19 | * |
18 | * This file is subject to the terms and conditions of the GNU General Public | 20 | * This file is subject to the terms and conditions of the GNU General Public |
19 | * License. See the file COPYING in the main directory of the Linux | 21 | * License. See the file COPYING in the main directory of the Linux |
@@ -54,7 +56,6 @@ | |||
54 | #include <asm/uaccess.h> | 56 | #include <asm/uaccess.h> |
55 | #include <asm/atomic.h> | 57 | #include <asm/atomic.h> |
56 | #include <linux/mutex.h> | 58 | #include <linux/mutex.h> |
57 | #include <linux/kfifo.h> | ||
58 | #include <linux/workqueue.h> | 59 | #include <linux/workqueue.h> |
59 | #include <linux/cgroup.h> | 60 | #include <linux/cgroup.h> |
60 | 61 | ||
@@ -227,10 +228,6 @@ static struct cpuset top_cpuset = { | |||
227 | * The task_struct fields mems_allowed and mems_generation may only | 228 | * The task_struct fields mems_allowed and mems_generation may only |
228 | * be accessed in the context of that task, so require no locks. | 229 | * be accessed in the context of that task, so require no locks. |
229 | * | 230 | * |
230 | * The cpuset_common_file_write handler for operations that modify | ||
231 | * the cpuset hierarchy holds cgroup_mutex across the entire operation, | ||
232 | * single threading all such cpuset modifications across the system. | ||
233 | * | ||
234 | * The cpuset_common_file_read() handlers only hold callback_mutex across | 231 | * The cpuset_common_file_read() handlers only hold callback_mutex across |
235 | * small pieces of code, such as when reading out possibly multi-word | 232 | * small pieces of code, such as when reading out possibly multi-word |
236 | * cpumasks and nodemasks. | 233 | * cpumasks and nodemasks. |
@@ -241,9 +238,11 @@ static struct cpuset top_cpuset = { | |||
241 | 238 | ||
242 | static DEFINE_MUTEX(callback_mutex); | 239 | static DEFINE_MUTEX(callback_mutex); |
243 | 240 | ||
244 | /* This is ugly, but preserves the userspace API for existing cpuset | 241 | /* |
242 | * This is ugly, but preserves the userspace API for existing cpuset | ||
245 | * users. If someone tries to mount the "cpuset" filesystem, we | 243 | * users. If someone tries to mount the "cpuset" filesystem, we |
246 | * silently switch it to mount "cgroup" instead */ | 244 | * silently switch it to mount "cgroup" instead |
245 | */ | ||
247 | static int cpuset_get_sb(struct file_system_type *fs_type, | 246 | static int cpuset_get_sb(struct file_system_type *fs_type, |
248 | int flags, const char *unused_dev_name, | 247 | int flags, const char *unused_dev_name, |
249 | void *data, struct vfsmount *mnt) | 248 | void *data, struct vfsmount *mnt) |
@@ -369,7 +368,7 @@ void cpuset_update_task_memory_state(void) | |||
369 | my_cpusets_mem_gen = top_cpuset.mems_generation; | 368 | my_cpusets_mem_gen = top_cpuset.mems_generation; |
370 | } else { | 369 | } else { |
371 | rcu_read_lock(); | 370 | rcu_read_lock(); |
372 | my_cpusets_mem_gen = task_cs(current)->mems_generation; | 371 | my_cpusets_mem_gen = task_cs(tsk)->mems_generation; |
373 | rcu_read_unlock(); | 372 | rcu_read_unlock(); |
374 | } | 373 | } |
375 | 374 | ||
@@ -478,10 +477,9 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) | |||
478 | } | 477 | } |
479 | 478 | ||
480 | /* | 479 | /* |
481 | * Helper routine for rebuild_sched_domains(). | 480 | * Helper routine for generate_sched_domains(). |
482 | * Do cpusets a, b have overlapping cpus_allowed masks? | 481 | * Do cpusets a, b have overlapping cpus_allowed masks? |
483 | */ | 482 | */ |
484 | |||
485 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) | 483 | static int cpusets_overlap(struct cpuset *a, struct cpuset *b) |
486 | { | 484 | { |
487 | return cpus_intersects(a->cpus_allowed, b->cpus_allowed); | 485 | return cpus_intersects(a->cpus_allowed, b->cpus_allowed); |
@@ -490,29 +488,48 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b) | |||
490 | static void | 488 | static void |
491 | update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | 489 | update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) |
492 | { | 490 | { |
493 | if (!dattr) | ||
494 | return; | ||
495 | if (dattr->relax_domain_level < c->relax_domain_level) | 491 | if (dattr->relax_domain_level < c->relax_domain_level) |
496 | dattr->relax_domain_level = c->relax_domain_level; | 492 | dattr->relax_domain_level = c->relax_domain_level; |
497 | return; | 493 | return; |
498 | } | 494 | } |
499 | 495 | ||
496 | static void | ||
497 | update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c) | ||
498 | { | ||
499 | LIST_HEAD(q); | ||
500 | |||
501 | list_add(&c->stack_list, &q); | ||
502 | while (!list_empty(&q)) { | ||
503 | struct cpuset *cp; | ||
504 | struct cgroup *cont; | ||
505 | struct cpuset *child; | ||
506 | |||
507 | cp = list_first_entry(&q, struct cpuset, stack_list); | ||
508 | list_del(q.next); | ||
509 | |||
510 | if (cpus_empty(cp->cpus_allowed)) | ||
511 | continue; | ||
512 | |||
513 | if (is_sched_load_balance(cp)) | ||
514 | update_domain_attr(dattr, cp); | ||
515 | |||
516 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { | ||
517 | child = cgroup_cs(cont); | ||
518 | list_add_tail(&child->stack_list, &q); | ||
519 | } | ||
520 | } | ||
521 | } | ||
522 | |||
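update_domain_attr_tree() walks the whole cpuset subtree without recursion: cpusets are pushed onto a plain list_head queue through their embedded stack_list member and popped until the queue drains, so kernel stack usage stays bounded no matter how deep the hierarchy is. Because stack_list lives inside struct cpuset, a cpuset can sit on at most one such scan queue at a time; that is safe here since these scans run with cgroup_lock held. The skeleton of the idiom (illustrative only):

	LIST_HEAD(q);				/* work list of cpusets to visit */

	list_add(&root->stack_list, &q);	/* seed with the subtree root */
	while (!list_empty(&q)) {
		struct cpuset *cp;
		struct cgroup *cont;

		cp = list_first_entry(&q, struct cpuset, stack_list);
		list_del(q.next);		/* pop the head of the queue */

		/* ... visit cp ... */

		/* queue each child so it is visited in turn */
		list_for_each_entry(cont, &cp->css.cgroup->children, sibling)
			list_add_tail(&cgroup_cs(cont)->stack_list, &q);
	}
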
500 | /* | 523 | /* |
501 | * rebuild_sched_domains() | 524 | * generate_sched_domains() |
502 | * | 525 | * |
503 | * If the flag 'sched_load_balance' of any cpuset with non-empty | 526 | * This function builds a partial partition of the system's CPUs. |
504 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset | 527 | * A 'partial partition' is a set of non-overlapping subsets whose |
505 | * which has that flag enabled, or if any cpuset with a non-empty | 528 | * union is a subset of that set. |
506 | * 'cpus' is removed, then call this routine to rebuild the | 529 | * The output of this function needs to be passed to kernel/sched.c |
507 | * scheduler's dynamic sched domains. | 530 | * partition_sched_domains() routine, which will rebuild the scheduler's |
508 | * | 531 | * load balancing domains (sched domains) as specified by that partial |
509 | * This routine builds a partial partition of the systems CPUs | 532 | * partition. |
510 | * (the set of non-overlappping cpumask_t's in the array 'part' | ||
511 | * below), and passes that partial partition to the kernel/sched.c | ||
512 | * partition_sched_domains() routine, which will rebuild the | ||
513 | * schedulers load balancing domains (sched domains) as specified | ||
514 | * by that partial partition. A 'partial partition' is a set of | ||
515 | * non-overlapping subsets whose union is a subset of that set. | ||
516 | * | 533 | * |
517 | * See "What is sched_load_balance" in Documentation/cpusets.txt | 534 | * See "What is sched_load_balance" in Documentation/cpusets.txt |
518 | * for a background explanation of this. | 535 | * for a background explanation of this. |
@@ -522,16 +539,10 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | |||
522 | * domains when operating in the severe memory shortage situations | 539 | * domains when operating in the severe memory shortage situations |
523 | * that could cause allocation failures below. | 540 | * that could cause allocation failures below. |
524 | * | 541 | * |
525 | * Call with cgroup_mutex held. May take callback_mutex during | 542 | * Must be called with cgroup_lock held. |
526 | * call due to the kfifo_alloc() and kmalloc() calls. May nest | ||
527 | * a call to the get_online_cpus()/put_online_cpus() pair. | ||
528 | * Must not be called holding callback_mutex, because we must not | ||
529 | * call get_online_cpus() while holding callback_mutex. Elsewhere | ||
530 | * the kernel nests callback_mutex inside get_online_cpus() calls. | ||
531 | * So the reverse nesting would risk an ABBA deadlock. | ||
532 | * | 543 | * |
533 | * The three key local variables below are: | 544 | * The three key local variables below are: |
534 | * q - a kfifo queue of cpuset pointers, used to implement a | 545 | * q - a linked-list queue of cpuset pointers, used to implement a |
535 | * top-down scan of all cpusets. This scan loads a pointer | 546 | * top-down scan of all cpusets. This scan loads a pointer |
536 | * to each cpuset marked is_sched_load_balance into the | 547 | * to each cpuset marked is_sched_load_balance into the |
537 | * array 'csa'. For our purposes, rebuilding the schedulers | 548 | * array 'csa'. For our purposes, rebuilding the schedulers |
@@ -563,10 +574,10 @@ update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) | |||
563 | * element of the partition (one sched domain) to be passed to | 574 | * element of the partition (one sched domain) to be passed to |
564 | * partition_sched_domains(). | 575 | * partition_sched_domains(). |
565 | */ | 576 | */ |
566 | 577 | static int generate_sched_domains(cpumask_t **domains, | |
567 | static void rebuild_sched_domains(void) | 578 | struct sched_domain_attr **attributes) |
568 | { | 579 | { |
569 | struct kfifo *q; /* queue of cpusets to be scanned */ | 580 | LIST_HEAD(q); /* queue of cpusets to be scanned */ |
570 | struct cpuset *cp; /* scans q */ | 581 | struct cpuset *cp; /* scans q */ |
571 | struct cpuset **csa; /* array of all cpuset ptrs */ | 582 | struct cpuset **csa; /* array of all cpuset ptrs */ |
572 | int csn; /* how many cpuset ptrs in csa so far */ | 583 | int csn; /* how many cpuset ptrs in csa so far */ |
@@ -576,44 +587,58 @@ static void rebuild_sched_domains(void) | |||
576 | int ndoms; /* number of sched domains in result */ | 587 | int ndoms; /* number of sched domains in result */ |
577 | int nslot; /* next empty doms[] cpumask_t slot */ | 588 | int nslot; /* next empty doms[] cpumask_t slot */ |
578 | 589 | ||
579 | q = NULL; | 590 | ndoms = 0; |
580 | csa = NULL; | ||
581 | doms = NULL; | 591 | doms = NULL; |
582 | dattr = NULL; | 592 | dattr = NULL; |
593 | csa = NULL; | ||
583 | 594 | ||
584 | /* Special case for the 99% of systems with one, full, sched domain */ | 595 | /* Special case for the 99% of systems with one, full, sched domain */ |
585 | if (is_sched_load_balance(&top_cpuset)) { | 596 | if (is_sched_load_balance(&top_cpuset)) { |
586 | ndoms = 1; | ||
587 | doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 597 | doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); |
588 | if (!doms) | 598 | if (!doms) |
589 | goto rebuild; | 599 | goto done; |
600 | |||
590 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); | 601 | dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); |
591 | if (dattr) { | 602 | if (dattr) { |
592 | *dattr = SD_ATTR_INIT; | 603 | *dattr = SD_ATTR_INIT; |
593 | update_domain_attr(dattr, &top_cpuset); | 604 | update_domain_attr_tree(dattr, &top_cpuset); |
594 | } | 605 | } |
595 | *doms = top_cpuset.cpus_allowed; | 606 | *doms = top_cpuset.cpus_allowed; |
596 | goto rebuild; | ||
597 | } | ||
598 | 607 | ||
599 | q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL); | 608 | ndoms = 1; |
600 | if (IS_ERR(q)) | ||
601 | goto done; | 609 | goto done; |
610 | } | ||
611 | |||
602 | csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); | 612 | csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); |
603 | if (!csa) | 613 | if (!csa) |
604 | goto done; | 614 | goto done; |
605 | csn = 0; | 615 | csn = 0; |
606 | 616 | ||
607 | cp = &top_cpuset; | 617 | list_add(&top_cpuset.stack_list, &q); |
608 | __kfifo_put(q, (void *)&cp, sizeof(cp)); | 618 | while (!list_empty(&q)) { |
609 | while (__kfifo_get(q, (void *)&cp, sizeof(cp))) { | ||
610 | struct cgroup *cont; | 619 | struct cgroup *cont; |
611 | struct cpuset *child; /* scans child cpusets of cp */ | 620 | struct cpuset *child; /* scans child cpusets of cp */ |
612 | if (is_sched_load_balance(cp)) | 621 | |
622 | cp = list_first_entry(&q, struct cpuset, stack_list); | ||
623 | list_del(q.next); | ||
624 | |||
625 | if (cpus_empty(cp->cpus_allowed)) | ||
626 | continue; | ||
627 | |||
628 | /* | ||
629 | * All child cpusets contain a subset of the parent's cpus, so | ||
630 | * just skip them, and then we call update_domain_attr_tree() | ||
631 | * to calc relax_domain_level of the corresponding sched | ||
632 | * domain. | ||
633 | */ | ||
634 | if (is_sched_load_balance(cp)) { | ||
613 | csa[csn++] = cp; | 635 | csa[csn++] = cp; |
636 | continue; | ||
637 | } | ||
638 | |||
614 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { | 639 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { |
615 | child = cgroup_cs(cont); | 640 | child = cgroup_cs(cont); |
616 | __kfifo_put(q, (void *)&child, sizeof(cp)); | 641 | list_add_tail(&child->stack_list, &q); |
617 | } | 642 | } |
618 | } | 643 | } |
619 | 644 | ||
@@ -644,91 +669,141 @@ restart: | |||
644 | } | 669 | } |
645 | } | 670 | } |
646 | 671 | ||
647 | /* Convert <csn, csa> to <ndoms, doms> */ | 672 | /* |
673 | * Now we know how many domains to create. | ||
674 | * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. | ||
675 | */ | ||
648 | doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); | 676 | doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); |
649 | if (!doms) | 677 | if (!doms) { |
650 | goto rebuild; | 678 | ndoms = 0; |
679 | goto done; | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * The rest of the code, including the scheduler, can deal with | ||
684 | * dattr==NULL case. No need to abort if alloc fails. | ||
685 | */ | ||
651 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); | 686 | dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL); |
652 | 687 | ||
653 | for (nslot = 0, i = 0; i < csn; i++) { | 688 | for (nslot = 0, i = 0; i < csn; i++) { |
654 | struct cpuset *a = csa[i]; | 689 | struct cpuset *a = csa[i]; |
690 | cpumask_t *dp; | ||
655 | int apn = a->pn; | 691 | int apn = a->pn; |
656 | 692 | ||
657 | if (apn >= 0) { | 693 | if (apn < 0) { |
658 | cpumask_t *dp = doms + nslot; | 694 | /* Skip completed partitions */ |
659 | 695 | continue; | |
660 | if (nslot == ndoms) { | 696 | } |
661 | static int warnings = 10; | 697 | |
662 | if (warnings) { | 698 | dp = doms + nslot; |
663 | printk(KERN_WARNING | 699 | |
664 | "rebuild_sched_domains confused:" | 700 | if (nslot == ndoms) { |
665 | " nslot %d, ndoms %d, csn %d, i %d," | 701 | static int warnings = 10; |
666 | " apn %d\n", | 702 | if (warnings) { |
667 | nslot, ndoms, csn, i, apn); | 703 | printk(KERN_WARNING |
668 | warnings--; | 704 | "rebuild_sched_domains confused:" |
669 | } | 705 | " nslot %d, ndoms %d, csn %d, i %d," |
670 | continue; | 706 | " apn %d\n", |
707 | nslot, ndoms, csn, i, apn); | ||
708 | warnings--; | ||
671 | } | 709 | } |
710 | continue; | ||
711 | } | ||
672 | 712 | ||
673 | cpus_clear(*dp); | 713 | cpus_clear(*dp); |
674 | if (dattr) | 714 | if (dattr) |
675 | *(dattr + nslot) = SD_ATTR_INIT; | 715 | *(dattr + nslot) = SD_ATTR_INIT; |
676 | for (j = i; j < csn; j++) { | 716 | for (j = i; j < csn; j++) { |
677 | struct cpuset *b = csa[j]; | 717 | struct cpuset *b = csa[j]; |
678 | 718 | ||
679 | if (apn == b->pn) { | 719 | if (apn == b->pn) { |
680 | cpus_or(*dp, *dp, b->cpus_allowed); | 720 | cpus_or(*dp, *dp, b->cpus_allowed); |
681 | b->pn = -1; | 721 | if (dattr) |
682 | update_domain_attr(dattr, b); | 722 | update_domain_attr_tree(dattr + nslot, b); |
683 | } | 723 | |
724 | /* Done with this partition */ | ||
725 | b->pn = -1; | ||
684 | } | 726 | } |
685 | nslot++; | ||
686 | } | 727 | } |
728 | nslot++; | ||
687 | } | 729 | } |
688 | BUG_ON(nslot != ndoms); | 730 | BUG_ON(nslot != ndoms); |
689 | 731 | ||
690 | rebuild: | ||
691 | /* Have scheduler rebuild sched domains */ | ||
692 | get_online_cpus(); | ||
693 | partition_sched_domains(ndoms, doms, dattr); | ||
694 | put_online_cpus(); | ||
695 | |||
696 | done: | 732 | done: |
697 | if (q && !IS_ERR(q)) | ||
698 | kfifo_free(q); | ||
699 | kfree(csa); | 733 | kfree(csa); |
700 | /* Don't kfree(doms) -- partition_sched_domains() does that. */ | 734 | |
701 | /* Don't kfree(dattr) -- partition_sched_domains() does that. */ | 735 | *domains = doms; |
736 | *attributes = dattr; | ||
737 | return ndoms; | ||
702 | } | 738 | } |
703 | 739 | ||
704 | static inline int started_after_time(struct task_struct *t1, | 740 | /* |
705 | struct timespec *time, | 741 | * Rebuild scheduler domains. |
706 | struct task_struct *t2) | 742 | * |
743 | * Call with neither cgroup_mutex held nor within get_online_cpus(). | ||
744 | * Takes both cgroup_mutex and get_online_cpus(). | ||
745 | * | ||
746 | * Cannot be directly called from cpuset code handling changes | ||
747 | * to the cpuset pseudo-filesystem, because it cannot be called | ||
748 | * from code that already holds cgroup_mutex. | ||
749 | */ | ||
750 | static void do_rebuild_sched_domains(struct work_struct *unused) | ||
707 | { | 751 | { |
708 | int start_diff = timespec_compare(&t1->start_time, time); | 752 | struct sched_domain_attr *attr; |
709 | if (start_diff > 0) { | 753 | cpumask_t *doms; |
710 | return 1; | 754 | int ndoms; |
711 | } else if (start_diff < 0) { | 755 | |
712 | return 0; | 756 | get_online_cpus(); |
713 | } else { | 757 | |
714 | /* | 758 | /* Generate domain masks and attrs */ |
715 | * Arbitrarily, if two processes started at the same | 759 | cgroup_lock(); |
716 | * time, we'll say that the lower pointer value | 760 | ndoms = generate_sched_domains(&doms, &attr); |
717 | * started first. Note that t2 may have exited by now | 761 | cgroup_unlock(); |
718 | * so this may not be a valid pointer any longer, but | 762 | |
719 | * that's fine - it still serves to distinguish | 763 | /* Have scheduler rebuild the domains */ |
720 | * between two tasks started (effectively) | 764 | partition_sched_domains(ndoms, doms, attr); |
721 | * simultaneously. | 765 | |
722 | */ | 766 | put_online_cpus(); |
723 | return t1 > t2; | 767 | } |
724 | } | 768 | |
769 | static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); | ||
770 | |||
771 | /* | ||
772 | * Rebuild scheduler domains, asynchronously via workqueue. | ||
773 | * | ||
774 | * If the flag 'sched_load_balance' of any cpuset with non-empty | ||
775 | * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset | ||
776 | * which has that flag enabled, or if any cpuset with a non-empty | ||
777 | * 'cpus' is removed, then call this routine to rebuild the | ||
778 | * scheduler's dynamic sched domains. | ||
779 | * | ||
780 | * The rebuild_sched_domains() and partition_sched_domains() | ||
781 | * routines must nest cgroup_lock() inside get_online_cpus(), | ||
782 | * but such cpuset changes as these must nest that locking the | ||
783 | * other way, holding cgroup_lock() for much of the code. | ||
784 | * | ||
785 | * So in order to avoid an ABBA deadlock, the cpuset code handling | ||
786 | * these user changes delegates the actual sched domain rebuilding | ||
787 | * to a separate workqueue thread, which ends up processing the | ||
788 | * above do_rebuild_sched_domains() function. | ||
789 | */ | ||
790 | static void async_rebuild_sched_domains(void) | ||
791 | { | ||
792 | schedule_work(&rebuild_sched_domains_work); | ||
725 | } | 793 | } |
726 | 794 | ||
727 | static inline int started_after(void *p1, void *p2) | 795 | /* |
796 | * Accomplishes the same scheduler domain rebuild as the above | ||
797 | * async_rebuild_sched_domains(), however it directly calls the | ||
798 | * rebuild routine synchronously rather than calling it via an | ||
799 | * asynchronous work thread. | ||
800 | * | ||
801 | * This can only be called from code that is not holding | ||
802 | * cgroup_mutex (not nested in a cgroup_lock() call.) | ||
803 | */ | ||
804 | void rebuild_sched_domains(void) | ||
728 | { | 805 | { |
729 | struct task_struct *t1 = p1; | 806 | do_rebuild_sched_domains(NULL); |
730 | struct task_struct *t2 = p2; | ||
731 | return started_after_time(t1, &t2->start_time, t2); | ||
732 | } | 807 | } |
733 | 808 | ||
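The split between async_rebuild_sched_domains() and the synchronous rebuild_sched_domains() above exists purely because of lock ordering: the rebuild path takes get_online_cpus() first and cgroup_lock() second, while the cpuset write handlers already hold cgroup_lock() when they decide a rebuild is needed. Calling the rebuild path directly from a handler would produce the classic ABBA deadlock sketched below (pseudo-flow, not real call chains):

/*
 * Thread A: cpuset write handler        Thread B: sched domain rebuild
 *
 *   cgroup_lock();                        get_online_cpus();
 *   ...                                   ...
 *   get_online_cpus();  <- blocks on B    cgroup_lock();  <- blocks on A
 *
 * Neither thread can proceed. Handing the rebuild to a workqueue gives it
 * a context that starts with no locks held, so it can always take
 * get_online_cpus() before cgroup_lock(), in that one consistent order.
 */
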
734 | /** | 809 | /** |
@@ -766,15 +841,38 @@ static void cpuset_change_cpumask(struct task_struct *tsk, | |||
766 | } | 841 | } |
767 | 842 | ||
768 | /** | 843 | /** |
844 | * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. | ||
845 | * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed | ||
846 | * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() | ||
847 | * | ||
848 | * Called with cgroup_mutex held | ||
849 | * | ||
850 | * The cgroup_scan_tasks() function will scan all the tasks in a cgroup, | ||
851 | * calling callback functions for each. | ||
852 | * | ||
853 | * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 | ||
854 | * if @heap != NULL. | ||
855 | */ | ||
856 | static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap) | ||
857 | { | ||
858 | struct cgroup_scanner scan; | ||
859 | |||
860 | scan.cg = cs->css.cgroup; | ||
861 | scan.test_task = cpuset_test_cpumask; | ||
862 | scan.process_task = cpuset_change_cpumask; | ||
863 | scan.heap = heap; | ||
864 | cgroup_scan_tasks(&scan); | ||
865 | } | ||
866 | |||
867 | /** | ||
769 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it | 868 | * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it |
770 | * @cs: the cpuset to consider | 869 | * @cs: the cpuset to consider |
771 | * @buf: buffer of cpu numbers written to this cpuset | 870 | * @buf: buffer of cpu numbers written to this cpuset |
772 | */ | 871 | */ |
773 | static int update_cpumask(struct cpuset *cs, char *buf) | 872 | static int update_cpumask(struct cpuset *cs, const char *buf) |
774 | { | 873 | { |
775 | struct cpuset trialcs; | ||
776 | struct cgroup_scanner scan; | ||
777 | struct ptr_heap heap; | 874 | struct ptr_heap heap; |
875 | struct cpuset trialcs; | ||
778 | int retval; | 876 | int retval; |
779 | int is_load_balanced; | 877 | int is_load_balanced; |
780 | 878 | ||
@@ -790,7 +888,6 @@ static int update_cpumask(struct cpuset *cs, char *buf) | |||
790 | * that parsing. The validate_change() call ensures that cpusets | 888 | * that parsing. The validate_change() call ensures that cpusets |
791 | * with tasks have cpus. | 889 | * with tasks have cpus. |
792 | */ | 890 | */ |
793 | buf = strstrip(buf); | ||
794 | if (!*buf) { | 891 | if (!*buf) { |
795 | cpus_clear(trialcs.cpus_allowed); | 892 | cpus_clear(trialcs.cpus_allowed); |
796 | } else { | 893 | } else { |
@@ -809,7 +906,7 @@ static int update_cpumask(struct cpuset *cs, char *buf) | |||
809 | if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) | 906 | if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) |
810 | return 0; | 907 | return 0; |
811 | 908 | ||
812 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after); | 909 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); |
813 | if (retval) | 910 | if (retval) |
814 | return retval; | 911 | return retval; |
815 | 912 | ||
@@ -823,15 +920,12 @@ static int update_cpumask(struct cpuset *cs, char *buf) | |||
823 | * Scan tasks in the cpuset, and update the cpumasks of any | 920 | * Scan tasks in the cpuset, and update the cpumasks of any |
824 | * that need an update. | 921 | * that need an update. |
825 | */ | 922 | */ |
826 | scan.cg = cs->css.cgroup; | 923 | update_tasks_cpumask(cs, &heap); |
827 | scan.test_task = cpuset_test_cpumask; | 924 | |
828 | scan.process_task = cpuset_change_cpumask; | ||
829 | scan.heap = &heap; | ||
830 | cgroup_scan_tasks(&scan); | ||
831 | heap_free(&heap); | 925 | heap_free(&heap); |
832 | 926 | ||
833 | if (is_load_balanced) | 927 | if (is_load_balanced) |
834 | rebuild_sched_domains(); | 928 | async_rebuild_sched_domains(); |
835 | return 0; | 929 | return 0; |
836 | } | 930 | } |
837 | 931 | ||
@@ -884,74 +978,25 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, | |||
884 | mutex_unlock(&callback_mutex); | 978 | mutex_unlock(&callback_mutex); |
885 | } | 979 | } |
886 | 980 | ||
887 | /* | ||
888 | * Handle user request to change the 'mems' memory placement | ||
889 | * of a cpuset. Needs to validate the request, update the | ||
890 | * cpusets mems_allowed and mems_generation, and for each | ||
891 | * task in the cpuset, rebind any vma mempolicies and if | ||
892 | * the cpuset is marked 'memory_migrate', migrate the tasks | ||
893 | * pages to the new memory. | ||
894 | * | ||
895 | * Call with cgroup_mutex held. May take callback_mutex during call. | ||
896 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, | ||
897 | * lock each such tasks mm->mmap_sem, scan its vma's and rebind | ||
898 | * their mempolicies to the cpusets new mems_allowed. | ||
899 | */ | ||
900 | |||
901 | static void *cpuset_being_rebound; | 981 | static void *cpuset_being_rebound; |
902 | 982 | ||
903 | static int update_nodemask(struct cpuset *cs, char *buf) | 983 | /** |
984 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | ||
985 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | ||
986 | * @oldmem: old mems_allowed of cpuset cs | ||
987 | * | ||
988 | * Called with cgroup_mutex held | ||
989 | * Return 0 if successful, -errno if not. | ||
990 | */ | ||
991 | static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem) | ||
904 | { | 992 | { |
905 | struct cpuset trialcs; | ||
906 | nodemask_t oldmem; | ||
907 | struct task_struct *p; | 993 | struct task_struct *p; |
908 | struct mm_struct **mmarray; | 994 | struct mm_struct **mmarray; |
909 | int i, n, ntasks; | 995 | int i, n, ntasks; |
910 | int migrate; | 996 | int migrate; |
911 | int fudge; | 997 | int fudge; |
912 | int retval; | ||
913 | struct cgroup_iter it; | 998 | struct cgroup_iter it; |
914 | 999 | int retval; | |
915 | /* | ||
916 | * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; | ||
917 | * it's read-only | ||
918 | */ | ||
919 | if (cs == &top_cpuset) | ||
920 | return -EACCES; | ||
921 | |||
922 | trialcs = *cs; | ||
923 | |||
924 | /* | ||
925 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. | ||
926 | * Since nodelist_parse() fails on an empty mask, we special case | ||
927 | * that parsing. The validate_change() call ensures that cpusets | ||
928 | * with tasks have memory. | ||
929 | */ | ||
930 | buf = strstrip(buf); | ||
931 | if (!*buf) { | ||
932 | nodes_clear(trialcs.mems_allowed); | ||
933 | } else { | ||
934 | retval = nodelist_parse(buf, trialcs.mems_allowed); | ||
935 | if (retval < 0) | ||
936 | goto done; | ||
937 | |||
938 | if (!nodes_subset(trialcs.mems_allowed, | ||
939 | node_states[N_HIGH_MEMORY])) | ||
940 | return -EINVAL; | ||
941 | } | ||
942 | oldmem = cs->mems_allowed; | ||
943 | if (nodes_equal(oldmem, trialcs.mems_allowed)) { | ||
944 | retval = 0; /* Too easy - nothing to do */ | ||
945 | goto done; | ||
946 | } | ||
947 | retval = validate_change(cs, &trialcs); | ||
948 | if (retval < 0) | ||
949 | goto done; | ||
950 | |||
951 | mutex_lock(&callback_mutex); | ||
952 | cs->mems_allowed = trialcs.mems_allowed; | ||
953 | cs->mems_generation = cpuset_mems_generation++; | ||
954 | mutex_unlock(&callback_mutex); | ||
955 | 1000 | ||
956 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ | 1001 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
957 | 1002 | ||
@@ -1018,7 +1063,7 @@ static int update_nodemask(struct cpuset *cs, char *buf) | |||
1018 | 1063 | ||
1019 | mpol_rebind_mm(mm, &cs->mems_allowed); | 1064 | mpol_rebind_mm(mm, &cs->mems_allowed); |
1020 | if (migrate) | 1065 | if (migrate) |
1021 | cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed); | 1066 | cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed); |
1022 | mmput(mm); | 1067 | mmput(mm); |
1023 | } | 1068 | } |
1024 | 1069 | ||
@@ -1030,6 +1075,70 @@ done: | |||
1030 | return retval; | 1075 | return retval; |
1031 | } | 1076 | } |
1032 | 1077 | ||
1078 | /* | ||
1079 | * Handle user request to change the 'mems' memory placement | ||
1080 | * of a cpuset. Needs to validate the request, update the | ||
1081 | * cpusets mems_allowed and mems_generation, and for each | ||
1082 | * task in the cpuset, rebind any vma mempolicies and if | ||
1083 | * the cpuset is marked 'memory_migrate', migrate the tasks | ||
1084 | * pages to the new memory. | ||
1085 | * | ||
1086 | * Call with cgroup_mutex held. May take callback_mutex during call. | ||
1087 | * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, | ||
1088 | * lock each such tasks mm->mmap_sem, scan its vma's and rebind | ||
1089 | * their mempolicies to the cpusets new mems_allowed. | ||
1090 | */ | ||
1091 | static int update_nodemask(struct cpuset *cs, const char *buf) | ||
1092 | { | ||
1093 | struct cpuset trialcs; | ||
1094 | nodemask_t oldmem; | ||
1095 | int retval; | ||
1096 | |||
1097 | /* | ||
1098 | * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; | ||
1099 | * it's read-only | ||
1100 | */ | ||
1101 | if (cs == &top_cpuset) | ||
1102 | return -EACCES; | ||
1103 | |||
1104 | trialcs = *cs; | ||
1105 | |||
1106 | /* | ||
1107 | * An empty mems_allowed is ok iff there are no tasks in the cpuset. | ||
1108 | * Since nodelist_parse() fails on an empty mask, we special case | ||
1109 | * that parsing. The validate_change() call ensures that cpusets | ||
1110 | * with tasks have memory. | ||
1111 | */ | ||
1112 | if (!*buf) { | ||
1113 | nodes_clear(trialcs.mems_allowed); | ||
1114 | } else { | ||
1115 | retval = nodelist_parse(buf, trialcs.mems_allowed); | ||
1116 | if (retval < 0) | ||
1117 | goto done; | ||
1118 | |||
1119 | if (!nodes_subset(trialcs.mems_allowed, | ||
1120 | node_states[N_HIGH_MEMORY])) | ||
1121 | return -EINVAL; | ||
1122 | } | ||
1123 | oldmem = cs->mems_allowed; | ||
1124 | if (nodes_equal(oldmem, trialcs.mems_allowed)) { | ||
1125 | retval = 0; /* Too easy - nothing to do */ | ||
1126 | goto done; | ||
1127 | } | ||
1128 | retval = validate_change(cs, &trialcs); | ||
1129 | if (retval < 0) | ||
1130 | goto done; | ||
1131 | |||
1132 | mutex_lock(&callback_mutex); | ||
1133 | cs->mems_allowed = trialcs.mems_allowed; | ||
1134 | cs->mems_generation = cpuset_mems_generation++; | ||
1135 | mutex_unlock(&callback_mutex); | ||
1136 | |||
1137 | retval = update_tasks_nodemask(cs, &oldmem); | ||
1138 | done: | ||
1139 | return retval; | ||
1140 | } | ||
1141 | |||
1033 | int current_cpuset_is_being_rebound(void) | 1142 | int current_cpuset_is_being_rebound(void) |
1034 | { | 1143 | { |
1035 | return task_cs(current) == cpuset_being_rebound; | 1144 | return task_cs(current) == cpuset_being_rebound; |
@@ -1042,7 +1151,8 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) | |||
1042 | 1151 | ||
1043 | if (val != cs->relax_domain_level) { | 1152 | if (val != cs->relax_domain_level) { |
1044 | cs->relax_domain_level = val; | 1153 | cs->relax_domain_level = val; |
1045 | rebuild_sched_domains(); | 1154 | if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs)) |
1155 | async_rebuild_sched_domains(); | ||
1046 | } | 1156 | } |
1047 | 1157 | ||
1048 | return 0; | 1158 | return 0; |
@@ -1083,7 +1193,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, | |||
1083 | mutex_unlock(&callback_mutex); | 1193 | mutex_unlock(&callback_mutex); |
1084 | 1194 | ||
1085 | if (cpus_nonempty && balance_flag_changed) | 1195 | if (cpus_nonempty && balance_flag_changed) |
1086 | rebuild_sched_domains(); | 1196 | async_rebuild_sched_domains(); |
1087 | 1197 | ||
1088 | return 0; | 1198 | return 0; |
1089 | } | 1199 | } |
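Note: async_rebuild_sched_domains() is added elsewhere in this patch and is not visible in these hunks. The intent is to defer the sched-domain rebuild to a workqueue so it runs without the caller's cgroup_lock()/callback_mutex held, sidestepping the ABBA deadlock with get_online_cpus() mentioned in the comment removed further down. A rough sketch of what such a helper could look like (names and details assumed, not the patch's actual definition):

	/* Deferred rebuild: runs in process context with no cpuset locks held. */
	static void do_rebuild_sched_domains(struct work_struct *unused)
	{
		struct sched_domain_attr *attr;
		cpumask_t *doms;
		int ndoms;

		get_online_cpus();

		/* Build the domain masks under cgroup_lock(), as generate_sched_domains() expects. */
		cgroup_lock();
		ndoms = generate_sched_domains(&doms, &attr);
		cgroup_unlock();

		/* Hand the result to the scheduler with no cpuset locks held. */
		partition_sched_domains(ndoms, doms, attr);

		put_online_cpus();
	}

	static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);

	static void async_rebuild_sched_domains(void)
	{
		schedule_work(&rebuild_sched_domains_work);
	}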
@@ -1254,72 +1364,14 @@ typedef enum { | |||
1254 | FILE_SPREAD_SLAB, | 1364 | FILE_SPREAD_SLAB, |
1255 | } cpuset_filetype_t; | 1365 | } cpuset_filetype_t; |
1256 | 1366 | ||
1257 | static ssize_t cpuset_common_file_write(struct cgroup *cont, | ||
1258 | struct cftype *cft, | ||
1259 | struct file *file, | ||
1260 | const char __user *userbuf, | ||
1261 | size_t nbytes, loff_t *unused_ppos) | ||
1262 | { | ||
1263 | struct cpuset *cs = cgroup_cs(cont); | ||
1264 | cpuset_filetype_t type = cft->private; | ||
1265 | char *buffer; | ||
1266 | int retval = 0; | ||
1267 | |||
1268 | /* Crude upper limit on largest legitimate cpulist user might write. */ | ||
1269 | if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES)) | ||
1270 | return -E2BIG; | ||
1271 | |||
1272 | /* +1 for nul-terminator */ | ||
1273 | buffer = kmalloc(nbytes + 1, GFP_KERNEL); | ||
1274 | if (!buffer) | ||
1275 | return -ENOMEM; | ||
1276 | |||
1277 | if (copy_from_user(buffer, userbuf, nbytes)) { | ||
1278 | retval = -EFAULT; | ||
1279 | goto out1; | ||
1280 | } | ||
1281 | buffer[nbytes] = 0; /* nul-terminate */ | ||
1282 | |||
1283 | cgroup_lock(); | ||
1284 | |||
1285 | if (cgroup_is_removed(cont)) { | ||
1286 | retval = -ENODEV; | ||
1287 | goto out2; | ||
1288 | } | ||
1289 | |||
1290 | switch (type) { | ||
1291 | case FILE_CPULIST: | ||
1292 | retval = update_cpumask(cs, buffer); | ||
1293 | break; | ||
1294 | case FILE_MEMLIST: | ||
1295 | retval = update_nodemask(cs, buffer); | ||
1296 | break; | ||
1297 | default: | ||
1298 | retval = -EINVAL; | ||
1299 | goto out2; | ||
1300 | } | ||
1301 | |||
1302 | if (retval == 0) | ||
1303 | retval = nbytes; | ||
1304 | out2: | ||
1305 | cgroup_unlock(); | ||
1306 | out1: | ||
1307 | kfree(buffer); | ||
1308 | return retval; | ||
1309 | } | ||
1310 | |||
1311 | static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) | 1367 | static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) |
1312 | { | 1368 | { |
1313 | int retval = 0; | 1369 | int retval = 0; |
1314 | struct cpuset *cs = cgroup_cs(cgrp); | 1370 | struct cpuset *cs = cgroup_cs(cgrp); |
1315 | cpuset_filetype_t type = cft->private; | 1371 | cpuset_filetype_t type = cft->private; |
1316 | 1372 | ||
1317 | cgroup_lock(); | 1373 | if (!cgroup_lock_live_group(cgrp)) |
1318 | |||
1319 | if (cgroup_is_removed(cgrp)) { | ||
1320 | cgroup_unlock(); | ||
1321 | return -ENODEV; | 1374 | return -ENODEV; |
1322 | } | ||
1323 | 1375 | ||
1324 | switch (type) { | 1376 | switch (type) { |
1325 | case FILE_CPU_EXCLUSIVE: | 1377 | case FILE_CPU_EXCLUSIVE: |
@@ -1365,12 +1417,9 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val) | |||
1365 | struct cpuset *cs = cgroup_cs(cgrp); | 1417 | struct cpuset *cs = cgroup_cs(cgrp); |
1366 | cpuset_filetype_t type = cft->private; | 1418 | cpuset_filetype_t type = cft->private; |
1367 | 1419 | ||
1368 | cgroup_lock(); | 1420 | if (!cgroup_lock_live_group(cgrp)) |
1369 | |||
1370 | if (cgroup_is_removed(cgrp)) { | ||
1371 | cgroup_unlock(); | ||
1372 | return -ENODEV; | 1421 | return -ENODEV; |
1373 | } | 1422 | |
1374 | switch (type) { | 1423 | switch (type) { |
1375 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | 1424 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
1376 | retval = update_relax_domain_level(cs, val); | 1425 | retval = update_relax_domain_level(cs, val); |
@@ -1384,6 +1433,32 @@ static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val) | |||
1384 | } | 1433 | } |
1385 | 1434 | ||
1386 | /* | 1435 | /* |
1436 | * Common handling for a write to a "cpus" or "mems" file. | ||
1437 | */ | ||
1438 | static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, | ||
1439 | const char *buf) | ||
1440 | { | ||
1441 | int retval = 0; | ||
1442 | |||
1443 | if (!cgroup_lock_live_group(cgrp)) | ||
1444 | return -ENODEV; | ||
1445 | |||
1446 | switch (cft->private) { | ||
1447 | case FILE_CPULIST: | ||
1448 | retval = update_cpumask(cgroup_cs(cgrp), buf); | ||
1449 | break; | ||
1450 | case FILE_MEMLIST: | ||
1451 | retval = update_nodemask(cgroup_cs(cgrp), buf); | ||
1452 | break; | ||
1453 | default: | ||
1454 | retval = -EINVAL; | ||
1455 | break; | ||
1456 | } | ||
1457 | cgroup_unlock(); | ||
1458 | return retval; | ||
1459 | } | ||
1460 | |||
1461 | /* | ||
1387 | * These ascii lists should be read in a single call, by using a user | 1462 | * These ascii lists should be read in a single call, by using a user |
1388 | * buffer large enough to hold the entire map. If read in smaller | 1463 | * buffer large enough to hold the entire map. If read in smaller |
1389 | * chunks, there is no guarantee of atomicity. Since the display format | 1464 | * chunks, there is no guarantee of atomicity. Since the display format |
@@ -1479,6 +1554,9 @@ static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft) | |||
1479 | default: | 1554 | default: |
1480 | BUG(); | 1555 | BUG(); |
1481 | } | 1556 | } |
1557 | |||
1558 | /* Unreachable but makes gcc happy */ | ||
1559 | return 0; | ||
1482 | } | 1560 | } |
1483 | 1561 | ||
1484 | static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) | 1562 | static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) |
@@ -1491,6 +1569,9 @@ static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft) | |||
1491 | default: | 1569 | default: |
1492 | BUG(); | 1570 | BUG(); |
1493 | } | 1571 | } |
1572 | |||
1573 | /* Unreachable but makes gcc happy */ | ||
1574 | return 0; | ||
1494 | } | 1575 | } |
1495 | 1576 | ||
1496 | 1577 | ||
@@ -1502,14 +1583,16 @@ static struct cftype files[] = { | |||
1502 | { | 1583 | { |
1503 | .name = "cpus", | 1584 | .name = "cpus", |
1504 | .read = cpuset_common_file_read, | 1585 | .read = cpuset_common_file_read, |
1505 | .write = cpuset_common_file_write, | 1586 | .write_string = cpuset_write_resmask, |
1587 | .max_write_len = (100U + 6 * NR_CPUS), | ||
1506 | .private = FILE_CPULIST, | 1588 | .private = FILE_CPULIST, |
1507 | }, | 1589 | }, |
1508 | 1590 | ||
1509 | { | 1591 | { |
1510 | .name = "mems", | 1592 | .name = "mems", |
1511 | .read = cpuset_common_file_read, | 1593 | .read = cpuset_common_file_read, |
1512 | .write = cpuset_common_file_write, | 1594 | .write_string = cpuset_write_resmask, |
1595 | .max_write_len = (100U + 6 * MAX_NUMNODES), | ||
1513 | .private = FILE_MEMLIST, | 1596 | .private = FILE_MEMLIST, |
1514 | }, | 1597 | }, |
1515 | 1598 | ||
@@ -1677,15 +1760,9 @@ static struct cgroup_subsys_state *cpuset_create( | |||
1677 | } | 1760 | } |
1678 | 1761 | ||
1679 | /* | 1762 | /* |
1680 | * Locking note on the strange update_flag() call below: | ||
1681 | * | ||
1682 | * If the cpuset being removed has its flag 'sched_load_balance' | 1763 | * If the cpuset being removed has its flag 'sched_load_balance' |
1683 | * enabled, then simulate turning sched_load_balance off, which | 1764 | * enabled, then simulate turning sched_load_balance off, which |
1684 | * will call rebuild_sched_domains(). The get_online_cpus() | 1765 | * will call async_rebuild_sched_domains(). |
1685 | * call in rebuild_sched_domains() must not be made while holding | ||
1686 | * callback_mutex. Elsewhere the kernel nests callback_mutex inside | ||
1687 | * get_online_cpus() calls. So the reverse nesting would risk an | ||
1688 | * ABBA deadlock. | ||
1689 | */ | 1766 | */ |
1690 | 1767 | ||
1691 | static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | 1768 | static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) |
@@ -1704,7 +1781,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | |||
1704 | struct cgroup_subsys cpuset_subsys = { | 1781 | struct cgroup_subsys cpuset_subsys = { |
1705 | .name = "cpuset", | 1782 | .name = "cpuset", |
1706 | .create = cpuset_create, | 1783 | .create = cpuset_create, |
1707 | .destroy = cpuset_destroy, | 1784 | .destroy = cpuset_destroy, |
1708 | .can_attach = cpuset_can_attach, | 1785 | .can_attach = cpuset_can_attach, |
1709 | .attach = cpuset_attach, | 1786 | .attach = cpuset_attach, |
1710 | .populate = cpuset_populate, | 1787 | .populate = cpuset_populate, |
@@ -1790,13 +1867,13 @@ static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) | |||
1790 | scan.scan.heap = NULL; | 1867 | scan.scan.heap = NULL; |
1791 | scan.to = to->css.cgroup; | 1868 | scan.to = to->css.cgroup; |
1792 | 1869 | ||
1793 | if (cgroup_scan_tasks((struct cgroup_scanner *)&scan)) | 1870 | if (cgroup_scan_tasks(&scan.scan)) |
1794 | printk(KERN_ERR "move_member_tasks_to_cpuset: " | 1871 | printk(KERN_ERR "move_member_tasks_to_cpuset: " |
1795 | "cgroup_scan_tasks failed\n"); | 1872 | "cgroup_scan_tasks failed\n"); |
1796 | } | 1873 | } |
1797 | 1874 | ||
1798 | /* | 1875 | /* |
1799 | * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs | 1876 | * If CPU and/or memory hotplug handlers, below, unplug any CPUs |
1800 | * or memory nodes, we need to walk over the cpuset hierarchy, | 1877 | * or memory nodes, we need to walk over the cpuset hierarchy, |
1801 | * removing that CPU or node from all cpusets. If this removes the | 1878 | * removing that CPU or node from all cpusets. If this removes the |
1802 | * last CPU or node from a cpuset, then move the tasks in the empty | 1879 | * last CPU or node from a cpuset, then move the tasks in the empty |
@@ -1846,29 +1923,29 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs) | |||
1846 | */ | 1923 | */ |
1847 | static void scan_for_empty_cpusets(const struct cpuset *root) | 1924 | static void scan_for_empty_cpusets(const struct cpuset *root) |
1848 | { | 1925 | { |
1926 | LIST_HEAD(queue); | ||
1849 | struct cpuset *cp; /* scans cpusets being updated */ | 1927 | struct cpuset *cp; /* scans cpusets being updated */ |
1850 | struct cpuset *child; /* scans child cpusets of cp */ | 1928 | struct cpuset *child; /* scans child cpusets of cp */ |
1851 | struct list_head queue; | ||
1852 | struct cgroup *cont; | 1929 | struct cgroup *cont; |
1853 | 1930 | nodemask_t oldmems; | |
1854 | INIT_LIST_HEAD(&queue); | ||
1855 | 1931 | ||
1856 | list_add_tail((struct list_head *)&root->stack_list, &queue); | 1932 | list_add_tail((struct list_head *)&root->stack_list, &queue); |
1857 | 1933 | ||
1858 | while (!list_empty(&queue)) { | 1934 | while (!list_empty(&queue)) { |
1859 | cp = container_of(queue.next, struct cpuset, stack_list); | 1935 | cp = list_first_entry(&queue, struct cpuset, stack_list); |
1860 | list_del(queue.next); | 1936 | list_del(queue.next); |
1861 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { | 1937 | list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { |
1862 | child = cgroup_cs(cont); | 1938 | child = cgroup_cs(cont); |
1863 | list_add_tail(&child->stack_list, &queue); | 1939 | list_add_tail(&child->stack_list, &queue); |
1864 | } | 1940 | } |
1865 | cont = cp->css.cgroup; | ||
1866 | 1941 | ||
1867 | /* Continue past cpusets with all cpus, mems online */ | 1942 | /* Continue past cpusets with all cpus, mems online */ |
1868 | if (cpus_subset(cp->cpus_allowed, cpu_online_map) && | 1943 | if (cpus_subset(cp->cpus_allowed, cpu_online_map) && |
1869 | nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) | 1944 | nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY])) |
1870 | continue; | 1945 | continue; |
1871 | 1946 | ||
1947 | oldmems = cp->mems_allowed; | ||
1948 | |||
1872 | /* Remove offline cpus and mems from this cpuset. */ | 1949 | /* Remove offline cpus and mems from this cpuset. */ |
1873 | mutex_lock(&callback_mutex); | 1950 | mutex_lock(&callback_mutex); |
1874 | cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map); | 1951 | cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map); |
@@ -1880,39 +1957,14 @@ static void scan_for_empty_cpusets(const struct cpuset *root) | |||
1880 | if (cpus_empty(cp->cpus_allowed) || | 1957 | if (cpus_empty(cp->cpus_allowed) || |
1881 | nodes_empty(cp->mems_allowed)) | 1958 | nodes_empty(cp->mems_allowed)) |
1882 | remove_tasks_in_empty_cpuset(cp); | 1959 | remove_tasks_in_empty_cpuset(cp); |
1960 | else { | ||
1961 | update_tasks_cpumask(cp, NULL); | ||
1962 | update_tasks_nodemask(cp, &oldmems); | ||
1963 | } | ||
1883 | } | 1964 | } |
1884 | } | 1965 | } |
1885 | 1966 | ||
1886 | /* | 1967 | /* |
1887 | * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track | ||
1888 | * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to | ||
1889 | * track what's online after any CPU or memory node hotplug or unplug event. | ||
1890 | * | ||
1891 | * Since there are two callers of this routine, one for CPU hotplug | ||
1892 | * events and one for memory node hotplug events, we could have coded | ||
1893 | * two separate routines here. We code it as a single common routine | ||
1894 | * in order to minimize text size. | ||
1895 | */ | ||
1896 | |||
1897 | static void common_cpu_mem_hotplug_unplug(int rebuild_sd) | ||
1898 | { | ||
1899 | cgroup_lock(); | ||
1900 | |||
1901 | top_cpuset.cpus_allowed = cpu_online_map; | ||
1902 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | ||
1903 | scan_for_empty_cpusets(&top_cpuset); | ||
1904 | |||
1905 | /* | ||
1906 | * Scheduler destroys domains on hotplug events. | ||
1907 | * Rebuild them based on the current settings. | ||
1908 | */ | ||
1909 | if (rebuild_sd) | ||
1910 | rebuild_sched_domains(); | ||
1911 | |||
1912 | cgroup_unlock(); | ||
1913 | } | ||
1914 | |||
1915 | /* | ||
1916 | * The top_cpuset tracks what CPUs and Memory Nodes are online, | 1968 | * The top_cpuset tracks what CPUs and Memory Nodes are online, |
1917 | * period. This is necessary in order to make cpusets transparent | 1969 | * period. This is necessary in order to make cpusets transparent |
1918 | * (of no effect) on systems that are actively using CPU hotplug | 1970 | * (of no effect) on systems that are actively using CPU hotplug |
@@ -1920,40 +1972,52 @@ static void common_cpu_mem_hotplug_unplug(int rebuild_sd) | |||
1920 | * | 1972 | * |
1921 | * This routine ensures that top_cpuset.cpus_allowed tracks | 1973 | * This routine ensures that top_cpuset.cpus_allowed tracks |
1922 | * cpu_online_map on each CPU hotplug (cpuhp) event. | 1974 | * cpu_online_map on each CPU hotplug (cpuhp) event. |
1975 | * | ||
1976 | * Called within get_online_cpus(). Needs to call cgroup_lock() | ||
1977 | * before calling generate_sched_domains(). | ||
1923 | */ | 1978 | */ |
1924 | 1979 | static int cpuset_track_online_cpus(struct notifier_block *unused_nb, | |
1925 | static int cpuset_handle_cpuhp(struct notifier_block *unused_nb, | ||
1926 | unsigned long phase, void *unused_cpu) | 1980 | unsigned long phase, void *unused_cpu) |
1927 | { | 1981 | { |
1982 | struct sched_domain_attr *attr; | ||
1983 | cpumask_t *doms; | ||
1984 | int ndoms; | ||
1985 | |||
1928 | switch (phase) { | 1986 | switch (phase) { |
1929 | case CPU_UP_CANCELED: | ||
1930 | case CPU_UP_CANCELED_FROZEN: | ||
1931 | case CPU_DOWN_FAILED: | ||
1932 | case CPU_DOWN_FAILED_FROZEN: | ||
1933 | case CPU_ONLINE: | 1987 | case CPU_ONLINE: |
1934 | case CPU_ONLINE_FROZEN: | 1988 | case CPU_ONLINE_FROZEN: |
1935 | case CPU_DEAD: | 1989 | case CPU_DEAD: |
1936 | case CPU_DEAD_FROZEN: | 1990 | case CPU_DEAD_FROZEN: |
1937 | common_cpu_mem_hotplug_unplug(1); | ||
1938 | break; | 1991 | break; |
1992 | |||
1939 | default: | 1993 | default: |
1940 | return NOTIFY_DONE; | 1994 | return NOTIFY_DONE; |
1941 | } | 1995 | } |
1942 | 1996 | ||
1997 | cgroup_lock(); | ||
1998 | top_cpuset.cpus_allowed = cpu_online_map; | ||
1999 | scan_for_empty_cpusets(&top_cpuset); | ||
2000 | ndoms = generate_sched_domains(&doms, &attr); | ||
2001 | cgroup_unlock(); | ||
2002 | |||
2003 | /* Have scheduler rebuild the domains */ | ||
2004 | partition_sched_domains(ndoms, doms, attr); | ||
2005 | |||
1943 | return NOTIFY_OK; | 2006 | return NOTIFY_OK; |
1944 | } | 2007 | } |
1945 | 2008 | ||
1946 | #ifdef CONFIG_MEMORY_HOTPLUG | 2009 | #ifdef CONFIG_MEMORY_HOTPLUG |
1947 | /* | 2010 | /* |
1948 | * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. | 2011 | * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. |
1949 | * Call this routine anytime after you change | 2012 | * Call this routine anytime after node_states[N_HIGH_MEMORY] changes. |
1950 | * node_states[N_HIGH_MEMORY]. | 2013 | * See also the previous routine cpuset_track_online_cpus(). |
1951 | * See also the previous routine cpuset_handle_cpuhp(). | ||
1952 | */ | 2014 | */ |
1953 | |||
1954 | void cpuset_track_online_nodes(void) | 2015 | void cpuset_track_online_nodes(void) |
1955 | { | 2016 | { |
1956 | common_cpu_mem_hotplug_unplug(0); | 2017 | cgroup_lock(); |
2018 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | ||
2019 | scan_for_empty_cpusets(&top_cpuset); | ||
2020 | cgroup_unlock(); | ||
1957 | } | 2021 | } |
1958 | #endif | 2022 | #endif |
1959 | 2023 | ||
@@ -1968,11 +2032,10 @@ void __init cpuset_init_smp(void) | |||
1968 | top_cpuset.cpus_allowed = cpu_online_map; | 2032 | top_cpuset.cpus_allowed = cpu_online_map; |
1969 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | 2033 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
1970 | 2034 | ||
1971 | hotcpu_notifier(cpuset_handle_cpuhp, 0); | 2035 | hotcpu_notifier(cpuset_track_online_cpus, 0); |
1972 | } | 2036 | } |
1973 | 2037 | ||
1974 | /** | 2038 | /** |
1975 | |||
1976 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. | 2039 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. |
1977 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | 2040 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
1978 | * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. | 2041 | * @pmask: pointer to cpumask_t variable to receive cpus_allowed set. |
diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 10e43fd8b721..b3179dad71be 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c | |||
@@ -145,8 +145,11 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) | |||
145 | d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; | 145 | d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; |
146 | tmp = d->swapin_delay_total + tsk->delays->swapin_delay; | 146 | tmp = d->swapin_delay_total + tsk->delays->swapin_delay; |
147 | d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; | 147 | d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; |
148 | tmp = d->freepages_delay_total + tsk->delays->freepages_delay; | ||
149 | d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp; | ||
148 | d->blkio_count += tsk->delays->blkio_count; | 150 | d->blkio_count += tsk->delays->blkio_count; |
149 | d->swapin_count += tsk->delays->swapin_count; | 151 | d->swapin_count += tsk->delays->swapin_count; |
152 | d->freepages_count += tsk->delays->freepages_count; | ||
150 | spin_unlock_irqrestore(&tsk->delays->lock, flags); | 153 | spin_unlock_irqrestore(&tsk->delays->lock, flags); |
151 | 154 | ||
152 | done: | 155 | done: |
@@ -165,3 +168,16 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk) | |||
165 | return ret; | 168 | return ret; |
166 | } | 169 | } |
167 | 170 | ||
171 | void __delayacct_freepages_start(void) | ||
172 | { | ||
173 | delayacct_start(¤t->delays->freepages_start); | ||
174 | } | ||
175 | |||
176 | void __delayacct_freepages_end(void) | ||
177 | { | ||
178 | delayacct_end(¤t->delays->freepages_start, | ||
179 | ¤t->delays->freepages_end, | ||
180 | ¤t->delays->freepages_delay, | ||
181 | ¤t->delays->freepages_count); | ||
182 | } | ||
183 | |||
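For context: the new freepages hooks mirror the existing blkio/swapin delay accounting and are meant to bracket direct page reclaim. A minimal, hypothetical caller, assuming the usual delayacct_freepages_start()/delayacct_freepages_end() inline wrappers in <linux/delayacct.h> that no-op when delay accounting is disabled:

	/* Hypothetical reclaim-path caller; shrink_everything() is a placeholder. */
	static unsigned long reclaim_with_accounting(void)
	{
		unsigned long nr_reclaimed;

		delayacct_freepages_start();		/* record the start timestamp */
		nr_reclaimed = shrink_everything();	/* the actual reclaim work */
		delayacct_freepages_end();		/* accumulate freepages_delay/_count */

		return nr_reclaimed;
	}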
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c new file mode 100644 index 000000000000..c1d4d5b4c61c --- /dev/null +++ b/kernel/dma-coherent.c | |||
@@ -0,0 +1,153 @@ | |||
1 | /* | ||
2 | * Coherent per-device memory handling. | ||
3 | * Borrowed from i386 | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/dma-mapping.h> | ||
7 | |||
8 | struct dma_coherent_mem { | ||
9 | void *virt_base; | ||
10 | u32 device_base; | ||
11 | int size; | ||
12 | int flags; | ||
13 | unsigned long *bitmap; | ||
14 | }; | ||
15 | |||
16 | int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | ||
17 | dma_addr_t device_addr, size_t size, int flags) | ||
18 | { | ||
19 | void __iomem *mem_base = NULL; | ||
20 | int pages = size >> PAGE_SHIFT; | ||
21 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | ||
22 | |||
23 | if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) | ||
24 | goto out; | ||
25 | if (!size) | ||
26 | goto out; | ||
27 | if (dev->dma_mem) | ||
28 | goto out; | ||
29 | |||
30 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | ||
31 | |||
32 | mem_base = ioremap(bus_addr, size); | ||
33 | if (!mem_base) | ||
34 | goto out; | ||
35 | |||
36 | dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | ||
37 | if (!dev->dma_mem) | ||
38 | goto out; | ||
39 | dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
40 | if (!dev->dma_mem->bitmap) | ||
41 | goto free1_out; | ||
42 | |||
43 | dev->dma_mem->virt_base = mem_base; | ||
44 | dev->dma_mem->device_base = device_addr; | ||
45 | dev->dma_mem->size = pages; | ||
46 | dev->dma_mem->flags = flags; | ||
47 | |||
48 | if (flags & DMA_MEMORY_MAP) | ||
49 | return DMA_MEMORY_MAP; | ||
50 | |||
51 | return DMA_MEMORY_IO; | ||
52 | |||
53 | free1_out: | ||
54 | kfree(dev->dma_mem); | ||
55 | out: | ||
56 | if (mem_base) | ||
57 | iounmap(mem_base); | ||
58 | return 0; | ||
59 | } | ||
60 | EXPORT_SYMBOL(dma_declare_coherent_memory); | ||
61 | |||
62 | void dma_release_declared_memory(struct device *dev) | ||
63 | { | ||
64 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
65 | |||
66 | if (!mem) | ||
67 | return; | ||
68 | dev->dma_mem = NULL; | ||
69 | iounmap(mem->virt_base); | ||
70 | kfree(mem->bitmap); | ||
71 | kfree(mem); | ||
72 | } | ||
73 | EXPORT_SYMBOL(dma_release_declared_memory); | ||
74 | |||
75 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
76 | dma_addr_t device_addr, size_t size) | ||
77 | { | ||
78 | struct dma_coherent_mem *mem = dev->dma_mem; | ||
79 | int pos, err; | ||
80 | |||
81 | size += device_addr & ~PAGE_MASK; | ||
82 | |||
83 | if (!mem) | ||
84 | return ERR_PTR(-EINVAL); | ||
85 | |||
86 | pos = (device_addr - mem->device_base) >> PAGE_SHIFT; | ||
87 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); | ||
88 | if (err != 0) | ||
89 | return ERR_PTR(err); | ||
90 | return mem->virt_base + (pos << PAGE_SHIFT); | ||
91 | } | ||
92 | EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | ||
93 | |||
94 | /** | ||
95 | * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area | ||
96 | * | ||
97 | * @dev: device from which we allocate memory | ||
98 | * @size: size of requested memory area | ||
99 | * @dma_handle: This will be filled with the correct dma handle | ||
100 | * @ret: This pointer will be filled with the virtual address | ||
101 | * to allocated area. | ||
102 | * | ||
103 | * This function should be only called from per-arch dma_alloc_coherent() | ||
104 | * to support allocation from per-device coherent memory pools. | ||
105 | * | ||
106 | * Returns 0 if dma_alloc_coherent should continue with allocating from | ||
107 | * generic memory areas, or !0 if dma_alloc_coherent should return @ret. | ||
108 | */ | ||
109 | int dma_alloc_from_coherent(struct device *dev, ssize_t size, | ||
110 | dma_addr_t *dma_handle, void **ret) | ||
111 | { | ||
112 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
113 | int order = get_order(size); | ||
114 | |||
115 | if (mem) { | ||
116 | int page = bitmap_find_free_region(mem->bitmap, mem->size, | ||
117 | order); | ||
118 | if (page >= 0) { | ||
119 | *dma_handle = mem->device_base + (page << PAGE_SHIFT); | ||
120 | *ret = mem->virt_base + (page << PAGE_SHIFT); | ||
121 | memset(*ret, 0, size); | ||
122 | } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) | ||
123 | *ret = NULL; | ||
124 | } | ||
125 | return (mem != NULL); | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool | ||
130 | * @dev: device from which the memory was allocated | ||
131 | * @order: the order of pages allocated | ||
132 | * @vaddr: virtual address of allocated pages | ||
133 | * | ||
134 | * This checks whether the memory was allocated from the per-device | ||
135 | * coherent memory pool and if so, releases that memory. | ||
136 | * | ||
137 | * Returns 1 if we correctly released the memory, or 0 if | ||
138 | * dma_release_coherent() should proceed with releasing memory from | ||
139 | * generic pools. | ||
140 | */ | ||
141 | int dma_release_from_coherent(struct device *dev, int order, void *vaddr) | ||
142 | { | ||
143 | struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; | ||
144 | |||
145 | if (mem && vaddr >= mem->virt_base && vaddr < | ||
146 | (mem->virt_base + (mem->size << PAGE_SHIFT))) { | ||
147 | int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; | ||
148 | |||
149 | bitmap_release_region(mem->bitmap, page, order); | ||
150 | return 1; | ||
151 | } | ||
152 | return 0; | ||
153 | } | ||
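For illustration: per the kernel-doc above, dma_alloc_from_coherent()/dma_release_from_coherent() are only meant to be called from an architecture's dma_alloc_coherent()/dma_free_coherent(). A simplified sketch of that calling convention (the arch_* names are hypothetical and the fallback path is reduced to the bare minimum):

	void *arch_dma_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t gfp)
	{
		void *ret;

		/* If the device declared a coherent pool, use its result. */
		if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
			return ret;

		/* Simplified generic fallback; real architectures do more here. */
		ret = (void *)__get_free_pages(gfp, get_order(size));
		if (ret) {
			memset(ret, 0, size);
			*dma_handle = virt_to_phys(ret);
		}
		return ret;
	}

	void arch_dma_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
	{
		int order = get_order(size);

		/* dma_release_from_coherent() returns 1 if the buffer came from the pool. */
		if (dma_release_from_coherent(dev, order, vaddr))
			return;

		free_pages((unsigned long)vaddr, order);
	}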
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c index a9e6bad9f706..0d407e886735 100644 --- a/kernel/exec_domain.c +++ b/kernel/exec_domain.c | |||
@@ -65,7 +65,7 @@ lookup_exec_domain(u_long personality) | |||
65 | goto out; | 65 | goto out; |
66 | } | 66 | } |
67 | 67 | ||
68 | #ifdef CONFIG_KMOD | 68 | #ifdef CONFIG_MODULES |
69 | read_unlock(&exec_domains_lock); | 69 | read_unlock(&exec_domains_lock); |
70 | request_module("personality-%ld", pers); | 70 | request_module("personality-%ld", pers); |
71 | read_lock(&exec_domains_lock); | 71 | read_lock(&exec_domains_lock); |
@@ -168,7 +168,6 @@ __set_personality(u_long personality) | |||
168 | current->personality = personality; | 168 | current->personality = personality; |
169 | oep = current_thread_info()->exec_domain; | 169 | oep = current_thread_info()->exec_domain; |
170 | current_thread_info()->exec_domain = ep; | 170 | current_thread_info()->exec_domain = ep; |
171 | set_fs_altroot(); | ||
172 | 171 | ||
173 | module_put(oep->module); | 172 | module_put(oep->module); |
174 | return 0; | 173 | return 0; |
diff --git a/kernel/exit.c b/kernel/exit.c index 93d2711b9381..85a83c831856 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/resource.h> | 46 | #include <linux/resource.h> |
47 | #include <linux/blkdev.h> | 47 | #include <linux/blkdev.h> |
48 | #include <linux/task_io_accounting_ops.h> | 48 | #include <linux/task_io_accounting_ops.h> |
49 | #include <linux/tracehook.h> | ||
49 | 50 | ||
50 | #include <asm/uaccess.h> | 51 | #include <asm/uaccess.h> |
51 | #include <asm/unistd.h> | 52 | #include <asm/unistd.h> |
@@ -85,7 +86,6 @@ static void __exit_signal(struct task_struct *tsk) | |||
85 | BUG_ON(!sig); | 86 | BUG_ON(!sig); |
86 | BUG_ON(!atomic_read(&sig->count)); | 87 | BUG_ON(!atomic_read(&sig->count)); |
87 | 88 | ||
88 | rcu_read_lock(); | ||
89 | sighand = rcu_dereference(tsk->sighand); | 89 | sighand = rcu_dereference(tsk->sighand); |
90 | spin_lock(&sighand->siglock); | 90 | spin_lock(&sighand->siglock); |
91 | 91 | ||
@@ -112,15 +112,16 @@ static void __exit_signal(struct task_struct *tsk) | |||
112 | * We won't ever get here for the group leader, since it | 112 | * We won't ever get here for the group leader, since it |
113 | * will have been the last reference on the signal_struct. | 113 | * will have been the last reference on the signal_struct. |
114 | */ | 114 | */ |
115 | sig->utime = cputime_add(sig->utime, tsk->utime); | 115 | sig->utime = cputime_add(sig->utime, task_utime(tsk)); |
116 | sig->stime = cputime_add(sig->stime, tsk->stime); | 116 | sig->stime = cputime_add(sig->stime, task_stime(tsk)); |
117 | sig->gtime = cputime_add(sig->gtime, tsk->gtime); | 117 | sig->gtime = cputime_add(sig->gtime, task_gtime(tsk)); |
118 | sig->min_flt += tsk->min_flt; | 118 | sig->min_flt += tsk->min_flt; |
119 | sig->maj_flt += tsk->maj_flt; | 119 | sig->maj_flt += tsk->maj_flt; |
120 | sig->nvcsw += tsk->nvcsw; | 120 | sig->nvcsw += tsk->nvcsw; |
121 | sig->nivcsw += tsk->nivcsw; | 121 | sig->nivcsw += tsk->nivcsw; |
122 | sig->inblock += task_io_get_inblock(tsk); | 122 | sig->inblock += task_io_get_inblock(tsk); |
123 | sig->oublock += task_io_get_oublock(tsk); | 123 | sig->oublock += task_io_get_oublock(tsk); |
124 | task_io_accounting_add(&sig->ioac, &tsk->ioac); | ||
124 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; | 125 | sig->sum_sched_runtime += tsk->se.sum_exec_runtime; |
125 | sig = NULL; /* Marker for below. */ | 126 | sig = NULL; /* Marker for below. */ |
126 | } | 127 | } |
@@ -136,7 +137,6 @@ static void __exit_signal(struct task_struct *tsk) | |||
136 | tsk->signal = NULL; | 137 | tsk->signal = NULL; |
137 | tsk->sighand = NULL; | 138 | tsk->sighand = NULL; |
138 | spin_unlock(&sighand->siglock); | 139 | spin_unlock(&sighand->siglock); |
139 | rcu_read_unlock(); | ||
140 | 140 | ||
141 | __cleanup_sighand(sighand); | 141 | __cleanup_sighand(sighand); |
142 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); | 142 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); |
@@ -152,27 +152,17 @@ static void delayed_put_task_struct(struct rcu_head *rhp) | |||
152 | put_task_struct(container_of(rhp, struct task_struct, rcu)); | 152 | put_task_struct(container_of(rhp, struct task_struct, rcu)); |
153 | } | 153 | } |
154 | 154 | ||
155 | /* | ||
156 | * Do final ptrace-related cleanup of a zombie being reaped. | ||
157 | * | ||
158 | * Called with write_lock(&tasklist_lock) held. | ||
159 | */ | ||
160 | static void ptrace_release_task(struct task_struct *p) | ||
161 | { | ||
162 | BUG_ON(!list_empty(&p->ptraced)); | ||
163 | ptrace_unlink(p); | ||
164 | BUG_ON(!list_empty(&p->ptrace_entry)); | ||
165 | } | ||
166 | 155 | ||
167 | void release_task(struct task_struct * p) | 156 | void release_task(struct task_struct * p) |
168 | { | 157 | { |
169 | struct task_struct *leader; | 158 | struct task_struct *leader; |
170 | int zap_leader; | 159 | int zap_leader; |
171 | repeat: | 160 | repeat: |
161 | tracehook_prepare_release_task(p); | ||
172 | atomic_dec(&p->user->processes); | 162 | atomic_dec(&p->user->processes); |
173 | proc_flush_task(p); | 163 | proc_flush_task(p); |
174 | write_lock_irq(&tasklist_lock); | 164 | write_lock_irq(&tasklist_lock); |
175 | ptrace_release_task(p); | 165 | tracehook_finish_release_task(p); |
176 | __exit_signal(p); | 166 | __exit_signal(p); |
177 | 167 | ||
178 | /* | 168 | /* |
@@ -194,6 +184,13 @@ repeat: | |||
194 | * that case. | 184 | * that case. |
195 | */ | 185 | */ |
196 | zap_leader = task_detached(leader); | 186 | zap_leader = task_detached(leader); |
187 | |||
188 | /* | ||
189 | * This maintains the invariant that release_task() | ||
190 | * only runs on a task in EXIT_DEAD, just for sanity. | ||
191 | */ | ||
192 | if (zap_leader) | ||
193 | leader->exit_state = EXIT_DEAD; | ||
197 | } | 194 | } |
198 | 195 | ||
199 | write_unlock_irq(&tasklist_lock); | 196 | write_unlock_irq(&tasklist_lock); |
@@ -432,7 +429,7 @@ void daemonize(const char *name, ...) | |||
432 | * We don't want to have TIF_FREEZE set if the system-wide hibernation | 429 | * We don't want to have TIF_FREEZE set if the system-wide hibernation |
433 | * or suspend transition begins right now. | 430 | * or suspend transition begins right now. |
434 | */ | 431 | */ |
435 | current->flags |= PF_NOFREEZE; | 432 | current->flags |= (PF_NOFREEZE | PF_KTHREAD); |
436 | 433 | ||
437 | if (current->nsproxy != &init_nsproxy) { | 434 | if (current->nsproxy != &init_nsproxy) { |
438 | get_nsproxy(&init_nsproxy); | 435 | get_nsproxy(&init_nsproxy); |
@@ -557,8 +554,6 @@ void put_fs_struct(struct fs_struct *fs) | |||
557 | if (atomic_dec_and_test(&fs->count)) { | 554 | if (atomic_dec_and_test(&fs->count)) { |
558 | path_put(&fs->root); | 555 | path_put(&fs->root); |
559 | path_put(&fs->pwd); | 556 | path_put(&fs->pwd); |
560 | if (fs->altroot.dentry) | ||
561 | path_put(&fs->altroot); | ||
562 | kmem_cache_free(fs_cachep, fs); | 557 | kmem_cache_free(fs_cachep, fs); |
563 | } | 558 | } |
564 | } | 559 | } |
@@ -588,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) | |||
588 | * If there are other users of the mm and the owner (us) is exiting | 583 | * If there are other users of the mm and the owner (us) is exiting |
589 | * we need to find a new owner to take on the responsibility. | 584 | * we need to find a new owner to take on the responsibility. |
590 | */ | 585 | */ |
591 | if (!mm) | ||
592 | return 0; | ||
593 | if (atomic_read(&mm->mm_users) <= 1) | 586 | if (atomic_read(&mm->mm_users) <= 1) |
594 | return 0; | 587 | return 0; |
595 | if (mm->owner != p) | 588 | if (mm->owner != p) |
@@ -632,6 +625,16 @@ retry: | |||
632 | } while_each_thread(g, c); | 625 | } while_each_thread(g, c); |
633 | 626 | ||
634 | read_unlock(&tasklist_lock); | 627 | read_unlock(&tasklist_lock); |
628 | /* | ||
629 | * We found no owner yet mm_users > 1: this implies that we are | ||
630 | * most likely racing with swapoff (try_to_unuse()) or /proc or | ||
631 | * ptrace or page migration (get_task_mm()). Mark owner as NULL, | ||
632 | * so that subsystems can understand the callback and take action. | ||
633 | */ | ||
634 | down_write(&mm->mmap_sem); | ||
635 | cgroup_mm_owner_callbacks(mm->owner, NULL); | ||
636 | mm->owner = NULL; | ||
637 | up_write(&mm->mmap_sem); | ||
635 | return; | 638 | return; |
636 | 639 | ||
637 | assign_new_owner: | 640 | assign_new_owner: |
@@ -666,26 +669,40 @@ assign_new_owner: | |||
666 | static void exit_mm(struct task_struct * tsk) | 669 | static void exit_mm(struct task_struct * tsk) |
667 | { | 670 | { |
668 | struct mm_struct *mm = tsk->mm; | 671 | struct mm_struct *mm = tsk->mm; |
672 | struct core_state *core_state; | ||
669 | 673 | ||
670 | mm_release(tsk, mm); | 674 | mm_release(tsk, mm); |
671 | if (!mm) | 675 | if (!mm) |
672 | return; | 676 | return; |
673 | /* | 677 | /* |
674 | * Serialize with any possible pending coredump. | 678 | * Serialize with any possible pending coredump. |
675 | * We must hold mmap_sem around checking core_waiters | 679 | * We must hold mmap_sem around checking core_state |
676 | * and clearing tsk->mm. The core-inducing thread | 680 | * and clearing tsk->mm. The core-inducing thread |
677 | * will increment core_waiters for each thread in the | 681 | * will increment ->nr_threads for each thread in the |
678 | * group with ->mm != NULL. | 682 | * group with ->mm != NULL. |
679 | */ | 683 | */ |
680 | down_read(&mm->mmap_sem); | 684 | down_read(&mm->mmap_sem); |
681 | if (mm->core_waiters) { | 685 | core_state = mm->core_state; |
686 | if (core_state) { | ||
687 | struct core_thread self; | ||
682 | up_read(&mm->mmap_sem); | 688 | up_read(&mm->mmap_sem); |
683 | down_write(&mm->mmap_sem); | ||
684 | if (!--mm->core_waiters) | ||
685 | complete(mm->core_startup_done); | ||
686 | up_write(&mm->mmap_sem); | ||
687 | 689 | ||
688 | wait_for_completion(&mm->core_done); | 690 | self.task = tsk; |
691 | self.next = xchg(&core_state->dumper.next, &self); | ||
692 | /* | ||
693 | * Implies mb(), the result of xchg() must be visible | ||
694 | * to core_state->dumper. | ||
695 | */ | ||
696 | if (atomic_dec_and_test(&core_state->nr_threads)) | ||
697 | complete(&core_state->startup); | ||
698 | |||
699 | for (;;) { | ||
700 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
701 | if (!self.task) /* see coredump_finish() */ | ||
702 | break; | ||
703 | schedule(); | ||
704 | } | ||
705 | __set_task_state(tsk, TASK_RUNNING); | ||
689 | down_read(&mm->mmap_sem); | 706 | down_read(&mm->mmap_sem); |
690 | } | 707 | } |
691 | atomic_inc(&mm->mm_count); | 708 | atomic_inc(&mm->mm_count); |
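Aside: the exiting thread above parks itself on core_state->dumper and waits for self.task to be cleared. The wake-up comes from the dumper side in fs/exec.c, which is not part of this file; roughly, its coredump_finish() counterpart is expected to do something like the following (sketch only, details may differ):

	static void coredump_finish(struct mm_struct *mm)
	{
		struct core_thread *curr, *next;
		struct task_struct *task;

		next = mm->core_state->dumper.next;
		while ((curr = next) != NULL) {
			next = curr->next;
			task = curr->task;
			/*
			 * Clearing ->task is what the "if (!self.task)" test in
			 * exit_mm() waits for; wake the parked thread afterwards.
			 */
			smp_mb();
			curr->task = NULL;
			wake_up_process(task);
		}
		mm->core_state = NULL;
	}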
@@ -822,26 +839,50 @@ static void reparent_thread(struct task_struct *p, struct task_struct *father) | |||
822 | * the child reaper process (ie "init") in our pid | 839 | * the child reaper process (ie "init") in our pid |
823 | * space. | 840 | * space. |
824 | */ | 841 | */ |
842 | static struct task_struct *find_new_reaper(struct task_struct *father) | ||
843 | { | ||
844 | struct pid_namespace *pid_ns = task_active_pid_ns(father); | ||
845 | struct task_struct *thread; | ||
846 | |||
847 | thread = father; | ||
848 | while_each_thread(father, thread) { | ||
849 | if (thread->flags & PF_EXITING) | ||
850 | continue; | ||
851 | if (unlikely(pid_ns->child_reaper == father)) | ||
852 | pid_ns->child_reaper = thread; | ||
853 | return thread; | ||
854 | } | ||
855 | |||
856 | if (unlikely(pid_ns->child_reaper == father)) { | ||
857 | write_unlock_irq(&tasklist_lock); | ||
858 | if (unlikely(pid_ns == &init_pid_ns)) | ||
859 | panic("Attempted to kill init!"); | ||
860 | |||
861 | zap_pid_ns_processes(pid_ns); | ||
862 | write_lock_irq(&tasklist_lock); | ||
863 | /* | ||
864 | * We can not clear ->child_reaper or leave it alone. | ||
865 | * There may be stealth EXIT_DEAD tasks on ->children, | ||
866 | * forget_original_parent() must move them somewhere. | ||
867 | */ | ||
868 | pid_ns->child_reaper = init_pid_ns.child_reaper; | ||
869 | } | ||
870 | |||
871 | return pid_ns->child_reaper; | ||
872 | } | ||
873 | |||
825 | static void forget_original_parent(struct task_struct *father) | 874 | static void forget_original_parent(struct task_struct *father) |
826 | { | 875 | { |
827 | struct task_struct *p, *n, *reaper = father; | 876 | struct task_struct *p, *n, *reaper; |
828 | LIST_HEAD(ptrace_dead); | 877 | LIST_HEAD(ptrace_dead); |
829 | 878 | ||
830 | write_lock_irq(&tasklist_lock); | 879 | write_lock_irq(&tasklist_lock); |
831 | 880 | reaper = find_new_reaper(father); | |
832 | /* | 881 | /* |
833 | * First clean up ptrace if we were using it. | 882 | * First clean up ptrace if we were using it. |
834 | */ | 883 | */ |
835 | ptrace_exit(father, &ptrace_dead); | 884 | ptrace_exit(father, &ptrace_dead); |
836 | 885 | ||
837 | do { | ||
838 | reaper = next_thread(reaper); | ||
839 | if (reaper == father) { | ||
840 | reaper = task_child_reaper(father); | ||
841 | break; | ||
842 | } | ||
843 | } while (reaper->flags & PF_EXITING); | ||
844 | |||
845 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 886 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
846 | p->real_parent = reaper; | 887 | p->real_parent = reaper; |
847 | if (p->parent == father) { | 888 | if (p->parent == father) { |
@@ -863,7 +904,8 @@ static void forget_original_parent(struct task_struct *father) | |||
863 | */ | 904 | */ |
864 | static void exit_notify(struct task_struct *tsk, int group_dead) | 905 | static void exit_notify(struct task_struct *tsk, int group_dead) |
865 | { | 906 | { |
866 | int state; | 907 | int signal; |
908 | void *cookie; | ||
867 | 909 | ||
868 | /* | 910 | /* |
869 | * This does two things: | 911 | * This does two things: |
@@ -900,33 +942,24 @@ static void exit_notify(struct task_struct *tsk, int group_dead) | |||
900 | !capable(CAP_KILL)) | 942 | !capable(CAP_KILL)) |
901 | tsk->exit_signal = SIGCHLD; | 943 | tsk->exit_signal = SIGCHLD; |
902 | 944 | ||
903 | /* If something other than our normal parent is ptracing us, then | 945 | signal = tracehook_notify_death(tsk, &cookie, group_dead); |
904 | * send it a SIGCHLD instead of honoring exit_signal. exit_signal | 946 | if (signal >= 0) |
905 | * only has special meaning to our real parent. | 947 | signal = do_notify_parent(tsk, signal); |
906 | */ | ||
907 | if (!task_detached(tsk) && thread_group_empty(tsk)) { | ||
908 | int signal = ptrace_reparented(tsk) ? | ||
909 | SIGCHLD : tsk->exit_signal; | ||
910 | do_notify_parent(tsk, signal); | ||
911 | } else if (tsk->ptrace) { | ||
912 | do_notify_parent(tsk, SIGCHLD); | ||
913 | } | ||
914 | 948 | ||
915 | state = EXIT_ZOMBIE; | 949 | tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE; |
916 | if (task_detached(tsk) && likely(!tsk->ptrace)) | ||
917 | state = EXIT_DEAD; | ||
918 | tsk->exit_state = state; | ||
919 | 950 | ||
920 | /* mt-exec, de_thread() is waiting for us */ | 951 | /* mt-exec, de_thread() is waiting for us */ |
921 | if (thread_group_leader(tsk) && | 952 | if (thread_group_leader(tsk) && |
922 | tsk->signal->notify_count < 0 && | 953 | tsk->signal->group_exit_task && |
923 | tsk->signal->group_exit_task) | 954 | tsk->signal->notify_count < 0) |
924 | wake_up_process(tsk->signal->group_exit_task); | 955 | wake_up_process(tsk->signal->group_exit_task); |
925 | 956 | ||
926 | write_unlock_irq(&tasklist_lock); | 957 | write_unlock_irq(&tasklist_lock); |
927 | 958 | ||
959 | tracehook_report_death(tsk, signal, cookie, group_dead); | ||
960 | |||
928 | /* If the process is dead, release it - nobody will wait for it */ | 961 | /* If the process is dead, release it - nobody will wait for it */ |
929 | if (state == EXIT_DEAD) | 962 | if (signal == DEATH_REAP) |
930 | release_task(tsk); | 963 | release_task(tsk); |
931 | } | 964 | } |
932 | 965 | ||
@@ -958,39 +991,6 @@ static void check_stack_usage(void) | |||
958 | static inline void check_stack_usage(void) {} | 991 | static inline void check_stack_usage(void) {} |
959 | #endif | 992 | #endif |
960 | 993 | ||
961 | static inline void exit_child_reaper(struct task_struct *tsk) | ||
962 | { | ||
963 | if (likely(tsk->group_leader != task_child_reaper(tsk))) | ||
964 | return; | ||
965 | |||
966 | if (tsk->nsproxy->pid_ns == &init_pid_ns) | ||
967 | panic("Attempted to kill init!"); | ||
968 | |||
969 | /* | ||
970 | * @tsk is the last thread in the 'cgroup-init' and is exiting. | ||
971 | * Terminate all remaining processes in the namespace and reap them | ||
972 | * before exiting @tsk. | ||
973 | * | ||
974 | * Note that @tsk (last thread of cgroup-init) may not necessarily | ||
975 | * be the child-reaper (i.e main thread of cgroup-init) of the | ||
976 | * namespace i.e the child_reaper may have already exited. | ||
977 | * | ||
978 | * Even after a child_reaper exits, we let it inherit orphaned children, | ||
979 | * because, pid_ns->child_reaper remains valid as long as there is | ||
980 | * at least one living sub-thread in the cgroup init. | ||
981 | |||
982 | * This living sub-thread of the cgroup-init will be notified when | ||
983 | * a child inherited by the 'child-reaper' exits (do_notify_parent() | ||
984 | * uses __group_send_sig_info()). Further, when reaping child processes, | ||
985 | * do_wait() iterates over children of all living sub threads. | ||
986 | |||
987 | * i.e even though 'child_reaper' thread is listed as the parent of the | ||
988 | * orphaned children, any living sub-thread in the cgroup-init can | ||
989 | * perform the role of the child_reaper. | ||
990 | */ | ||
991 | zap_pid_ns_processes(tsk->nsproxy->pid_ns); | ||
992 | } | ||
993 | |||
994 | NORET_TYPE void do_exit(long code) | 994 | NORET_TYPE void do_exit(long code) |
995 | { | 995 | { |
996 | struct task_struct *tsk = current; | 996 | struct task_struct *tsk = current; |
@@ -1005,10 +1005,7 @@ NORET_TYPE void do_exit(long code) | |||
1005 | if (unlikely(!tsk->pid)) | 1005 | if (unlikely(!tsk->pid)) |
1006 | panic("Attempted to kill the idle task!"); | 1006 | panic("Attempted to kill the idle task!"); |
1007 | 1007 | ||
1008 | if (unlikely(current->ptrace & PT_TRACE_EXIT)) { | 1008 | tracehook_report_exit(&code); |
1009 | current->ptrace_message = code; | ||
1010 | ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP); | ||
1011 | } | ||
1012 | 1009 | ||
1013 | /* | 1010 | /* |
1014 | * We're taking recursive faults here in do_exit. Safest is to just | 1011 | * We're taking recursive faults here in do_exit. Safest is to just |
@@ -1053,7 +1050,6 @@ NORET_TYPE void do_exit(long code) | |||
1053 | } | 1050 | } |
1054 | group_dead = atomic_dec_and_test(&tsk->signal->live); | 1051 | group_dead = atomic_dec_and_test(&tsk->signal->live); |
1055 | if (group_dead) { | 1052 | if (group_dead) { |
1056 | exit_child_reaper(tsk); | ||
1057 | hrtimer_cancel(&tsk->signal->real_timer); | 1053 | hrtimer_cancel(&tsk->signal->real_timer); |
1058 | exit_itimers(tsk->signal); | 1054 | exit_itimers(tsk->signal); |
1059 | } | 1055 | } |
@@ -1354,6 +1350,8 @@ static int wait_task_zombie(struct task_struct *p, int options, | |||
1354 | psig->coublock += | 1350 | psig->coublock += |
1355 | task_io_get_oublock(p) + | 1351 | task_io_get_oublock(p) + |
1356 | sig->oublock + sig->coublock; | 1352 | sig->oublock + sig->coublock; |
1353 | task_io_accounting_add(&psig->ioac, &p->ioac); | ||
1354 | task_io_accounting_add(&psig->ioac, &sig->ioac); | ||
1357 | spin_unlock_irq(&p->parent->sighand->siglock); | 1355 | spin_unlock_irq(&p->parent->sighand->siglock); |
1358 | } | 1356 | } |
1359 | 1357 | ||
diff --git a/kernel/fork.c b/kernel/fork.c index adefc1131f27..7ce2ebe84796 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -27,15 +27,18 @@ | |||
27 | #include <linux/key.h> | 27 | #include <linux/key.h> |
28 | #include <linux/binfmts.h> | 28 | #include <linux/binfmts.h> |
29 | #include <linux/mman.h> | 29 | #include <linux/mman.h> |
30 | #include <linux/mmu_notifier.h> | ||
30 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
31 | #include <linux/nsproxy.h> | 32 | #include <linux/nsproxy.h> |
32 | #include <linux/capability.h> | 33 | #include <linux/capability.h> |
33 | #include <linux/cpu.h> | 34 | #include <linux/cpu.h> |
34 | #include <linux/cgroup.h> | 35 | #include <linux/cgroup.h> |
35 | #include <linux/security.h> | 36 | #include <linux/security.h> |
37 | #include <linux/hugetlb.h> | ||
36 | #include <linux/swap.h> | 38 | #include <linux/swap.h> |
37 | #include <linux/syscalls.h> | 39 | #include <linux/syscalls.h> |
38 | #include <linux/jiffies.h> | 40 | #include <linux/jiffies.h> |
41 | #include <linux/tracehook.h> | ||
39 | #include <linux/futex.h> | 42 | #include <linux/futex.h> |
40 | #include <linux/task_io_accounting_ops.h> | 43 | #include <linux/task_io_accounting_ops.h> |
41 | #include <linux/rcupdate.h> | 44 | #include <linux/rcupdate.h> |
@@ -92,6 +95,23 @@ int nr_processes(void) | |||
92 | static struct kmem_cache *task_struct_cachep; | 95 | static struct kmem_cache *task_struct_cachep; |
93 | #endif | 96 | #endif |
94 | 97 | ||
98 | #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR | ||
99 | static inline struct thread_info *alloc_thread_info(struct task_struct *tsk) | ||
100 | { | ||
101 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
102 | gfp_t mask = GFP_KERNEL | __GFP_ZERO; | ||
103 | #else | ||
104 | gfp_t mask = GFP_KERNEL; | ||
105 | #endif | ||
106 | return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); | ||
107 | } | ||
108 | |||
109 | static inline void free_thread_info(struct thread_info *ti) | ||
110 | { | ||
111 | free_pages((unsigned long)ti, THREAD_SIZE_ORDER); | ||
112 | } | ||
113 | #endif | ||
114 | |||
95 | /* SLAB cache for signal_struct structures (tsk->signal) */ | 115 | /* SLAB cache for signal_struct structures (tsk->signal) */ |
96 | static struct kmem_cache *signal_cachep; | 116 | static struct kmem_cache *signal_cachep; |
97 | 117 | ||
@@ -307,6 +327,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
307 | } | 327 | } |
308 | 328 | ||
309 | /* | 329 | /* |
330 | * Clear hugetlb-related page reserves for children. This only | ||
331 | * affects MAP_PRIVATE mappings. Faults generated by the child | ||
332 | * are not guaranteed to succeed, even if read-only | ||
333 | */ | ||
334 | if (is_vm_hugetlb_page(tmp)) | ||
335 | reset_vma_resv_huge_pages(tmp); | ||
336 | |||
337 | /* | ||
310 | * Link in the new vma and copy the page table entries. | 338 | * Link in the new vma and copy the page table entries. |
311 | */ | 339 | */ |
312 | *pprev = tmp; | 340 | *pprev = tmp; |
@@ -374,7 +402,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
374 | INIT_LIST_HEAD(&mm->mmlist); | 402 | INIT_LIST_HEAD(&mm->mmlist); |
375 | mm->flags = (current->mm) ? current->mm->flags | 403 | mm->flags = (current->mm) ? current->mm->flags |
376 | : MMF_DUMP_FILTER_DEFAULT; | 404 | : MMF_DUMP_FILTER_DEFAULT; |
377 | mm->core_waiters = 0; | 405 | mm->core_state = NULL; |
378 | mm->nr_ptes = 0; | 406 | mm->nr_ptes = 0; |
379 | set_mm_counter(mm, file_rss, 0); | 407 | set_mm_counter(mm, file_rss, 0); |
380 | set_mm_counter(mm, anon_rss, 0); | 408 | set_mm_counter(mm, anon_rss, 0); |
@@ -387,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
387 | 415 | ||
388 | if (likely(!mm_alloc_pgd(mm))) { | 416 | if (likely(!mm_alloc_pgd(mm))) { |
389 | mm->def_flags = 0; | 417 | mm->def_flags = 0; |
418 | mmu_notifier_mm_init(mm); | ||
390 | return mm; | 419 | return mm; |
391 | } | 420 | } |
392 | 421 | ||
@@ -419,6 +448,7 @@ void __mmdrop(struct mm_struct *mm) | |||
419 | BUG_ON(mm == &init_mm); | 448 | BUG_ON(mm == &init_mm); |
420 | mm_free_pgd(mm); | 449 | mm_free_pgd(mm); |
421 | destroy_context(mm); | 450 | destroy_context(mm); |
451 | mmu_notifier_mm_destroy(mm); | ||
422 | free_mm(mm); | 452 | free_mm(mm); |
423 | } | 453 | } |
424 | EXPORT_SYMBOL_GPL(__mmdrop); | 454 | EXPORT_SYMBOL_GPL(__mmdrop); |
@@ -448,7 +478,7 @@ EXPORT_SYMBOL_GPL(mmput); | |||
448 | /** | 478 | /** |
449 | * get_task_mm - acquire a reference to the task's mm | 479 | * get_task_mm - acquire a reference to the task's mm |
450 | * | 480 | * |
451 | * Returns %NULL if the task has no mm. Checks PF_BORROWED_MM (meaning | 481 | * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning |
452 | * this kernel workthread has transiently adopted a user mm with use_mm, | 482 | * this kernel workthread has transiently adopted a user mm with use_mm, |
453 | * to do its AIO) is not set and if so returns a reference to it, after | 483 | * to do its AIO) is not set and if so returns a reference to it, after |
454 | * bumping up the use count. User must release the mm via mmput() | 484 | * bumping up the use count. User must release the mm via mmput() |
@@ -461,7 +491,7 @@ struct mm_struct *get_task_mm(struct task_struct *task) | |||
461 | task_lock(task); | 491 | task_lock(task); |
462 | mm = task->mm; | 492 | mm = task->mm; |
463 | if (mm) { | 493 | if (mm) { |
464 | if (task->flags & PF_BORROWED_MM) | 494 | if (task->flags & PF_KTHREAD) |
465 | mm = NULL; | 495 | mm = NULL; |
466 | else | 496 | else |
467 | atomic_inc(&mm->mm_users); | 497 | atomic_inc(&mm->mm_users); |
@@ -630,13 +660,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old) | |||
630 | path_get(&old->root); | 660 | path_get(&old->root); |
631 | fs->pwd = old->pwd; | 661 | fs->pwd = old->pwd; |
632 | path_get(&old->pwd); | 662 | path_get(&old->pwd); |
633 | if (old->altroot.dentry) { | ||
634 | fs->altroot = old->altroot; | ||
635 | path_get(&old->altroot); | ||
636 | } else { | ||
637 | fs->altroot.mnt = NULL; | ||
638 | fs->altroot.dentry = NULL; | ||
639 | } | ||
640 | read_unlock(&old->lock); | 663 | read_unlock(&old->lock); |
641 | } | 664 | } |
642 | return fs; | 665 | return fs; |
@@ -786,6 +809,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
786 | sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; | 809 | sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; |
787 | sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; | 810 | sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; |
788 | sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; | 811 | sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; |
812 | task_io_accounting_init(&sig->ioac); | ||
789 | sig->sum_sched_runtime = 0; | 813 | sig->sum_sched_runtime = 0; |
790 | INIT_LIST_HEAD(&sig->cpu_timers[0]); | 814 | INIT_LIST_HEAD(&sig->cpu_timers[0]); |
791 | INIT_LIST_HEAD(&sig->cpu_timers[1]); | 815 | INIT_LIST_HEAD(&sig->cpu_timers[1]); |
@@ -833,8 +857,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p) | |||
833 | 857 | ||
834 | new_flags &= ~PF_SUPERPRIV; | 858 | new_flags &= ~PF_SUPERPRIV; |
835 | new_flags |= PF_FORKNOEXEC; | 859 | new_flags |= PF_FORKNOEXEC; |
836 | if (!(clone_flags & CLONE_PTRACE)) | 860 | new_flags |= PF_STARTING; |
837 | p->ptrace = 0; | ||
838 | p->flags = new_flags; | 861 | p->flags = new_flags; |
839 | clear_freeze_flag(p); | 862 | clear_freeze_flag(p); |
840 | } | 863 | } |
@@ -875,7 +898,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
875 | struct pt_regs *regs, | 898 | struct pt_regs *regs, |
876 | unsigned long stack_size, | 899 | unsigned long stack_size, |
877 | int __user *child_tidptr, | 900 | int __user *child_tidptr, |
878 | struct pid *pid) | 901 | struct pid *pid, |
902 | int trace) | ||
879 | { | 903 | { |
880 | int retval; | 904 | int retval; |
881 | struct task_struct *p; | 905 | struct task_struct *p; |
@@ -968,13 +992,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
968 | p->last_switch_timestamp = 0; | 992 | p->last_switch_timestamp = 0; |
969 | #endif | 993 | #endif |
970 | 994 | ||
971 | #ifdef CONFIG_TASK_XACCT | 995 | task_io_accounting_init(&p->ioac); |
972 | p->rchar = 0; /* I/O counter: bytes read */ | ||
973 | p->wchar = 0; /* I/O counter: bytes written */ | ||
974 | p->syscr = 0; /* I/O counter: read syscalls */ | ||
975 | p->syscw = 0; /* I/O counter: write syscalls */ | ||
976 | #endif | ||
977 | task_io_accounting_init(p); | ||
978 | acct_clear_integrals(p); | 996 | acct_clear_integrals(p); |
979 | 997 | ||
980 | p->it_virt_expires = cputime_zero; | 998 | p->it_virt_expires = cputime_zero; |
@@ -1081,6 +1099,12 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1081 | if (clone_flags & CLONE_THREAD) | 1099 | if (clone_flags & CLONE_THREAD) |
1082 | p->tgid = current->tgid; | 1100 | p->tgid = current->tgid; |
1083 | 1101 | ||
1102 | if (current->nsproxy != p->nsproxy) { | ||
1103 | retval = ns_cgroup_clone(p, pid); | ||
1104 | if (retval) | ||
1105 | goto bad_fork_free_pid; | ||
1106 | } | ||
1107 | |||
1084 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | 1108 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
1085 | /* | 1109 | /* |
1086 | * Clear TID on mm_release()? | 1110 | * Clear TID on mm_release()? |
@@ -1125,8 +1149,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1125 | */ | 1149 | */ |
1126 | p->group_leader = p; | 1150 | p->group_leader = p; |
1127 | INIT_LIST_HEAD(&p->thread_group); | 1151 | INIT_LIST_HEAD(&p->thread_group); |
1128 | INIT_LIST_HEAD(&p->ptrace_entry); | ||
1129 | INIT_LIST_HEAD(&p->ptraced); | ||
1130 | 1152 | ||
1131 | /* Now that the task is set up, run cgroup callbacks if | 1153 | /* Now that the task is set up, run cgroup callbacks if |
1132 | * necessary. We need to run them before the task is visible | 1154 | * necessary. We need to run them before the task is visible |
@@ -1157,7 +1179,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1157 | p->real_parent = current->real_parent; | 1179 | p->real_parent = current->real_parent; |
1158 | else | 1180 | else |
1159 | p->real_parent = current; | 1181 | p->real_parent = current; |
1160 | p->parent = p->real_parent; | ||
1161 | 1182 | ||
1162 | spin_lock(¤t->sighand->siglock); | 1183 | spin_lock(¤t->sighand->siglock); |
1163 | 1184 | ||
@@ -1199,8 +1220,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1199 | 1220 | ||
1200 | if (likely(p->pid)) { | 1221 | if (likely(p->pid)) { |
1201 | list_add_tail(&p->sibling, &p->real_parent->children); | 1222 | list_add_tail(&p->sibling, &p->real_parent->children); |
1202 | if (unlikely(p->ptrace & PT_PTRACED)) | 1223 | tracehook_finish_clone(p, clone_flags, trace); |
1203 | __ptrace_link(p, current->parent); | ||
1204 | 1224 | ||
1205 | if (thread_group_leader(p)) { | 1225 | if (thread_group_leader(p)) { |
1206 | if (clone_flags & CLONE_NEWPID) | 1226 | if (clone_flags & CLONE_NEWPID) |
@@ -1285,29 +1305,13 @@ struct task_struct * __cpuinit fork_idle(int cpu) | |||
1285 | struct pt_regs regs; | 1305 | struct pt_regs regs; |
1286 | 1306 | ||
1287 | task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, | 1307 | task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, |
1288 | &init_struct_pid); | 1308 | &init_struct_pid, 0); |
1289 | if (!IS_ERR(task)) | 1309 | if (!IS_ERR(task)) |
1290 | init_idle(task, cpu); | 1310 | init_idle(task, cpu); |
1291 | 1311 | ||
1292 | return task; | 1312 | return task; |
1293 | } | 1313 | } |
1294 | 1314 | ||
1295 | static int fork_traceflag(unsigned clone_flags) | ||
1296 | { | ||
1297 | if (clone_flags & CLONE_UNTRACED) | ||
1298 | return 0; | ||
1299 | else if (clone_flags & CLONE_VFORK) { | ||
1300 | if (current->ptrace & PT_TRACE_VFORK) | ||
1301 | return PTRACE_EVENT_VFORK; | ||
1302 | } else if ((clone_flags & CSIGNAL) != SIGCHLD) { | ||
1303 | if (current->ptrace & PT_TRACE_CLONE) | ||
1304 | return PTRACE_EVENT_CLONE; | ||
1305 | } else if (current->ptrace & PT_TRACE_FORK) | ||
1306 | return PTRACE_EVENT_FORK; | ||
1307 | |||
1308 | return 0; | ||
1309 | } | ||
1310 | |||
1311 | /* | 1315 | /* |
1312 | * Ok, this is the main fork-routine. | 1316 | * Ok, this is the main fork-routine. |
1313 | * | 1317 | * |
@@ -1342,14 +1346,14 @@ long do_fork(unsigned long clone_flags, | |||
1342 | } | 1346 | } |
1343 | } | 1347 | } |
1344 | 1348 | ||
1345 | if (unlikely(current->ptrace)) { | 1349 | /* |
1346 | trace = fork_traceflag (clone_flags); | 1350 | * When called from kernel_thread, don't do user tracing stuff. |
1347 | if (trace) | 1351 | */ |
1348 | clone_flags |= CLONE_PTRACE; | 1352 | if (likely(user_mode(regs))) |
1349 | } | 1353 | trace = tracehook_prepare_clone(clone_flags); |
1350 | 1354 | ||
1351 | p = copy_process(clone_flags, stack_start, regs, stack_size, | 1355 | p = copy_process(clone_flags, stack_start, regs, stack_size, |
1352 | child_tidptr, NULL); | 1356 | child_tidptr, NULL, trace); |
1353 | /* | 1357 | /* |
1354 | * Do this prior waking up the new thread - the thread pointer | 1358 | * Do this prior waking up the new thread - the thread pointer |
1355 | * might get invalid after that point, if the thread exits quickly. | 1359 | * might get invalid after that point, if the thread exits quickly. |
@@ -1367,32 +1371,35 @@ long do_fork(unsigned long clone_flags, | |||
1367 | init_completion(&vfork); | 1371 | init_completion(&vfork); |
1368 | } | 1372 | } |
1369 | 1373 | ||
1370 | if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) { | 1374 | tracehook_report_clone(trace, regs, clone_flags, nr, p); |
1375 | |||
1376 | /* | ||
1377 | * We set PF_STARTING at creation in case tracing wants to | ||
1378 | * use this to distinguish a fully live task from one that | ||
1379 | * hasn't gotten to tracehook_report_clone() yet. Now we | ||
1380 | * clear it and set the child going. | ||
1381 | */ | ||
1382 | p->flags &= ~PF_STARTING; | ||
1383 | |||
1384 | if (unlikely(clone_flags & CLONE_STOPPED)) { | ||
1371 | /* | 1385 | /* |
1372 | * We'll start up with an immediate SIGSTOP. | 1386 | * We'll start up with an immediate SIGSTOP. |
1373 | */ | 1387 | */ |
1374 | sigaddset(&p->pending.signal, SIGSTOP); | 1388 | sigaddset(&p->pending.signal, SIGSTOP); |
1375 | set_tsk_thread_flag(p, TIF_SIGPENDING); | 1389 | set_tsk_thread_flag(p, TIF_SIGPENDING); |
1376 | } | ||
1377 | |||
1378 | if (!(clone_flags & CLONE_STOPPED)) | ||
1379 | wake_up_new_task(p, clone_flags); | ||
1380 | else | ||
1381 | __set_task_state(p, TASK_STOPPED); | 1390 | __set_task_state(p, TASK_STOPPED); |
1382 | 1391 | } else { | |
1383 | if (unlikely (trace)) { | 1392 | wake_up_new_task(p, clone_flags); |
1384 | current->ptrace_message = nr; | ||
1385 | ptrace_notify ((trace << 8) | SIGTRAP); | ||
1386 | } | 1393 | } |
1387 | 1394 | ||
1395 | tracehook_report_clone_complete(trace, regs, | ||
1396 | clone_flags, nr, p); | ||
1397 | |||
1388 | if (clone_flags & CLONE_VFORK) { | 1398 | if (clone_flags & CLONE_VFORK) { |
1389 | freezer_do_not_count(); | 1399 | freezer_do_not_count(); |
1390 | wait_for_completion(&vfork); | 1400 | wait_for_completion(&vfork); |
1391 | freezer_count(); | 1401 | freezer_count(); |
1392 | if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) { | 1402 | tracehook_report_vfork_done(p, nr); |
1393 | current->ptrace_message = nr; | ||
1394 | ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP); | ||
1395 | } | ||
1396 | } | 1403 | } |
1397 | } else { | 1404 | } else { |
1398 | nr = PTR_ERR(p); | 1405 | nr = PTR_ERR(p); |
@@ -1404,7 +1411,7 @@ long do_fork(unsigned long clone_flags, | |||
1404 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 | 1411 | #define ARCH_MIN_MMSTRUCT_ALIGN 0 |
1405 | #endif | 1412 | #endif |
1406 | 1413 | ||
1407 | static void sighand_ctor(struct kmem_cache *cachep, void *data) | 1414 | static void sighand_ctor(void *data) |
1408 | { | 1415 | { |
1409 | struct sighand_struct *sighand = data; | 1416 | struct sighand_struct *sighand = data; |
1410 | 1417 | ||
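The fork.c hunks above replace the open-coded ptrace plumbing (fork_traceflag(), __ptrace_link(), ptrace_notify()) with the tracehook_* helpers and introduce PF_STARTING to mark a child that is not yet fully live. Condensed from the hunks above, the resulting ordering in do_fork() looks roughly like this (error paths and the CLONE_VFORK completion are omitted; this is a sketch, not the full function):

	int trace = 0;

	/* kernel_thread() callers never get user tracing */
	if (likely(user_mode(regs)))
		trace = tracehook_prepare_clone(clone_flags);

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);	/* child starts with PF_STARTING set */

	tracehook_report_clone(trace, regs, clone_flags, nr, p);

	p->flags &= ~PF_STARTING;			/* task is now fully constructed */
	if (unlikely(clone_flags & CLONE_STOPPED)) {
		sigaddset(&p->pending.signal, SIGSTOP);
		set_tsk_thread_flag(p, TIF_SIGPENDING);
		__set_task_state(p, TASK_STOPPED);
	} else {
		wake_up_new_task(p, clone_flags);
	}

	tracehook_report_clone_complete(trace, regs, clone_flags, nr, p);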
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 240c64d59267..d663338cb4a8 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -28,8 +28,7 @@ void dynamic_irq_init(unsigned int irq) | |||
28 | unsigned long flags; | 28 | unsigned long flags; |
29 | 29 | ||
30 | if (irq >= NR_IRQS) { | 30 | if (irq >= NR_IRQS) { |
31 | printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); | 31 | WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); |
32 | WARN_ON(1); | ||
33 | return; | 32 | return; |
34 | } | 33 | } |
35 | 34 | ||
@@ -62,8 +61,7 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
62 | unsigned long flags; | 61 | unsigned long flags; |
63 | 62 | ||
64 | if (irq >= NR_IRQS) { | 63 | if (irq >= NR_IRQS) { |
65 | printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); | 64 | WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); |
66 | WARN_ON(1); | ||
67 | return; | 65 | return; |
68 | } | 66 | } |
69 | 67 | ||
@@ -71,9 +69,8 @@ void dynamic_irq_cleanup(unsigned int irq) | |||
71 | spin_lock_irqsave(&desc->lock, flags); | 69 | spin_lock_irqsave(&desc->lock, flags); |
72 | if (desc->action) { | 70 | if (desc->action) { |
73 | spin_unlock_irqrestore(&desc->lock, flags); | 71 | spin_unlock_irqrestore(&desc->lock, flags); |
74 | printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n", | 72 | WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", |
75 | irq); | 73 | irq); |
76 | WARN_ON(1); | ||
77 | return; | 74 | return; |
78 | } | 75 | } |
79 | desc->msi_desc = NULL; | 76 | desc->msi_desc = NULL; |
@@ -96,8 +93,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip) | |||
96 | unsigned long flags; | 93 | unsigned long flags; |
97 | 94 | ||
98 | if (irq >= NR_IRQS) { | 95 | if (irq >= NR_IRQS) { |
99 | printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq); | 96 | WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq); |
100 | WARN_ON(1); | ||
101 | return -EINVAL; | 97 | return -EINVAL; |
102 | } | 98 | } |
103 | 99 | ||
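The chip.c hunks fold each printk()/WARN_ON(1) pair into a single WARN() call, so the message and the backtrace come from one statement and stay attached to each other in the log. The shape of the idiom, taken from the dynamic_irq_init() hunk above:

	/* before: message and backtrace are two separate statements */
	printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
	WARN_ON(1);

	/* after: WARN(cond, fmt, ...) prints the message and dumps the stack
	 * only when cond is true */
	WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);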
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 9aa3e7b81389..d62f69ba7453 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -177,8 +177,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq) | |||
177 | { | 177 | { |
178 | switch (desc->depth) { | 178 | switch (desc->depth) { |
179 | case 0: | 179 | case 0: |
180 | printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 180 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
181 | WARN_ON(1); | ||
182 | break; | 181 | break; |
183 | case 1: { | 182 | case 1: { |
184 | unsigned int status = desc->status & ~IRQ_DISABLED; | 183 | unsigned int status = desc->status & ~IRQ_DISABLED; |
@@ -217,6 +216,17 @@ void enable_irq(unsigned int irq) | |||
217 | } | 216 | } |
218 | EXPORT_SYMBOL(enable_irq); | 217 | EXPORT_SYMBOL(enable_irq); |
219 | 218 | ||
219 | int set_irq_wake_real(unsigned int irq, unsigned int on) | ||
220 | { | ||
221 | struct irq_desc *desc = irq_desc + irq; | ||
222 | int ret = -ENXIO; | ||
223 | |||
224 | if (desc->chip->set_wake) | ||
225 | ret = desc->chip->set_wake(irq, on); | ||
226 | |||
227 | return ret; | ||
228 | } | ||
229 | |||
220 | /** | 230 | /** |
221 | * set_irq_wake - control irq power management wakeup | 231 | * set_irq_wake - control irq power management wakeup |
222 | * @irq: interrupt to control | 232 | * @irq: interrupt to control |
@@ -233,30 +243,32 @@ int set_irq_wake(unsigned int irq, unsigned int on) | |||
233 | { | 243 | { |
234 | struct irq_desc *desc = irq_desc + irq; | 244 | struct irq_desc *desc = irq_desc + irq; |
235 | unsigned long flags; | 245 | unsigned long flags; |
236 | int ret = -ENXIO; | 246 | int ret = 0; |
237 | int (*set_wake)(unsigned, unsigned) = desc->chip->set_wake; | ||
238 | 247 | ||
239 | /* wakeup-capable irqs can be shared between drivers that | 248 | /* wakeup-capable irqs can be shared between drivers that |
240 | * don't need to have the same sleep mode behaviors. | 249 | * don't need to have the same sleep mode behaviors. |
241 | */ | 250 | */ |
242 | spin_lock_irqsave(&desc->lock, flags); | 251 | spin_lock_irqsave(&desc->lock, flags); |
243 | if (on) { | 252 | if (on) { |
244 | if (desc->wake_depth++ == 0) | 253 | if (desc->wake_depth++ == 0) { |
245 | desc->status |= IRQ_WAKEUP; | 254 | ret = set_irq_wake_real(irq, on); |
246 | else | 255 | if (ret) |
247 | set_wake = NULL; | 256 | desc->wake_depth = 0; |
257 | else | ||
258 | desc->status |= IRQ_WAKEUP; | ||
259 | } | ||
248 | } else { | 260 | } else { |
249 | if (desc->wake_depth == 0) { | 261 | if (desc->wake_depth == 0) { |
250 | printk(KERN_WARNING "Unbalanced IRQ %d " | 262 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); |
251 | "wake disable\n", irq); | 263 | } else if (--desc->wake_depth == 0) { |
252 | WARN_ON(1); | 264 | ret = set_irq_wake_real(irq, on); |
253 | } else if (--desc->wake_depth == 0) | 265 | if (ret) |
254 | desc->status &= ~IRQ_WAKEUP; | 266 | desc->wake_depth = 1; |
255 | else | 267 | else |
256 | set_wake = NULL; | 268 | desc->status &= ~IRQ_WAKEUP; |
269 | } | ||
257 | } | 270 | } |
258 | if (set_wake) | 271 | |
259 | ret = desc->chip->set_wake(irq, on); | ||
260 | spin_unlock_irqrestore(&desc->lock, flags); | 272 | spin_unlock_irqrestore(&desc->lock, flags); |
261 | return ret; | 273 | return ret; |
262 | } | 274 | } |
@@ -293,6 +305,31 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc) | |||
293 | desc->handle_irq = NULL; | 305 | desc->handle_irq = NULL; |
294 | } | 306 | } |
295 | 307 | ||
308 | static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq, | ||
309 | unsigned long flags) | ||
310 | { | ||
311 | int ret; | ||
312 | |||
313 | if (!chip || !chip->set_type) { | ||
314 | /* | ||
315 | * IRQF_TRIGGER_* but the PIC does not support multiple | ||
316 | * flow-types? | ||
317 | */ | ||
318 | pr_warning("No set_type function for IRQ %d (%s)\n", irq, | ||
319 | chip ? (chip->name ? : "unknown") : "unknown"); | ||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK); | ||
324 | |||
325 | if (ret) | ||
326 | pr_err("setting trigger mode %d for irq %u failed (%pF)\n", | ||
327 | (int)(flags & IRQF_TRIGGER_MASK), | ||
328 | irq, chip->set_type); | ||
329 | |||
330 | return ret; | ||
331 | } | ||
332 | |||
296 | /* | 333 | /* |
297 | * Internal function to register an irqaction - typically used to | 334 | * Internal function to register an irqaction - typically used to |
298 | * allocate special interrupts that are part of the architecture. | 335 | * allocate special interrupts that are part of the architecture. |
@@ -304,6 +341,7 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
304 | const char *old_name = NULL; | 341 | const char *old_name = NULL; |
305 | unsigned long flags; | 342 | unsigned long flags; |
306 | int shared = 0; | 343 | int shared = 0; |
344 | int ret; | ||
307 | 345 | ||
308 | if (irq >= NR_IRQS) | 346 | if (irq >= NR_IRQS) |
309 | return -EINVAL; | 347 | return -EINVAL; |
@@ -361,35 +399,23 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
361 | shared = 1; | 399 | shared = 1; |
362 | } | 400 | } |
363 | 401 | ||
364 | *p = new; | ||
365 | |||
366 | /* Exclude IRQ from balancing */ | ||
367 | if (new->flags & IRQF_NOBALANCING) | ||
368 | desc->status |= IRQ_NO_BALANCING; | ||
369 | |||
370 | if (!shared) { | 402 | if (!shared) { |
371 | irq_chip_set_defaults(desc->chip); | 403 | irq_chip_set_defaults(desc->chip); |
372 | 404 | ||
373 | #if defined(CONFIG_IRQ_PER_CPU) | ||
374 | if (new->flags & IRQF_PERCPU) | ||
375 | desc->status |= IRQ_PER_CPU; | ||
376 | #endif | ||
377 | |||
378 | /* Setup the type (level, edge polarity) if configured: */ | 405 | /* Setup the type (level, edge polarity) if configured: */ |
379 | if (new->flags & IRQF_TRIGGER_MASK) { | 406 | if (new->flags & IRQF_TRIGGER_MASK) { |
380 | if (desc->chip->set_type) | 407 | ret = __irq_set_trigger(desc->chip, irq, new->flags); |
381 | desc->chip->set_type(irq, | 408 | |
382 | new->flags & IRQF_TRIGGER_MASK); | 409 | if (ret) { |
383 | else | 410 | spin_unlock_irqrestore(&desc->lock, flags); |
384 | /* | 411 | return ret; |
385 | * IRQF_TRIGGER_* but the PIC does not support | 412 | } |
386 | * multiple flow-types? | ||
387 | */ | ||
388 | printk(KERN_WARNING "No IRQF_TRIGGER set_type " | ||
389 | "function for IRQ %d (%s)\n", irq, | ||
390 | desc->chip->name); | ||
391 | } else | 413 | } else |
392 | compat_irq_chip_set_default_handler(desc); | 414 | compat_irq_chip_set_default_handler(desc); |
415 | #if defined(CONFIG_IRQ_PER_CPU) | ||
416 | if (new->flags & IRQF_PERCPU) | ||
417 | desc->status |= IRQ_PER_CPU; | ||
418 | #endif | ||
393 | 419 | ||
394 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | | 420 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | |
395 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); | 421 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); |
@@ -405,6 +431,13 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
405 | /* Set default affinity mask once everything is setup */ | 431 | /* Set default affinity mask once everything is setup */ |
406 | irq_select_affinity(irq); | 432 | irq_select_affinity(irq); |
407 | } | 433 | } |
434 | |||
435 | *p = new; | ||
436 | |||
437 | /* Exclude IRQ from balancing */ | ||
438 | if (new->flags & IRQF_NOBALANCING) | ||
439 | desc->status |= IRQ_NO_BALANCING; | ||
440 | |||
408 | /* Reset broken irq detection when installing new handler */ | 441 | /* Reset broken irq detection when installing new handler */ |
409 | desc->irq_count = 0; | 442 | desc->irq_count = 0; |
410 | desc->irqs_unhandled = 0; | 443 | desc->irqs_unhandled = 0; |
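In manage.c, set_irq_wake() now funnels every chip call through set_irq_wake_real() and keeps wake_depth honest: the chip's ->set_wake() hook is only invoked on the 0 to 1 and 1 to 0 transitions, and the counter is rolled back if the chip rejects the request. A small stand-alone sketch of that depth-counting pattern (plain user-space C; the names are illustrative, not kernel API):

	#include <stdio.h>

	static unsigned int wake_depth;

	/* stand-in for desc->chip->set_wake(); pretend the hardware refuses "on" */
	static int hw_set_wake(int on)
	{
		return on ? -1 : 0;
	}

	static int wake_enable(void)
	{
		int ret = 0;

		if (wake_depth++ == 0) {	/* first user: program the hardware */
			ret = hw_set_wake(1);
			if (ret)
				wake_depth = 0;	/* roll back on failure */
		}
		return ret;
	}

	static int wake_disable(void)
	{
		int ret = 0;

		if (wake_depth == 0) {
			fprintf(stderr, "unbalanced disable\n");
		} else if (--wake_depth == 0) {	/* last user: deprogram the hardware */
			ret = hw_set_wake(0);
			if (ret)
				wake_depth = 1;	/* roll back on failure */
		}
		return ret;
	}

	int main(void)
	{
		int ret = wake_enable();
		printf("enable  -> %d, depth %u\n", ret, wake_depth);
		ret = wake_disable();
		printf("disable -> %d, depth %u\n", ret, wake_depth);
		return 0;
	}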
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 6c6d35d68ee9..a09dd29c2fd7 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/proc_fs.h> | 10 | #include <linux/proc_fs.h> |
11 | #include <linux/seq_file.h> | ||
11 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
12 | 13 | ||
13 | #include "internals.h" | 14 | #include "internals.h" |
@@ -16,23 +17,18 @@ static struct proc_dir_entry *root_irq_dir; | |||
16 | 17 | ||
17 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
18 | 19 | ||
19 | static int irq_affinity_read_proc(char *page, char **start, off_t off, | 20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) |
20 | int count, int *eof, void *data) | ||
21 | { | 21 | { |
22 | struct irq_desc *desc = irq_desc + (long)data; | 22 | struct irq_desc *desc = irq_desc + (long)m->private; |
23 | cpumask_t *mask = &desc->affinity; | 23 | cpumask_t *mask = &desc->affinity; |
24 | int len; | ||
25 | 24 | ||
26 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 25 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
27 | if (desc->status & IRQ_MOVE_PENDING) | 26 | if (desc->status & IRQ_MOVE_PENDING) |
28 | mask = &desc->pending_mask; | 27 | mask = &desc->pending_mask; |
29 | #endif | 28 | #endif |
30 | len = cpumask_scnprintf(page, count, *mask); | 29 | seq_cpumask(m, mask); |
31 | 30 | seq_putc(m, '\n'); | |
32 | if (count - len < 2) | 31 | return 0; |
33 | return -EINVAL; | ||
34 | len += sprintf(page + len, "\n"); | ||
35 | return len; | ||
36 | } | 32 | } |
37 | 33 | ||
38 | #ifndef is_affinity_mask_valid | 34 | #ifndef is_affinity_mask_valid |
@@ -40,11 +36,12 @@ static int irq_affinity_read_proc(char *page, char **start, off_t off, | |||
40 | #endif | 36 | #endif |
41 | 37 | ||
42 | int no_irq_affinity; | 38 | int no_irq_affinity; |
43 | static int irq_affinity_write_proc(struct file *file, const char __user *buffer, | 39 | static ssize_t irq_affinity_proc_write(struct file *file, |
44 | unsigned long count, void *data) | 40 | const char __user *buffer, size_t count, loff_t *pos) |
45 | { | 41 | { |
46 | unsigned int irq = (int)(long)data, full_count = count, err; | 42 | unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; |
47 | cpumask_t new_value; | 43 | cpumask_t new_value; |
44 | int err; | ||
48 | 45 | ||
49 | if (!irq_desc[irq].chip->set_affinity || no_irq_affinity || | 46 | if (!irq_desc[irq].chip->set_affinity || no_irq_affinity || |
50 | irq_balancing_disabled(irq)) | 47 | irq_balancing_disabled(irq)) |
@@ -65,28 +62,38 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer, | |||
65 | if (!cpus_intersects(new_value, cpu_online_map)) | 62 | if (!cpus_intersects(new_value, cpu_online_map)) |
66 | /* Special case for empty set - allow the architecture | 63 | /* Special case for empty set - allow the architecture |
67 | code to set default SMP affinity. */ | 64 | code to set default SMP affinity. */ |
68 | return irq_select_affinity(irq) ? -EINVAL : full_count; | 65 | return irq_select_affinity(irq) ? -EINVAL : count; |
69 | 66 | ||
70 | irq_set_affinity(irq, new_value); | 67 | irq_set_affinity(irq, new_value); |
71 | 68 | ||
72 | return full_count; | 69 | return count; |
73 | } | 70 | } |
74 | 71 | ||
75 | static int default_affinity_read(char *page, char **start, off_t off, | 72 | static int irq_affinity_proc_open(struct inode *inode, struct file *file) |
76 | int count, int *eof, void *data) | ||
77 | { | 73 | { |
78 | int len = cpumask_scnprintf(page, count, irq_default_affinity); | 74 | return single_open(file, irq_affinity_proc_show, PDE(inode)->data); |
79 | if (count - len < 2) | ||
80 | return -EINVAL; | ||
81 | len += sprintf(page + len, "\n"); | ||
82 | return len; | ||
83 | } | 75 | } |
84 | 76 | ||
85 | static int default_affinity_write(struct file *file, const char __user *buffer, | 77 | static const struct file_operations irq_affinity_proc_fops = { |
86 | unsigned long count, void *data) | 78 | .open = irq_affinity_proc_open, |
79 | .read = seq_read, | ||
80 | .llseek = seq_lseek, | ||
81 | .release = single_release, | ||
82 | .write = irq_affinity_proc_write, | ||
83 | }; | ||
84 | |||
85 | static int default_affinity_show(struct seq_file *m, void *v) | ||
86 | { | ||
87 | seq_cpumask(m, &irq_default_affinity); | ||
88 | seq_putc(m, '\n'); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | static ssize_t default_affinity_write(struct file *file, | ||
93 | const char __user *buffer, size_t count, loff_t *ppos) | ||
87 | { | 94 | { |
88 | unsigned int full_count = count, err; | ||
89 | cpumask_t new_value; | 95 | cpumask_t new_value; |
96 | int err; | ||
90 | 97 | ||
91 | err = cpumask_parse_user(buffer, count, new_value); | 98 | err = cpumask_parse_user(buffer, count, new_value); |
92 | if (err) | 99 | if (err) |
@@ -105,8 +112,21 @@ static int default_affinity_write(struct file *file, const char __user *buffer, | |||
105 | 112 | ||
106 | irq_default_affinity = new_value; | 113 | irq_default_affinity = new_value; |
107 | 114 | ||
108 | return full_count; | 115 | return count; |
109 | } | 116 | } |
117 | |||
118 | static int default_affinity_open(struct inode *inode, struct file *file) | ||
119 | { | ||
120 | return single_open(file, default_affinity_show, NULL); | ||
121 | } | ||
122 | |||
123 | static const struct file_operations default_affinity_proc_fops = { | ||
124 | .open = default_affinity_open, | ||
125 | .read = seq_read, | ||
126 | .llseek = seq_lseek, | ||
127 | .release = single_release, | ||
128 | .write = default_affinity_write, | ||
129 | }; | ||
110 | #endif | 130 | #endif |
111 | 131 | ||
112 | static int irq_spurious_read(char *page, char **start, off_t off, | 132 | static int irq_spurious_read(char *page, char **start, off_t off, |
@@ -178,16 +198,9 @@ void register_irq_proc(unsigned int irq) | |||
178 | irq_desc[irq].dir = proc_mkdir(name, root_irq_dir); | 198 | irq_desc[irq].dir = proc_mkdir(name, root_irq_dir); |
179 | 199 | ||
180 | #ifdef CONFIG_SMP | 200 | #ifdef CONFIG_SMP |
181 | { | 201 | /* create /proc/irq/<irq>/smp_affinity */ |
182 | /* create /proc/irq/<irq>/smp_affinity */ | 202 | proc_create_data("smp_affinity", 0600, irq_desc[irq].dir, |
183 | entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir); | 203 | &irq_affinity_proc_fops, (void *)(long)irq); |
184 | |||
185 | if (entry) { | ||
186 | entry->data = (void *)(long)irq; | ||
187 | entry->read_proc = irq_affinity_read_proc; | ||
188 | entry->write_proc = irq_affinity_write_proc; | ||
189 | } | ||
190 | } | ||
191 | #endif | 204 | #endif |
192 | 205 | ||
193 | entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir); | 206 | entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir); |
@@ -208,15 +221,8 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action) | |||
208 | void register_default_affinity_proc(void) | 221 | void register_default_affinity_proc(void) |
209 | { | 222 | { |
210 | #ifdef CONFIG_SMP | 223 | #ifdef CONFIG_SMP |
211 | struct proc_dir_entry *entry; | 224 | proc_create("irq/default_smp_affinity", 0600, NULL, |
212 | 225 | &default_affinity_proc_fops); | |
213 | /* create /proc/irq/default_smp_affinity */ | ||
214 | entry = create_proc_entry("default_smp_affinity", 0600, root_irq_dir); | ||
215 | if (entry) { | ||
216 | entry->data = NULL; | ||
217 | entry->read_proc = default_affinity_read; | ||
218 | entry->write_proc = default_affinity_write; | ||
219 | } | ||
220 | #endif | 226 | #endif |
221 | } | 227 | } |
222 | 228 | ||
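The proc.c hunks convert the hand-rolled read_proc/write_proc handlers to the seq_file single_open() pattern and register the entries with proc_create_data(), which attaches the private data at creation time instead of poking entry->data afterwards. A minimal sketch of the same pattern (the "foo" names are placeholders, not part of the patch):

	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int foo_proc_show(struct seq_file *m, void *v)
	{
		/* m->private is the pointer handed to single_open() below */
		seq_printf(m, "%ld\n", (long)m->private);
		return 0;
	}

	static int foo_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, foo_proc_show, PDE(inode)->data);
	}

	static const struct file_operations foo_proc_fops = {
		.open		= foo_proc_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	static int __init foo_proc_init(void)
	{
		/* the last argument lands in PDE(inode)->data for the open handler */
		proc_create_data("foo", 0444, NULL, &foo_proc_fops, (void *)42L);
		return 0;
	}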
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 6fc0040f3e3a..38fc10ac7541 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
@@ -176,7 +176,7 @@ static unsigned long get_symbol_pos(unsigned long addr, | |||
176 | high = kallsyms_num_syms; | 176 | high = kallsyms_num_syms; |
177 | 177 | ||
178 | while (high - low > 1) { | 178 | while (high - low > 1) { |
179 | mid = (low + high) / 2; | 179 | mid = low + (high - low) / 2; |
180 | if (kallsyms_addresses[mid] <= addr) | 180 | if (kallsyms_addresses[mid] <= addr) |
181 | low = mid; | 181 | low = mid; |
182 | else | 182 | else |
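The one-line kallsyms.c change switches the binary-search midpoint to the overflow-safe form: (low + high) / 2 can wrap when both bounds are large, while low + (high - low) / 2 stays inside [low, high]. For the symbol-table indices involved here the wrap is unlikely in practice, so this is a defensive cleanup; a tiny stand-alone illustration of the difference:

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		unsigned long low = ULONG_MAX - 10, high = ULONG_MAX - 2;

		/* naive midpoint wraps around and lands far below low */
		printf("naive: %lu\n", (low + high) / 2);
		/* safe midpoint stays within [low, high] */
		printf("safe : %lu\n", low + (high - low) / 2);
		return 0;
	}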
diff --git a/kernel/kexec.c b/kernel/kexec.c index 1c5fcacbcf33..aef265325cd3 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | #include <linux/kexec.h> | 14 | #include <linux/kexec.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/mutex.h> |
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/highmem.h> | 17 | #include <linux/highmem.h> |
18 | #include <linux/syscalls.h> | 18 | #include <linux/syscalls.h> |
@@ -24,6 +24,12 @@ | |||
24 | #include <linux/utsrelease.h> | 24 | #include <linux/utsrelease.h> |
25 | #include <linux/utsname.h> | 25 | #include <linux/utsname.h> |
26 | #include <linux/numa.h> | 26 | #include <linux/numa.h> |
27 | #include <linux/suspend.h> | ||
28 | #include <linux/device.h> | ||
29 | #include <linux/freezer.h> | ||
30 | #include <linux/pm.h> | ||
31 | #include <linux/cpu.h> | ||
32 | #include <linux/console.h> | ||
27 | 33 | ||
28 | #include <asm/page.h> | 34 | #include <asm/page.h> |
29 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
@@ -71,7 +77,7 @@ int kexec_should_crash(struct task_struct *p) | |||
71 | * | 77 | * |
72 | * The code for the transition from the current kernel to the | 78 | * The code for the transition from the current kernel to the |
73 | * the new kernel is placed in the control_code_buffer, whose size | 79 | * the new kernel is placed in the control_code_buffer, whose size |
74 | * is given by KEXEC_CONTROL_CODE_SIZE. In the best case only a single | 80 | * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single |
75 | * page of memory is necessary, but some architectures require more. | 81 | * page of memory is necessary, but some architectures require more. |
76 | * Because this memory must be identity mapped in the transition from | 82 | * Because this memory must be identity mapped in the transition from |
77 | * virtual to physical addresses it must live in the range | 83 | * virtual to physical addresses it must live in the range |
@@ -236,12 +242,18 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry, | |||
236 | */ | 242 | */ |
237 | result = -ENOMEM; | 243 | result = -ENOMEM; |
238 | image->control_code_page = kimage_alloc_control_pages(image, | 244 | image->control_code_page = kimage_alloc_control_pages(image, |
239 | get_order(KEXEC_CONTROL_CODE_SIZE)); | 245 | get_order(KEXEC_CONTROL_PAGE_SIZE)); |
240 | if (!image->control_code_page) { | 246 | if (!image->control_code_page) { |
241 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); | 247 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); |
242 | goto out; | 248 | goto out; |
243 | } | 249 | } |
244 | 250 | ||
251 | image->swap_page = kimage_alloc_control_pages(image, 0); | ||
252 | if (!image->swap_page) { | ||
253 | printk(KERN_ERR "Could not allocate swap buffer\n"); | ||
254 | goto out; | ||
255 | } | ||
256 | |||
245 | result = 0; | 257 | result = 0; |
246 | out: | 258 | out: |
247 | if (result == 0) | 259 | if (result == 0) |
@@ -305,7 +317,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry, | |||
305 | */ | 317 | */ |
306 | result = -ENOMEM; | 318 | result = -ENOMEM; |
307 | image->control_code_page = kimage_alloc_control_pages(image, | 319 | image->control_code_page = kimage_alloc_control_pages(image, |
308 | get_order(KEXEC_CONTROL_CODE_SIZE)); | 320 | get_order(KEXEC_CONTROL_PAGE_SIZE)); |
309 | if (!image->control_code_page) { | 321 | if (!image->control_code_page) { |
310 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); | 322 | printk(KERN_ERR "Could not allocate control_code_buffer\n"); |
311 | goto out; | 323 | goto out; |
@@ -589,14 +601,12 @@ static void kimage_free_extra_pages(struct kimage *image) | |||
589 | kimage_free_page_list(&image->unuseable_pages); | 601 | kimage_free_page_list(&image->unuseable_pages); |
590 | 602 | ||
591 | } | 603 | } |
592 | static int kimage_terminate(struct kimage *image) | 604 | static void kimage_terminate(struct kimage *image) |
593 | { | 605 | { |
594 | if (*image->entry != 0) | 606 | if (*image->entry != 0) |
595 | image->entry++; | 607 | image->entry++; |
596 | 608 | ||
597 | *image->entry = IND_DONE; | 609 | *image->entry = IND_DONE; |
598 | |||
599 | return 0; | ||
600 | } | 610 | } |
601 | 611 | ||
602 | #define for_each_kimage_entry(image, ptr, entry) \ | 612 | #define for_each_kimage_entry(image, ptr, entry) \ |
@@ -743,8 +753,14 @@ static struct page *kimage_alloc_page(struct kimage *image, | |||
743 | *old = addr | (*old & ~PAGE_MASK); | 753 | *old = addr | (*old & ~PAGE_MASK); |
744 | 754 | ||
745 | /* The old page I have found cannot be a | 755 | /* The old page I have found cannot be a |
746 | * destination page, so return it. | 756 | * destination page, so return it if its |
757 | * gfp_flags honor the ones passed in. | ||
747 | */ | 758 | */ |
759 | if (!(gfp_mask & __GFP_HIGHMEM) && | ||
760 | PageHighMem(old_page)) { | ||
761 | kimage_free_pages(old_page); | ||
762 | continue; | ||
763 | } | ||
748 | addr = old_addr; | 764 | addr = old_addr; |
749 | page = old_page; | 765 | page = old_page; |
750 | break; | 766 | break; |
@@ -914,19 +930,14 @@ static int kimage_load_segment(struct kimage *image, | |||
914 | */ | 930 | */ |
915 | struct kimage *kexec_image; | 931 | struct kimage *kexec_image; |
916 | struct kimage *kexec_crash_image; | 932 | struct kimage *kexec_crash_image; |
917 | /* | 933 | |
918 | * A home grown binary mutex. | 934 | static DEFINE_MUTEX(kexec_mutex); |
919 | * Nothing can wait so this mutex is safe to use | ||
920 | * in interrupt context :) | ||
921 | */ | ||
922 | static int kexec_lock; | ||
923 | 935 | ||
924 | asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, | 936 | asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, |
925 | struct kexec_segment __user *segments, | 937 | struct kexec_segment __user *segments, |
926 | unsigned long flags) | 938 | unsigned long flags) |
927 | { | 939 | { |
928 | struct kimage **dest_image, *image; | 940 | struct kimage **dest_image, *image; |
929 | int locked; | ||
930 | int result; | 941 | int result; |
931 | 942 | ||
932 | /* We only trust the superuser with rebooting the system. */ | 943 | /* We only trust the superuser with rebooting the system. */ |
@@ -962,8 +973,7 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, | |||
962 | * | 973 | * |
963 | * KISS: always take the mutex. | 974 | * KISS: always take the mutex. |
964 | */ | 975 | */ |
965 | locked = xchg(&kexec_lock, 1); | 976 | if (!mutex_trylock(&kexec_mutex)) |
966 | if (locked) | ||
967 | return -EBUSY; | 977 | return -EBUSY; |
968 | 978 | ||
969 | dest_image = &kexec_image; | 979 | dest_image = &kexec_image; |
@@ -988,6 +998,8 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, | |||
988 | if (result) | 998 | if (result) |
989 | goto out; | 999 | goto out; |
990 | 1000 | ||
1001 | if (flags & KEXEC_PRESERVE_CONTEXT) | ||
1002 | image->preserve_context = 1; | ||
991 | result = machine_kexec_prepare(image); | 1003 | result = machine_kexec_prepare(image); |
992 | if (result) | 1004 | if (result) |
993 | goto out; | 1005 | goto out; |
@@ -997,16 +1009,13 @@ asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments, | |||
997 | if (result) | 1009 | if (result) |
998 | goto out; | 1010 | goto out; |
999 | } | 1011 | } |
1000 | result = kimage_terminate(image); | 1012 | kimage_terminate(image); |
1001 | if (result) | ||
1002 | goto out; | ||
1003 | } | 1013 | } |
1004 | /* Install the new kernel, and Uninstall the old */ | 1014 | /* Install the new kernel, and Uninstall the old */ |
1005 | image = xchg(dest_image, image); | 1015 | image = xchg(dest_image, image); |
1006 | 1016 | ||
1007 | out: | 1017 | out: |
1008 | locked = xchg(&kexec_lock, 0); /* Release the mutex */ | 1018 | mutex_unlock(&kexec_mutex); |
1009 | BUG_ON(!locked); | ||
1010 | kimage_free(image); | 1019 | kimage_free(image); |
1011 | 1020 | ||
1012 | return result; | 1021 | return result; |
@@ -1053,10 +1062,7 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry, | |||
1053 | 1062 | ||
1054 | void crash_kexec(struct pt_regs *regs) | 1063 | void crash_kexec(struct pt_regs *regs) |
1055 | { | 1064 | { |
1056 | int locked; | 1065 | /* Take the kexec_mutex here to prevent sys_kexec_load |
1057 | |||
1058 | |||
1059 | /* Take the kexec_lock here to prevent sys_kexec_load | ||
1060 | * running on one cpu from replacing the crash kernel | 1066 | * running on one cpu from replacing the crash kernel |
1061 | * we are using after a panic on a different cpu. | 1067 | * we are using after a panic on a different cpu. |
1062 | * | 1068 | * |
@@ -1064,8 +1070,7 @@ void crash_kexec(struct pt_regs *regs) | |||
1064 | * of memory the xchg(&kexec_crash_image) would be | 1070 | * of memory the xchg(&kexec_crash_image) would be |
1065 | * sufficient. But since I reuse the memory... | 1071 | * sufficient. But since I reuse the memory... |
1066 | */ | 1072 | */ |
1067 | locked = xchg(&kexec_lock, 1); | 1073 | if (mutex_trylock(&kexec_mutex)) { |
1068 | if (!locked) { | ||
1069 | if (kexec_crash_image) { | 1074 | if (kexec_crash_image) { |
1070 | struct pt_regs fixed_regs; | 1075 | struct pt_regs fixed_regs; |
1071 | crash_setup_regs(&fixed_regs, regs); | 1076 | crash_setup_regs(&fixed_regs, regs); |
@@ -1073,8 +1078,7 @@ void crash_kexec(struct pt_regs *regs) | |||
1073 | machine_crash_shutdown(&fixed_regs); | 1078 | machine_crash_shutdown(&fixed_regs); |
1074 | machine_kexec(kexec_crash_image); | 1079 | machine_kexec(kexec_crash_image); |
1075 | } | 1080 | } |
1076 | locked = xchg(&kexec_lock, 0); | 1081 | mutex_unlock(&kexec_mutex); |
1077 | BUG_ON(!locked); | ||
1078 | } | 1082 | } |
1079 | } | 1083 | } |
1080 | 1084 | ||
@@ -1415,3 +1419,79 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
1415 | } | 1419 | } |
1416 | 1420 | ||
1417 | module_init(crash_save_vmcoreinfo_init) | 1421 | module_init(crash_save_vmcoreinfo_init) |
1422 | |||
1423 | /* | ||
1424 | * Move into place and start executing a preloaded standalone | ||
1425 | * executable. If nothing was preloaded return an error. | ||
1426 | */ | ||
1427 | int kernel_kexec(void) | ||
1428 | { | ||
1429 | int error = 0; | ||
1430 | |||
1431 | if (!mutex_trylock(&kexec_mutex)) | ||
1432 | return -EBUSY; | ||
1433 | if (!kexec_image) { | ||
1434 | error = -EINVAL; | ||
1435 | goto Unlock; | ||
1436 | } | ||
1437 | |||
1438 | #ifdef CONFIG_KEXEC_JUMP | ||
1439 | if (kexec_image->preserve_context) { | ||
1440 | mutex_lock(&pm_mutex); | ||
1441 | pm_prepare_console(); | ||
1442 | error = freeze_processes(); | ||
1443 | if (error) { | ||
1444 | error = -EBUSY; | ||
1445 | goto Restore_console; | ||
1446 | } | ||
1447 | suspend_console(); | ||
1448 | error = device_suspend(PMSG_FREEZE); | ||
1449 | if (error) | ||
1450 | goto Resume_console; | ||
1451 | error = disable_nonboot_cpus(); | ||
1452 | if (error) | ||
1453 | goto Resume_devices; | ||
1454 | device_pm_lock(); | ||
1455 | local_irq_disable(); | ||
1456 | /* At this point, device_suspend() has been called, | ||
1457 | * but *not* device_power_down(). We *must* | ||
1458 | * device_power_down() now. Otherwise, drivers for | ||
1459 | * some devices (e.g. interrupt controllers) become | ||
1460 | * desynchronized with the actual state of the | ||
1461 | * hardware at resume time, and evil weirdness ensues. | ||
1462 | */ | ||
1463 | error = device_power_down(PMSG_FREEZE); | ||
1464 | if (error) | ||
1465 | goto Enable_irqs; | ||
1466 | } else | ||
1467 | #endif | ||
1468 | { | ||
1469 | kernel_restart_prepare(NULL); | ||
1470 | printk(KERN_EMERG "Starting new kernel\n"); | ||
1471 | machine_shutdown(); | ||
1472 | } | ||
1473 | |||
1474 | machine_kexec(kexec_image); | ||
1475 | |||
1476 | #ifdef CONFIG_KEXEC_JUMP | ||
1477 | if (kexec_image->preserve_context) { | ||
1478 | device_power_up(PMSG_RESTORE); | ||
1479 | Enable_irqs: | ||
1480 | local_irq_enable(); | ||
1481 | device_pm_unlock(); | ||
1482 | enable_nonboot_cpus(); | ||
1483 | Resume_devices: | ||
1484 | device_resume(PMSG_RESTORE); | ||
1485 | Resume_console: | ||
1486 | resume_console(); | ||
1487 | thaw_processes(); | ||
1488 | Restore_console: | ||
1489 | pm_restore_console(); | ||
1490 | mutex_unlock(&pm_mutex); | ||
1491 | } | ||
1492 | #endif | ||
1493 | |||
1494 | Unlock: | ||
1495 | mutex_unlock(&kexec_mutex); | ||
1496 | return error; | ||
1497 | } | ||
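kexec.c drops the home-grown xchg()-based binary lock in favour of a real mutex taken only with mutex_trylock(), so both the load path and the crash path fail fast instead of spinning or sleeping. Condensed from the hunks above:

	static DEFINE_MUTEX(kexec_mutex);

	/* sys_kexec_load() and kernel_kexec(): bail out if someone else holds it */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	/* ... load or switch images ... */
	mutex_unlock(&kexec_mutex);

	/* crash_kexec(): never blocks; simply skip the switch on contention */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			crash_setup_regs(&fixed_regs, regs);
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}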
diff --git a/kernel/kgdb.c b/kernel/kgdb.c index 3ec23c3ec97f..25d955dbb989 100644 --- a/kernel/kgdb.c +++ b/kernel/kgdb.c | |||
@@ -56,12 +56,14 @@ | |||
56 | 56 | ||
57 | static int kgdb_break_asap; | 57 | static int kgdb_break_asap; |
58 | 58 | ||
59 | #define KGDB_MAX_THREAD_QUERY 17 | ||
59 | struct kgdb_state { | 60 | struct kgdb_state { |
60 | int ex_vector; | 61 | int ex_vector; |
61 | int signo; | 62 | int signo; |
62 | int err_code; | 63 | int err_code; |
63 | int cpu; | 64 | int cpu; |
64 | int pass_exception; | 65 | int pass_exception; |
66 | unsigned long thr_query; | ||
65 | unsigned long threadid; | 67 | unsigned long threadid; |
66 | long kgdb_usethreadid; | 68 | long kgdb_usethreadid; |
67 | struct pt_regs *linux_regs; | 69 | struct pt_regs *linux_regs; |
@@ -166,13 +168,6 @@ early_param("nokgdbroundup", opt_nokgdbroundup); | |||
166 | * Weak aliases for breakpoint management, | 168 | * Weak aliases for breakpoint management, |
167 | * can be overridden by architectures when needed: | 169 | * can be overridden by architectures when needed: |
168 | */ | 170 | */ |
169 | int __weak kgdb_validate_break_address(unsigned long addr) | ||
170 | { | ||
171 | char tmp_variable[BREAK_INSTR_SIZE]; | ||
172 | |||
173 | return probe_kernel_read(tmp_variable, (char *)addr, BREAK_INSTR_SIZE); | ||
174 | } | ||
175 | |||
176 | int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) | 171 | int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr) |
177 | { | 172 | { |
178 | int err; | 173 | int err; |
@@ -191,6 +186,25 @@ int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle) | |||
191 | (char *)bundle, BREAK_INSTR_SIZE); | 186 | (char *)bundle, BREAK_INSTR_SIZE); |
192 | } | 187 | } |
193 | 188 | ||
189 | int __weak kgdb_validate_break_address(unsigned long addr) | ||
190 | { | ||
191 | char tmp_variable[BREAK_INSTR_SIZE]; | ||
192 | int err; | ||
193 | /* Validate setting the breakpoint and then removing it. If the | ||
194 | * remove fails, the kernel needs to emit a bad message because we | ||
195 | * are in deep trouble, not being able to put things back the way we | ||
196 | * found them. | ||
197 | */ | ||
198 | err = kgdb_arch_set_breakpoint(addr, tmp_variable); | ||
199 | if (err) | ||
200 | return err; | ||
201 | err = kgdb_arch_remove_breakpoint(addr, tmp_variable); | ||
202 | if (err) | ||
203 | printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " | ||
204 | "memory destroyed at: %lx", addr); | ||
205 | return err; | ||
206 | } | ||
207 | |||
194 | unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) | 208 | unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs) |
195 | { | 209 | { |
196 | return instruction_pointer(regs); | 210 | return instruction_pointer(regs); |
@@ -433,9 +447,14 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val) | |||
433 | { | 447 | { |
434 | int hex_val; | 448 | int hex_val; |
435 | int num = 0; | 449 | int num = 0; |
450 | int negate = 0; | ||
436 | 451 | ||
437 | *long_val = 0; | 452 | *long_val = 0; |
438 | 453 | ||
454 | if (**ptr == '-') { | ||
455 | negate = 1; | ||
456 | (*ptr)++; | ||
457 | } | ||
439 | while (**ptr) { | 458 | while (**ptr) { |
440 | hex_val = hex(**ptr); | 459 | hex_val = hex(**ptr); |
441 | if (hex_val < 0) | 460 | if (hex_val < 0) |
@@ -446,6 +465,9 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val) | |||
446 | (*ptr)++; | 465 | (*ptr)++; |
447 | } | 466 | } |
448 | 467 | ||
468 | if (negate) | ||
469 | *long_val = -*long_val; | ||
470 | |||
449 | return num; | 471 | return num; |
450 | } | 472 | } |
451 | 473 | ||
@@ -466,7 +488,7 @@ static int write_mem_msg(int binary) | |||
466 | if (err) | 488 | if (err) |
467 | return err; | 489 | return err; |
468 | if (CACHE_FLUSH_IS_SAFE) | 490 | if (CACHE_FLUSH_IS_SAFE) |
469 | flush_icache_range(addr, addr + length + 1); | 491 | flush_icache_range(addr, addr + length); |
470 | return 0; | 492 | return 0; |
471 | } | 493 | } |
472 | 494 | ||
@@ -515,10 +537,16 @@ static void int_to_threadref(unsigned char *id, int value) | |||
515 | static struct task_struct *getthread(struct pt_regs *regs, int tid) | 537 | static struct task_struct *getthread(struct pt_regs *regs, int tid) |
516 | { | 538 | { |
517 | /* | 539 | /* |
518 | * Non-positive TIDs are remapped idle tasks: | 540 | * Non-positive TIDs are remapped to the cpu shadow information |
519 | */ | 541 | */ |
520 | if (tid <= 0) | 542 | if (tid == 0 || tid == -1) |
521 | return idle_task(-tid); | 543 | tid = -atomic_read(&kgdb_active) - 2; |
544 | if (tid < 0) { | ||
545 | if (kgdb_info[-tid - 2].task) | ||
546 | return kgdb_info[-tid - 2].task; | ||
547 | else | ||
548 | return idle_task(-tid - 2); | ||
549 | } | ||
522 | 550 | ||
523 | /* | 551 | /* |
524 | * find_task_by_pid_ns() does not take the tasklist lock anymore | 552 | * find_task_by_pid_ns() does not take the tasklist lock anymore |
@@ -725,14 +753,15 @@ setundefined: | |||
725 | } | 753 | } |
726 | 754 | ||
727 | /* | 755 | /* |
728 | * Remap normal tasks to their real PID, idle tasks to -1 ... -NR_CPUs: | 756 | * Remap normal tasks to their real PID, |
757 | * CPU shadow threads are mapped to -CPU - 2 | ||
729 | */ | 758 | */ |
730 | static inline int shadow_pid(int realpid) | 759 | static inline int shadow_pid(int realpid) |
731 | { | 760 | { |
732 | if (realpid) | 761 | if (realpid) |
733 | return realpid; | 762 | return realpid; |
734 | 763 | ||
735 | return -1-raw_smp_processor_id(); | 764 | return -raw_smp_processor_id() - 2; |
736 | } | 765 | } |
737 | 766 | ||
738 | static char gdbmsgbuf[BUFMAX + 1]; | 767 | static char gdbmsgbuf[BUFMAX + 1]; |
@@ -826,7 +855,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks) | |||
826 | local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo; | 855 | local_debuggerinfo = kgdb_info[ks->cpu].debuggerinfo; |
827 | } else { | 856 | } else { |
828 | local_debuggerinfo = NULL; | 857 | local_debuggerinfo = NULL; |
829 | for (i = 0; i < NR_CPUS; i++) { | 858 | for_each_online_cpu(i) { |
830 | /* | 859 | /* |
831 | * Try to find the task on some other | 860 | * Try to find the task on some other |
832 | * or possibly this node if we do not | 861 | * or possibly this node if we do not |
@@ -960,10 +989,13 @@ static int gdb_cmd_reboot(struct kgdb_state *ks) | |||
960 | /* Handle the 'q' query packets */ | 989 | /* Handle the 'q' query packets */ |
961 | static void gdb_cmd_query(struct kgdb_state *ks) | 990 | static void gdb_cmd_query(struct kgdb_state *ks) |
962 | { | 991 | { |
963 | struct task_struct *thread; | 992 | struct task_struct *g; |
993 | struct task_struct *p; | ||
964 | unsigned char thref[8]; | 994 | unsigned char thref[8]; |
965 | char *ptr; | 995 | char *ptr; |
966 | int i; | 996 | int i; |
997 | int cpu; | ||
998 | int finished = 0; | ||
967 | 999 | ||
968 | switch (remcom_in_buffer[1]) { | 1000 | switch (remcom_in_buffer[1]) { |
969 | case 's': | 1001 | case 's': |
@@ -973,22 +1005,34 @@ static void gdb_cmd_query(struct kgdb_state *ks) | |||
973 | break; | 1005 | break; |
974 | } | 1006 | } |
975 | 1007 | ||
976 | if (remcom_in_buffer[1] == 'f') | 1008 | i = 0; |
977 | ks->threadid = 1; | ||
978 | |||
979 | remcom_out_buffer[0] = 'm'; | 1009 | remcom_out_buffer[0] = 'm'; |
980 | ptr = remcom_out_buffer + 1; | 1010 | ptr = remcom_out_buffer + 1; |
981 | 1011 | if (remcom_in_buffer[1] == 'f') { | |
982 | for (i = 0; i < 17; ks->threadid++) { | 1012 | /* Each cpu is a shadow thread */ |
983 | thread = getthread(ks->linux_regs, ks->threadid); | 1013 | for_each_online_cpu(cpu) { |
984 | if (thread) { | 1014 | ks->thr_query = 0; |
985 | int_to_threadref(thref, ks->threadid); | 1015 | int_to_threadref(thref, -cpu - 2); |
986 | pack_threadid(ptr, thref); | 1016 | pack_threadid(ptr, thref); |
987 | ptr += BUF_THREAD_ID_SIZE; | 1017 | ptr += BUF_THREAD_ID_SIZE; |
988 | *(ptr++) = ','; | 1018 | *(ptr++) = ','; |
989 | i++; | 1019 | i++; |
990 | } | 1020 | } |
991 | } | 1021 | } |
1022 | |||
1023 | do_each_thread(g, p) { | ||
1024 | if (i >= ks->thr_query && !finished) { | ||
1025 | int_to_threadref(thref, p->pid); | ||
1026 | pack_threadid(ptr, thref); | ||
1027 | ptr += BUF_THREAD_ID_SIZE; | ||
1028 | *(ptr++) = ','; | ||
1029 | ks->thr_query++; | ||
1030 | if (ks->thr_query % KGDB_MAX_THREAD_QUERY == 0) | ||
1031 | finished = 1; | ||
1032 | } | ||
1033 | i++; | ||
1034 | } while_each_thread(g, p); | ||
1035 | |||
992 | *(--ptr) = '\0'; | 1036 | *(--ptr) = '\0'; |
993 | break; | 1037 | break; |
994 | 1038 | ||
@@ -1011,15 +1055,15 @@ static void gdb_cmd_query(struct kgdb_state *ks) | |||
1011 | error_packet(remcom_out_buffer, -EINVAL); | 1055 | error_packet(remcom_out_buffer, -EINVAL); |
1012 | break; | 1056 | break; |
1013 | } | 1057 | } |
1014 | if (ks->threadid > 0) { | 1058 | if ((int)ks->threadid > 0) { |
1015 | kgdb_mem2hex(getthread(ks->linux_regs, | 1059 | kgdb_mem2hex(getthread(ks->linux_regs, |
1016 | ks->threadid)->comm, | 1060 | ks->threadid)->comm, |
1017 | remcom_out_buffer, 16); | 1061 | remcom_out_buffer, 16); |
1018 | } else { | 1062 | } else { |
1019 | static char tmpstr[23 + BUF_THREAD_ID_SIZE]; | 1063 | static char tmpstr[23 + BUF_THREAD_ID_SIZE]; |
1020 | 1064 | ||
1021 | sprintf(tmpstr, "Shadow task %d for pid 0", | 1065 | sprintf(tmpstr, "shadowCPU%d", |
1022 | (int)(-ks->threadid-1)); | 1066 | (int)(-ks->threadid - 2)); |
1023 | kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr)); | 1067 | kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr)); |
1024 | } | 1068 | } |
1025 | break; | 1069 | break; |
@@ -1418,7 +1462,7 @@ acquirelock: | |||
1418 | * Get the passive CPU lock which will hold all the non-primary | 1462 | * Get the passive CPU lock which will hold all the non-primary |
1419 | * CPU in a spin state while the debugger is active | 1463 | * CPU in a spin state while the debugger is active |
1420 | */ | 1464 | */ |
1421 | if (!kgdb_single_step || !kgdb_contthread) { | 1465 | if (!kgdb_single_step) { |
1422 | for (i = 0; i < NR_CPUS; i++) | 1466 | for (i = 0; i < NR_CPUS; i++) |
1423 | atomic_set(&passive_cpu_wait[i], 1); | 1467 | atomic_set(&passive_cpu_wait[i], 1); |
1424 | } | 1468 | } |
@@ -1431,7 +1475,7 @@ acquirelock: | |||
1431 | 1475 | ||
1432 | #ifdef CONFIG_SMP | 1476 | #ifdef CONFIG_SMP |
1433 | /* Signal the other CPUs to enter kgdb_wait() */ | 1477 | /* Signal the other CPUs to enter kgdb_wait() */ |
1434 | if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup) | 1478 | if ((!kgdb_single_step) && kgdb_do_roundup) |
1435 | kgdb_roundup_cpus(flags); | 1479 | kgdb_roundup_cpus(flags); |
1436 | #endif | 1480 | #endif |
1437 | 1481 | ||
@@ -1450,7 +1494,7 @@ acquirelock: | |||
1450 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); | 1494 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); |
1451 | kgdb_deactivate_sw_breakpoints(); | 1495 | kgdb_deactivate_sw_breakpoints(); |
1452 | kgdb_single_step = 0; | 1496 | kgdb_single_step = 0; |
1453 | kgdb_contthread = NULL; | 1497 | kgdb_contthread = current; |
1454 | exception_level = 0; | 1498 | exception_level = 0; |
1455 | 1499 | ||
1456 | /* Talk to debugger with gdbserial protocol */ | 1500 | /* Talk to debugger with gdbserial protocol */ |
@@ -1464,7 +1508,7 @@ acquirelock: | |||
1464 | kgdb_info[ks->cpu].task = NULL; | 1508 | kgdb_info[ks->cpu].task = NULL; |
1465 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); | 1509 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); |
1466 | 1510 | ||
1467 | if (!kgdb_single_step || !kgdb_contthread) { | 1511 | if (!kgdb_single_step) { |
1468 | for (i = NR_CPUS-1; i >= 0; i--) | 1512 | for (i = NR_CPUS-1; i >= 0; i--) |
1469 | atomic_set(&passive_cpu_wait[i], 0); | 1513 | atomic_set(&passive_cpu_wait[i], 0); |
1470 | /* | 1514 | /* |
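The kgdb.c changes remap the per-CPU shadow threads from -1 ... -NR_CPUS to -cpu - 2 and teach kgdb_hex2long() to accept a leading '-', since gdb now sends those negative thread IDs over the wire. A stand-alone re-implementation of the sign handling (simplified; the kernel version works on its packet buffer and an unsigned long):

	#include <stdio.h>

	static int hex(char ch)
	{
		if (ch >= '0' && ch <= '9')
			return ch - '0';
		if (ch >= 'a' && ch <= 'f')
			return ch - 'a' + 10;
		if (ch >= 'A' && ch <= 'F')
			return ch - 'A' + 10;
		return -1;
	}

	static int hex2long(const char **ptr, long *long_val)
	{
		int hex_val, num = 0, negate = 0;

		*long_val = 0;
		if (**ptr == '-') {	/* shadow-CPU IDs arrive as "-2", "-3", ... */
			negate = 1;
			(*ptr)++;
		}
		while ((hex_val = hex(**ptr)) >= 0) {
			*long_val = (*long_val << 4) | hex_val;
			num++;
			(*ptr)++;
		}
		if (negate)
			*long_val = -*long_val;
		return num;
	}

	int main(void)
	{
		const char *s = "-3";
		long v;

		hex2long(&s, &v);
		printf("%ld\n", v);	/* -3: the shadow thread for CPU 1 under -cpu - 2 */
		return 0;
	}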
diff --git a/kernel/kmod.c b/kernel/kmod.c index 8df97d3dfda8..2456d1a0befb 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -42,7 +42,7 @@ extern int max_threads; | |||
42 | 42 | ||
43 | static struct workqueue_struct *khelper_wq; | 43 | static struct workqueue_struct *khelper_wq; |
44 | 44 | ||
45 | #ifdef CONFIG_KMOD | 45 | #ifdef CONFIG_MODULES |
46 | 46 | ||
47 | /* | 47 | /* |
48 | modprobe_path is set via /proc/sys. | 48 | modprobe_path is set via /proc/sys. |
@@ -352,16 +352,17 @@ static inline void register_pm_notifier_callback(void) {} | |||
352 | * @path: path to usermode executable | 352 | * @path: path to usermode executable |
353 | * @argv: arg vector for process | 353 | * @argv: arg vector for process |
354 | * @envp: environment for process | 354 | * @envp: environment for process |
355 | * @gfp_mask: gfp mask for memory allocation | ||
355 | * | 356 | * |
356 | * Returns either %NULL on allocation failure, or a subprocess_info | 357 | * Returns either %NULL on allocation failure, or a subprocess_info |
357 | * structure. This should be passed to call_usermodehelper_exec to | 358 | * structure. This should be passed to call_usermodehelper_exec to |
358 | * exec the process and free the structure. | 359 | * exec the process and free the structure. |
359 | */ | 360 | */ |
360 | struct subprocess_info *call_usermodehelper_setup(char *path, | 361 | struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, |
361 | char **argv, char **envp) | 362 | char **envp, gfp_t gfp_mask) |
362 | { | 363 | { |
363 | struct subprocess_info *sub_info; | 364 | struct subprocess_info *sub_info; |
364 | sub_info = kzalloc(sizeof(struct subprocess_info), GFP_ATOMIC); | 365 | sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask); |
365 | if (!sub_info) | 366 | if (!sub_info) |
366 | goto out; | 367 | goto out; |
367 | 368 | ||
@@ -417,12 +418,12 @@ int call_usermodehelper_stdinpipe(struct subprocess_info *sub_info, | |||
417 | { | 418 | { |
418 | struct file *f; | 419 | struct file *f; |
419 | 420 | ||
420 | f = create_write_pipe(); | 421 | f = create_write_pipe(0); |
421 | if (IS_ERR(f)) | 422 | if (IS_ERR(f)) |
422 | return PTR_ERR(f); | 423 | return PTR_ERR(f); |
423 | *filp = f; | 424 | *filp = f; |
424 | 425 | ||
425 | f = create_read_pipe(f); | 426 | f = create_read_pipe(f, 0); |
426 | if (IS_ERR(f)) { | 427 | if (IS_ERR(f)) { |
427 | free_write_pipe(*filp); | 428 | free_write_pipe(*filp); |
428 | return PTR_ERR(f); | 429 | return PTR_ERR(f); |
@@ -494,7 +495,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, | |||
494 | struct subprocess_info *sub_info; | 495 | struct subprocess_info *sub_info; |
495 | int ret; | 496 | int ret; |
496 | 497 | ||
497 | sub_info = call_usermodehelper_setup(path, argv, envp); | 498 | sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL); |
498 | if (sub_info == NULL) | 499 | if (sub_info == NULL) |
499 | return -ENOMEM; | 500 | return -ENOMEM; |
500 | 501 | ||
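call_usermodehelper_setup() now takes an explicit gfp_t, so sleepable callers (like the pipe helper above) pass GFP_KERNEL while callers in atomic context can keep GFP_ATOMIC; the old hard-coded GFP_ATOMIC allocation goes away. Typical call sites after this change (the atomic variant is an illustrative assumption, not taken from this hunk):

	/* process context: may sleep, so GFP_KERNEL is fine */
	sub_info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL);
	if (!sub_info)
		return -ENOMEM;

	/* atomic context would pass GFP_ATOMIC instead */
	sub_info = call_usermodehelper_setup(path, argv, envp, GFP_ATOMIC);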
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1485ca8d0e00..75bc2cd9ebc6 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -62,6 +62,7 @@ | |||
62 | addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) | 62 | addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name))) |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | static int kprobes_initialized; | ||
65 | static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; | 66 | static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; |
66 | static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; | 67 | static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; |
67 | 68 | ||
@@ -69,8 +70,15 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE]; | |||
69 | static bool kprobe_enabled; | 70 | static bool kprobe_enabled; |
70 | 71 | ||
71 | DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ | 72 | DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */ |
72 | DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */ | ||
73 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; | 73 | static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; |
74 | static struct { | ||
75 | spinlock_t lock ____cacheline_aligned; | ||
76 | } kretprobe_table_locks[KPROBE_TABLE_SIZE]; | ||
77 | |||
78 | static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) | ||
79 | { | ||
80 | return &(kretprobe_table_locks[hash].lock); | ||
81 | } | ||
74 | 82 | ||
75 | /* | 83 | /* |
76 | * Normally, functions that we'd want to prohibit kprobes in, are marked | 84 | * Normally, functions that we'd want to prohibit kprobes in, are marked |
@@ -368,26 +376,53 @@ void __kprobes kprobes_inc_nmissed_count(struct kprobe *p) | |||
368 | return; | 376 | return; |
369 | } | 377 | } |
370 | 378 | ||
371 | /* Called with kretprobe_lock held */ | ||
372 | void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, | 379 | void __kprobes recycle_rp_inst(struct kretprobe_instance *ri, |
373 | struct hlist_head *head) | 380 | struct hlist_head *head) |
374 | { | 381 | { |
382 | struct kretprobe *rp = ri->rp; | ||
383 | |||
375 | /* remove rp inst off the rprobe_inst_table */ | 384 | /* remove rp inst off the rprobe_inst_table */ |
376 | hlist_del(&ri->hlist); | 385 | hlist_del(&ri->hlist); |
377 | if (ri->rp) { | 386 | INIT_HLIST_NODE(&ri->hlist); |
378 | /* remove rp inst off the used list */ | 387 | if (likely(rp)) { |
379 | hlist_del(&ri->uflist); | 388 | spin_lock(&rp->lock); |
380 | /* put rp inst back onto the free list */ | 389 | hlist_add_head(&ri->hlist, &rp->free_instances); |
381 | INIT_HLIST_NODE(&ri->uflist); | 390 | spin_unlock(&rp->lock); |
382 | hlist_add_head(&ri->uflist, &ri->rp->free_instances); | ||
383 | } else | 391 | } else |
384 | /* Unregistering */ | 392 | /* Unregistering */ |
385 | hlist_add_head(&ri->hlist, head); | 393 | hlist_add_head(&ri->hlist, head); |
386 | } | 394 | } |
387 | 395 | ||
388 | struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) | 396 | void kretprobe_hash_lock(struct task_struct *tsk, |
397 | struct hlist_head **head, unsigned long *flags) | ||
389 | { | 398 | { |
390 | return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; | 399 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); |
400 | spinlock_t *hlist_lock; | ||
401 | |||
402 | *head = &kretprobe_inst_table[hash]; | ||
403 | hlist_lock = kretprobe_table_lock_ptr(hash); | ||
404 | spin_lock_irqsave(hlist_lock, *flags); | ||
405 | } | ||
406 | |||
407 | void kretprobe_table_lock(unsigned long hash, unsigned long *flags) | ||
408 | { | ||
409 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | ||
410 | spin_lock_irqsave(hlist_lock, *flags); | ||
411 | } | ||
412 | |||
413 | void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags) | ||
414 | { | ||
415 | unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); | ||
416 | spinlock_t *hlist_lock; | ||
417 | |||
418 | hlist_lock = kretprobe_table_lock_ptr(hash); | ||
419 | spin_unlock_irqrestore(hlist_lock, *flags); | ||
420 | } | ||
421 | |||
422 | void kretprobe_table_unlock(unsigned long hash, unsigned long *flags) | ||
423 | { | ||
424 | spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); | ||
425 | spin_unlock_irqrestore(hlist_lock, *flags); | ||
391 | } | 426 | } |
392 | 427 | ||
393 | /* | 428 | /* |
@@ -401,17 +436,21 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) | |||
401 | struct kretprobe_instance *ri; | 436 | struct kretprobe_instance *ri; |
402 | struct hlist_head *head, empty_rp; | 437 | struct hlist_head *head, empty_rp; |
403 | struct hlist_node *node, *tmp; | 438 | struct hlist_node *node, *tmp; |
404 | unsigned long flags = 0; | 439 | unsigned long hash, flags = 0; |
405 | 440 | ||
406 | INIT_HLIST_HEAD(&empty_rp); | 441 | if (unlikely(!kprobes_initialized)) |
407 | spin_lock_irqsave(&kretprobe_lock, flags); | 442 | /* Early boot. kretprobe_table_locks not yet initialized. */ |
408 | head = kretprobe_inst_table_head(tk); | 443 | return; |
444 | |||
445 | hash = hash_ptr(tk, KPROBE_HASH_BITS); | ||
446 | head = &kretprobe_inst_table[hash]; | ||
447 | kretprobe_table_lock(hash, &flags); | ||
409 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | 448 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { |
410 | if (ri->task == tk) | 449 | if (ri->task == tk) |
411 | recycle_rp_inst(ri, &empty_rp); | 450 | recycle_rp_inst(ri, &empty_rp); |
412 | } | 451 | } |
413 | spin_unlock_irqrestore(&kretprobe_lock, flags); | 452 | kretprobe_table_unlock(hash, &flags); |
414 | 453 | INIT_HLIST_HEAD(&empty_rp); | |
415 | hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { | 454 | hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { |
416 | hlist_del(&ri->hlist); | 455 | hlist_del(&ri->hlist); |
417 | kfree(ri); | 456 | kfree(ri); |
@@ -423,24 +462,29 @@ static inline void free_rp_inst(struct kretprobe *rp) | |||
423 | struct kretprobe_instance *ri; | 462 | struct kretprobe_instance *ri; |
424 | struct hlist_node *pos, *next; | 463 | struct hlist_node *pos, *next; |
425 | 464 | ||
426 | hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) { | 465 | hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) { |
427 | hlist_del(&ri->uflist); | 466 | hlist_del(&ri->hlist); |
428 | kfree(ri); | 467 | kfree(ri); |
429 | } | 468 | } |
430 | } | 469 | } |
431 | 470 | ||
432 | static void __kprobes cleanup_rp_inst(struct kretprobe *rp) | 471 | static void __kprobes cleanup_rp_inst(struct kretprobe *rp) |
433 | { | 472 | { |
434 | unsigned long flags; | 473 | unsigned long flags, hash; |
435 | struct kretprobe_instance *ri; | 474 | struct kretprobe_instance *ri; |
436 | struct hlist_node *pos, *next; | 475 | struct hlist_node *pos, *next; |
476 | struct hlist_head *head; | ||
477 | |||
437 | /* No race here */ | 478 | /* No race here */ |
438 | spin_lock_irqsave(&kretprobe_lock, flags); | 479 | for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) { |
439 | hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) { | 480 | kretprobe_table_lock(hash, &flags); |
440 | ri->rp = NULL; | 481 | head = &kretprobe_inst_table[hash]; |
441 | hlist_del(&ri->uflist); | 482 | hlist_for_each_entry_safe(ri, pos, next, head, hlist) { |
483 | if (ri->rp == rp) | ||
484 | ri->rp = NULL; | ||
485 | } | ||
486 | kretprobe_table_unlock(hash, &flags); | ||
442 | } | 487 | } |
443 | spin_unlock_irqrestore(&kretprobe_lock, flags); | ||
444 | free_rp_inst(rp); | 488 | free_rp_inst(rp); |
445 | } | 489 | } |
446 | 490 | ||
@@ -831,32 +875,37 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p, | |||
831 | struct pt_regs *regs) | 875 | struct pt_regs *regs) |
832 | { | 876 | { |
833 | struct kretprobe *rp = container_of(p, struct kretprobe, kp); | 877 | struct kretprobe *rp = container_of(p, struct kretprobe, kp); |
834 | unsigned long flags = 0; | 878 | unsigned long hash, flags = 0; |
879 | struct kretprobe_instance *ri; | ||
835 | 880 | ||
836 | /*TODO: consider to only swap the RA after the last pre_handler fired */ | 881 | /*TODO: consider to only swap the RA after the last pre_handler fired */ |
837 | spin_lock_irqsave(&kretprobe_lock, flags); | 882 | hash = hash_ptr(current, KPROBE_HASH_BITS); |
883 | spin_lock_irqsave(&rp->lock, flags); | ||
838 | if (!hlist_empty(&rp->free_instances)) { | 884 | if (!hlist_empty(&rp->free_instances)) { |
839 | struct kretprobe_instance *ri; | ||
840 | |||
841 | ri = hlist_entry(rp->free_instances.first, | 885 | ri = hlist_entry(rp->free_instances.first, |
842 | struct kretprobe_instance, uflist); | 886 | struct kretprobe_instance, hlist); |
887 | hlist_del(&ri->hlist); | ||
888 | spin_unlock_irqrestore(&rp->lock, flags); | ||
889 | |||
843 | ri->rp = rp; | 890 | ri->rp = rp; |
844 | ri->task = current; | 891 | ri->task = current; |
845 | 892 | ||
846 | if (rp->entry_handler && rp->entry_handler(ri, regs)) { | 893 | if (rp->entry_handler && rp->entry_handler(ri, regs)) { |
847 | spin_unlock_irqrestore(&kretprobe_lock, flags); | 894 | spin_unlock_irqrestore(&rp->lock, flags); |
848 | return 0; | 895 | return 0; |
849 | } | 896 | } |
850 | 897 | ||
851 | arch_prepare_kretprobe(ri, regs); | 898 | arch_prepare_kretprobe(ri, regs); |
852 | 899 | ||
853 | /* XXX(hch): why is there no hlist_move_head? */ | 900 | /* XXX(hch): why is there no hlist_move_head? */ |
854 | hlist_del(&ri->uflist); | 901 | INIT_HLIST_NODE(&ri->hlist); |
855 | hlist_add_head(&ri->uflist, &ri->rp->used_instances); | 902 | kretprobe_table_lock(hash, &flags); |
856 | hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task)); | 903 | hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]); |
857 | } else | 904 | kretprobe_table_unlock(hash, &flags); |
905 | } else { | ||
858 | rp->nmissed++; | 906 | rp->nmissed++; |
859 | spin_unlock_irqrestore(&kretprobe_lock, flags); | 907 | spin_unlock_irqrestore(&rp->lock, flags); |
908 | } | ||
860 | return 0; | 909 | return 0; |
861 | } | 910 | } |
862 | 911 | ||
@@ -892,7 +941,7 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp, | |||
892 | rp->maxactive = NR_CPUS; | 941 | rp->maxactive = NR_CPUS; |
893 | #endif | 942 | #endif |
894 | } | 943 | } |
895 | INIT_HLIST_HEAD(&rp->used_instances); | 944 | spin_lock_init(&rp->lock); |
896 | INIT_HLIST_HEAD(&rp->free_instances); | 945 | INIT_HLIST_HEAD(&rp->free_instances); |
897 | for (i = 0; i < rp->maxactive; i++) { | 946 | for (i = 0; i < rp->maxactive; i++) { |
898 | inst = kmalloc(sizeof(struct kretprobe_instance) + | 947 | inst = kmalloc(sizeof(struct kretprobe_instance) + |
@@ -901,8 +950,8 @@ static int __kprobes __register_kretprobe(struct kretprobe *rp, | |||
901 | free_rp_inst(rp); | 950 | free_rp_inst(rp); |
902 | return -ENOMEM; | 951 | return -ENOMEM; |
903 | } | 952 | } |
904 | INIT_HLIST_NODE(&inst->uflist); | 953 | INIT_HLIST_NODE(&inst->hlist); |
905 | hlist_add_head(&inst->uflist, &rp->free_instances); | 954 | hlist_add_head(&inst->hlist, &rp->free_instances); |
906 | } | 955 | } |
907 | 956 | ||
908 | rp->nmissed = 0; | 957 | rp->nmissed = 0; |
@@ -1009,6 +1058,7 @@ static int __init init_kprobes(void) | |||
1009 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { | 1058 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
1010 | INIT_HLIST_HEAD(&kprobe_table[i]); | 1059 | INIT_HLIST_HEAD(&kprobe_table[i]); |
1011 | INIT_HLIST_HEAD(&kretprobe_inst_table[i]); | 1060 | INIT_HLIST_HEAD(&kretprobe_inst_table[i]); |
1061 | spin_lock_init(&(kretprobe_table_locks[i].lock)); | ||
1012 | } | 1062 | } |
1013 | 1063 | ||
1014 | /* | 1064 | /* |
@@ -1050,6 +1100,7 @@ static int __init init_kprobes(void) | |||
1050 | err = arch_init_kprobes(); | 1100 | err = arch_init_kprobes(); |
1051 | if (!err) | 1101 | if (!err) |
1052 | err = register_die_notifier(&kprobe_exceptions_nb); | 1102 | err = register_die_notifier(&kprobe_exceptions_nb); |
1103 | kprobes_initialized = (err == 0); | ||
1053 | 1104 | ||
1054 | if (!err) | 1105 | if (!err) |
1055 | init_test_probes(); | 1106 | init_test_probes(); |
@@ -1286,13 +1337,8 @@ EXPORT_SYMBOL_GPL(register_jprobe); | |||
1286 | EXPORT_SYMBOL_GPL(unregister_jprobe); | 1337 | EXPORT_SYMBOL_GPL(unregister_jprobe); |
1287 | EXPORT_SYMBOL_GPL(register_jprobes); | 1338 | EXPORT_SYMBOL_GPL(register_jprobes); |
1288 | EXPORT_SYMBOL_GPL(unregister_jprobes); | 1339 | EXPORT_SYMBOL_GPL(unregister_jprobes); |
1289 | #ifdef CONFIG_KPROBES | ||
1290 | EXPORT_SYMBOL_GPL(jprobe_return); | 1340 | EXPORT_SYMBOL_GPL(jprobe_return); |
1291 | #endif | ||
1292 | |||
1293 | #ifdef CONFIG_KPROBES | ||
1294 | EXPORT_SYMBOL_GPL(register_kretprobe); | 1341 | EXPORT_SYMBOL_GPL(register_kretprobe); |
1295 | EXPORT_SYMBOL_GPL(unregister_kretprobe); | 1342 | EXPORT_SYMBOL_GPL(unregister_kretprobe); |
1296 | EXPORT_SYMBOL_GPL(register_kretprobes); | 1343 | EXPORT_SYMBOL_GPL(register_kretprobes); |
1297 | EXPORT_SYMBOL_GPL(unregister_kretprobes); | 1344 | EXPORT_SYMBOL_GPL(unregister_kretprobes); |
1298 | #endif | ||
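End of the kprobes.c changes: each kretprobe now owns a spinlock (rp->lock) guarding its free_instances list, and the instances sit on plain hlist nodes instead of the old uflist/used_instances pair. A minimal registration sketch, assuming the usual kretprobe API from Documentation/kprobes.txt; the probed symbol, maxactive value and handler body are placeholders:

	static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
	{
		/* runs when the probed function returns; ri->task is the returning task */
		return 0;
	}

	static struct kretprobe my_kretprobe = {
		.handler   = my_ret_handler,
		.maxactive = 20,	/* 0 lets __register_kretprobe() pick a default */
	};

	static int __init my_probe_init(void)
	{
		my_kretprobe.kp.symbol_name = "do_fork";
		return register_kretprobe(&my_kretprobe);
	}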
diff --git a/kernel/kthread.c b/kernel/kthread.c index ac3fb7326641..96cff2f8710b 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -106,7 +106,7 @@ static void create_kthread(struct kthread_create_info *create) | |||
106 | */ | 106 | */ |
107 | sched_setscheduler(create->result, SCHED_NORMAL, &param); | 107 | sched_setscheduler(create->result, SCHED_NORMAL, &param); |
108 | set_user_nice(create->result, KTHREAD_NICE_LEVEL); | 108 | set_user_nice(create->result, KTHREAD_NICE_LEVEL); |
109 | set_cpus_allowed(create->result, CPU_MASK_ALL); | 109 | set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR); |
110 | } | 110 | } |
111 | complete(&create->done); | 111 | complete(&create->done); |
112 | } | 112 | } |
@@ -176,7 +176,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu) | |||
176 | return; | 176 | return; |
177 | } | 177 | } |
178 | /* Must have done schedule() in kthread() before we set_task_cpu */ | 178 | /* Must have done schedule() in kthread() before we set_task_cpu */ |
179 | wait_task_inactive(k); | 179 | wait_task_inactive(k, 0); |
180 | set_task_cpu(k, cpu); | 180 | set_task_cpu(k, cpu); |
181 | k->cpus_allowed = cpumask_of_cpu(cpu); | 181 | k->cpus_allowed = cpumask_of_cpu(cpu); |
182 | k->rt.nr_cpus_allowed = 1; | 182 | k->rt.nr_cpus_allowed = 1; |
@@ -233,7 +233,7 @@ int kthreadd(void *unused) | |||
233 | set_task_comm(tsk, "kthreadd"); | 233 | set_task_comm(tsk, "kthreadd"); |
234 | ignore_signals(tsk); | 234 | ignore_signals(tsk); |
235 | set_user_nice(tsk, KTHREAD_NICE_LEVEL); | 235 | set_user_nice(tsk, KTHREAD_NICE_LEVEL); |
236 | set_cpus_allowed(tsk, CPU_MASK_ALL); | 236 | set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR); |
237 | 237 | ||
238 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; | 238 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; |
239 | 239 | ||
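The kthread.c hunks are a mechanical move from the by-value set_cpus_allowed() to the pointer-taking set_cpus_allowed_ptr(), with CPU_MASK_ALL_PTR standing in for a pointer to CPU_MASK_ALL. A minimal sketch of the pointer form for the single-CPU case, assuming this kernel's cpumask API; the task and cpu variables are placeholders:

	static void pin_to_cpu(struct task_struct *p, int cpu)
	{
		cpumask_t mask = cpumask_of_cpu(cpu);

		set_cpus_allowed_ptr(p, &mask);	/* was: set_cpus_allowed(p, mask) */
	}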
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index d38a64362973..dbda475b13bd 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; | |||
124 | unsigned long nr_lock_classes; | 124 | unsigned long nr_lock_classes; |
125 | static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; | 125 | static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; |
126 | 126 | ||
127 | static inline struct lock_class *hlock_class(struct held_lock *hlock) | ||
128 | { | ||
129 | if (!hlock->class_idx) { | ||
130 | DEBUG_LOCKS_WARN_ON(1); | ||
131 | return NULL; | ||
132 | } | ||
133 | return lock_classes + hlock->class_idx - 1; | ||
134 | } | ||
135 | |||
127 | #ifdef CONFIG_LOCK_STAT | 136 | #ifdef CONFIG_LOCK_STAT |
128 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); | 137 | static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); |
129 | 138 | ||
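hlock_class(), added above, becomes the only sanctioned way to get from a held_lock to its lock_class in the rest of this patch: held_lock stores a 1-based index into the static lock_classes[] array rather than a pointer, so a zeroed held_lock is detectably class-less. A self-contained illustration of the same encoding with invented names:

	struct lock_class_demo { const char *name; };
	static struct lock_class_demo demo_classes[8];

	struct held_lock_demo {
		unsigned int class_idx;		/* 0 means "no class recorded" */
	};

	static struct lock_class_demo *demo_hlock_class(struct held_lock_demo *h)
	{
		if (!h->class_idx)
			return NULL;		/* lockdep warns and bails out here */
		return &demo_classes[h->class_idx - 1];
	}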
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock) | |||
222 | 231 | ||
223 | holdtime = sched_clock() - hlock->holdtime_stamp; | 232 | holdtime = sched_clock() - hlock->holdtime_stamp; |
224 | 233 | ||
225 | stats = get_lock_stats(hlock->class); | 234 | stats = get_lock_stats(hlock_class(hlock)); |
226 | if (hlock->read) | 235 | if (hlock->read) |
227 | lock_time_inc(&stats->read_holdtime, holdtime); | 236 | lock_time_inc(&stats->read_holdtime, holdtime); |
228 | else | 237 | else |
@@ -372,6 +381,19 @@ unsigned int nr_process_chains; | |||
372 | unsigned int max_lockdep_depth; | 381 | unsigned int max_lockdep_depth; |
373 | unsigned int max_recursion_depth; | 382 | unsigned int max_recursion_depth; |
374 | 383 | ||
384 | static unsigned int lockdep_dependency_gen_id; | ||
385 | |||
386 | static bool lockdep_dependency_visit(struct lock_class *source, | ||
387 | unsigned int depth) | ||
388 | { | ||
389 | if (!depth) | ||
390 | lockdep_dependency_gen_id++; | ||
391 | if (source->dep_gen_id == lockdep_dependency_gen_id) | ||
392 | return true; | ||
393 | source->dep_gen_id = lockdep_dependency_gen_id; | ||
394 | return false; | ||
395 | } | ||
396 | |||
375 | #ifdef CONFIG_DEBUG_LOCKDEP | 397 | #ifdef CONFIG_DEBUG_LOCKDEP |
376 | /* | 398 | /* |
377 | * We cannot printk in early bootup code. Not even early_printk() | 399 | * We cannot printk in early bootup code. Not even early_printk() |
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock) | |||
505 | 527 | ||
506 | static void print_lock(struct held_lock *hlock) | 528 | static void print_lock(struct held_lock *hlock) |
507 | { | 529 | { |
508 | print_lock_name(hlock->class); | 530 | print_lock_name(hlock_class(hlock)); |
509 | printk(", at: "); | 531 | printk(", at: "); |
510 | print_ip_sym(hlock->acquire_ip); | 532 | print_ip_sym(hlock->acquire_ip); |
511 | } | 533 | } |
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth) | |||
558 | { | 580 | { |
559 | struct lock_list *entry; | 581 | struct lock_list *entry; |
560 | 582 | ||
583 | if (lockdep_dependency_visit(class, depth)) | ||
584 | return; | ||
585 | |||
561 | if (DEBUG_LOCKS_WARN_ON(depth >= 20)) | 586 | if (DEBUG_LOCKS_WARN_ON(depth >= 20)) |
562 | return; | 587 | return; |
563 | 588 | ||
@@ -850,11 +875,11 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
850 | if (!entry) | 875 | if (!entry) |
851 | return 0; | 876 | return 0; |
852 | 877 | ||
853 | entry->class = this; | ||
854 | entry->distance = distance; | ||
855 | if (!save_trace(&entry->trace)) | 878 | if (!save_trace(&entry->trace)) |
856 | return 0; | 879 | return 0; |
857 | 880 | ||
881 | entry->class = this; | ||
882 | entry->distance = distance; | ||
858 | /* | 883 | /* |
859 | * Since we never remove from the dependency list, the list can | 884 | * Since we never remove from the dependency list, the list can |
860 | * be walked lockless by other CPUs, it's only allocation | 885 | * be walked lockless by other CPUs, it's only allocation |
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void) | |||
932 | if (debug_locks_silent) | 957 | if (debug_locks_silent) |
933 | return 0; | 958 | return 0; |
934 | 959 | ||
935 | this.class = check_source->class; | 960 | this.class = hlock_class(check_source); |
936 | if (!save_trace(&this.trace)) | 961 | if (!save_trace(&this.trace)) |
937 | return 0; | 962 | return 0; |
938 | 963 | ||
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void) | |||
959 | return 0; | 984 | return 0; |
960 | } | 985 | } |
961 | 986 | ||
987 | unsigned long __lockdep_count_forward_deps(struct lock_class *class, | ||
988 | unsigned int depth) | ||
989 | { | ||
990 | struct lock_list *entry; | ||
991 | unsigned long ret = 1; | ||
992 | |||
993 | if (lockdep_dependency_visit(class, depth)) | ||
994 | return 0; | ||
995 | |||
996 | /* | ||
997 | * Recurse this class's dependency list: | ||
998 | */ | ||
999 | list_for_each_entry(entry, &class->locks_after, entry) | ||
1000 | ret += __lockdep_count_forward_deps(entry->class, depth + 1); | ||
1001 | |||
1002 | return ret; | ||
1003 | } | ||
1004 | |||
1005 | unsigned long lockdep_count_forward_deps(struct lock_class *class) | ||
1006 | { | ||
1007 | unsigned long ret, flags; | ||
1008 | |||
1009 | local_irq_save(flags); | ||
1010 | __raw_spin_lock(&lockdep_lock); | ||
1011 | ret = __lockdep_count_forward_deps(class, 0); | ||
1012 | __raw_spin_unlock(&lockdep_lock); | ||
1013 | local_irq_restore(flags); | ||
1014 | |||
1015 | return ret; | ||
1016 | } | ||
1017 | |||
1018 | unsigned long __lockdep_count_backward_deps(struct lock_class *class, | ||
1019 | unsigned int depth) | ||
1020 | { | ||
1021 | struct lock_list *entry; | ||
1022 | unsigned long ret = 1; | ||
1023 | |||
1024 | if (lockdep_dependency_visit(class, depth)) | ||
1025 | return 0; | ||
1026 | /* | ||
1027 | * Recurse this class's dependency list: | ||
1028 | */ | ||
1029 | list_for_each_entry(entry, &class->locks_before, entry) | ||
1030 | ret += __lockdep_count_backward_deps(entry->class, depth + 1); | ||
1031 | |||
1032 | return ret; | ||
1033 | } | ||
1034 | |||
1035 | unsigned long lockdep_count_backward_deps(struct lock_class *class) | ||
1036 | { | ||
1037 | unsigned long ret, flags; | ||
1038 | |||
1039 | local_irq_save(flags); | ||
1040 | __raw_spin_lock(&lockdep_lock); | ||
1041 | ret = __lockdep_count_backward_deps(class, 0); | ||
1042 | __raw_spin_unlock(&lockdep_lock); | ||
1043 | local_irq_restore(flags); | ||
1044 | |||
1045 | return ret; | ||
1046 | } | ||
1047 | |||
962 | /* | 1048 | /* |
963 | * Prove that the dependency graph starting at <entry> can not | 1049 | * Prove that the dependency graph starting at <entry> can not |
964 | * lead to <target>. Print an error and return 0 if it does. | 1050 | * lead to <target>. Print an error and return 0 if it does. |
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth) | |||
968 | { | 1054 | { |
969 | struct lock_list *entry; | 1055 | struct lock_list *entry; |
970 | 1056 | ||
1057 | if (lockdep_dependency_visit(source, depth)) | ||
1058 | return 1; | ||
1059 | |||
971 | debug_atomic_inc(&nr_cyclic_check_recursions); | 1060 | debug_atomic_inc(&nr_cyclic_check_recursions); |
972 | if (depth > max_recursion_depth) | 1061 | if (depth > max_recursion_depth) |
973 | max_recursion_depth = depth; | 1062 | max_recursion_depth = depth; |
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth) | |||
977 | * Check this lock's dependency list: | 1066 | * Check this lock's dependency list: |
978 | */ | 1067 | */ |
979 | list_for_each_entry(entry, &source->locks_after, entry) { | 1068 | list_for_each_entry(entry, &source->locks_after, entry) { |
980 | if (entry->class == check_target->class) | 1069 | if (entry->class == hlock_class(check_target)) |
981 | return print_circular_bug_header(entry, depth+1); | 1070 | return print_circular_bug_header(entry, depth+1); |
982 | debug_atomic_inc(&nr_cyclic_checks); | 1071 | debug_atomic_inc(&nr_cyclic_checks); |
983 | if (!check_noncircular(entry->class, depth+1)) | 1072 | if (!check_noncircular(entry->class, depth+1)) |
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth) | |||
1011 | struct lock_list *entry; | 1100 | struct lock_list *entry; |
1012 | int ret; | 1101 | int ret; |
1013 | 1102 | ||
1103 | if (lockdep_dependency_visit(source, depth)) | ||
1104 | return 1; | ||
1105 | |||
1014 | if (depth > max_recursion_depth) | 1106 | if (depth > max_recursion_depth) |
1015 | max_recursion_depth = depth; | 1107 | max_recursion_depth = depth; |
1016 | if (depth >= RECURSION_LIMIT) | 1108 | if (depth >= RECURSION_LIMIT) |
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) | |||
1050 | struct lock_list *entry; | 1142 | struct lock_list *entry; |
1051 | int ret; | 1143 | int ret; |
1052 | 1144 | ||
1145 | if (lockdep_dependency_visit(source, depth)) | ||
1146 | return 1; | ||
1147 | |||
1053 | if (!__raw_spin_is_locked(&lockdep_lock)) | 1148 | if (!__raw_spin_is_locked(&lockdep_lock)) |
1054 | return DEBUG_LOCKS_WARN_ON(1); | 1149 | return DEBUG_LOCKS_WARN_ON(1); |
1055 | 1150 | ||
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) | |||
1064 | return 2; | 1159 | return 2; |
1065 | } | 1160 | } |
1066 | 1161 | ||
1162 | if (!source && debug_locks_off_graph_unlock()) { | ||
1163 | WARN_ON(1); | ||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1067 | /* | 1167 | /* |
1068 | * Check this lock's dependency list: | 1168 | * Check this lock's dependency list: |
1069 | */ | 1169 | */ |
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr, | |||
1103 | printk("\nand this task is already holding:\n"); | 1203 | printk("\nand this task is already holding:\n"); |
1104 | print_lock(prev); | 1204 | print_lock(prev); |
1105 | printk("which would create a new lock dependency:\n"); | 1205 | printk("which would create a new lock dependency:\n"); |
1106 | print_lock_name(prev->class); | 1206 | print_lock_name(hlock_class(prev)); |
1107 | printk(" ->"); | 1207 | printk(" ->"); |
1108 | print_lock_name(next->class); | 1208 | print_lock_name(hlock_class(next)); |
1109 | printk("\n"); | 1209 | printk("\n"); |
1110 | 1210 | ||
1111 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", | 1211 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", |
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev, | |||
1146 | 1246 | ||
1147 | find_usage_bit = bit_backwards; | 1247 | find_usage_bit = bit_backwards; |
1148 | /* fills in <backwards_match> */ | 1248 | /* fills in <backwards_match> */ |
1149 | ret = find_usage_backwards(prev->class, 0); | 1249 | ret = find_usage_backwards(hlock_class(prev), 0); |
1150 | if (!ret || ret == 1) | 1250 | if (!ret || ret == 1) |
1151 | return ret; | 1251 | return ret; |
1152 | 1252 | ||
1153 | find_usage_bit = bit_forwards; | 1253 | find_usage_bit = bit_forwards; |
1154 | ret = find_usage_forwards(next->class, 0); | 1254 | ret = find_usage_forwards(hlock_class(next), 0); |
1155 | if (!ret || ret == 1) | 1255 | if (!ret || ret == 1) |
1156 | return ret; | 1256 | return ret; |
1157 | /* ret == 2 */ | 1257 | /* ret == 2 */ |
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, | |||
1272 | struct lockdep_map *next_instance, int read) | 1372 | struct lockdep_map *next_instance, int read) |
1273 | { | 1373 | { |
1274 | struct held_lock *prev; | 1374 | struct held_lock *prev; |
1375 | struct held_lock *nest = NULL; | ||
1275 | int i; | 1376 | int i; |
1276 | 1377 | ||
1277 | for (i = 0; i < curr->lockdep_depth; i++) { | 1378 | for (i = 0; i < curr->lockdep_depth; i++) { |
1278 | prev = curr->held_locks + i; | 1379 | prev = curr->held_locks + i; |
1279 | if (prev->class != next->class) | 1380 | |
1381 | if (prev->instance == next->nest_lock) | ||
1382 | nest = prev; | ||
1383 | |||
1384 | if (hlock_class(prev) != hlock_class(next)) | ||
1280 | continue; | 1385 | continue; |
1386 | |||
1281 | /* | 1387 | /* |
1282 | * Allow read-after-read recursion of the same | 1388 | * Allow read-after-read recursion of the same |
1283 | * lock class (i.e. read_lock(lock)+read_lock(lock)): | 1389 | * lock class (i.e. read_lock(lock)+read_lock(lock)): |
1284 | */ | 1390 | */ |
1285 | if ((read == 2) && prev->read) | 1391 | if ((read == 2) && prev->read) |
1286 | return 2; | 1392 | return 2; |
1393 | |||
1394 | /* | ||
1395 | * We're holding the nest_lock, which serializes this lock's | ||
1396 | * nesting behaviour. | ||
1397 | */ | ||
1398 | if (nest) | ||
1399 | return 2; | ||
1400 | |||
1287 | return print_deadlock_bug(curr, prev, next); | 1401 | return print_deadlock_bug(curr, prev, next); |
1288 | } | 1402 | } |
1289 | return 1; | 1403 | return 1; |
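The nest_lock handling added to check_deadlock() lets a caller declare that one outer lock serializes every acquisition of a whole family of same-class locks, so holding several of them at once is no longer reported as a recursive deadlock. A sketch of the intended annotation; spin_lock_nest_lock() is the spinlock-level wrapper introduced alongside this lockdep change (treat the exact macro name as an assumption), and the data structures are invented for the example:

	struct parent {
		spinlock_t guard;		/* serializes all child->lock takers */
		struct list_head children;
	};

	struct child {
		spinlock_t lock;		/* every instance shares one lock class */
		struct list_head list;
	};

	static void lock_all_children(struct parent *p)
	{
		struct child *c;

		spin_lock(&p->guard);
		list_for_each_entry(c, &p->children, list)
			spin_lock_nest_lock(&c->lock, &p->guard);
	}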
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1329 | */ | 1443 | */ |
1330 | check_source = next; | 1444 | check_source = next; |
1331 | check_target = prev; | 1445 | check_target = prev; |
1332 | if (!(check_noncircular(next->class, 0))) | 1446 | if (!(check_noncircular(hlock_class(next), 0))) |
1333 | return print_circular_bug_tail(); | 1447 | return print_circular_bug_tail(); |
1334 | 1448 | ||
1335 | if (!check_prev_add_irq(curr, prev, next)) | 1449 | if (!check_prev_add_irq(curr, prev, next)) |
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1353 | * chains - the second one will be new, but L1 already has | 1467 | * chains - the second one will be new, but L1 already has |
1354 | * L2 added to its dependency list, due to the first chain.) | 1468 | * L2 added to its dependency list, due to the first chain.) |
1355 | */ | 1469 | */ |
1356 | list_for_each_entry(entry, &prev->class->locks_after, entry) { | 1470 | list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) { |
1357 | if (entry->class == next->class) { | 1471 | if (entry->class == hlock_class(next)) { |
1358 | if (distance == 1) | 1472 | if (distance == 1) |
1359 | entry->distance = 1; | 1473 | entry->distance = 1; |
1360 | return 2; | 1474 | return 2; |
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1365 | * Ok, all validations passed, add the new lock | 1479 | * Ok, all validations passed, add the new lock |
1366 | * to the previous lock's dependency list: | 1480 | * to the previous lock's dependency list: |
1367 | */ | 1481 | */ |
1368 | ret = add_lock_to_list(prev->class, next->class, | 1482 | ret = add_lock_to_list(hlock_class(prev), hlock_class(next), |
1369 | &prev->class->locks_after, next->acquire_ip, distance); | 1483 | &hlock_class(prev)->locks_after, |
1484 | next->acquire_ip, distance); | ||
1370 | 1485 | ||
1371 | if (!ret) | 1486 | if (!ret) |
1372 | return 0; | 1487 | return 0; |
1373 | 1488 | ||
1374 | ret = add_lock_to_list(next->class, prev->class, | 1489 | ret = add_lock_to_list(hlock_class(next), hlock_class(prev), |
1375 | &next->class->locks_before, next->acquire_ip, distance); | 1490 | &hlock_class(next)->locks_before, |
1491 | next->acquire_ip, distance); | ||
1376 | if (!ret) | 1492 | if (!ret) |
1377 | return 0; | 1493 | return 0; |
1378 | 1494 | ||
1379 | /* | 1495 | /* |
1380 | * Debugging printouts: | 1496 | * Debugging printouts: |
1381 | */ | 1497 | */ |
1382 | if (verbose(prev->class) || verbose(next->class)) { | 1498 | if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { |
1383 | graph_unlock(); | 1499 | graph_unlock(); |
1384 | printk("\n new dependency: "); | 1500 | printk("\n new dependency: "); |
1385 | print_lock_name(prev->class); | 1501 | print_lock_name(hlock_class(prev)); |
1386 | printk(" => "); | 1502 | printk(" => "); |
1387 | print_lock_name(next->class); | 1503 | print_lock_name(hlock_class(next)); |
1388 | printk("\n"); | 1504 | printk("\n"); |
1389 | dump_stack(); | 1505 | dump_stack(); |
1390 | return graph_lock(); | 1506 | return graph_lock(); |
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
1481 | struct held_lock *hlock, | 1597 | struct held_lock *hlock, |
1482 | u64 chain_key) | 1598 | u64 chain_key) |
1483 | { | 1599 | { |
1484 | struct lock_class *class = hlock->class; | 1600 | struct lock_class *class = hlock_class(hlock); |
1485 | struct list_head *hash_head = chainhashentry(chain_key); | 1601 | struct list_head *hash_head = chainhashentry(chain_key); |
1486 | struct lock_chain *chain; | 1602 | struct lock_chain *chain; |
1487 | struct held_lock *hlock_curr, *hlock_next; | 1603 | struct held_lock *hlock_curr, *hlock_next; |
@@ -1554,7 +1670,7 @@ cache_hit: | |||
1554 | if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { | 1670 | if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { |
1555 | chain->base = cn; | 1671 | chain->base = cn; |
1556 | for (j = 0; j < chain->depth - 1; j++, i++) { | 1672 | for (j = 0; j < chain->depth - 1; j++, i++) { |
1557 | int lock_id = curr->held_locks[i].class - lock_classes; | 1673 | int lock_id = curr->held_locks[i].class_idx - 1; |
1558 | chain_hlocks[chain->base + j] = lock_id; | 1674 | chain_hlocks[chain->base + j] = lock_id; |
1559 | } | 1675 | } |
1560 | chain_hlocks[chain->base + j] = class - lock_classes; | 1676 | chain_hlocks[chain->base + j] = class - lock_classes; |
@@ -1643,14 +1759,13 @@ static void check_chain_key(struct task_struct *curr) | |||
1643 | hlock = curr->held_locks + i; | 1759 | hlock = curr->held_locks + i; |
1644 | if (chain_key != hlock->prev_chain_key) { | 1760 | if (chain_key != hlock->prev_chain_key) { |
1645 | debug_locks_off(); | 1761 | debug_locks_off(); |
1646 | printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n", | 1762 | WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", |
1647 | curr->lockdep_depth, i, | 1763 | curr->lockdep_depth, i, |
1648 | (unsigned long long)chain_key, | 1764 | (unsigned long long)chain_key, |
1649 | (unsigned long long)hlock->prev_chain_key); | 1765 | (unsigned long long)hlock->prev_chain_key); |
1650 | WARN_ON(1); | ||
1651 | return; | 1766 | return; |
1652 | } | 1767 | } |
1653 | id = hlock->class - lock_classes; | 1768 | id = hlock->class_idx - 1; |
1654 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) | 1769 | if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) |
1655 | return; | 1770 | return; |
1656 | 1771 | ||
@@ -1662,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr) | |||
1662 | } | 1777 | } |
1663 | if (chain_key != curr->curr_chain_key) { | 1778 | if (chain_key != curr->curr_chain_key) { |
1664 | debug_locks_off(); | 1779 | debug_locks_off(); |
1665 | printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n", | 1780 | WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", |
1666 | curr->lockdep_depth, i, | 1781 | curr->lockdep_depth, i, |
1667 | (unsigned long long)chain_key, | 1782 | (unsigned long long)chain_key, |
1668 | (unsigned long long)curr->curr_chain_key); | 1783 | (unsigned long long)curr->curr_chain_key); |
1669 | WARN_ON(1); | ||
1670 | } | 1784 | } |
1671 | #endif | 1785 | #endif |
1672 | } | 1786 | } |
@@ -1695,7 +1809,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, | |||
1695 | print_lock(this); | 1809 | print_lock(this); |
1696 | 1810 | ||
1697 | printk("{%s} state was registered at:\n", usage_str[prev_bit]); | 1811 | printk("{%s} state was registered at:\n", usage_str[prev_bit]); |
1698 | print_stack_trace(this->class->usage_traces + prev_bit, 1); | 1812 | print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1); |
1699 | 1813 | ||
1700 | print_irqtrace_events(curr); | 1814 | print_irqtrace_events(curr); |
1701 | printk("\nother info that might help us debug this:\n"); | 1815 | printk("\nother info that might help us debug this:\n"); |
@@ -1714,7 +1828,7 @@ static inline int | |||
1714 | valid_state(struct task_struct *curr, struct held_lock *this, | 1828 | valid_state(struct task_struct *curr, struct held_lock *this, |
1715 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) | 1829 | enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit) |
1716 | { | 1830 | { |
1717 | if (unlikely(this->class->usage_mask & (1 << bad_bit))) | 1831 | if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) |
1718 | return print_usage_bug(curr, this, bad_bit, new_bit); | 1832 | return print_usage_bug(curr, this, bad_bit, new_bit); |
1719 | return 1; | 1833 | return 1; |
1720 | } | 1834 | } |
@@ -1753,7 +1867,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | |||
1753 | lockdep_print_held_locks(curr); | 1867 | lockdep_print_held_locks(curr); |
1754 | 1868 | ||
1755 | printk("\nthe first lock's dependencies:\n"); | 1869 | printk("\nthe first lock's dependencies:\n"); |
1756 | print_lock_dependencies(this->class, 0); | 1870 | print_lock_dependencies(hlock_class(this), 0); |
1757 | 1871 | ||
1758 | printk("\nthe second lock's dependencies:\n"); | 1872 | printk("\nthe second lock's dependencies:\n"); |
1759 | print_lock_dependencies(other, 0); | 1873 | print_lock_dependencies(other, 0); |
@@ -1776,7 +1890,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, | |||
1776 | 1890 | ||
1777 | find_usage_bit = bit; | 1891 | find_usage_bit = bit; |
1778 | /* fills in <forwards_match> */ | 1892 | /* fills in <forwards_match> */ |
1779 | ret = find_usage_forwards(this->class, 0); | 1893 | ret = find_usage_forwards(hlock_class(this), 0); |
1780 | if (!ret || ret == 1) | 1894 | if (!ret || ret == 1) |
1781 | return ret; | 1895 | return ret; |
1782 | 1896 | ||
@@ -1795,7 +1909,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, | |||
1795 | 1909 | ||
1796 | find_usage_bit = bit; | 1910 | find_usage_bit = bit; |
1797 | /* fills in <backwards_match> */ | 1911 | /* fills in <backwards_match> */ |
1798 | ret = find_usage_backwards(this->class, 0); | 1912 | ret = find_usage_backwards(hlock_class(this), 0); |
1799 | if (!ret || ret == 1) | 1913 | if (!ret || ret == 1) |
1800 | return ret; | 1914 | return ret; |
1801 | 1915 | ||
@@ -1861,7 +1975,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1861 | LOCK_ENABLED_HARDIRQS_READ, "hard-read")) | 1975 | LOCK_ENABLED_HARDIRQS_READ, "hard-read")) |
1862 | return 0; | 1976 | return 0; |
1863 | #endif | 1977 | #endif |
1864 | if (hardirq_verbose(this->class)) | 1978 | if (hardirq_verbose(hlock_class(this))) |
1865 | ret = 2; | 1979 | ret = 2; |
1866 | break; | 1980 | break; |
1867 | case LOCK_USED_IN_SOFTIRQ: | 1981 | case LOCK_USED_IN_SOFTIRQ: |
@@ -1886,7 +2000,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1886 | LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) | 2000 | LOCK_ENABLED_SOFTIRQS_READ, "soft-read")) |
1887 | return 0; | 2001 | return 0; |
1888 | #endif | 2002 | #endif |
1889 | if (softirq_verbose(this->class)) | 2003 | if (softirq_verbose(hlock_class(this))) |
1890 | ret = 2; | 2004 | ret = 2; |
1891 | break; | 2005 | break; |
1892 | case LOCK_USED_IN_HARDIRQ_READ: | 2006 | case LOCK_USED_IN_HARDIRQ_READ: |
@@ -1899,7 +2013,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1899 | if (!check_usage_forwards(curr, this, | 2013 | if (!check_usage_forwards(curr, this, |
1900 | LOCK_ENABLED_HARDIRQS, "hard")) | 2014 | LOCK_ENABLED_HARDIRQS, "hard")) |
1901 | return 0; | 2015 | return 0; |
1902 | if (hardirq_verbose(this->class)) | 2016 | if (hardirq_verbose(hlock_class(this))) |
1903 | ret = 2; | 2017 | ret = 2; |
1904 | break; | 2018 | break; |
1905 | case LOCK_USED_IN_SOFTIRQ_READ: | 2019 | case LOCK_USED_IN_SOFTIRQ_READ: |
@@ -1912,7 +2026,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1912 | if (!check_usage_forwards(curr, this, | 2026 | if (!check_usage_forwards(curr, this, |
1913 | LOCK_ENABLED_SOFTIRQS, "soft")) | 2027 | LOCK_ENABLED_SOFTIRQS, "soft")) |
1914 | return 0; | 2028 | return 0; |
1915 | if (softirq_verbose(this->class)) | 2029 | if (softirq_verbose(hlock_class(this))) |
1916 | ret = 2; | 2030 | ret = 2; |
1917 | break; | 2031 | break; |
1918 | case LOCK_ENABLED_HARDIRQS: | 2032 | case LOCK_ENABLED_HARDIRQS: |
@@ -1938,7 +2052,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1938 | LOCK_USED_IN_HARDIRQ_READ, "hard-read")) | 2052 | LOCK_USED_IN_HARDIRQ_READ, "hard-read")) |
1939 | return 0; | 2053 | return 0; |
1940 | #endif | 2054 | #endif |
1941 | if (hardirq_verbose(this->class)) | 2055 | if (hardirq_verbose(hlock_class(this))) |
1942 | ret = 2; | 2056 | ret = 2; |
1943 | break; | 2057 | break; |
1944 | case LOCK_ENABLED_SOFTIRQS: | 2058 | case LOCK_ENABLED_SOFTIRQS: |
@@ -1964,7 +2078,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1964 | LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) | 2078 | LOCK_USED_IN_SOFTIRQ_READ, "soft-read")) |
1965 | return 0; | 2079 | return 0; |
1966 | #endif | 2080 | #endif |
1967 | if (softirq_verbose(this->class)) | 2081 | if (softirq_verbose(hlock_class(this))) |
1968 | ret = 2; | 2082 | ret = 2; |
1969 | break; | 2083 | break; |
1970 | case LOCK_ENABLED_HARDIRQS_READ: | 2084 | case LOCK_ENABLED_HARDIRQS_READ: |
@@ -1979,7 +2093,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1979 | LOCK_USED_IN_HARDIRQ, "hard")) | 2093 | LOCK_USED_IN_HARDIRQ, "hard")) |
1980 | return 0; | 2094 | return 0; |
1981 | #endif | 2095 | #endif |
1982 | if (hardirq_verbose(this->class)) | 2096 | if (hardirq_verbose(hlock_class(this))) |
1983 | ret = 2; | 2097 | ret = 2; |
1984 | break; | 2098 | break; |
1985 | case LOCK_ENABLED_SOFTIRQS_READ: | 2099 | case LOCK_ENABLED_SOFTIRQS_READ: |
@@ -1994,7 +2108,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, | |||
1994 | LOCK_USED_IN_SOFTIRQ, "soft")) | 2108 | LOCK_USED_IN_SOFTIRQ, "soft")) |
1995 | return 0; | 2109 | return 0; |
1996 | #endif | 2110 | #endif |
1997 | if (softirq_verbose(this->class)) | 2111 | if (softirq_verbose(hlock_class(this))) |
1998 | ret = 2; | 2112 | ret = 2; |
1999 | break; | 2113 | break; |
2000 | default: | 2114 | default: |
@@ -2310,7 +2424,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
2310 | * If already set then do not dirty the cacheline, | 2424 | * If already set then do not dirty the cacheline, |
2311 | * nor do any checks: | 2425 | * nor do any checks: |
2312 | */ | 2426 | */ |
2313 | if (likely(this->class->usage_mask & new_mask)) | 2427 | if (likely(hlock_class(this)->usage_mask & new_mask)) |
2314 | return 1; | 2428 | return 1; |
2315 | 2429 | ||
2316 | if (!graph_lock()) | 2430 | if (!graph_lock()) |
@@ -2318,14 +2432,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
2318 | /* | 2432 | /* |
2319 | * Make sure we didnt race: | 2433 | * Make sure we didnt race: |
2320 | */ | 2434 | */ |
2321 | if (unlikely(this->class->usage_mask & new_mask)) { | 2435 | if (unlikely(hlock_class(this)->usage_mask & new_mask)) { |
2322 | graph_unlock(); | 2436 | graph_unlock(); |
2323 | return 1; | 2437 | return 1; |
2324 | } | 2438 | } |
2325 | 2439 | ||
2326 | this->class->usage_mask |= new_mask; | 2440 | hlock_class(this)->usage_mask |= new_mask; |
2327 | 2441 | ||
2328 | if (!save_trace(this->class->usage_traces + new_bit)) | 2442 | if (!save_trace(hlock_class(this)->usage_traces + new_bit)) |
2329 | return 0; | 2443 | return 0; |
2330 | 2444 | ||
2331 | switch (new_bit) { | 2445 | switch (new_bit) { |
@@ -2405,7 +2519,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map); | |||
2405 | */ | 2519 | */ |
2406 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 2520 | static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
2407 | int trylock, int read, int check, int hardirqs_off, | 2521 | int trylock, int read, int check, int hardirqs_off, |
2408 | unsigned long ip) | 2522 | struct lockdep_map *nest_lock, unsigned long ip) |
2409 | { | 2523 | { |
2410 | struct task_struct *curr = current; | 2524 | struct task_struct *curr = current; |
2411 | struct lock_class *class = NULL; | 2525 | struct lock_class *class = NULL; |
@@ -2459,14 +2573,16 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2459 | return 0; | 2573 | return 0; |
2460 | 2574 | ||
2461 | hlock = curr->held_locks + depth; | 2575 | hlock = curr->held_locks + depth; |
2462 | 2576 | if (DEBUG_LOCKS_WARN_ON(!class)) | |
2463 | hlock->class = class; | 2577 | return 0; |
2578 | hlock->class_idx = class - lock_classes + 1; | ||
2464 | hlock->acquire_ip = ip; | 2579 | hlock->acquire_ip = ip; |
2465 | hlock->instance = lock; | 2580 | hlock->instance = lock; |
2581 | hlock->nest_lock = nest_lock; | ||
2466 | hlock->trylock = trylock; | 2582 | hlock->trylock = trylock; |
2467 | hlock->read = read; | 2583 | hlock->read = read; |
2468 | hlock->check = check; | 2584 | hlock->check = check; |
2469 | hlock->hardirqs_off = hardirqs_off; | 2585 | hlock->hardirqs_off = !!hardirqs_off; |
2470 | #ifdef CONFIG_LOCK_STAT | 2586 | #ifdef CONFIG_LOCK_STAT |
2471 | hlock->waittime_stamp = 0; | 2587 | hlock->waittime_stamp = 0; |
2472 | hlock->holdtime_stamp = sched_clock(); | 2588 | hlock->holdtime_stamp = sched_clock(); |
@@ -2574,6 +2690,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, | |||
2574 | return 1; | 2690 | return 1; |
2575 | } | 2691 | } |
2576 | 2692 | ||
2693 | static int | ||
2694 | __lock_set_subclass(struct lockdep_map *lock, | ||
2695 | unsigned int subclass, unsigned long ip) | ||
2696 | { | ||
2697 | struct task_struct *curr = current; | ||
2698 | struct held_lock *hlock, *prev_hlock; | ||
2699 | struct lock_class *class; | ||
2700 | unsigned int depth; | ||
2701 | int i; | ||
2702 | |||
2703 | depth = curr->lockdep_depth; | ||
2704 | if (DEBUG_LOCKS_WARN_ON(!depth)) | ||
2705 | return 0; | ||
2706 | |||
2707 | prev_hlock = NULL; | ||
2708 | for (i = depth-1; i >= 0; i--) { | ||
2709 | hlock = curr->held_locks + i; | ||
2710 | /* | ||
2711 | * We must not cross into another context: | ||
2712 | */ | ||
2713 | if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) | ||
2714 | break; | ||
2715 | if (hlock->instance == lock) | ||
2716 | goto found_it; | ||
2717 | prev_hlock = hlock; | ||
2718 | } | ||
2719 | return print_unlock_inbalance_bug(curr, lock, ip); | ||
2720 | |||
2721 | found_it: | ||
2722 | class = register_lock_class(lock, subclass, 0); | ||
2723 | hlock->class_idx = class - lock_classes + 1; | ||
2724 | |||
2725 | curr->lockdep_depth = i; | ||
2726 | curr->curr_chain_key = hlock->prev_chain_key; | ||
2727 | |||
2728 | for (; i < depth; i++) { | ||
2729 | hlock = curr->held_locks + i; | ||
2730 | if (!__lock_acquire(hlock->instance, | ||
2731 | hlock_class(hlock)->subclass, hlock->trylock, | ||
2732 | hlock->read, hlock->check, hlock->hardirqs_off, | ||
2733 | hlock->nest_lock, hlock->acquire_ip)) | ||
2734 | return 0; | ||
2735 | } | ||
2736 | |||
2737 | if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) | ||
2738 | return 0; | ||
2739 | return 1; | ||
2740 | } | ||
2741 | |||
2577 | /* | 2742 | /* |
2578 | * Remove the lock to the list of currently held locks in a | 2743 | * Remove the lock to the list of currently held locks in a |
2579 | * potentially non-nested (out of order) manner. This is a | 2744 | * potentially non-nested (out of order) manner. This is a |
@@ -2624,9 +2789,9 @@ found_it: | |||
2624 | for (i++; i < depth; i++) { | 2789 | for (i++; i < depth; i++) { |
2625 | hlock = curr->held_locks + i; | 2790 | hlock = curr->held_locks + i; |
2626 | if (!__lock_acquire(hlock->instance, | 2791 | if (!__lock_acquire(hlock->instance, |
2627 | hlock->class->subclass, hlock->trylock, | 2792 | hlock_class(hlock)->subclass, hlock->trylock, |
2628 | hlock->read, hlock->check, hlock->hardirqs_off, | 2793 | hlock->read, hlock->check, hlock->hardirqs_off, |
2629 | hlock->acquire_ip)) | 2794 | hlock->nest_lock, hlock->acquire_ip)) |
2630 | return 0; | 2795 | return 0; |
2631 | } | 2796 | } |
2632 | 2797 | ||
@@ -2669,7 +2834,7 @@ static int lock_release_nested(struct task_struct *curr, | |||
2669 | 2834 | ||
2670 | #ifdef CONFIG_DEBUG_LOCKDEP | 2835 | #ifdef CONFIG_DEBUG_LOCKDEP |
2671 | hlock->prev_chain_key = 0; | 2836 | hlock->prev_chain_key = 0; |
2672 | hlock->class = NULL; | 2837 | hlock->class_idx = 0; |
2673 | hlock->acquire_ip = 0; | 2838 | hlock->acquire_ip = 0; |
2674 | hlock->irq_context = 0; | 2839 | hlock->irq_context = 0; |
2675 | #endif | 2840 | #endif |
@@ -2738,18 +2903,36 @@ static void check_flags(unsigned long flags) | |||
2738 | #endif | 2903 | #endif |
2739 | } | 2904 | } |
2740 | 2905 | ||
2906 | void | ||
2907 | lock_set_subclass(struct lockdep_map *lock, | ||
2908 | unsigned int subclass, unsigned long ip) | ||
2909 | { | ||
2910 | unsigned long flags; | ||
2911 | |||
2912 | if (unlikely(current->lockdep_recursion)) | ||
2913 | return; | ||
2914 | |||
2915 | raw_local_irq_save(flags); | ||
2916 | current->lockdep_recursion = 1; | ||
2917 | check_flags(flags); | ||
2918 | if (__lock_set_subclass(lock, subclass, ip)) | ||
2919 | check_chain_key(current); | ||
2920 | current->lockdep_recursion = 0; | ||
2921 | raw_local_irq_restore(flags); | ||
2922 | } | ||
2923 | |||
2924 | EXPORT_SYMBOL_GPL(lock_set_subclass); | ||
2925 | |||
2741 | /* | 2926 | /* |
2742 | * We are not always called with irqs disabled - do that here, | 2927 | * We are not always called with irqs disabled - do that here, |
2743 | * and also avoid lockdep recursion: | 2928 | * and also avoid lockdep recursion: |
2744 | */ | 2929 | */ |
2745 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | 2930 | void lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
2746 | int trylock, int read, int check, unsigned long ip) | 2931 | int trylock, int read, int check, |
2932 | struct lockdep_map *nest_lock, unsigned long ip) | ||
2747 | { | 2933 | { |
2748 | unsigned long flags; | 2934 | unsigned long flags; |
2749 | 2935 | ||
2750 | if (unlikely(!lock_stat && !prove_locking)) | ||
2751 | return; | ||
2752 | |||
2753 | if (unlikely(current->lockdep_recursion)) | 2936 | if (unlikely(current->lockdep_recursion)) |
2754 | return; | 2937 | return; |
2755 | 2938 | ||
@@ -2758,7 +2941,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
2758 | 2941 | ||
2759 | current->lockdep_recursion = 1; | 2942 | current->lockdep_recursion = 1; |
2760 | __lock_acquire(lock, subclass, trylock, read, check, | 2943 | __lock_acquire(lock, subclass, trylock, read, check, |
2761 | irqs_disabled_flags(flags), ip); | 2944 | irqs_disabled_flags(flags), nest_lock, ip); |
2762 | current->lockdep_recursion = 0; | 2945 | current->lockdep_recursion = 0; |
2763 | raw_local_irq_restore(flags); | 2946 | raw_local_irq_restore(flags); |
2764 | } | 2947 | } |
@@ -2770,9 +2953,6 @@ void lock_release(struct lockdep_map *lock, int nested, | |||
2770 | { | 2953 | { |
2771 | unsigned long flags; | 2954 | unsigned long flags; |
2772 | 2955 | ||
2773 | if (unlikely(!lock_stat && !prove_locking)) | ||
2774 | return; | ||
2775 | |||
2776 | if (unlikely(current->lockdep_recursion)) | 2956 | if (unlikely(current->lockdep_recursion)) |
2777 | return; | 2957 | return; |
2778 | 2958 | ||
@@ -2845,11 +3025,11 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip) | |||
2845 | found_it: | 3025 | found_it: |
2846 | hlock->waittime_stamp = sched_clock(); | 3026 | hlock->waittime_stamp = sched_clock(); |
2847 | 3027 | ||
2848 | point = lock_contention_point(hlock->class, ip); | 3028 | point = lock_contention_point(hlock_class(hlock), ip); |
2849 | 3029 | ||
2850 | stats = get_lock_stats(hlock->class); | 3030 | stats = get_lock_stats(hlock_class(hlock)); |
2851 | if (point < ARRAY_SIZE(stats->contention_point)) | 3031 | if (point < ARRAY_SIZE(stats->contention_point)) |
2852 | stats->contention_point[i]++; | 3032 | stats->contention_point[point]++; |
2853 | if (lock->cpu != smp_processor_id()) | 3033 | if (lock->cpu != smp_processor_id()) |
2854 | stats->bounces[bounce_contended + !!hlock->read]++; | 3034 | stats->bounces[bounce_contended + !!hlock->read]++; |
2855 | put_lock_stats(stats); | 3035 | put_lock_stats(stats); |
@@ -2893,7 +3073,7 @@ found_it: | |||
2893 | hlock->holdtime_stamp = now; | 3073 | hlock->holdtime_stamp = now; |
2894 | } | 3074 | } |
2895 | 3075 | ||
2896 | stats = get_lock_stats(hlock->class); | 3076 | stats = get_lock_stats(hlock_class(hlock)); |
2897 | if (waittime) { | 3077 | if (waittime) { |
2898 | if (hlock->read) | 3078 | if (hlock->read) |
2899 | lock_time_inc(&stats->read_waittime, waittime); | 3079 | lock_time_inc(&stats->read_waittime, waittime); |
@@ -2988,6 +3168,7 @@ static void zap_class(struct lock_class *class) | |||
2988 | list_del_rcu(&class->hash_entry); | 3168 | list_del_rcu(&class->hash_entry); |
2989 | list_del_rcu(&class->lock_entry); | 3169 | list_del_rcu(&class->lock_entry); |
2990 | 3170 | ||
3171 | class->key = NULL; | ||
2991 | } | 3172 | } |
2992 | 3173 | ||
2993 | static inline int within(const void *addr, void *start, unsigned long size) | 3174 | static inline int within(const void *addr, void *start, unsigned long size) |
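End of the lockdep.c changes. Besides the nest_lock plumbing, the other new entry point is lock_set_subclass(), which re-annotates an already-held lock with a different subclass by replaying the held-lock stack through __lock_acquire(). A sketch of the kind of caller it targets, assuming a lock that was temporarily acquired with a nested subclass during an out-of-order double-lock; the function and variable names are placeholders:

	static void double_unlock_example(spinlock_t *mine, spinlock_t *other)
	{
		spin_unlock(other);
		/* 'mine' stays held; only its lockdep subclass drops back to 0 */
		lock_set_subclass(&mine->dep_map, 0, _RET_IP_);
	}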
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index c3600a091a28..56b196932c08 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h | |||
@@ -17,9 +17,6 @@ | |||
17 | */ | 17 | */ |
18 | #define MAX_LOCKDEP_ENTRIES 8192UL | 18 | #define MAX_LOCKDEP_ENTRIES 8192UL |
19 | 19 | ||
20 | #define MAX_LOCKDEP_KEYS_BITS 11 | ||
21 | #define MAX_LOCKDEP_KEYS (1UL << MAX_LOCKDEP_KEYS_BITS) | ||
22 | |||
23 | #define MAX_LOCKDEP_CHAINS_BITS 14 | 20 | #define MAX_LOCKDEP_CHAINS_BITS 14 |
24 | #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) | 21 | #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) |
25 | 22 | ||
@@ -53,6 +50,22 @@ extern unsigned int nr_process_chains; | |||
53 | extern unsigned int max_lockdep_depth; | 50 | extern unsigned int max_lockdep_depth; |
54 | extern unsigned int max_recursion_depth; | 51 | extern unsigned int max_recursion_depth; |
55 | 52 | ||
53 | #ifdef CONFIG_PROVE_LOCKING | ||
54 | extern unsigned long lockdep_count_forward_deps(struct lock_class *); | ||
55 | extern unsigned long lockdep_count_backward_deps(struct lock_class *); | ||
56 | #else | ||
57 | static inline unsigned long | ||
58 | lockdep_count_forward_deps(struct lock_class *class) | ||
59 | { | ||
60 | return 0; | ||
61 | } | ||
62 | static inline unsigned long | ||
63 | lockdep_count_backward_deps(struct lock_class *class) | ||
64 | { | ||
65 | return 0; | ||
66 | } | ||
67 | #endif | ||
68 | |||
56 | #ifdef CONFIG_DEBUG_LOCKDEP | 69 | #ifdef CONFIG_DEBUG_LOCKDEP |
57 | /* | 70 | /* |
58 | * Various lockdep statistics: | 71 | * Various lockdep statistics: |
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index 9b0e940e2545..20dbcbf9c7dd 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c | |||
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v) | |||
63 | { | 63 | { |
64 | } | 64 | } |
65 | 65 | ||
66 | static unsigned long count_forward_deps(struct lock_class *class) | ||
67 | { | ||
68 | struct lock_list *entry; | ||
69 | unsigned long ret = 1; | ||
70 | |||
71 | /* | ||
72 | * Recurse this class's dependency list: | ||
73 | */ | ||
74 | list_for_each_entry(entry, &class->locks_after, entry) | ||
75 | ret += count_forward_deps(entry->class); | ||
76 | |||
77 | return ret; | ||
78 | } | ||
79 | |||
80 | static unsigned long count_backward_deps(struct lock_class *class) | ||
81 | { | ||
82 | struct lock_list *entry; | ||
83 | unsigned long ret = 1; | ||
84 | |||
85 | /* | ||
86 | * Recurse this class's dependency list: | ||
87 | */ | ||
88 | list_for_each_entry(entry, &class->locks_before, entry) | ||
89 | ret += count_backward_deps(entry->class); | ||
90 | |||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | static void print_name(struct seq_file *m, struct lock_class *class) | 66 | static void print_name(struct seq_file *m, struct lock_class *class) |
95 | { | 67 | { |
96 | char str[128]; | 68 | char str[128]; |
@@ -110,7 +82,6 @@ static void print_name(struct seq_file *m, struct lock_class *class) | |||
110 | 82 | ||
111 | static int l_show(struct seq_file *m, void *v) | 83 | static int l_show(struct seq_file *m, void *v) |
112 | { | 84 | { |
113 | unsigned long nr_forward_deps, nr_backward_deps; | ||
114 | struct lock_class *class = v; | 85 | struct lock_class *class = v; |
115 | struct lock_list *entry; | 86 | struct lock_list *entry; |
116 | char c1, c2, c3, c4; | 87 | char c1, c2, c3, c4; |
@@ -124,11 +95,10 @@ static int l_show(struct seq_file *m, void *v) | |||
124 | #ifdef CONFIG_DEBUG_LOCKDEP | 95 | #ifdef CONFIG_DEBUG_LOCKDEP |
125 | seq_printf(m, " OPS:%8ld", class->ops); | 96 | seq_printf(m, " OPS:%8ld", class->ops); |
126 | #endif | 97 | #endif |
127 | nr_forward_deps = count_forward_deps(class); | 98 | #ifdef CONFIG_PROVE_LOCKING |
128 | seq_printf(m, " FD:%5ld", nr_forward_deps); | 99 | seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class)); |
129 | 100 | seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class)); | |
130 | nr_backward_deps = count_backward_deps(class); | 101 | #endif |
131 | seq_printf(m, " BD:%5ld", nr_backward_deps); | ||
132 | 102 | ||
133 | get_usage_chars(class, &c1, &c2, &c3, &c4); | 103 | get_usage_chars(class, &c1, &c2, &c3, &c4); |
134 | seq_printf(m, " %c%c%c%c", c1, c2, c3, c4); | 104 | seq_printf(m, " %c%c%c%c", c1, c2, c3, c4); |
@@ -229,6 +199,9 @@ static int lc_show(struct seq_file *m, void *v) | |||
229 | 199 | ||
230 | for (i = 0; i < chain->depth; i++) { | 200 | for (i = 0; i < chain->depth; i++) { |
231 | class = lock_chain_get_class(chain, i); | 201 | class = lock_chain_get_class(chain, i); |
202 | if (!class->key) | ||
203 | continue; | ||
204 | |||
232 | seq_printf(m, "[%p] ", class->key); | 205 | seq_printf(m, "[%p] ", class->key); |
233 | print_name(m, class); | 206 | print_name(m, class); |
234 | seq_puts(m, "\n"); | 207 | seq_puts(m, "\n"); |
@@ -350,7 +323,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v) | |||
350 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) | 323 | if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) |
351 | nr_hardirq_read_unsafe++; | 324 | nr_hardirq_read_unsafe++; |
352 | 325 | ||
353 | sum_forward_deps += count_forward_deps(class); | 326 | #ifdef CONFIG_PROVE_LOCKING |
327 | sum_forward_deps += lockdep_count_forward_deps(class); | ||
328 | #endif | ||
354 | } | 329 | } |
355 | #ifdef CONFIG_DEBUG_LOCKDEP | 330 | #ifdef CONFIG_DEBUG_LOCKDEP |
356 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); | 331 | DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused); |
@@ -497,8 +472,9 @@ static void snprint_time(char *buf, size_t bufsiz, s64 nr) | |||
497 | { | 472 | { |
498 | unsigned long rem; | 473 | unsigned long rem; |
499 | 474 | ||
475 | nr += 5; /* for display rounding */ | ||
500 | rem = do_div(nr, 1000); /* XXX: do_div_signed */ | 476 | rem = do_div(nr, 1000); /* XXX: do_div_signed */ |
501 | snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, ((int)rem+5)/10); | 477 | snprintf(buf, bufsiz, "%lld.%02d", (long long)nr, (int)rem/10); |
502 | } | 478 | } |
503 | 479 | ||
504 | static void seq_time(struct seq_file *m, s64 time) | 480 | static void seq_time(struct seq_file *m, s64 time) |
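The snprint_time() hunk rounds before the value is split into integer and fractional parts, so the carry can propagate; previously an input of 1999 printed as "1.100" because the +5 was applied only to the remainder. A small userspace re-creation of both variants (function names invented) that can be compiled and run to see the difference:

	#include <stdio.h>

	static void old_fmt(char *buf, size_t n, long long nr)
	{
		int rem = nr % 1000;

		snprintf(buf, n, "%lld.%02d", nr / 1000, (rem + 5) / 10);	/* 1999 -> "1.100" */
	}

	static void new_fmt(char *buf, size_t n, long long nr)
	{
		int rem;

		nr += 5;			/* round first, so the carry ripples into the integer part */
		rem = nr % 1000;
		snprintf(buf, n, "%lld.%02d", nr / 1000, rem / 10);		/* 1999 -> "2.00" */
	}

	int main(void)
	{
		char a[32], b[32];

		old_fmt(a, sizeof(a), 1999);
		new_fmt(b, sizeof(b), 1999);
		printf("old: %s  new: %s\n", a, b);
		return 0;
	}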
diff --git a/kernel/marker.c b/kernel/marker.c index 1abfb923b761..7d1faecd7a51 100644 --- a/kernel/marker.c +++ b/kernel/marker.c | |||
@@ -126,6 +126,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | |||
126 | struct marker_probe_closure *multi; | 126 | struct marker_probe_closure *multi; |
127 | int i; | 127 | int i; |
128 | /* | 128 | /* |
129 | * Read mdata->ptype before mdata->multi. | ||
130 | */ | ||
131 | smp_rmb(); | ||
132 | multi = mdata->multi; | ||
133 | /* | ||
129 | * multi points to an array, therefore accessing the array | 134 | * multi points to an array, therefore accessing the array |
130 | * depends on reading multi. However, even in this case, | 135 | * depends on reading multi. However, even in this case, |
131 | * we must insure that the pointer is read _before_ the array | 136 | * we must insure that the pointer is read _before_ the array |
@@ -133,7 +138,6 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | |||
133 | * in the fast path, so put the explicit barrier here. | 138 | * in the fast path, so put the explicit barrier here. |
134 | */ | 139 | */ |
135 | smp_read_barrier_depends(); | 140 | smp_read_barrier_depends(); |
136 | multi = mdata->multi; | ||
137 | for (i = 0; multi[i].func; i++) { | 141 | for (i = 0; multi[i].func; i++) { |
138 | va_start(args, call_private); | 142 | va_start(args, call_private); |
139 | multi[i].func(multi[i].probe_private, call_private, | 143 | multi[i].func(multi[i].probe_private, call_private, |
@@ -175,6 +179,11 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...) | |||
175 | struct marker_probe_closure *multi; | 179 | struct marker_probe_closure *multi; |
176 | int i; | 180 | int i; |
177 | /* | 181 | /* |
182 | * Read mdata->ptype before mdata->multi. | ||
183 | */ | ||
184 | smp_rmb(); | ||
185 | multi = mdata->multi; | ||
186 | /* | ||
178 | * multi points to an array, therefore accessing the array | 187 | * multi points to an array, therefore accessing the array |
179 | * depends on reading multi. However, even in this case, | 188 | * depends on reading multi. However, even in this case, |
180 | * we must insure that the pointer is read _before_ the array | 189 | * we must insure that the pointer is read _before_ the array |
@@ -182,7 +191,6 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...) | |||
182 | * in the fast path, so put the explicit barrier here. | 191 | * in the fast path, so put the explicit barrier here. |
183 | */ | 192 | */ |
184 | smp_read_barrier_depends(); | 193 | smp_read_barrier_depends(); |
185 | multi = mdata->multi; | ||
186 | for (i = 0; multi[i].func; i++) | 194 | for (i = 0; multi[i].func; i++) |
187 | multi[i].func(multi[i].probe_private, call_private, | 195 | multi[i].func(multi[i].probe_private, call_private, |
188 | mdata->format, &args); | 196 | mdata->format, &args); |
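The two marker_probe_cb hunks move the explicit barrier so that mdata->ptype is always read before mdata->multi; the update side publishes the probe array before it flips ptype, so the read barrier pairs with that write barrier. A generic kernel-context sketch of the publish/consume pairing with invented names (struct marker itself carries more state than shown):

	struct probe_closure;			/* opaque for the sketch */

	struct probe_entry {			/* simplified stand-in for struct marker */
		char ptype;			/* 0: single probe, 1: probe array */
		struct probe_closure *multi;
	};

	static void publish_multi(struct probe_entry *e, struct probe_closure *arr)
	{
		e->multi = arr;			/* array pointer first ... */
		smp_wmb();
		e->ptype = 1;			/* ... then the flag readers test */
	}

	static struct probe_closure *consume_multi(struct probe_entry *e)
	{
		if (!e->ptype)
			return NULL;
		smp_rmb();			/* pairs with the smp_wmb() above */
		return e->multi;
	}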
@@ -441,7 +449,7 @@ static int remove_marker(const char *name) | |||
441 | hlist_del(&e->hlist); | 449 | hlist_del(&e->hlist); |
442 | /* Make sure the call_rcu has been executed */ | 450 | /* Make sure the call_rcu has been executed */ |
443 | if (e->rcu_pending) | 451 | if (e->rcu_pending) |
444 | rcu_barrier(); | 452 | rcu_barrier_sched(); |
445 | kfree(e); | 453 | kfree(e); |
446 | return 0; | 454 | return 0; |
447 | } | 455 | } |
@@ -476,7 +484,7 @@ static int marker_set_format(struct marker_entry **entry, const char *format) | |||
476 | hlist_del(&(*entry)->hlist); | 484 | hlist_del(&(*entry)->hlist); |
477 | /* Make sure the call_rcu has been executed */ | 485 | /* Make sure the call_rcu has been executed */ |
478 | if ((*entry)->rcu_pending) | 486 | if ((*entry)->rcu_pending) |
479 | rcu_barrier(); | 487 | rcu_barrier_sched(); |
480 | kfree(*entry); | 488 | kfree(*entry); |
481 | *entry = e; | 489 | *entry = e; |
482 | trace_mark(core_marker_format, "name %s format %s", | 490 | trace_mark(core_marker_format, "name %s format %s", |
@@ -655,7 +663,7 @@ int marker_probe_register(const char *name, const char *format, | |||
655 | * make sure it's executed now. | 663 | * make sure it's executed now. |
656 | */ | 664 | */ |
657 | if (entry->rcu_pending) | 665 | if (entry->rcu_pending) |
658 | rcu_barrier(); | 666 | rcu_barrier_sched(); |
659 | old = marker_entry_add_probe(entry, probe, probe_private); | 667 | old = marker_entry_add_probe(entry, probe, probe_private); |
660 | if (IS_ERR(old)) { | 668 | if (IS_ERR(old)) { |
661 | ret = PTR_ERR(old); | 669 | ret = PTR_ERR(old); |
@@ -670,10 +678,7 @@ int marker_probe_register(const char *name, const char *format, | |||
670 | entry->rcu_pending = 1; | 678 | entry->rcu_pending = 1; |
671 | /* write rcu_pending before calling the RCU callback */ | 679 | /* write rcu_pending before calling the RCU callback */ |
672 | smp_wmb(); | 680 | smp_wmb(); |
673 | #ifdef CONFIG_PREEMPT_RCU | 681 | call_rcu_sched(&entry->rcu, free_old_closure); |
674 | synchronize_sched(); /* Until we have the call_rcu_sched() */ | ||
675 | #endif | ||
676 | call_rcu(&entry->rcu, free_old_closure); | ||
677 | end: | 682 | end: |
678 | mutex_unlock(&markers_mutex); | 683 | mutex_unlock(&markers_mutex); |
679 | return ret; | 684 | return ret; |
@@ -704,7 +709,7 @@ int marker_probe_unregister(const char *name, | |||
704 | if (!entry) | 709 | if (!entry) |
705 | goto end; | 710 | goto end; |
706 | if (entry->rcu_pending) | 711 | if (entry->rcu_pending) |
707 | rcu_barrier(); | 712 | rcu_barrier_sched(); |
708 | old = marker_entry_remove_probe(entry, probe, probe_private); | 713 | old = marker_entry_remove_probe(entry, probe, probe_private); |
709 | mutex_unlock(&markers_mutex); | 714 | mutex_unlock(&markers_mutex); |
710 | marker_update_probes(); /* may update entry */ | 715 | marker_update_probes(); /* may update entry */ |
@@ -716,10 +721,7 @@ int marker_probe_unregister(const char *name, | |||
716 | entry->rcu_pending = 1; | 721 | entry->rcu_pending = 1; |
717 | /* write rcu_pending before calling the RCU callback */ | 722 | /* write rcu_pending before calling the RCU callback */ |
718 | smp_wmb(); | 723 | smp_wmb(); |
719 | #ifdef CONFIG_PREEMPT_RCU | 724 | call_rcu_sched(&entry->rcu, free_old_closure); |
720 | synchronize_sched(); /* Until we have the call_rcu_sched() */ | ||
721 | #endif | ||
722 | call_rcu(&entry->rcu, free_old_closure); | ||
723 | remove_marker(name); /* Ignore busy error message */ | 725 | remove_marker(name); /* Ignore busy error message */ |
724 | ret = 0; | 726 | ret = 0; |
725 | end: | 727 | end: |
@@ -786,7 +788,7 @@ int marker_probe_unregister_private_data(marker_probe_func *probe, | |||
786 | goto end; | 788 | goto end; |
787 | } | 789 | } |
788 | if (entry->rcu_pending) | 790 | if (entry->rcu_pending) |
789 | rcu_barrier(); | 791 | rcu_barrier_sched(); |
790 | old = marker_entry_remove_probe(entry, NULL, probe_private); | 792 | old = marker_entry_remove_probe(entry, NULL, probe_private); |
791 | mutex_unlock(&markers_mutex); | 793 | mutex_unlock(&markers_mutex); |
792 | marker_update_probes(); /* may update entry */ | 794 | marker_update_probes(); /* may update entry */ |
@@ -797,10 +799,7 @@ int marker_probe_unregister_private_data(marker_probe_func *probe, | |||
797 | entry->rcu_pending = 1; | 799 | entry->rcu_pending = 1; |
798 | /* write rcu_pending before calling the RCU callback */ | 800 | /* write rcu_pending before calling the RCU callback */ |
799 | smp_wmb(); | 801 | smp_wmb(); |
800 | #ifdef CONFIG_PREEMPT_RCU | 802 | call_rcu_sched(&entry->rcu, free_old_closure); |
801 | synchronize_sched(); /* Until we have the call_rcu_sched() */ | ||
802 | #endif | ||
803 | call_rcu(&entry->rcu, free_old_closure); | ||
804 | remove_marker(entry->name); /* Ignore busy error message */ | 803 | remove_marker(entry->name); /* Ignore busy error message */ |
805 | end: | 804 | end: |
806 | mutex_unlock(&markers_mutex); | 805 | mutex_unlock(&markers_mutex); |
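Note on the marker.c hunks above: they replace the open-coded workaround for the missing call_rcu_sched() (a synchronize_sched() under CONFIG_PREEMPT_RCU followed by call_rcu()) with the real API, and switch the matching waits from rcu_barrier() to rcu_barrier_sched(). Marker probes run with preemption disabled, so the sched flavour of RCU is the grace period that actually protects the probe closures; the probe callers also gain an explicit smp_rmb() so mdata->ptype is read before mdata->multi. A condensed sketch of the resulting update-side pattern, using only names visible in the hunks:

    entry->rcu_pending = 1;
    smp_wmb();                      /* make rcu_pending visible before queueing */
    call_rcu_sched(&entry->rcu, free_old_closure);

    /* later, before freeing the entry itself */
    if (entry->rcu_pending)
            rcu_barrier_sched();    /* wait for free_old_closure() to have run */
    kfree(entry);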
diff --git a/kernel/module.c b/kernel/module.c index 5f80478b746d..9db11911e04b 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -70,6 +70,9 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq); | |||
70 | 70 | ||
71 | static BLOCKING_NOTIFIER_HEAD(module_notify_list); | 71 | static BLOCKING_NOTIFIER_HEAD(module_notify_list); |
72 | 72 | ||
73 | /* Bounds of module allocation, for speeding __module_text_address */ | ||
74 | static unsigned long module_addr_min = -1UL, module_addr_max = 0; | ||
75 | |||
73 | int register_module_notifier(struct notifier_block * nb) | 76 | int register_module_notifier(struct notifier_block * nb) |
74 | { | 77 | { |
75 | return blocking_notifier_chain_register(&module_notify_list, nb); | 78 | return blocking_notifier_chain_register(&module_notify_list, nb); |
@@ -134,17 +137,19 @@ extern const struct kernel_symbol __start___ksymtab_gpl[]; | |||
134 | extern const struct kernel_symbol __stop___ksymtab_gpl[]; | 137 | extern const struct kernel_symbol __stop___ksymtab_gpl[]; |
135 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; | 138 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; |
136 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; | 139 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; |
137 | extern const struct kernel_symbol __start___ksymtab_unused[]; | ||
138 | extern const struct kernel_symbol __stop___ksymtab_unused[]; | ||
139 | extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; | ||
140 | extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; | ||
141 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; | 140 | extern const struct kernel_symbol __start___ksymtab_gpl_future[]; |
142 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; | 141 | extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; |
143 | extern const unsigned long __start___kcrctab[]; | 142 | extern const unsigned long __start___kcrctab[]; |
144 | extern const unsigned long __start___kcrctab_gpl[]; | 143 | extern const unsigned long __start___kcrctab_gpl[]; |
145 | extern const unsigned long __start___kcrctab_gpl_future[]; | 144 | extern const unsigned long __start___kcrctab_gpl_future[]; |
145 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
146 | extern const struct kernel_symbol __start___ksymtab_unused[]; | ||
147 | extern const struct kernel_symbol __stop___ksymtab_unused[]; | ||
148 | extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; | ||
149 | extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; | ||
146 | extern const unsigned long __start___kcrctab_unused[]; | 150 | extern const unsigned long __start___kcrctab_unused[]; |
147 | extern const unsigned long __start___kcrctab_unused_gpl[]; | 151 | extern const unsigned long __start___kcrctab_unused_gpl[]; |
152 | #endif | ||
148 | 153 | ||
149 | #ifndef CONFIG_MODVERSIONS | 154 | #ifndef CONFIG_MODVERSIONS |
150 | #define symversion(base, idx) NULL | 155 | #define symversion(base, idx) NULL |
@@ -152,152 +157,170 @@ extern const unsigned long __start___kcrctab_unused_gpl[]; | |||
152 | #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) | 157 | #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) |
153 | #endif | 158 | #endif |
154 | 159 | ||
155 | /* lookup symbol in given range of kernel_symbols */ | ||
156 | static const struct kernel_symbol *lookup_symbol(const char *name, | ||
157 | const struct kernel_symbol *start, | ||
158 | const struct kernel_symbol *stop) | ||
159 | { | ||
160 | const struct kernel_symbol *ks = start; | ||
161 | for (; ks < stop; ks++) | ||
162 | if (strcmp(ks->name, name) == 0) | ||
163 | return ks; | ||
164 | return NULL; | ||
165 | } | ||
166 | |||
167 | static bool always_ok(bool gplok, bool warn, const char *name) | ||
168 | { | ||
169 | return true; | ||
170 | } | ||
171 | |||
172 | static bool printk_unused_warning(bool gplok, bool warn, const char *name) | ||
173 | { | ||
174 | if (warn) { | ||
175 | printk(KERN_WARNING "Symbol %s is marked as UNUSED, " | ||
176 | "however this module is using it.\n", name); | ||
177 | printk(KERN_WARNING | ||
178 | "This symbol will go away in the future.\n"); | ||
179 | printk(KERN_WARNING | ||
180 | "Please evalute if this is the right api to use and if " | ||
181 | "it really is, submit a report the linux kernel " | ||
182 | "mailinglist together with submitting your code for " | ||
183 | "inclusion.\n"); | ||
184 | } | ||
185 | return true; | ||
186 | } | ||
187 | |||
188 | static bool gpl_only_unused_warning(bool gplok, bool warn, const char *name) | ||
189 | { | ||
190 | if (!gplok) | ||
191 | return false; | ||
192 | return printk_unused_warning(gplok, warn, name); | ||
193 | } | ||
194 | |||
195 | static bool gpl_only(bool gplok, bool warn, const char *name) | ||
196 | { | ||
197 | return gplok; | ||
198 | } | ||
199 | |||
200 | static bool warn_if_not_gpl(bool gplok, bool warn, const char *name) | ||
201 | { | ||
202 | if (!gplok && warn) { | ||
203 | printk(KERN_WARNING "Symbol %s is being used " | ||
204 | "by a non-GPL module, which will not " | ||
205 | "be allowed in the future\n", name); | ||
206 | printk(KERN_WARNING "Please see the file " | ||
207 | "Documentation/feature-removal-schedule.txt " | ||
208 | "in the kernel source tree for more details.\n"); | ||
209 | } | ||
210 | return true; | ||
211 | } | ||
212 | |||
213 | struct symsearch { | 160 | struct symsearch { |
214 | const struct kernel_symbol *start, *stop; | 161 | const struct kernel_symbol *start, *stop; |
215 | const unsigned long *crcs; | 162 | const unsigned long *crcs; |
216 | bool (*check)(bool gplok, bool warn, const char *name); | 163 | enum { |
164 | NOT_GPL_ONLY, | ||
165 | GPL_ONLY, | ||
166 | WILL_BE_GPL_ONLY, | ||
167 | } licence; | ||
168 | bool unused; | ||
217 | }; | 169 | }; |
218 | 170 | ||
219 | /* Look through this array of symbol tables for a symbol match which | 171 | static bool each_symbol_in_section(const struct symsearch *arr, |
220 | * passes the check function. */ | 172 | unsigned int arrsize, |
221 | static const struct kernel_symbol *search_symarrays(const struct symsearch *arr, | 173 | struct module *owner, |
222 | unsigned int num, | 174 | bool (*fn)(const struct symsearch *syms, |
223 | const char *name, | 175 | struct module *owner, |
224 | bool gplok, | 176 | unsigned int symnum, void *data), |
225 | bool warn, | 177 | void *data) |
226 | const unsigned long **crc) | ||
227 | { | 178 | { |
228 | unsigned int i; | 179 | unsigned int i, j; |
229 | const struct kernel_symbol *ks; | ||
230 | |||
231 | for (i = 0; i < num; i++) { | ||
232 | ks = lookup_symbol(name, arr[i].start, arr[i].stop); | ||
233 | if (!ks || !arr[i].check(gplok, warn, name)) | ||
234 | continue; | ||
235 | 180 | ||
236 | if (crc) | 181 | for (j = 0; j < arrsize; j++) { |
237 | *crc = symversion(arr[i].crcs, ks - arr[i].start); | 182 | for (i = 0; i < arr[j].stop - arr[j].start; i++) |
238 | return ks; | 183 | if (fn(&arr[j], owner, i, data)) |
184 | return true; | ||
239 | } | 185 | } |
240 | return NULL; | 186 | |
187 | return false; | ||
241 | } | 188 | } |
242 | 189 | ||
243 | /* Find a symbol, return value, (optional) crc and (optional) module | 190 | /* Returns true as soon as fn returns true, otherwise false. */ |
244 | * which owns it */ | 191 | static bool each_symbol(bool (*fn)(const struct symsearch *arr, |
245 | static unsigned long find_symbol(const char *name, | 192 | struct module *owner, |
246 | struct module **owner, | 193 | unsigned int symnum, void *data), |
247 | const unsigned long **crc, | 194 | void *data) |
248 | bool gplok, | ||
249 | bool warn) | ||
250 | { | 195 | { |
251 | struct module *mod; | 196 | struct module *mod; |
252 | const struct kernel_symbol *ks; | ||
253 | const struct symsearch arr[] = { | 197 | const struct symsearch arr[] = { |
254 | { __start___ksymtab, __stop___ksymtab, __start___kcrctab, | 198 | { __start___ksymtab, __stop___ksymtab, __start___kcrctab, |
255 | always_ok }, | 199 | NOT_GPL_ONLY, false }, |
256 | { __start___ksymtab_gpl, __stop___ksymtab_gpl, | 200 | { __start___ksymtab_gpl, __stop___ksymtab_gpl, |
257 | __start___kcrctab_gpl, gpl_only }, | 201 | __start___kcrctab_gpl, |
202 | GPL_ONLY, false }, | ||
258 | { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, | 203 | { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, |
259 | __start___kcrctab_gpl_future, warn_if_not_gpl }, | 204 | __start___kcrctab_gpl_future, |
205 | WILL_BE_GPL_ONLY, false }, | ||
206 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
260 | { __start___ksymtab_unused, __stop___ksymtab_unused, | 207 | { __start___ksymtab_unused, __stop___ksymtab_unused, |
261 | __start___kcrctab_unused, printk_unused_warning }, | 208 | __start___kcrctab_unused, |
209 | NOT_GPL_ONLY, true }, | ||
262 | { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, | 210 | { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, |
263 | __start___kcrctab_unused_gpl, gpl_only_unused_warning }, | 211 | __start___kcrctab_unused_gpl, |
212 | GPL_ONLY, true }, | ||
213 | #endif | ||
264 | }; | 214 | }; |
265 | 215 | ||
266 | /* Core kernel first. */ | 216 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) |
267 | ks = search_symarrays(arr, ARRAY_SIZE(arr), name, gplok, warn, crc); | 217 | return true; |
268 | if (ks) { | ||
269 | if (owner) | ||
270 | *owner = NULL; | ||
271 | return ks->value; | ||
272 | } | ||
273 | 218 | ||
274 | /* Now try modules. */ | ||
275 | list_for_each_entry(mod, &modules, list) { | 219 | list_for_each_entry(mod, &modules, list) { |
276 | struct symsearch arr[] = { | 220 | struct symsearch arr[] = { |
277 | { mod->syms, mod->syms + mod->num_syms, mod->crcs, | 221 | { mod->syms, mod->syms + mod->num_syms, mod->crcs, |
278 | always_ok }, | 222 | NOT_GPL_ONLY, false }, |
279 | { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, | 223 | { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, |
280 | mod->gpl_crcs, gpl_only }, | 224 | mod->gpl_crcs, |
225 | GPL_ONLY, false }, | ||
281 | { mod->gpl_future_syms, | 226 | { mod->gpl_future_syms, |
282 | mod->gpl_future_syms + mod->num_gpl_future_syms, | 227 | mod->gpl_future_syms + mod->num_gpl_future_syms, |
283 | mod->gpl_future_crcs, warn_if_not_gpl }, | 228 | mod->gpl_future_crcs, |
229 | WILL_BE_GPL_ONLY, false }, | ||
230 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
284 | { mod->unused_syms, | 231 | { mod->unused_syms, |
285 | mod->unused_syms + mod->num_unused_syms, | 232 | mod->unused_syms + mod->num_unused_syms, |
286 | mod->unused_crcs, printk_unused_warning }, | 233 | mod->unused_crcs, |
234 | NOT_GPL_ONLY, true }, | ||
287 | { mod->unused_gpl_syms, | 235 | { mod->unused_gpl_syms, |
288 | mod->unused_gpl_syms + mod->num_unused_gpl_syms, | 236 | mod->unused_gpl_syms + mod->num_unused_gpl_syms, |
289 | mod->unused_gpl_crcs, gpl_only_unused_warning }, | 237 | mod->unused_gpl_crcs, |
238 | GPL_ONLY, true }, | ||
239 | #endif | ||
290 | }; | 240 | }; |
291 | 241 | ||
292 | ks = search_symarrays(arr, ARRAY_SIZE(arr), | 242 | if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) |
293 | name, gplok, warn, crc); | 243 | return true; |
294 | if (ks) { | 244 | } |
295 | if (owner) | 245 | return false; |
296 | *owner = mod; | 246 | } |
297 | return ks->value; | 247 | |
248 | struct find_symbol_arg { | ||
249 | /* Input */ | ||
250 | const char *name; | ||
251 | bool gplok; | ||
252 | bool warn; | ||
253 | |||
254 | /* Output */ | ||
255 | struct module *owner; | ||
256 | const unsigned long *crc; | ||
257 | unsigned long value; | ||
258 | }; | ||
259 | |||
260 | static bool find_symbol_in_section(const struct symsearch *syms, | ||
261 | struct module *owner, | ||
262 | unsigned int symnum, void *data) | ||
263 | { | ||
264 | struct find_symbol_arg *fsa = data; | ||
265 | |||
266 | if (strcmp(syms->start[symnum].name, fsa->name) != 0) | ||
267 | return false; | ||
268 | |||
269 | if (!fsa->gplok) { | ||
270 | if (syms->licence == GPL_ONLY) | ||
271 | return false; | ||
272 | if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { | ||
273 | printk(KERN_WARNING "Symbol %s is being used " | ||
274 | "by a non-GPL module, which will not " | ||
275 | "be allowed in the future\n", fsa->name); | ||
276 | printk(KERN_WARNING "Please see the file " | ||
277 | "Documentation/feature-removal-schedule.txt " | ||
278 | "in the kernel source tree for more details.\n"); | ||
298 | } | 279 | } |
299 | } | 280 | } |
300 | 281 | ||
282 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
283 | if (syms->unused && fsa->warn) { | ||
284 | printk(KERN_WARNING "Symbol %s is marked as UNUSED, " | ||
285 | "however this module is using it.\n", fsa->name); | ||
286 | printk(KERN_WARNING | ||
287 | "This symbol will go away in the future.\n"); | ||
288 | printk(KERN_WARNING | ||
289 | "Please evalute if this is the right api to use and if " | ||
290 | "it really is, submit a report the linux kernel " | ||
291 | "mailinglist together with submitting your code for " | ||
292 | "inclusion.\n"); | ||
293 | } | ||
294 | #endif | ||
295 | |||
296 | fsa->owner = owner; | ||
297 | fsa->crc = symversion(syms->crcs, symnum); | ||
298 | fsa->value = syms->start[symnum].value; | ||
299 | return true; | ||
300 | } | ||
301 | |||
302 | /* Find a symbol, return value, (optional) crc and (optional) module | ||
303 | * which owns it */ | ||
304 | static unsigned long find_symbol(const char *name, | ||
305 | struct module **owner, | ||
306 | const unsigned long **crc, | ||
307 | bool gplok, | ||
308 | bool warn) | ||
309 | { | ||
310 | struct find_symbol_arg fsa; | ||
311 | |||
312 | fsa.name = name; | ||
313 | fsa.gplok = gplok; | ||
314 | fsa.warn = warn; | ||
315 | |||
316 | if (each_symbol(find_symbol_in_section, &fsa)) { | ||
317 | if (owner) | ||
318 | *owner = fsa.owner; | ||
319 | if (crc) | ||
320 | *crc = fsa.crc; | ||
321 | return fsa.value; | ||
322 | } | ||
323 | |||
301 | DEBUGP("Failed to find symbol %s\n", name); | 324 | DEBUGP("Failed to find symbol %s\n", name); |
302 | return -ENOENT; | 325 | return -ENOENT; |
303 | } | 326 | } |
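Note on the symbol-lookup rewrite above: the old per-table check callbacks (always_ok, gpl_only, warn_if_not_gpl, printk_unused_warning, ...) become data. Each struct symsearch now records a licence (NOT_GPL_ONLY / GPL_ONLY / WILL_BE_GPL_ONLY) and an unused flag, and each_symbol() walks every export section of the core kernel and of every loaded module, invoking a caller-supplied function once per symbol until it returns true. find_symbol() shrinks to the find_symbol_in_section() callback plus a struct find_symbol_arg carrying the inputs (name, gplok, warn) in and the results (owner, crc, value) out. A hedged sketch of how another module.c site could reuse the iterator; count_exports is a hypothetical callback, not part of this patch:

    static bool count_exports(const struct symsearch *syms, struct module *owner,
                              unsigned int symnum, void *data)
    {
            (*(unsigned long *)data)++;
            return false;           /* false: keep iterating over all symbols */
    }

    unsigned long nr_exports = 0;
    each_symbol(count_exports, &nr_exports);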
@@ -639,8 +662,8 @@ static int __try_stop_module(void *_sref) | |||
639 | { | 662 | { |
640 | struct stopref *sref = _sref; | 663 | struct stopref *sref = _sref; |
641 | 664 | ||
642 | /* If it's not unused, quit unless we are told to block. */ | 665 | /* If it's not unused, quit unless we're forcing. */ |
643 | if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) { | 666 | if (module_refcount(sref->mod) != 0) { |
644 | if (!(*sref->forced = try_force_unload(sref->flags))) | 667 | if (!(*sref->forced = try_force_unload(sref->flags))) |
645 | return -EWOULDBLOCK; | 668 | return -EWOULDBLOCK; |
646 | } | 669 | } |
@@ -652,9 +675,16 @@ static int __try_stop_module(void *_sref) | |||
652 | 675 | ||
653 | static int try_stop_module(struct module *mod, int flags, int *forced) | 676 | static int try_stop_module(struct module *mod, int flags, int *forced) |
654 | { | 677 | { |
655 | struct stopref sref = { mod, flags, forced }; | 678 | if (flags & O_NONBLOCK) { |
679 | struct stopref sref = { mod, flags, forced }; | ||
656 | 680 | ||
657 | return stop_machine_run(__try_stop_module, &sref, NR_CPUS); | 681 | return stop_machine(__try_stop_module, &sref, NULL); |
682 | } else { | ||
683 | /* We don't need to stop the machine for this. */ | ||
684 | mod->state = MODULE_STATE_GOING; | ||
685 | synchronize_sched(); | ||
686 | return 0; | ||
687 | } | ||
658 | } | 688 | } |
659 | 689 | ||
660 | unsigned int module_refcount(struct module *mod) | 690 | unsigned int module_refcount(struct module *mod) |
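Two separate changes meet in the try_stop_module() hunk. First, stop_machine_run(fn, data, NR_CPUS) becomes stop_machine(fn, data, NULL) throughout the file; the reworked API takes an optional cpumask rather than a cpu count, with NULL meaning "run on any online cpu":

    stop_machine_run(__try_stop_module, &sref, NR_CPUS);   /* old calling convention */
    stop_machine(__try_stop_module, &sref, NULL);          /* new calling convention */

Second, only the O_NONBLOCK case still needs the stop_machine() hammer, because there the refcount has to be sampled and the state flipped atomically with respect to every cpu. The blocking case simply sets MODULE_STATE_GOING and uses synchronize_sched() so that code which sampled the old state under preempt_disable() has finished before the caller goes on to wait for the reference count to drop.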
@@ -1386,7 +1416,7 @@ static int __unlink_module(void *_mod) | |||
1386 | static void free_module(struct module *mod) | 1416 | static void free_module(struct module *mod) |
1387 | { | 1417 | { |
1388 | /* Delete from various lists */ | 1418 | /* Delete from various lists */ |
1389 | stop_machine_run(__unlink_module, mod, NR_CPUS); | 1419 | stop_machine(__unlink_module, mod, NULL); |
1390 | remove_notes_attrs(mod); | 1420 | remove_notes_attrs(mod); |
1391 | remove_sect_attrs(mod); | 1421 | remove_sect_attrs(mod); |
1392 | mod_kobject_remove(mod); | 1422 | mod_kobject_remove(mod); |
@@ -1445,8 +1475,10 @@ static int verify_export_symbols(struct module *mod) | |||
1445 | { mod->syms, mod->num_syms }, | 1475 | { mod->syms, mod->num_syms }, |
1446 | { mod->gpl_syms, mod->num_gpl_syms }, | 1476 | { mod->gpl_syms, mod->num_gpl_syms }, |
1447 | { mod->gpl_future_syms, mod->num_gpl_future_syms }, | 1477 | { mod->gpl_future_syms, mod->num_gpl_future_syms }, |
1478 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
1448 | { mod->unused_syms, mod->num_unused_syms }, | 1479 | { mod->unused_syms, mod->num_unused_syms }, |
1449 | { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, | 1480 | { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, |
1481 | #endif | ||
1450 | }; | 1482 | }; |
1451 | 1483 | ||
1452 | for (i = 0; i < ARRAY_SIZE(arr); i++) { | 1484 | for (i = 0; i < ARRAY_SIZE(arr); i++) { |
@@ -1526,7 +1558,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs, | |||
1526 | } | 1558 | } |
1527 | 1559 | ||
1528 | /* Update size with this section: return offset. */ | 1560 | /* Update size with this section: return offset. */ |
1529 | static long get_offset(unsigned long *size, Elf_Shdr *sechdr) | 1561 | static long get_offset(unsigned int *size, Elf_Shdr *sechdr) |
1530 | { | 1562 | { |
1531 | long ret; | 1563 | long ret; |
1532 | 1564 | ||
@@ -1659,6 +1691,19 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs, | |||
1659 | } | 1691 | } |
1660 | 1692 | ||
1661 | #ifdef CONFIG_KALLSYMS | 1693 | #ifdef CONFIG_KALLSYMS |
1694 | |||
1695 | /* lookup symbol in given range of kernel_symbols */ | ||
1696 | static const struct kernel_symbol *lookup_symbol(const char *name, | ||
1697 | const struct kernel_symbol *start, | ||
1698 | const struct kernel_symbol *stop) | ||
1699 | { | ||
1700 | const struct kernel_symbol *ks = start; | ||
1701 | for (; ks < stop; ks++) | ||
1702 | if (strcmp(ks->name, name) == 0) | ||
1703 | return ks; | ||
1704 | return NULL; | ||
1705 | } | ||
1706 | |||
1662 | static int is_exported(const char *name, const struct module *mod) | 1707 | static int is_exported(const char *name, const struct module *mod) |
1663 | { | 1708 | { |
1664 | if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) | 1709 | if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab)) |
@@ -1738,9 +1783,23 @@ static inline void add_kallsyms(struct module *mod, | |||
1738 | } | 1783 | } |
1739 | #endif /* CONFIG_KALLSYMS */ | 1784 | #endif /* CONFIG_KALLSYMS */ |
1740 | 1785 | ||
1786 | static void *module_alloc_update_bounds(unsigned long size) | ||
1787 | { | ||
1788 | void *ret = module_alloc(size); | ||
1789 | |||
1790 | if (ret) { | ||
1791 | /* Update module bounds. */ | ||
1792 | if ((unsigned long)ret < module_addr_min) | ||
1793 | module_addr_min = (unsigned long)ret; | ||
1794 | if ((unsigned long)ret + size > module_addr_max) | ||
1795 | module_addr_max = (unsigned long)ret + size; | ||
1796 | } | ||
1797 | return ret; | ||
1798 | } | ||
1799 | |||
1741 | /* Allocate and load the module: note that size of section 0 is always | 1800 | /* Allocate and load the module: note that size of section 0 is always |
1742 | zero, and we rely on this for optional sections. */ | 1801 | zero, and we rely on this for optional sections. */ |
1743 | static struct module *load_module(void __user *umod, | 1802 | static noinline struct module *load_module(void __user *umod, |
1744 | unsigned long len, | 1803 | unsigned long len, |
1745 | const char __user *uargs) | 1804 | const char __user *uargs) |
1746 | { | 1805 | { |
@@ -1764,10 +1823,12 @@ static struct module *load_module(void __user *umod, | |||
1764 | unsigned int gplfutureindex; | 1823 | unsigned int gplfutureindex; |
1765 | unsigned int gplfuturecrcindex; | 1824 | unsigned int gplfuturecrcindex; |
1766 | unsigned int unwindex = 0; | 1825 | unsigned int unwindex = 0; |
1826 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
1767 | unsigned int unusedindex; | 1827 | unsigned int unusedindex; |
1768 | unsigned int unusedcrcindex; | 1828 | unsigned int unusedcrcindex; |
1769 | unsigned int unusedgplindex; | 1829 | unsigned int unusedgplindex; |
1770 | unsigned int unusedgplcrcindex; | 1830 | unsigned int unusedgplcrcindex; |
1831 | #endif | ||
1771 | unsigned int markersindex; | 1832 | unsigned int markersindex; |
1772 | unsigned int markersstringsindex; | 1833 | unsigned int markersstringsindex; |
1773 | struct module *mod; | 1834 | struct module *mod; |
@@ -1850,13 +1911,15 @@ static struct module *load_module(void __user *umod, | |||
1850 | exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab"); | 1911 | exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab"); |
1851 | gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl"); | 1912 | gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl"); |
1852 | gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future"); | 1913 | gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future"); |
1853 | unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused"); | ||
1854 | unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl"); | ||
1855 | crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab"); | 1914 | crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab"); |
1856 | gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl"); | 1915 | gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl"); |
1857 | gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future"); | 1916 | gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future"); |
1917 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
1918 | unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused"); | ||
1919 | unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl"); | ||
1858 | unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused"); | 1920 | unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused"); |
1859 | unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl"); | 1921 | unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl"); |
1922 | #endif | ||
1860 | setupindex = find_sec(hdr, sechdrs, secstrings, "__param"); | 1923 | setupindex = find_sec(hdr, sechdrs, secstrings, "__param"); |
1861 | exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table"); | 1924 | exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table"); |
1862 | obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm"); | 1925 | obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm"); |
@@ -1935,7 +1998,7 @@ static struct module *load_module(void __user *umod, | |||
1935 | layout_sections(mod, hdr, sechdrs, secstrings); | 1998 | layout_sections(mod, hdr, sechdrs, secstrings); |
1936 | 1999 | ||
1937 | /* Do the allocs. */ | 2000 | /* Do the allocs. */ |
1938 | ptr = module_alloc(mod->core_size); | 2001 | ptr = module_alloc_update_bounds(mod->core_size); |
1939 | if (!ptr) { | 2002 | if (!ptr) { |
1940 | err = -ENOMEM; | 2003 | err = -ENOMEM; |
1941 | goto free_percpu; | 2004 | goto free_percpu; |
@@ -1943,7 +2006,7 @@ static struct module *load_module(void __user *umod, | |||
1943 | memset(ptr, 0, mod->core_size); | 2006 | memset(ptr, 0, mod->core_size); |
1944 | mod->module_core = ptr; | 2007 | mod->module_core = ptr; |
1945 | 2008 | ||
1946 | ptr = module_alloc(mod->init_size); | 2009 | ptr = module_alloc_update_bounds(mod->init_size); |
1947 | if (!ptr && mod->init_size) { | 2010 | if (!ptr && mod->init_size) { |
1948 | err = -ENOMEM; | 2011 | err = -ENOMEM; |
1949 | goto free_core; | 2012 | goto free_core; |
@@ -2018,14 +2081,15 @@ static struct module *load_module(void __user *umod, | |||
2018 | mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr; | 2081 | mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr; |
2019 | mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size / | 2082 | mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size / |
2020 | sizeof(*mod->gpl_future_syms); | 2083 | sizeof(*mod->gpl_future_syms); |
2021 | mod->num_unused_syms = sechdrs[unusedindex].sh_size / | ||
2022 | sizeof(*mod->unused_syms); | ||
2023 | mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size / | ||
2024 | sizeof(*mod->unused_gpl_syms); | ||
2025 | mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr; | 2084 | mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr; |
2026 | if (gplfuturecrcindex) | 2085 | if (gplfuturecrcindex) |
2027 | mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr; | 2086 | mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr; |
2028 | 2087 | ||
2088 | #ifdef CONFIG_UNUSED_SYMBOLS | ||
2089 | mod->num_unused_syms = sechdrs[unusedindex].sh_size / | ||
2090 | sizeof(*mod->unused_syms); | ||
2091 | mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size / | ||
2092 | sizeof(*mod->unused_gpl_syms); | ||
2029 | mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr; | 2093 | mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr; |
2030 | if (unusedcrcindex) | 2094 | if (unusedcrcindex) |
2031 | mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr; | 2095 | mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr; |
@@ -2033,13 +2097,17 @@ static struct module *load_module(void __user *umod, | |||
2033 | if (unusedgplcrcindex) | 2097 | if (unusedgplcrcindex) |
2034 | mod->unused_gpl_crcs | 2098 | mod->unused_gpl_crcs |
2035 | = (void *)sechdrs[unusedgplcrcindex].sh_addr; | 2099 | = (void *)sechdrs[unusedgplcrcindex].sh_addr; |
2100 | #endif | ||
2036 | 2101 | ||
2037 | #ifdef CONFIG_MODVERSIONS | 2102 | #ifdef CONFIG_MODVERSIONS |
2038 | if ((mod->num_syms && !crcindex) || | 2103 | if ((mod->num_syms && !crcindex) |
2039 | (mod->num_gpl_syms && !gplcrcindex) || | 2104 | || (mod->num_gpl_syms && !gplcrcindex) |
2040 | (mod->num_gpl_future_syms && !gplfuturecrcindex) || | 2105 | || (mod->num_gpl_future_syms && !gplfuturecrcindex) |
2041 | (mod->num_unused_syms && !unusedcrcindex) || | 2106 | #ifdef CONFIG_UNUSED_SYMBOLS |
2042 | (mod->num_unused_gpl_syms && !unusedgplcrcindex)) { | 2107 | || (mod->num_unused_syms && !unusedcrcindex) |
2108 | || (mod->num_unused_gpl_syms && !unusedgplcrcindex) | ||
2109 | #endif | ||
2110 | ) { | ||
2043 | printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name); | 2111 | printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name); |
2044 | err = try_to_force_load(mod, "nocrc"); | 2112 | err = try_to_force_load(mod, "nocrc"); |
2045 | if (err) | 2113 | if (err) |
@@ -2129,7 +2197,7 @@ static struct module *load_module(void __user *umod, | |||
2129 | /* Now sew it into the lists so we can get lockdep and oops | 2197 | /* Now sew it into the lists so we can get lockdep and oops |
2130 | * info during argument parsing. Noone should access us, since | 2198 | * info during argument parsing. Noone should access us, since |
2131 | * strong_try_module_get() will fail. */ | 2199 | * strong_try_module_get() will fail. */ |
2132 | stop_machine_run(__link_module, mod, NR_CPUS); | 2200 | stop_machine(__link_module, mod, NULL); |
2133 | 2201 | ||
2134 | /* Size of section 0 is 0, so this works well if no params */ | 2202 | /* Size of section 0 is 0, so this works well if no params */ |
2135 | err = parse_args(mod->name, mod->args, | 2203 | err = parse_args(mod->name, mod->args, |
@@ -2163,7 +2231,7 @@ static struct module *load_module(void __user *umod, | |||
2163 | return mod; | 2231 | return mod; |
2164 | 2232 | ||
2165 | unlink: | 2233 | unlink: |
2166 | stop_machine_run(__unlink_module, mod, NR_CPUS); | 2234 | stop_machine(__unlink_module, mod, NULL); |
2167 | module_arch_cleanup(mod); | 2235 | module_arch_cleanup(mod); |
2168 | cleanup: | 2236 | cleanup: |
2169 | kobject_del(&mod->mkobj.kobj); | 2237 | kobject_del(&mod->mkobj.kobj); |
@@ -2220,7 +2288,7 @@ sys_init_module(void __user *umod, | |||
2220 | 2288 | ||
2221 | /* Start the module */ | 2289 | /* Start the module */ |
2222 | if (mod->init != NULL) | 2290 | if (mod->init != NULL) |
2223 | ret = mod->init(); | 2291 | ret = do_one_initcall(mod->init); |
2224 | if (ret < 0) { | 2292 | if (ret < 0) { |
2225 | /* Init routine failed: abort. Try to protect us from | 2293 | /* Init routine failed: abort. Try to protect us from |
2226 | buggy refcounters. */ | 2294 | buggy refcounters. */ |
@@ -2512,7 +2580,7 @@ static int m_show(struct seq_file *m, void *p) | |||
2512 | struct module *mod = list_entry(p, struct module, list); | 2580 | struct module *mod = list_entry(p, struct module, list); |
2513 | char buf[8]; | 2581 | char buf[8]; |
2514 | 2582 | ||
2515 | seq_printf(m, "%s %lu", | 2583 | seq_printf(m, "%s %u", |
2516 | mod->name, mod->init_size + mod->core_size); | 2584 | mod->name, mod->init_size + mod->core_size); |
2517 | print_unload_info(m, mod); | 2585 | print_unload_info(m, mod); |
2518 | 2586 | ||
@@ -2595,6 +2663,9 @@ struct module *__module_text_address(unsigned long addr) | |||
2595 | { | 2663 | { |
2596 | struct module *mod; | 2664 | struct module *mod; |
2597 | 2665 | ||
2666 | if (addr < module_addr_min || addr > module_addr_max) | ||
2667 | return NULL; | ||
2668 | |||
2598 | list_for_each_entry(mod, &modules, list) | 2669 | list_for_each_entry(mod, &modules, list) |
2599 | if (within(addr, mod->module_init, mod->init_text_size) | 2670 | if (within(addr, mod->module_init, mod->init_text_size) |
2600 | || within(addr, mod->module_core, mod->core_text_size)) | 2671 | || within(addr, mod->module_core, mod->core_text_size)) |
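Note on the bounds tracking added above: module_alloc_update_bounds() wraps both module_alloc() calls in load_module() and grows a global [module_addr_min, module_addr_max] window as modules are allocated. __module_text_address(), which sits on oops, backtrace and kallsyms paths, can then reject most non-module addresses with two comparisons before it falls back to walking the module list:

    if (addr < module_addr_min || addr > module_addr_max)
            return NULL;

Nothing in this patch shrinks the window when a module is unloaded, so it stays a conservative over-approximation: an address inside the window still has to be confirmed against the per-module init and core ranges.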
diff --git a/kernel/mutex.c b/kernel/mutex.c index bcdc9ac8ef60..12c779dc65d4 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
@@ -34,6 +34,7 @@ | |||
34 | /*** | 34 | /*** |
35 | * mutex_init - initialize the mutex | 35 | * mutex_init - initialize the mutex |
36 | * @lock: the mutex to be initialized | 36 | * @lock: the mutex to be initialized |
37 | * @key: the lock_class_key for the class; used by mutex lock debugging | ||
37 | * | 38 | * |
38 | * Initialize the mutex to unlocked state. | 39 | * Initialize the mutex to unlocked state. |
39 | * | 40 | * |
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c index 48d7ed6fc3a4..43c2111cd54d 100644 --- a/kernel/ns_cgroup.c +++ b/kernel/ns_cgroup.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/cgroup.h> | 8 | #include <linux/cgroup.h> |
9 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
10 | #include <linux/proc_fs.h> | ||
10 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
11 | #include <linux/nsproxy.h> | 12 | #include <linux/nsproxy.h> |
12 | 13 | ||
@@ -24,9 +25,12 @@ static inline struct ns_cgroup *cgroup_to_ns( | |||
24 | struct ns_cgroup, css); | 25 | struct ns_cgroup, css); |
25 | } | 26 | } |
26 | 27 | ||
27 | int ns_cgroup_clone(struct task_struct *task) | 28 | int ns_cgroup_clone(struct task_struct *task, struct pid *pid) |
28 | { | 29 | { |
29 | return cgroup_clone(task, &ns_subsys); | 30 | char name[PROC_NUMBUF]; |
31 | |||
32 | snprintf(name, PROC_NUMBUF, "%d", pid_vnr(pid)); | ||
33 | return cgroup_clone(task, &ns_subsys, name); | ||
30 | } | 34 | } |
31 | 35 | ||
32 | /* | 36 | /* |
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index adc785146a1c..1d3ef29a2583 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c | |||
@@ -14,7 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/version.h> | ||
18 | #include <linux/nsproxy.h> | 17 | #include <linux/nsproxy.h> |
19 | #include <linux/init_task.h> | 18 | #include <linux/init_task.h> |
20 | #include <linux/mnt_namespace.h> | 19 | #include <linux/mnt_namespace.h> |
@@ -157,12 +156,6 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) | |||
157 | goto out; | 156 | goto out; |
158 | } | 157 | } |
159 | 158 | ||
160 | err = ns_cgroup_clone(tsk); | ||
161 | if (err) { | ||
162 | put_nsproxy(new_ns); | ||
163 | goto out; | ||
164 | } | ||
165 | |||
166 | tsk->nsproxy = new_ns; | 159 | tsk->nsproxy = new_ns; |
167 | 160 | ||
168 | out: | 161 | out: |
@@ -209,7 +202,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags, | |||
209 | goto out; | 202 | goto out; |
210 | } | 203 | } |
211 | 204 | ||
212 | err = ns_cgroup_clone(current); | 205 | err = ns_cgroup_clone(current, task_pid(current)); |
213 | if (err) | 206 | if (err) |
214 | put_nsproxy(*new_nsp); | 207 | put_nsproxy(*new_nsp); |
215 | 208 | ||
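Note on the ns_cgroup/nsproxy hunks above: ns_cgroup_clone() now takes the struct pid it is cloning for and names the new cgroup after pid_vnr(pid), i.e. the pid number as seen from the caller's namespace (cgroup_clone() grew a matching name argument). The call is dropped from copy_namespaces(), where the child's pid has not been allocated yet, and only the unshare path keeps it, passing task_pid(current). The fork-side counterpart is not visible in these hunks; presumably it moves to a point in copy_process() where the new pid exists, along the lines of this hedged sketch:

    /* assumed fork-side call site, after the child's pid is allocated */
    err = ns_cgroup_clone(p, pid);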
diff --git a/kernel/panic.c b/kernel/panic.c index 425567f45b9f..12c5a0a6c89b 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -318,6 +318,28 @@ void warn_on_slowpath(const char *file, int line) | |||
318 | add_taint(TAINT_WARN); | 318 | add_taint(TAINT_WARN); |
319 | } | 319 | } |
320 | EXPORT_SYMBOL(warn_on_slowpath); | 320 | EXPORT_SYMBOL(warn_on_slowpath); |
321 | |||
322 | |||
323 | void warn_slowpath(const char *file, int line, const char *fmt, ...) | ||
324 | { | ||
325 | va_list args; | ||
326 | char function[KSYM_SYMBOL_LEN]; | ||
327 | unsigned long caller = (unsigned long)__builtin_return_address(0); | ||
328 | sprint_symbol(function, caller); | ||
329 | |||
330 | printk(KERN_WARNING "------------[ cut here ]------------\n"); | ||
331 | printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, | ||
332 | line, function); | ||
333 | va_start(args, fmt); | ||
334 | vprintk(fmt, args); | ||
335 | va_end(args); | ||
336 | |||
337 | print_modules(); | ||
338 | dump_stack(); | ||
339 | print_oops_end_marker(); | ||
340 | add_taint(TAINT_WARN); | ||
341 | } | ||
342 | EXPORT_SYMBOL(warn_slowpath); | ||
321 | #endif | 343 | #endif |
322 | 344 | ||
323 | #ifdef CONFIG_CC_STACKPROTECTOR | 345 | #ifdef CONFIG_CC_STACKPROTECTOR |
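Note on the panic.c hunk above: warn_slowpath() is the printf-style sibling of warn_on_slowpath(). It resolves its return address to a symbol name, prints the usual "cut here"/WARNING banner plus a caller-supplied formatted message, then prints the module list, dumps the stack, emits the oops end marker and adds TAINT_WARN, just as a plain WARN_ON() would. It is exported so that a WARN(condition, fmt, ...) macro can report a message while keeping the slow path out of line; roughly, and as a hedged sketch rather than the exact header definition:

    #define WARN(condition, format...) ({                            \
            int __ret_warn_on = !!(condition);                       \
            if (unlikely(__ret_warn_on))                             \
                    warn_slowpath(__FILE__, __LINE__, format);       \
            unlikely(__ret_warn_on);                                 \
    })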
diff --git a/kernel/pid.c b/kernel/pid.c index 30bd5d4b2ac7..064e76afa507 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -309,12 +309,6 @@ struct pid *find_vpid(int nr) | |||
309 | } | 309 | } |
310 | EXPORT_SYMBOL_GPL(find_vpid); | 310 | EXPORT_SYMBOL_GPL(find_vpid); |
311 | 311 | ||
312 | struct pid *find_pid(int nr) | ||
313 | { | ||
314 | return find_pid_ns(nr, &init_pid_ns); | ||
315 | } | ||
316 | EXPORT_SYMBOL_GPL(find_pid); | ||
317 | |||
318 | /* | 312 | /* |
319 | * attach_pid() must be called with the tasklist_lock write-held. | 313 | * attach_pid() must be called with the tasklist_lock write-held. |
320 | */ | 314 | */ |
@@ -435,6 +429,7 @@ struct pid *find_get_pid(pid_t nr) | |||
435 | 429 | ||
436 | return pid; | 430 | return pid; |
437 | } | 431 | } |
432 | EXPORT_SYMBOL_GPL(find_get_pid); | ||
438 | 433 | ||
439 | pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) | 434 | pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) |
440 | { | 435 | { |
@@ -482,7 +477,7 @@ EXPORT_SYMBOL(task_session_nr_ns); | |||
482 | /* | 477 | /* |
483 | * Used by proc to find the first pid that is greater then or equal to nr. | 478 | * Used by proc to find the first pid that is greater then or equal to nr. |
484 | * | 479 | * |
485 | * If there is a pid at nr this function is exactly the same as find_pid. | 480 | * If there is a pid at nr this function is exactly the same as find_pid_ns. |
486 | */ | 481 | */ |
487 | struct pid *find_ge_pid(int nr, struct pid_namespace *ns) | 482 | struct pid *find_ge_pid(int nr, struct pid_namespace *ns) |
488 | { | 483 | { |
@@ -497,7 +492,6 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns) | |||
497 | 492 | ||
498 | return pid; | 493 | return pid; |
499 | } | 494 | } |
500 | EXPORT_SYMBOL_GPL(find_get_pid); | ||
501 | 495 | ||
502 | /* | 496 | /* |
503 | * The pid hash table is scaled according to the amount of memory in the | 497 | * The pid hash table is scaled according to the amount of memory in the |
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 98702b4b8851..fab8ea86fac3 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/pid_namespace.h> | 12 | #include <linux/pid_namespace.h> |
13 | #include <linux/syscalls.h> | 13 | #include <linux/syscalls.h> |
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/acct.h> | ||
15 | 16 | ||
16 | #define BITS_PER_PAGE (PAGE_SIZE*8) | 17 | #define BITS_PER_PAGE (PAGE_SIZE*8) |
17 | 18 | ||
@@ -71,7 +72,7 @@ static struct pid_namespace *create_pid_namespace(unsigned int level) | |||
71 | struct pid_namespace *ns; | 72 | struct pid_namespace *ns; |
72 | int i; | 73 | int i; |
73 | 74 | ||
74 | ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL); | 75 | ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL); |
75 | if (ns == NULL) | 76 | if (ns == NULL) |
76 | goto out; | 77 | goto out; |
77 | 78 | ||
@@ -84,17 +85,13 @@ static struct pid_namespace *create_pid_namespace(unsigned int level) | |||
84 | goto out_free_map; | 85 | goto out_free_map; |
85 | 86 | ||
86 | kref_init(&ns->kref); | 87 | kref_init(&ns->kref); |
87 | ns->last_pid = 0; | ||
88 | ns->child_reaper = NULL; | ||
89 | ns->level = level; | 88 | ns->level = level; |
90 | 89 | ||
91 | set_bit(0, ns->pidmap[0].page); | 90 | set_bit(0, ns->pidmap[0].page); |
92 | atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); | 91 | atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); |
93 | 92 | ||
94 | for (i = 1; i < PIDMAP_ENTRIES; i++) { | 93 | for (i = 1; i < PIDMAP_ENTRIES; i++) |
95 | ns->pidmap[i].page = NULL; | ||
96 | atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); | 94 | atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); |
97 | } | ||
98 | 95 | ||
99 | return ns; | 96 | return ns; |
100 | 97 | ||
@@ -182,9 +179,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
182 | rc = sys_wait4(-1, NULL, __WALL, NULL); | 179 | rc = sys_wait4(-1, NULL, __WALL, NULL); |
183 | } while (rc != -ECHILD); | 180 | } while (rc != -ECHILD); |
184 | 181 | ||
185 | 182 | acct_exit_ns(pid_ns); | |
186 | /* Child reaper for the pid namespace is going away */ | ||
187 | pid_ns->child_reaper = NULL; | ||
188 | return; | 183 | return; |
189 | } | 184 | } |
190 | 185 | ||
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index 8cb757026386..dfdec524d1b7 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c | |||
@@ -24,7 +24,7 @@ | |||
24 | * requirement that the application has is cleaned up when closes the file | 24 | * requirement that the application has is cleaned up when closes the file |
25 | * pointer or exits the pm_qos_object will get an opportunity to clean up. | 25 | * pointer or exits the pm_qos_object will get an opportunity to clean up. |
26 | * | 26 | * |
27 | * mark gross mgross@linux.intel.com | 27 | * Mark Gross <mgross@linux.intel.com> |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <linux/pm_qos_params.h> | 30 | #include <linux/pm_qos_params.h> |
@@ -43,7 +43,7 @@ | |||
43 | #include <linux/uaccess.h> | 43 | #include <linux/uaccess.h> |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * locking rule: all changes to target_value or requirements or notifiers lists | 46 | * locking rule: all changes to requirements or notifiers lists |
47 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock | 47 | * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock |
48 | * held, taken with _irqsave. One lock to rule them all | 48 | * held, taken with _irqsave. One lock to rule them all |
49 | */ | 49 | */ |
@@ -66,7 +66,7 @@ struct pm_qos_object { | |||
66 | struct miscdevice pm_qos_power_miscdev; | 66 | struct miscdevice pm_qos_power_miscdev; |
67 | char *name; | 67 | char *name; |
68 | s32 default_value; | 68 | s32 default_value; |
69 | s32 target_value; | 69 | atomic_t target_value; |
70 | s32 (*comparitor)(s32, s32); | 70 | s32 (*comparitor)(s32, s32); |
71 | }; | 71 | }; |
72 | 72 | ||
@@ -77,7 +77,7 @@ static struct pm_qos_object cpu_dma_pm_qos = { | |||
77 | .notifiers = &cpu_dma_lat_notifier, | 77 | .notifiers = &cpu_dma_lat_notifier, |
78 | .name = "cpu_dma_latency", | 78 | .name = "cpu_dma_latency", |
79 | .default_value = 2000 * USEC_PER_SEC, | 79 | .default_value = 2000 * USEC_PER_SEC, |
80 | .target_value = 2000 * USEC_PER_SEC, | 80 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), |
81 | .comparitor = min_compare | 81 | .comparitor = min_compare |
82 | }; | 82 | }; |
83 | 83 | ||
@@ -87,7 +87,7 @@ static struct pm_qos_object network_lat_pm_qos = { | |||
87 | .notifiers = &network_lat_notifier, | 87 | .notifiers = &network_lat_notifier, |
88 | .name = "network_latency", | 88 | .name = "network_latency", |
89 | .default_value = 2000 * USEC_PER_SEC, | 89 | .default_value = 2000 * USEC_PER_SEC, |
90 | .target_value = 2000 * USEC_PER_SEC, | 90 | .target_value = ATOMIC_INIT(2000 * USEC_PER_SEC), |
91 | .comparitor = min_compare | 91 | .comparitor = min_compare |
92 | }; | 92 | }; |
93 | 93 | ||
@@ -99,7 +99,7 @@ static struct pm_qos_object network_throughput_pm_qos = { | |||
99 | .notifiers = &network_throughput_notifier, | 99 | .notifiers = &network_throughput_notifier, |
100 | .name = "network_throughput", | 100 | .name = "network_throughput", |
101 | .default_value = 0, | 101 | .default_value = 0, |
102 | .target_value = 0, | 102 | .target_value = ATOMIC_INIT(0), |
103 | .comparitor = max_compare | 103 | .comparitor = max_compare |
104 | }; | 104 | }; |
105 | 105 | ||
@@ -150,11 +150,11 @@ static void update_target(int target) | |||
150 | extreme_value = pm_qos_array[target]->comparitor( | 150 | extreme_value = pm_qos_array[target]->comparitor( |
151 | extreme_value, node->value); | 151 | extreme_value, node->value); |
152 | } | 152 | } |
153 | if (pm_qos_array[target]->target_value != extreme_value) { | 153 | if (atomic_read(&pm_qos_array[target]->target_value) != extreme_value) { |
154 | call_notifier = 1; | 154 | call_notifier = 1; |
155 | pm_qos_array[target]->target_value = extreme_value; | 155 | atomic_set(&pm_qos_array[target]->target_value, extreme_value); |
156 | pr_debug(KERN_ERR "new target for qos %d is %d\n", target, | 156 | pr_debug(KERN_ERR "new target for qos %d is %d\n", target, |
157 | pm_qos_array[target]->target_value); | 157 | atomic_read(&pm_qos_array[target]->target_value)); |
158 | } | 158 | } |
159 | spin_unlock_irqrestore(&pm_qos_lock, flags); | 159 | spin_unlock_irqrestore(&pm_qos_lock, flags); |
160 | 160 | ||
@@ -193,14 +193,7 @@ static int find_pm_qos_object_by_minor(int minor) | |||
193 | */ | 193 | */ |
194 | int pm_qos_requirement(int pm_qos_class) | 194 | int pm_qos_requirement(int pm_qos_class) |
195 | { | 195 | { |
196 | int ret_val; | 196 | return atomic_read(&pm_qos_array[pm_qos_class]->target_value); |
197 | unsigned long flags; | ||
198 | |||
199 | spin_lock_irqsave(&pm_qos_lock, flags); | ||
200 | ret_val = pm_qos_array[pm_qos_class]->target_value; | ||
201 | spin_unlock_irqrestore(&pm_qos_lock, flags); | ||
202 | |||
203 | return ret_val; | ||
204 | } | 197 | } |
205 | EXPORT_SYMBOL_GPL(pm_qos_requirement); | 198 | EXPORT_SYMBOL_GPL(pm_qos_requirement); |
206 | 199 | ||
@@ -211,8 +204,8 @@ EXPORT_SYMBOL_GPL(pm_qos_requirement); | |||
211 | * @value: defines the qos request | 204 | * @value: defines the qos request |
212 | * | 205 | * |
213 | * This function inserts a new entry in the pm_qos_class list of requested qos | 206 | * This function inserts a new entry in the pm_qos_class list of requested qos |
214 | * performance charactoistics. It recomputes the agregate QoS expectations for | 207 | * performance characteristics. It recomputes the aggregate QoS expectations |
215 | * the pm_qos_class of parrameters. | 208 | * for the pm_qos_class of parameters. |
216 | */ | 209 | */ |
217 | int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value) | 210 | int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value) |
218 | { | 211 | { |
@@ -250,10 +243,10 @@ EXPORT_SYMBOL_GPL(pm_qos_add_requirement); | |||
250 | * @name: identifies the request | 243 | * @name: identifies the request |
251 | * @value: defines the qos request | 244 | * @value: defines the qos request |
252 | * | 245 | * |
253 | * Updates an existing qos requierement for the pm_qos_class of parameters along | 246 | * Updates an existing qos requirement for the pm_qos_class of parameters along |
254 | * with updating the target pm_qos_class value. | 247 | * with updating the target pm_qos_class value. |
255 | * | 248 | * |
256 | * If the named request isn't in the lest then no change is made. | 249 | * If the named request isn't in the list then no change is made. |
257 | */ | 250 | */ |
258 | int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value) | 251 | int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value) |
259 | { | 252 | { |
@@ -287,7 +280,7 @@ EXPORT_SYMBOL_GPL(pm_qos_update_requirement); | |||
287 | * @pm_qos_class: identifies which list of qos request to us | 280 | * @pm_qos_class: identifies which list of qos request to us |
288 | * @name: identifies the request | 281 | * @name: identifies the request |
289 | * | 282 | * |
290 | * Will remove named qos request from pm_qos_class list of parrameters and | 283 | * Will remove named qos request from pm_qos_class list of parameters and |
291 | * recompute the current target value for the pm_qos_class. | 284 | * recompute the current target value for the pm_qos_class. |
292 | */ | 285 | */ |
293 | void pm_qos_remove_requirement(int pm_qos_class, char *name) | 286 | void pm_qos_remove_requirement(int pm_qos_class, char *name) |
@@ -319,7 +312,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement); | |||
319 | * @notifier: notifier block managed by caller. | 312 | * @notifier: notifier block managed by caller. |
320 | * | 313 | * |
321 | * will register the notifier into a notification chain that gets called | 314 | * will register the notifier into a notification chain that gets called |
322 | * uppon changes to the pm_qos_class target value. | 315 | * upon changes to the pm_qos_class target value. |
323 | */ | 316 | */ |
324 | int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier) | 317 | int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier) |
325 | { | 318 | { |
@@ -338,7 +331,7 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier); | |||
338 | * @notifier: notifier block to be removed. | 331 | * @notifier: notifier block to be removed. |
339 | * | 332 | * |
340 | * will remove the notifier from the notification chain that gets called | 333 | * will remove the notifier from the notification chain that gets called |
341 | * uppon changes to the pm_qos_class target value. | 334 | * upon changes to the pm_qos_class target value. |
342 | */ | 335 | */ |
343 | int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier) | 336 | int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier) |
344 | { | 337 | { |
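Note on the pm_qos_params.c hunks above: target_value changes from a plain s32 guarded by pm_qos_lock to an atomic_t. Writers (update_target()) still recompute the aggregate under the lock, but pm_qos_requirement() now boils down to a single atomic_read(), so readers no longer take a spinlock with interrupts disabled just to fetch the current constraint. A hedged usage sketch; the idle-governor style consumer and the variable names are illustrative only:

    s32 latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);

    /* a C-state is only usable if its exit latency fits the current bound */
    bool state_usable = (state_exit_latency_us <= latency_req);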
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index dbd8398ddb0b..e36d5798cbff 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -289,21 +289,29 @@ void do_schedule_next_timer(struct siginfo *info) | |||
289 | else | 289 | else |
290 | schedule_next_timer(timr); | 290 | schedule_next_timer(timr); |
291 | 291 | ||
292 | info->si_overrun = timr->it_overrun_last; | 292 | info->si_overrun += timr->it_overrun_last; |
293 | } | 293 | } |
294 | 294 | ||
295 | if (timr) | 295 | if (timr) |
296 | unlock_timer(timr, flags); | 296 | unlock_timer(timr, flags); |
297 | } | 297 | } |
298 | 298 | ||
299 | int posix_timer_event(struct k_itimer *timr,int si_private) | 299 | int posix_timer_event(struct k_itimer *timr, int si_private) |
300 | { | 300 | { |
301 | memset(&timr->sigq->info, 0, sizeof(siginfo_t)); | 301 | /* |
302 | * FIXME: if ->sigq is queued we can race with | ||
303 | * dequeue_signal()->do_schedule_next_timer(). | ||
304 | * | ||
305 | * If dequeue_signal() sees the "right" value of | ||
306 | * si_sys_private it calls do_schedule_next_timer(). | ||
307 | * We re-queue ->sigq and drop ->it_lock(). | ||
308 | * do_schedule_next_timer() locks the timer | ||
309 | * and re-schedules it while ->sigq is pending. | ||
310 | * Not really bad, but not that we want. | ||
311 | */ | ||
302 | timr->sigq->info.si_sys_private = si_private; | 312 | timr->sigq->info.si_sys_private = si_private; |
303 | /* Send signal to the process that owns this timer.*/ | ||
304 | 313 | ||
305 | timr->sigq->info.si_signo = timr->it_sigev_signo; | 314 | timr->sigq->info.si_signo = timr->it_sigev_signo; |
306 | timr->sigq->info.si_errno = 0; | ||
307 | timr->sigq->info.si_code = SI_TIMER; | 315 | timr->sigq->info.si_code = SI_TIMER; |
308 | timr->sigq->info.si_tid = timr->it_id; | 316 | timr->sigq->info.si_tid = timr->it_id; |
309 | timr->sigq->info.si_value = timr->it_sigev_value; | 317 | timr->sigq->info.si_value = timr->it_sigev_value; |
@@ -435,6 +443,7 @@ static struct k_itimer * alloc_posix_timer(void) | |||
435 | kmem_cache_free(posix_timers_cache, tmr); | 443 | kmem_cache_free(posix_timers_cache, tmr); |
436 | tmr = NULL; | 444 | tmr = NULL; |
437 | } | 445 | } |
446 | memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); | ||
438 | return tmr; | 447 | return tmr; |
439 | } | 448 | } |
440 | 449 | ||
@@ -449,9 +458,6 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set) | |||
449 | spin_unlock_irqrestore(&idr_lock, flags); | 458 | spin_unlock_irqrestore(&idr_lock, flags); |
450 | } | 459 | } |
451 | sigqueue_free(tmr->sigq); | 460 | sigqueue_free(tmr->sigq); |
452 | if (unlikely(tmr->it_process) && | ||
453 | tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) | ||
454 | put_task_struct(tmr->it_process); | ||
455 | kmem_cache_free(posix_timers_cache, tmr); | 461 | kmem_cache_free(posix_timers_cache, tmr); |
456 | } | 462 | } |
457 | 463 | ||
@@ -856,11 +862,10 @@ retry_delete: | |||
856 | * This keeps any tasks waiting on the spin lock from thinking | 862 | * This keeps any tasks waiting on the spin lock from thinking |
857 | * they got something (see the lock code above). | 863 | * they got something (see the lock code above). |
858 | */ | 864 | */ |
859 | if (timer->it_process) { | 865 | if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) |
860 | if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) | 866 | put_task_struct(timer->it_process); |
861 | put_task_struct(timer->it_process); | 867 | timer->it_process = NULL; |
862 | timer->it_process = NULL; | 868 | |
863 | } | ||
864 | unlock_timer(timer, flags); | 869 | unlock_timer(timer, flags); |
865 | release_posix_timer(timer, IT_ID_SET); | 870 | release_posix_timer(timer, IT_ID_SET); |
866 | return 0; | 871 | return 0; |
@@ -885,11 +890,10 @@ retry_delete: | |||
885 | * This keeps any tasks waiting on the spin lock from thinking | 890 | * This keeps any tasks waiting on the spin lock from thinking |
886 | * they got something (see the lock code above). | 891 | * they got something (see the lock code above). |
887 | */ | 892 | */ |
888 | if (timer->it_process) { | 893 | if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) |
889 | if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) | 894 | put_task_struct(timer->it_process); |
890 | put_task_struct(timer->it_process); | 895 | timer->it_process = NULL; |
891 | timer->it_process = NULL; | 896 | |
892 | } | ||
893 | unlock_timer(timer, flags); | 897 | unlock_timer(timer, flags); |
894 | release_posix_timer(timer, IT_ID_SET); | 898 | release_posix_timer(timer, IT_ID_SET); |
895 | } | 899 | } |
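Note on the posix-timers hunks above: si_overrun is now accumulated ("+=") instead of overwritten, so overruns are not lost when a timer fires again while its signal is still queued; the siginfo memset moves from the per-expiry posix_timer_event() into alloc_posix_timer(), with the new comment documenting a remaining, known race against dequeue_signal()->do_schedule_next_timer(); and the put_task_struct() of it_process is done directly in the two deletion paths rather than in release_posix_timer(). From userspace the overrun count is visible in the delivered siginfo; a hedged illustration (the handler body is illustrative only):

    static void handler(int sig, siginfo_t *si, void *uc)
    {
            /* expirations that occurred while the previous signal was still
               pending, i.e. what timer_getoverrun() would also report */
            int missed = si->si_overrun;
            (void)missed;
    }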
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index b45da40e8d25..dcd165f92a88 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
@@ -82,7 +82,7 @@ config PM_SLEEP_SMP | |||
82 | 82 | ||
83 | config PM_SLEEP | 83 | config PM_SLEEP |
84 | bool | 84 | bool |
85 | depends on SUSPEND || HIBERNATION | 85 | depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE |
86 | default y | 86 | default y |
87 | 87 | ||
88 | config SUSPEND | 88 | config SUSPEND |
@@ -94,6 +94,17 @@ config SUSPEND | |||
94 | powered and thus its contents are preserved, such as the | 94 | powered and thus its contents are preserved, such as the |
95 | suspend-to-RAM state (e.g. the ACPI S3 state). | 95 | suspend-to-RAM state (e.g. the ACPI S3 state). |
96 | 96 | ||
97 | config PM_TEST_SUSPEND | ||
98 | bool "Test suspend/resume and wakealarm during bootup" | ||
99 | depends on SUSPEND && PM_DEBUG && RTC_LIB=y | ||
100 | ---help--- | ||
101 | This option will let you suspend your machine during bootup, and | ||
102 | make it wake up a few seconds later using an RTC wakeup alarm. | ||
103 | Enable this with a kernel parameter like "test_suspend=mem". | ||
104 | |||
105 | You probably want to have your system's RTC driver statically | ||
106 | linked, ensuring that it's available when this test runs. | ||
107 | |||
97 | config SUSPEND_FREEZER | 108 | config SUSPEND_FREEZER |
98 | bool "Enable freezer for suspend to RAM/standby" \ | 109 | bool "Enable freezer for suspend to RAM/standby" \ |
99 | if ARCH_WANTS_FREEZER_CONTROL || BROKEN | 110 | if ARCH_WANTS_FREEZER_CONTROL || BROKEN |
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index f011e0870b52..bbd85c60f741 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/console.h> | 21 | #include <linux/console.h> |
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/freezer.h> | 23 | #include <linux/freezer.h> |
24 | #include <linux/ftrace.h> | ||
24 | 25 | ||
25 | #include "power.h" | 26 | #include "power.h" |
26 | 27 | ||
@@ -255,7 +256,7 @@ static int create_image(int platform_mode) | |||
255 | 256 | ||
256 | int hibernation_snapshot(int platform_mode) | 257 | int hibernation_snapshot(int platform_mode) |
257 | { | 258 | { |
258 | int error; | 259 | int error, ftrace_save; |
259 | 260 | ||
260 | /* Free memory before shutting down devices. */ | 261 | /* Free memory before shutting down devices. */ |
261 | error = swsusp_shrink_memory(); | 262 | error = swsusp_shrink_memory(); |
@@ -267,6 +268,7 @@ int hibernation_snapshot(int platform_mode) | |||
267 | goto Close; | 268 | goto Close; |
268 | 269 | ||
269 | suspend_console(); | 270 | suspend_console(); |
271 | ftrace_save = __ftrace_enabled_save(); | ||
270 | error = device_suspend(PMSG_FREEZE); | 272 | error = device_suspend(PMSG_FREEZE); |
271 | if (error) | 273 | if (error) |
272 | goto Recover_platform; | 274 | goto Recover_platform; |
@@ -296,6 +298,7 @@ int hibernation_snapshot(int platform_mode) | |||
296 | Resume_devices: | 298 | Resume_devices: |
297 | device_resume(in_suspend ? | 299 | device_resume(in_suspend ? |
298 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 300 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
301 | __ftrace_enabled_restore(ftrace_save); | ||
299 | resume_console(); | 302 | resume_console(); |
300 | Close: | 303 | Close: |
301 | platform_end(platform_mode); | 304 | platform_end(platform_mode); |
@@ -366,10 +369,11 @@ static int resume_target_kernel(void) | |||
366 | 369 | ||
367 | int hibernation_restore(int platform_mode) | 370 | int hibernation_restore(int platform_mode) |
368 | { | 371 | { |
369 | int error; | 372 | int error, ftrace_save; |
370 | 373 | ||
371 | pm_prepare_console(); | 374 | pm_prepare_console(); |
372 | suspend_console(); | 375 | suspend_console(); |
376 | ftrace_save = __ftrace_enabled_save(); | ||
373 | error = device_suspend(PMSG_QUIESCE); | 377 | error = device_suspend(PMSG_QUIESCE); |
374 | if (error) | 378 | if (error) |
375 | goto Finish; | 379 | goto Finish; |
@@ -384,6 +388,7 @@ int hibernation_restore(int platform_mode) | |||
384 | platform_restore_cleanup(platform_mode); | 388 | platform_restore_cleanup(platform_mode); |
385 | device_resume(PMSG_RECOVER); | 389 | device_resume(PMSG_RECOVER); |
386 | Finish: | 390 | Finish: |
391 | __ftrace_enabled_restore(ftrace_save); | ||
387 | resume_console(); | 392 | resume_console(); |
388 | pm_restore_console(); | 393 | pm_restore_console(); |
389 | return error; | 394 | return error; |
@@ -396,7 +401,7 @@ int hibernation_restore(int platform_mode) | |||
396 | 401 | ||
397 | int hibernation_platform_enter(void) | 402 | int hibernation_platform_enter(void) |
398 | { | 403 | { |
399 | int error; | 404 | int error, ftrace_save; |
400 | 405 | ||
401 | if (!hibernation_ops) | 406 | if (!hibernation_ops) |
402 | return -ENOSYS; | 407 | return -ENOSYS; |
@@ -411,6 +416,7 @@ int hibernation_platform_enter(void) | |||
411 | goto Close; | 416 | goto Close; |
412 | 417 | ||
413 | suspend_console(); | 418 | suspend_console(); |
419 | ftrace_save = __ftrace_enabled_save(); | ||
414 | error = device_suspend(PMSG_HIBERNATE); | 420 | error = device_suspend(PMSG_HIBERNATE); |
415 | if (error) { | 421 | if (error) { |
416 | if (hibernation_ops->recover) | 422 | if (hibernation_ops->recover) |
@@ -445,6 +451,7 @@ int hibernation_platform_enter(void) | |||
445 | hibernation_ops->finish(); | 451 | hibernation_ops->finish(); |
446 | Resume_devices: | 452 | Resume_devices: |
447 | device_resume(PMSG_RESTORE); | 453 | device_resume(PMSG_RESTORE); |
454 | __ftrace_enabled_restore(ftrace_save); | ||
448 | resume_console(); | 455 | resume_console(); |
449 | Close: | 456 | Close: |
450 | hibernation_ops->end(); | 457 | hibernation_ops->end(); |
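
All three hibernation entry points (snapshot, restore, platform enter) now wrap the device suspend/resume window in an ftrace save/restore pair. Stripped of the surrounding error handling, the bracket looks roughly like this; the function name is invented for illustration:

int pm_do_devices_quiesced(void)
{
        int error, ftrace_save;

        suspend_console();
        ftrace_save = __ftrace_enabled_save();  /* keep function tracing out of the fragile window */

        error = device_suspend(PMSG_FREEZE);
        if (!error) {
                /* ... create the image or enter the platform state ... */
                device_resume(PMSG_THAW);
        }

        __ftrace_enabled_restore(ftrace_save);  /* tracing comes back only after devices resumed */
        resume_console();
        return error;
}
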
diff --git a/kernel/power/main.c b/kernel/power/main.c index 3398f4651aa1..540b16b68565 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/freezer.h> | 21 | #include <linux/freezer.h> |
22 | #include <linux/vmstat.h> | 22 | #include <linux/vmstat.h> |
23 | #include <linux/syscalls.h> | 23 | #include <linux/syscalls.h> |
24 | #include <linux/ftrace.h> | ||
24 | 25 | ||
25 | #include "power.h" | 26 | #include "power.h" |
26 | 27 | ||
@@ -132,6 +133,61 @@ static inline int suspend_test(int level) { return 0; } | |||
132 | 133 | ||
133 | #ifdef CONFIG_SUSPEND | 134 | #ifdef CONFIG_SUSPEND |
134 | 135 | ||
136 | #ifdef CONFIG_PM_TEST_SUSPEND | ||
137 | |||
138 | /* | ||
139 | * We test the system suspend code by setting an RTC wakealarm a short | ||
140 | * time in the future, then suspending. Suspending the devices won't | ||
141 | * normally take long ... some systems only need a few milliseconds. | ||
142 | * | ||
143 | * The time it takes is system-specific though, so when we test this | ||
144 | * during system bootup we allow a LOT of time. | ||
145 | */ | ||
146 | #define TEST_SUSPEND_SECONDS 5 | ||
147 | |||
148 | static unsigned long suspend_test_start_time; | ||
149 | |||
150 | static void suspend_test_start(void) | ||
151 | { | ||
152 | /* FIXME Use better timebase than "jiffies", ideally a clocksource. | ||
153 | * What we want is a hardware counter that will work correctly even | ||
154 | * during the irqs-are-off stages of the suspend/resume cycle... | ||
155 | */ | ||
156 | suspend_test_start_time = jiffies; | ||
157 | } | ||
158 | |||
159 | static void suspend_test_finish(const char *label) | ||
160 | { | ||
161 | long nj = jiffies - suspend_test_start_time; | ||
162 | unsigned msec; | ||
163 | |||
164 | msec = jiffies_to_msecs(abs(nj)); | ||
165 | pr_info("PM: %s took %d.%03d seconds\n", label, | ||
166 | msec / 1000, msec % 1000); | ||
167 | |||
168 | /* Warning on suspend means the RTC alarm period needs to be | ||
169 | * larger -- the system was sooo slooowwww to suspend that the | ||
170 | * alarm (should have) fired before the system went to sleep! | ||
171 | * | ||
172 | * Warning on either suspend or resume also means the system | ||
173 | * has some performance issues. The stack dump of a WARN_ON | ||
174 | * is more likely to get the right attention than a printk... | ||
175 | */ | ||
176 | WARN_ON(msec > (TEST_SUSPEND_SECONDS * 1000)); | ||
177 | } | ||
178 | |||
179 | #else | ||
180 | |||
181 | static void suspend_test_start(void) | ||
182 | { | ||
183 | } | ||
184 | |||
185 | static void suspend_test_finish(const char *label) | ||
186 | { | ||
187 | } | ||
188 | |||
189 | #endif | ||
190 | |||
135 | /* This is just an arbitrary number */ | 191 | /* This is just an arbitrary number */ |
136 | #define FREE_PAGE_NUMBER (100) | 192 | #define FREE_PAGE_NUMBER (100) |
137 | 193 | ||
@@ -255,7 +311,7 @@ static int suspend_enter(suspend_state_t state) | |||
255 | */ | 311 | */ |
256 | int suspend_devices_and_enter(suspend_state_t state) | 312 | int suspend_devices_and_enter(suspend_state_t state) |
257 | { | 313 | { |
258 | int error; | 314 | int error, ftrace_save; |
259 | 315 | ||
260 | if (!suspend_ops) | 316 | if (!suspend_ops) |
261 | return -ENOSYS; | 317 | return -ENOSYS; |
@@ -266,12 +322,14 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
266 | goto Close; | 322 | goto Close; |
267 | } | 323 | } |
268 | suspend_console(); | 324 | suspend_console(); |
325 | ftrace_save = __ftrace_enabled_save(); | ||
326 | suspend_test_start(); | ||
269 | error = device_suspend(PMSG_SUSPEND); | 327 | error = device_suspend(PMSG_SUSPEND); |
270 | if (error) { | 328 | if (error) { |
271 | printk(KERN_ERR "PM: Some devices failed to suspend\n"); | 329 | printk(KERN_ERR "PM: Some devices failed to suspend\n"); |
272 | goto Recover_platform; | 330 | goto Recover_platform; |
273 | } | 331 | } |
274 | 332 | suspend_test_finish("suspend devices"); | |
275 | if (suspend_test(TEST_DEVICES)) | 333 | if (suspend_test(TEST_DEVICES)) |
276 | goto Recover_platform; | 334 | goto Recover_platform; |
277 | 335 | ||
@@ -293,7 +351,10 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
293 | if (suspend_ops->finish) | 351 | if (suspend_ops->finish) |
294 | suspend_ops->finish(); | 352 | suspend_ops->finish(); |
295 | Resume_devices: | 353 | Resume_devices: |
354 | suspend_test_start(); | ||
296 | device_resume(PMSG_RESUME); | 355 | device_resume(PMSG_RESUME); |
356 | suspend_test_finish("resume devices"); | ||
357 | __ftrace_enabled_restore(ftrace_save); | ||
297 | resume_console(); | 358 | resume_console(); |
298 | Close: | 359 | Close: |
299 | if (suspend_ops->end) | 360 | if (suspend_ops->end) |
@@ -521,3 +582,144 @@ static int __init pm_init(void) | |||
521 | } | 582 | } |
522 | 583 | ||
523 | core_initcall(pm_init); | 584 | core_initcall(pm_init); |
585 | |||
586 | |||
587 | #ifdef CONFIG_PM_TEST_SUSPEND | ||
588 | |||
589 | #include <linux/rtc.h> | ||
590 | |||
591 | /* | ||
592 | * To test system suspend, we need a hands-off mechanism to resume the | ||
593 | * system. RTCs wake alarms are a common self-contained mechanism. | ||
594 | */ | ||
595 | |||
596 | static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state) | ||
597 | { | ||
598 | static char err_readtime[] __initdata = | ||
599 | KERN_ERR "PM: can't read %s time, err %d\n"; | ||
600 | static char err_wakealarm [] __initdata = | ||
601 | KERN_ERR "PM: can't set %s wakealarm, err %d\n"; | ||
602 | static char err_suspend[] __initdata = | ||
603 | KERN_ERR "PM: suspend test failed, error %d\n"; | ||
604 | static char info_test[] __initdata = | ||
605 | KERN_INFO "PM: test RTC wakeup from '%s' suspend\n"; | ||
606 | |||
607 | unsigned long now; | ||
608 | struct rtc_wkalrm alm; | ||
609 | int status; | ||
610 | |||
611 | /* this may fail if the RTC hasn't been initialized */ | ||
612 | status = rtc_read_time(rtc, &alm.time); | ||
613 | if (status < 0) { | ||
614 | printk(err_readtime, rtc->dev.bus_id, status); | ||
615 | return; | ||
616 | } | ||
617 | rtc_tm_to_time(&alm.time, &now); | ||
618 | |||
619 | memset(&alm, 0, sizeof alm); | ||
620 | rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time); | ||
621 | alm.enabled = true; | ||
622 | |||
623 | status = rtc_set_alarm(rtc, &alm); | ||
624 | if (status < 0) { | ||
625 | printk(err_wakealarm, rtc->dev.bus_id, status); | ||
626 | return; | ||
627 | } | ||
628 | |||
629 | if (state == PM_SUSPEND_MEM) { | ||
630 | printk(info_test, pm_states[state]); | ||
631 | status = pm_suspend(state); | ||
632 | if (status == -ENODEV) | ||
633 | state = PM_SUSPEND_STANDBY; | ||
634 | } | ||
635 | if (state == PM_SUSPEND_STANDBY) { | ||
636 | printk(info_test, pm_states[state]); | ||
637 | status = pm_suspend(state); | ||
638 | } | ||
639 | if (status < 0) | ||
640 | printk(err_suspend, status); | ||
641 | |||
642 | /* Some platforms can't detect that the alarm triggered the | ||
643 | * wakeup, or (accordingly) disable the alarm afterwards. | ||
644 | * It's supposed to give oneshot behavior; cope. | ||
645 | */ | ||
646 | alm.enabled = false; | ||
647 | rtc_set_alarm(rtc, &alm); | ||
648 | } | ||
649 | |||
650 | static int __init has_wakealarm(struct device *dev, void *name_ptr) | ||
651 | { | ||
652 | struct rtc_device *candidate = to_rtc_device(dev); | ||
653 | |||
654 | if (!candidate->ops->set_alarm) | ||
655 | return 0; | ||
656 | if (!device_may_wakeup(candidate->dev.parent)) | ||
657 | return 0; | ||
658 | |||
659 | *(char **)name_ptr = dev->bus_id; | ||
660 | return 1; | ||
661 | } | ||
662 | |||
663 | /* | ||
664 | * Kernel options like "test_suspend=mem" force suspend/resume sanity tests | ||
665 | * at startup time. They're normally disabled, for faster boot and because | ||
666 | * we can't know which states really work on this particular system. | ||
667 | */ | ||
668 | static suspend_state_t test_state __initdata = PM_SUSPEND_ON; | ||
669 | |||
670 | static char warn_bad_state[] __initdata = | ||
671 | KERN_WARNING "PM: can't test '%s' suspend state\n"; | ||
672 | |||
673 | static int __init setup_test_suspend(char *value) | ||
674 | { | ||
675 | unsigned i; | ||
676 | |||
677 | /* "=mem" ==> "mem" */ | ||
678 | value++; | ||
679 | for (i = 0; i < PM_SUSPEND_MAX; i++) { | ||
680 | if (!pm_states[i]) | ||
681 | continue; | ||
682 | if (strcmp(pm_states[i], value) != 0) | ||
683 | continue; | ||
684 | test_state = (__force suspend_state_t) i; | ||
685 | return 0; | ||
686 | } | ||
687 | printk(warn_bad_state, value); | ||
688 | return 0; | ||
689 | } | ||
690 | __setup("test_suspend", setup_test_suspend); | ||
691 | |||
692 | static int __init test_suspend(void) | ||
693 | { | ||
694 | static char warn_no_rtc[] __initdata = | ||
695 | KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n"; | ||
696 | |||
697 | char *pony = NULL; | ||
698 | struct rtc_device *rtc = NULL; | ||
699 | |||
700 | /* PM is initialized by now; is that state testable? */ | ||
701 | if (test_state == PM_SUSPEND_ON) | ||
702 | goto done; | ||
703 | if (!valid_state(test_state)) { | ||
704 | printk(warn_bad_state, pm_states[test_state]); | ||
705 | goto done; | ||
706 | } | ||
707 | |||
708 | /* RTCs have initialized by now too ... can we use one? */ | ||
709 | class_find_device(rtc_class, NULL, &pony, has_wakealarm); | ||
710 | if (pony) | ||
711 | rtc = rtc_class_open(pony); | ||
712 | if (!rtc) { | ||
713 | printk(warn_no_rtc); | ||
714 | goto done; | ||
715 | } | ||
716 | |||
717 | /* go for it */ | ||
718 | test_wakealarm(rtc, test_state); | ||
719 | rtc_class_close(rtc); | ||
720 | done: | ||
721 | return 0; | ||
722 | } | ||
723 | late_initcall(test_suspend); | ||
724 | |||
725 | #endif /* CONFIG_PM_TEST_SUSPEND */ | ||
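
Taken together: booting with CONFIG_PM_TEST_SUSPEND=y and test_suspend=mem on the command line makes the late initcall above pick a wakealarm-capable RTC and drive one suspend/resume cycle. Condensed into a single hypothetical function (same kernel APIs as the patch, error handling dropped):

static int __init suspend_selftest_sketch(void)
{
        char *name = NULL;
        struct rtc_device *rtc;
        struct rtc_wkalrm alm;
        unsigned long now;

        /* pick any RTC whose driver can program a wake alarm */
        class_find_device(rtc_class, NULL, &name, has_wakealarm);
        if (!name)
                return 0;
        rtc = rtc_class_open(name);

        /* arm the alarm a few seconds ahead, then suspend; the alarm resumes us */
        rtc_read_time(rtc, &alm.time);
        rtc_tm_to_time(&alm.time, &now);
        rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
        alm.enabled = true;
        rtc_set_alarm(rtc, &alm);
        pm_suspend(PM_SUSPEND_MEM);

        rtc_class_close(rtc);
        return 0;
}
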
diff --git a/kernel/power/power.h b/kernel/power/power.h index 700f44ec8406..acc0c101dbd5 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -53,8 +53,6 @@ extern int hibernation_platform_enter(void); | |||
53 | 53 | ||
54 | extern int pfn_is_nosave(unsigned long); | 54 | extern int pfn_is_nosave(unsigned long); |
55 | 55 | ||
56 | extern struct mutex pm_mutex; | ||
57 | |||
58 | #define power_attr(_name) \ | 56 | #define power_attr(_name) \ |
59 | static struct kobj_attribute _name##_attr = { \ | 57 | static struct kobj_attribute _name##_attr = { \ |
60 | .attr = { \ | 58 | .attr = { \ |
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c index 678ec736076b..72016f051477 100644 --- a/kernel/power/poweroff.c +++ b/kernel/power/poweroff.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/pm.h> | 10 | #include <linux/pm.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/reboot.h> | 12 | #include <linux/reboot.h> |
13 | #include <linux/cpumask.h> | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * When the user hits Sys-Rq o to power down the machine this is the | 16 | * When the user hits Sys-Rq o to power down the machine this is the |
@@ -25,7 +26,8 @@ static DECLARE_WORK(poweroff_work, do_poweroff); | |||
25 | 26 | ||
26 | static void handle_poweroff(int key, struct tty_struct *tty) | 27 | static void handle_poweroff(int key, struct tty_struct *tty) |
27 | { | 28 | { |
28 | schedule_work(&poweroff_work); | 29 | /* run sysrq poweroff on boot cpu */ |
30 | schedule_work_on(first_cpu(cpu_online_map), &poweroff_work); | ||
29 | } | 31 | } |
30 | 32 | ||
31 | static struct sysrq_key_op sysrq_poweroff_op = { | 33 | static struct sysrq_key_op sysrq_poweroff_op = { |
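
The handler now forces the poweroff work onto the boot CPU rather than whichever CPU happened to service the SysRq key. The same queue-on-a-specific-CPU pattern, shown on a made-up work item:

static void do_one_shot(struct work_struct *unused)
{
        /* executes on the CPU it was queued to, here the first online CPU */
}
static DECLARE_WORK(one_shot_work, do_one_shot);

static void kick_one_shot(void)
{
        schedule_work_on(first_cpu(cpu_online_map), &one_shot_work);
}
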
diff --git a/kernel/power/process.c b/kernel/power/process.c index 5fb87652f214..278946aecaf0 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -149,7 +149,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
149 | unsigned long end_time; | 149 | unsigned long end_time; |
150 | unsigned int todo; | 150 | unsigned int todo; |
151 | struct timeval start, end; | 151 | struct timeval start, end; |
152 | s64 elapsed_csecs64; | 152 | u64 elapsed_csecs64; |
153 | unsigned int elapsed_csecs; | 153 | unsigned int elapsed_csecs; |
154 | 154 | ||
155 | do_gettimeofday(&start); | 155 | do_gettimeofday(&start); |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 5f91a07c4eac..5d2ab836e998 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -205,8 +205,7 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave) | |||
205 | * objects. The main list's elements are of type struct zone_bitmap | 205 | * objects. The main list's elements are of type struct zone_bitmap |
206 | * and each of them corresponds to one zone. For each zone bitmap | 206 | * and each of them corresponds to one zone. For each zone bitmap
207 | * object there is a list of objects of type struct bm_block that | 207 | * object there is a list of objects of type struct bm_block that |
208 | * represent the blocks of bit chunks in which information is | 208 | * represent the blocks of the bitmap in which information is stored.
209 | * stored. | ||
210 | * | 209 | * |
211 | * struct memory_bitmap contains a pointer to the main list of zone | 210 | * struct memory_bitmap contains a pointer to the main list of zone |
212 | * bitmap objects, a struct bm_position used for browsing the bitmap, | 211 | * bitmap objects, a struct bm_position used for browsing the bitmap, |
@@ -224,26 +223,27 @@ static void chain_free(struct chain_allocator *ca, int clear_page_nosave) | |||
224 | * pfns that correspond to the start and end of the represented zone. | 223 | * pfns that correspond to the start and end of the represented zone. |
225 | * | 224 | * |
226 | * struct bm_block contains a pointer to the memory page in which | 225 | * struct bm_block contains a pointer to the memory page in which |
227 | * information is stored (in the form of a block of bit chunks | 226 | * information is stored (in the form of a block of bitmap).
228 | * of type unsigned long each). It also contains the pfns that | 227 | * It also contains the pfns that correspond to the start and end of |
229 | * correspond to the start and end of the represented memory area and | 228 | * the represented memory area. |
230 | * the number of bit chunks in the block. | ||
231 | */ | 229 | */ |
232 | 230 | ||
233 | #define BM_END_OF_MAP (~0UL) | 231 | #define BM_END_OF_MAP (~0UL) |
234 | 232 | ||
235 | #define BM_CHUNKS_PER_BLOCK (PAGE_SIZE / sizeof(long)) | ||
236 | #define BM_BITS_PER_CHUNK (sizeof(long) << 3) | ||
237 | #define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) | 233 | #define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) |
238 | 234 | ||
239 | struct bm_block { | 235 | struct bm_block { |
240 | struct bm_block *next; /* next element of the list */ | 236 | struct bm_block *next; /* next element of the list */ |
241 | unsigned long start_pfn; /* pfn represented by the first bit */ | 237 | unsigned long start_pfn; /* pfn represented by the first bit */ |
242 | unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ | 238 | unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ |
243 | unsigned int size; /* number of bit chunks */ | 239 | unsigned long *data; /* bitmap representing pages */ |
244 | unsigned long *data; /* chunks of bits representing pages */ | ||
245 | }; | 240 | }; |
246 | 241 | ||
242 | static inline unsigned long bm_block_bits(struct bm_block *bb) | ||
243 | { | ||
244 | return bb->end_pfn - bb->start_pfn; | ||
245 | } | ||
246 | |||
247 | struct zone_bitmap { | 247 | struct zone_bitmap { |
248 | struct zone_bitmap *next; /* next element of the list */ | 248 | struct zone_bitmap *next; /* next element of the list */ |
249 | unsigned long start_pfn; /* minimal pfn in this zone */ | 249 | unsigned long start_pfn; /* minimal pfn in this zone */ |
@@ -257,7 +257,6 @@ struct zone_bitmap { | |||
257 | struct bm_position { | 257 | struct bm_position { |
258 | struct zone_bitmap *zone_bm; | 258 | struct zone_bitmap *zone_bm; |
259 | struct bm_block *block; | 259 | struct bm_block *block; |
260 | int chunk; | ||
261 | int bit; | 260 | int bit; |
262 | }; | 261 | }; |
263 | 262 | ||
@@ -272,12 +271,6 @@ struct memory_bitmap { | |||
272 | 271 | ||
273 | /* Functions that operate on memory bitmaps */ | 272 | /* Functions that operate on memory bitmaps */ |
274 | 273 | ||
275 | static inline void memory_bm_reset_chunk(struct memory_bitmap *bm) | ||
276 | { | ||
277 | bm->cur.chunk = 0; | ||
278 | bm->cur.bit = -1; | ||
279 | } | ||
280 | |||
281 | static void memory_bm_position_reset(struct memory_bitmap *bm) | 274 | static void memory_bm_position_reset(struct memory_bitmap *bm) |
282 | { | 275 | { |
283 | struct zone_bitmap *zone_bm; | 276 | struct zone_bitmap *zone_bm; |
@@ -285,7 +278,7 @@ static void memory_bm_position_reset(struct memory_bitmap *bm) | |||
285 | zone_bm = bm->zone_bm_list; | 278 | zone_bm = bm->zone_bm_list; |
286 | bm->cur.zone_bm = zone_bm; | 279 | bm->cur.zone_bm = zone_bm; |
287 | bm->cur.block = zone_bm->bm_blocks; | 280 | bm->cur.block = zone_bm->bm_blocks; |
288 | memory_bm_reset_chunk(bm); | 281 | bm->cur.bit = 0; |
289 | } | 282 | } |
290 | 283 | ||
291 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); | 284 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); |
@@ -394,12 +387,10 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) | |||
394 | bb->start_pfn = pfn; | 387 | bb->start_pfn = pfn; |
395 | if (nr >= BM_BITS_PER_BLOCK) { | 388 | if (nr >= BM_BITS_PER_BLOCK) { |
396 | pfn += BM_BITS_PER_BLOCK; | 389 | pfn += BM_BITS_PER_BLOCK; |
397 | bb->size = BM_CHUNKS_PER_BLOCK; | ||
398 | nr -= BM_BITS_PER_BLOCK; | 390 | nr -= BM_BITS_PER_BLOCK; |
399 | } else { | 391 | } else { |
400 | /* This is executed only once in the loop */ | 392 | /* This is executed only once in the loop */ |
401 | pfn += nr; | 393 | pfn += nr; |
402 | bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK); | ||
403 | } | 394 | } |
404 | bb->end_pfn = pfn; | 395 | bb->end_pfn = pfn; |
405 | bb = bb->next; | 396 | bb = bb->next; |
@@ -478,8 +469,8 @@ static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, | |||
478 | } | 469 | } |
479 | zone_bm->cur_block = bb; | 470 | zone_bm->cur_block = bb; |
480 | pfn -= bb->start_pfn; | 471 | pfn -= bb->start_pfn; |
481 | *bit_nr = pfn % BM_BITS_PER_CHUNK; | 472 | *bit_nr = pfn; |
482 | *addr = bb->data + pfn / BM_BITS_PER_CHUNK; | 473 | *addr = bb->data; |
483 | return 0; | 474 | return 0; |
484 | } | 475 | } |
485 | 476 | ||
@@ -528,36 +519,6 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) | |||
528 | return test_bit(bit, addr); | 519 | return test_bit(bit, addr); |
529 | } | 520 | } |
530 | 521 | ||
531 | /* Two auxiliary functions for memory_bm_next_pfn */ | ||
532 | |||
533 | /* Find the first set bit in the given chunk, if there is one */ | ||
534 | |||
535 | static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p) | ||
536 | { | ||
537 | bit++; | ||
538 | while (bit < BM_BITS_PER_CHUNK) { | ||
539 | if (test_bit(bit, chunk_p)) | ||
540 | return bit; | ||
541 | |||
542 | bit++; | ||
543 | } | ||
544 | return -1; | ||
545 | } | ||
546 | |||
547 | /* Find a chunk containing some bits set in given block of bits */ | ||
548 | |||
549 | static inline int next_chunk_in_block(int n, struct bm_block *bb) | ||
550 | { | ||
551 | n++; | ||
552 | while (n < bb->size) { | ||
553 | if (bb->data[n]) | ||
554 | return n; | ||
555 | |||
556 | n++; | ||
557 | } | ||
558 | return -1; | ||
559 | } | ||
560 | |||
561 | /** | 522 | /** |
562 | * memory_bm_next_pfn - find the pfn that corresponds to the next set bit | 523 | * memory_bm_next_pfn - find the pfn that corresponds to the next set bit |
563 | * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is | 524 | * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is |
@@ -571,40 +532,33 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) | |||
571 | { | 532 | { |
572 | struct zone_bitmap *zone_bm; | 533 | struct zone_bitmap *zone_bm; |
573 | struct bm_block *bb; | 534 | struct bm_block *bb; |
574 | int chunk; | ||
575 | int bit; | 535 | int bit; |
576 | 536 | ||
577 | do { | 537 | do { |
578 | bb = bm->cur.block; | 538 | bb = bm->cur.block; |
579 | do { | 539 | do { |
580 | chunk = bm->cur.chunk; | ||
581 | bit = bm->cur.bit; | 540 | bit = bm->cur.bit; |
582 | do { | 541 | bit = find_next_bit(bb->data, bm_block_bits(bb), bit); |
583 | bit = next_bit_in_chunk(bit, bb->data + chunk); | 542 | if (bit < bm_block_bits(bb)) |
584 | if (bit >= 0) | 543 | goto Return_pfn; |
585 | goto Return_pfn; | 544 | |
586 | |||
587 | chunk = next_chunk_in_block(chunk, bb); | ||
588 | bit = -1; | ||
589 | } while (chunk >= 0); | ||
590 | bb = bb->next; | 545 | bb = bb->next; |
591 | bm->cur.block = bb; | 546 | bm->cur.block = bb; |
592 | memory_bm_reset_chunk(bm); | 547 | bm->cur.bit = 0; |
593 | } while (bb); | 548 | } while (bb); |
594 | zone_bm = bm->cur.zone_bm->next; | 549 | zone_bm = bm->cur.zone_bm->next; |
595 | if (zone_bm) { | 550 | if (zone_bm) { |
596 | bm->cur.zone_bm = zone_bm; | 551 | bm->cur.zone_bm = zone_bm; |
597 | bm->cur.block = zone_bm->bm_blocks; | 552 | bm->cur.block = zone_bm->bm_blocks; |
598 | memory_bm_reset_chunk(bm); | 553 | bm->cur.bit = 0; |
599 | } | 554 | } |
600 | } while (zone_bm); | 555 | } while (zone_bm); |
601 | memory_bm_position_reset(bm); | 556 | memory_bm_position_reset(bm); |
602 | return BM_END_OF_MAP; | 557 | return BM_END_OF_MAP; |
603 | 558 | ||
604 | Return_pfn: | 559 | Return_pfn: |
605 | bm->cur.chunk = chunk; | 560 | bm->cur.bit = bit + 1; |
606 | bm->cur.bit = bit; | 561 | return bb->start_pfn + bit; |
607 | return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit; | ||
608 | } | 562 | } |
609 | 563 | ||
610 | /** | 564 | /** |
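
With the chunk layer removed, each bm_block is one flat bitmap of bm_block_bits(bb) bits, so advancing to the next marked pfn is a single find_next_bit() call. An isolated sketch of that per-block step (helper name invented, fields as in the patch):

/* return the next set bit's pfn within one block, or BM_END_OF_MAP if none */
static unsigned long next_pfn_in_block(struct bm_block *bb, unsigned int start)
{
        unsigned long bit = find_next_bit(bb->data, bm_block_bits(bb), start);

        if (bit >= bm_block_bits(bb))
                return BM_END_OF_MAP;           /* caller moves on to bb->next */
        return bb->start_pfn + bit;             /* each bit maps 1:1 onto a pfn offset */
}
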
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index a0abf9a463f9..80ccac849e46 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/file.h> | 15 | #include <linux/file.h> |
16 | #include <linux/utsname.h> | 16 | #include <linux/utsname.h> |
17 | #include <linux/version.h> | ||
18 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
19 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
20 | #include <linux/genhd.h> | 19 | #include <linux/genhd.h> |
diff --git a/kernel/printk.c b/kernel/printk.c index 07ad9e7f7a66..b51b1567bb55 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -933,7 +933,7 @@ void suspend_console(void) | |||
933 | { | 933 | { |
934 | if (!console_suspend_enabled) | 934 | if (!console_suspend_enabled) |
935 | return; | 935 | return; |
936 | printk("Suspending console(s)\n"); | 936 | printk("Suspending console(s) (use no_console_suspend to debug)\n"); |
937 | acquire_console_sem(); | 937 | acquire_console_sem(); |
938 | console_suspended = 1; | 938 | console_suspended = 1; |
939 | } | 939 | } |
@@ -1308,29 +1308,18 @@ void tty_write_message(struct tty_struct *tty, char *msg) | |||
1308 | } | 1308 | } |
1309 | 1309 | ||
1310 | #if defined CONFIG_PRINTK | 1310 | #if defined CONFIG_PRINTK |
1311 | |||
1311 | /* | 1312 | /* |
1312 | * printk rate limiting, lifted from the networking subsystem. | 1313 | * printk rate limiting, lifted from the networking subsystem. |
1313 | * | 1314 | * |
1314 | * This enforces a rate limit: not more than one kernel message | 1315 | * This enforces a rate limit: not more than 10 kernel messages |
1315 | * every printk_ratelimit_jiffies to make a denial-of-service | 1316 | * every 5s to make a denial-of-service attack impossible. |
1316 | * attack impossible. | ||
1317 | */ | 1317 | */ |
1318 | int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst) | 1318 | DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); |
1319 | { | ||
1320 | return __ratelimit(ratelimit_jiffies, ratelimit_burst); | ||
1321 | } | ||
1322 | EXPORT_SYMBOL(__printk_ratelimit); | ||
1323 | |||
1324 | /* minimum time in jiffies between messages */ | ||
1325 | int printk_ratelimit_jiffies = 5 * HZ; | ||
1326 | |||
1327 | /* number of messages we send before ratelimiting */ | ||
1328 | int printk_ratelimit_burst = 10; | ||
1329 | 1319 | ||
1330 | int printk_ratelimit(void) | 1320 | int printk_ratelimit(void) |
1331 | { | 1321 | { |
1332 | return __printk_ratelimit(printk_ratelimit_jiffies, | 1322 | return __ratelimit(&printk_ratelimit_state); |
1333 | printk_ratelimit_burst); | ||
1334 | } | 1323 | } |
1335 | EXPORT_SYMBOL(printk_ratelimit); | 1324 | EXPORT_SYMBOL(printk_ratelimit); |
1336 | 1325 | ||
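
The open-coded jiffies/burst pair is gone; printk now leans on the generic ratelimit state (10 messages per 5 seconds). Other subsystems can use the same helpers for a private limiter; a hypothetical driver-side example:

#include <linux/ratelimit.h>

/* at most 10 warnings every 5 seconds from this driver */
static DEFINE_RATELIMIT_STATE(mydrv_rs, 5 * HZ, 10);

static void mydrv_report_error(int err)
{
        if (__ratelimit(&mydrv_rs))
                printk(KERN_WARNING "mydrv: transfer failed, error %d\n", err);
}
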
diff --git a/kernel/profile.c b/kernel/profile.c index 58926411eb2a..cd26bed4cc26 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -112,8 +112,6 @@ void __init profile_init(void) | |||
112 | 112 | ||
113 | /* Profile event notifications */ | 113 | /* Profile event notifications */ |
114 | 114 | ||
115 | #ifdef CONFIG_PROFILING | ||
116 | |||
117 | static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); | 115 | static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); |
118 | static ATOMIC_NOTIFIER_HEAD(task_free_notifier); | 116 | static ATOMIC_NOTIFIER_HEAD(task_free_notifier); |
119 | static BLOCKING_NOTIFIER_HEAD(munmap_notifier); | 117 | static BLOCKING_NOTIFIER_HEAD(munmap_notifier); |
@@ -203,8 +201,6 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *)) | |||
203 | } | 201 | } |
204 | EXPORT_SYMBOL_GPL(unregister_timer_hook); | 202 | EXPORT_SYMBOL_GPL(unregister_timer_hook); |
205 | 203 | ||
206 | #endif /* CONFIG_PROFILING */ | ||
207 | |||
208 | 204 | ||
209 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
210 | /* | 206 | /* |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 8392a9da6450..356699a96d56 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -107,7 +107,7 @@ int ptrace_check_attach(struct task_struct *child, int kill) | |||
107 | read_unlock(&tasklist_lock); | 107 | read_unlock(&tasklist_lock); |
108 | 108 | ||
109 | if (!ret && !kill) | 109 | if (!ret && !kill) |
110 | wait_task_inactive(child); | 110 | ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH; |
111 | 111 | ||
112 | /* All systems go.. */ | 112 | /* All systems go.. */ |
113 | return ret; | 113 | return ret; |
@@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
140 | if (!dumpable && !capable(CAP_SYS_PTRACE)) | 140 | if (!dumpable && !capable(CAP_SYS_PTRACE)) |
141 | return -EPERM; | 141 | return -EPERM; |
142 | 142 | ||
143 | return security_ptrace(current, task, mode); | 143 | return security_ptrace_may_access(task, mode); |
144 | } | 144 | } |
145 | 145 | ||
146 | bool ptrace_may_access(struct task_struct *task, unsigned int mode) | 146 | bool ptrace_may_access(struct task_struct *task, unsigned int mode) |
@@ -499,8 +499,7 @@ repeat: | |||
499 | goto repeat; | 499 | goto repeat; |
500 | } | 500 | } |
501 | 501 | ||
502 | ret = security_ptrace(current->parent, current, | 502 | ret = security_ptrace_traceme(current->parent); |
503 | PTRACE_MODE_ATTACH); | ||
504 | 503 | ||
505 | /* | 504 | /* |
506 | * Set the ptrace bit in the process ptrace flags. | 505 | * Set the ptrace bit in the process ptrace flags. |
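
Two behavioral changes here: ptrace_check_attach() now passes the state it expects to wait_task_inactive() and turns a mismatch into -ESRCH instead of assuming the child quiesced, and the single security_ptrace() hook is split into may_access/traceme variants. The new quiescence check, isolated into a hypothetical helper:

/* make sure a traced child has actually stopped before we touch its state */
static int ensure_child_traced(struct task_struct *child)
{
        if (!wait_task_inactive(child, TASK_TRACED))
                return -ESRCH;  /* child woke or changed state; caller must not proceed */
        return 0;
}
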
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c index 16eeeaa9d618..aad93cdc9f68 100644 --- a/kernel/rcuclassic.c +++ b/kernel/rcuclassic.c | |||
@@ -91,8 +91,8 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
91 | * rdp->cpu is the current cpu. | 91 | * rdp->cpu is the current cpu. |
92 | * | 92 | * |
93 | * cpu_online_map is updated by the _cpu_down() | 93 | * cpu_online_map is updated by the _cpu_down() |
94 | * using stop_machine_run(). Since we're in irqs disabled | 94 | * using __stop_machine(). Since we're in irqs disabled |
95 | * section, stop_machine_run() is not executing, hence | 95 | * section, __stop_machine() is not executing, hence
96 | * the cpu_online_map is stable. | 96 | * the cpu_online_map is stable. |
97 | * | 97 | * |
98 | * However, a cpu might have been offlined _just_ before | 98 | * However, a cpu might have been offlined _just_ before |
@@ -106,7 +106,7 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
106 | */ | 106 | */ |
107 | cpus_and(cpumask, rcp->cpumask, cpu_online_map); | 107 | cpus_and(cpumask, rcp->cpumask, cpu_online_map); |
108 | cpu_clear(rdp->cpu, cpumask); | 108 | cpu_clear(rdp->cpu, cpumask); |
109 | for_each_cpu_mask(cpu, cpumask) | 109 | for_each_cpu_mask_nr(cpu, cpumask) |
110 | smp_send_reschedule(cpu); | 110 | smp_send_reschedule(cpu); |
111 | } | 111 | } |
112 | } | 112 | } |
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index f14f372cf6f5..467d5940f624 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -77,6 +77,7 @@ void wakeme_after_rcu(struct rcu_head *head) | |||
77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 77 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
78 | * and may be nested. | 78 | * and may be nested. |
79 | */ | 79 | */ |
80 | void synchronize_rcu(void); /* Makes kernel-doc tools happy */ | ||
80 | synchronize_rcu_xxx(synchronize_rcu, call_rcu) | 81 | synchronize_rcu_xxx(synchronize_rcu, call_rcu) |
81 | EXPORT_SYMBOL_GPL(synchronize_rcu); | 82 | EXPORT_SYMBOL_GPL(synchronize_rcu); |
82 | 83 | ||
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c index 6f62b77d93c4..27827931ca0d 100644 --- a/kernel/rcupreempt.c +++ b/kernel/rcupreempt.c | |||
@@ -756,7 +756,7 @@ rcu_try_flip_idle(void) | |||
756 | 756 | ||
757 | /* Now ask each CPU for acknowledgement of the flip. */ | 757 | /* Now ask each CPU for acknowledgement of the flip. */ |
758 | 758 | ||
759 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 759 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
760 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; | 760 | per_cpu(rcu_flip_flag, cpu) = rcu_flipped; |
761 | dyntick_save_progress_counter(cpu); | 761 | dyntick_save_progress_counter(cpu); |
762 | } | 762 | } |
@@ -774,7 +774,7 @@ rcu_try_flip_waitack(void) | |||
774 | int cpu; | 774 | int cpu; |
775 | 775 | ||
776 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); | 776 | RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); |
777 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 777 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
778 | if (rcu_try_flip_waitack_needed(cpu) && | 778 | if (rcu_try_flip_waitack_needed(cpu) && |
779 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { | 779 | per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { |
780 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); | 780 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); |
@@ -806,7 +806,7 @@ rcu_try_flip_waitzero(void) | |||
806 | /* Check to see if the sum of the "last" counters is zero. */ | 806 | /* Check to see if the sum of the "last" counters is zero. */ |
807 | 807 | ||
808 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); | 808 | RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); |
809 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 809 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
810 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; | 810 | sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; |
811 | if (sum != 0) { | 811 | if (sum != 0) { |
812 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); | 812 | RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); |
@@ -821,7 +821,7 @@ rcu_try_flip_waitzero(void) | |||
821 | smp_mb(); /* ^^^^^^^^^^^^ */ | 821 | smp_mb(); /* ^^^^^^^^^^^^ */ |
822 | 822 | ||
823 | /* Call for a memory barrier from each CPU. */ | 823 | /* Call for a memory barrier from each CPU. */ |
824 | for_each_cpu_mask(cpu, rcu_cpu_online_map) { | 824 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) { |
825 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; | 825 | per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; |
826 | dyntick_save_progress_counter(cpu); | 826 | dyntick_save_progress_counter(cpu); |
827 | } | 827 | } |
@@ -841,7 +841,7 @@ rcu_try_flip_waitmb(void) | |||
841 | int cpu; | 841 | int cpu; |
842 | 842 | ||
843 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); | 843 | RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); |
844 | for_each_cpu_mask(cpu, rcu_cpu_online_map) | 844 | for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) |
845 | if (rcu_try_flip_waitmb_needed(cpu) && | 845 | if (rcu_try_flip_waitmb_needed(cpu) && |
846 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { | 846 | per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { |
847 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); | 847 | RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); |
diff --git a/kernel/relay.c b/kernel/relay.c index 7de644cdec43..8d13a7855c08 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -407,6 +407,35 @@ void relay_reset(struct rchan *chan) | |||
407 | } | 407 | } |
408 | EXPORT_SYMBOL_GPL(relay_reset); | 408 | EXPORT_SYMBOL_GPL(relay_reset); |
409 | 409 | ||
410 | static inline void relay_set_buf_dentry(struct rchan_buf *buf, | ||
411 | struct dentry *dentry) | ||
412 | { | ||
413 | buf->dentry = dentry; | ||
414 | buf->dentry->d_inode->i_size = buf->early_bytes; | ||
415 | } | ||
416 | |||
417 | static struct dentry *relay_create_buf_file(struct rchan *chan, | ||
418 | struct rchan_buf *buf, | ||
419 | unsigned int cpu) | ||
420 | { | ||
421 | struct dentry *dentry; | ||
422 | char *tmpname; | ||
423 | |||
424 | tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL); | ||
425 | if (!tmpname) | ||
426 | return NULL; | ||
427 | snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu); | ||
428 | |||
429 | /* Create file in fs */ | ||
430 | dentry = chan->cb->create_buf_file(tmpname, chan->parent, | ||
431 | S_IRUSR, buf, | ||
432 | &chan->is_global); | ||
433 | |||
434 | kfree(tmpname); | ||
435 | |||
436 | return dentry; | ||
437 | } | ||
438 | |||
410 | /* | 439 | /* |
411 | * relay_open_buf - create a new relay channel buffer | 440 | * relay_open_buf - create a new relay channel buffer |
412 | * | 441 | * |
@@ -416,45 +445,34 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) | |||
416 | { | 445 | { |
417 | struct rchan_buf *buf = NULL; | 446 | struct rchan_buf *buf = NULL; |
418 | struct dentry *dentry; | 447 | struct dentry *dentry; |
419 | char *tmpname; | ||
420 | 448 | ||
421 | if (chan->is_global) | 449 | if (chan->is_global) |
422 | return chan->buf[0]; | 450 | return chan->buf[0]; |
423 | 451 | ||
424 | tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL); | ||
425 | if (!tmpname) | ||
426 | goto end; | ||
427 | snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu); | ||
428 | |||
429 | buf = relay_create_buf(chan); | 452 | buf = relay_create_buf(chan); |
430 | if (!buf) | 453 | if (!buf) |
431 | goto free_name; | 454 | return NULL; |
455 | |||
456 | if (chan->has_base_filename) { | ||
457 | dentry = relay_create_buf_file(chan, buf, cpu); | ||
458 | if (!dentry) | ||
459 | goto free_buf; | ||
460 | relay_set_buf_dentry(buf, dentry); | ||
461 | } | ||
432 | 462 | ||
433 | buf->cpu = cpu; | 463 | buf->cpu = cpu; |
434 | __relay_reset(buf, 1); | 464 | __relay_reset(buf, 1); |
435 | 465 | ||
436 | /* Create file in fs */ | ||
437 | dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR, | ||
438 | buf, &chan->is_global); | ||
439 | if (!dentry) | ||
440 | goto free_buf; | ||
441 | |||
442 | buf->dentry = dentry; | ||
443 | |||
444 | if(chan->is_global) { | 466 | if(chan->is_global) { |
445 | chan->buf[0] = buf; | 467 | chan->buf[0] = buf; |
446 | buf->cpu = 0; | 468 | buf->cpu = 0; |
447 | } | 469 | } |
448 | 470 | ||
449 | goto free_name; | 471 | return buf; |
450 | 472 | ||
451 | free_buf: | 473 | free_buf: |
452 | relay_destroy_buf(buf); | 474 | relay_destroy_buf(buf); |
453 | buf = NULL; | 475 | return NULL; |
454 | free_name: | ||
455 | kfree(tmpname); | ||
456 | end: | ||
457 | return buf; | ||
458 | } | 476 | } |
459 | 477 | ||
460 | /** | 478 | /** |
@@ -537,8 +555,8 @@ static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, | |||
537 | 555 | ||
538 | /** | 556 | /** |
539 | * relay_open - create a new relay channel | 557 | * relay_open - create a new relay channel |
540 | * @base_filename: base name of files to create | 558 | * @base_filename: base name of files to create, %NULL for buffering only |
541 | * @parent: dentry of parent directory, %NULL for root directory | 559 | * @parent: dentry of parent directory, %NULL for root directory or buffer |
542 | * @subbuf_size: size of sub-buffers | 560 | * @subbuf_size: size of sub-buffers |
543 | * @n_subbufs: number of sub-buffers | 561 | * @n_subbufs: number of sub-buffers |
544 | * @cb: client callback functions | 562 | * @cb: client callback functions |
@@ -560,8 +578,6 @@ struct rchan *relay_open(const char *base_filename, | |||
560 | { | 578 | { |
561 | unsigned int i; | 579 | unsigned int i; |
562 | struct rchan *chan; | 580 | struct rchan *chan; |
563 | if (!base_filename) | ||
564 | return NULL; | ||
565 | 581 | ||
566 | if (!(subbuf_size && n_subbufs)) | 582 | if (!(subbuf_size && n_subbufs)) |
567 | return NULL; | 583 | return NULL; |
@@ -576,7 +592,10 @@ struct rchan *relay_open(const char *base_filename, | |||
576 | chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); | 592 | chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); |
577 | chan->parent = parent; | 593 | chan->parent = parent; |
578 | chan->private_data = private_data; | 594 | chan->private_data = private_data; |
579 | strlcpy(chan->base_filename, base_filename, NAME_MAX); | 595 | if (base_filename) { |
596 | chan->has_base_filename = 1; | ||
597 | strlcpy(chan->base_filename, base_filename, NAME_MAX); | ||
598 | } | ||
580 | setup_callbacks(chan, cb); | 599 | setup_callbacks(chan, cb); |
581 | kref_init(&chan->kref); | 600 | kref_init(&chan->kref); |
582 | 601 | ||
@@ -604,6 +623,94 @@ free_bufs: | |||
604 | } | 623 | } |
605 | EXPORT_SYMBOL_GPL(relay_open); | 624 | EXPORT_SYMBOL_GPL(relay_open); |
606 | 625 | ||
626 | struct rchan_percpu_buf_dispatcher { | ||
627 | struct rchan_buf *buf; | ||
628 | struct dentry *dentry; | ||
629 | }; | ||
630 | |||
631 | /* Called in atomic context. */ | ||
632 | static void __relay_set_buf_dentry(void *info) | ||
633 | { | ||
634 | struct rchan_percpu_buf_dispatcher *p = info; | ||
635 | |||
636 | relay_set_buf_dentry(p->buf, p->dentry); | ||
637 | } | ||
638 | |||
639 | /** | ||
640 | * relay_late_setup_files - triggers file creation | ||
641 | * @chan: channel to operate on | ||
642 | * @base_filename: base name of files to create | ||
643 | * @parent: dentry of parent directory, %NULL for root directory | ||
644 | * | ||
645 | * Returns 0 if successful, non-zero otherwise. | ||
646 | * | ||
647 | * Use to setup files for a previously buffer-only channel. | ||
648 | * Useful to do early tracing in kernel, before VFS is up, for example. | ||
649 | */ | ||
650 | int relay_late_setup_files(struct rchan *chan, | ||
651 | const char *base_filename, | ||
652 | struct dentry *parent) | ||
653 | { | ||
654 | int err = 0; | ||
655 | unsigned int i, curr_cpu; | ||
656 | unsigned long flags; | ||
657 | struct dentry *dentry; | ||
658 | struct rchan_percpu_buf_dispatcher disp; | ||
659 | |||
660 | if (!chan || !base_filename) | ||
661 | return -EINVAL; | ||
662 | |||
663 | strlcpy(chan->base_filename, base_filename, NAME_MAX); | ||
664 | |||
665 | mutex_lock(&relay_channels_mutex); | ||
666 | /* Is chan already set up? */ | ||
667 | if (unlikely(chan->has_base_filename)) | ||
668 | return -EEXIST; | ||
669 | chan->has_base_filename = 1; | ||
670 | chan->parent = parent; | ||
671 | curr_cpu = get_cpu(); | ||
672 | /* | ||
673 | * The CPU hotplug notifier ran before us and created buffers with | ||
674 | * no files associated. So it's safe to call relay_setup_buf_file() | ||
675 | * on all currently online CPUs. | ||
676 | */ | ||
677 | for_each_online_cpu(i) { | ||
678 | if (unlikely(!chan->buf[i])) { | ||
679 | printk(KERN_ERR "relay_late_setup_files: CPU %u " | ||
680 | "has no buffer, it must have!\n", i); | ||
681 | BUG(); | ||
682 | err = -EINVAL; | ||
683 | break; | ||
684 | } | ||
685 | |||
686 | dentry = relay_create_buf_file(chan, chan->buf[i], i); | ||
687 | if (unlikely(!dentry)) { | ||
688 | err = -EINVAL; | ||
689 | break; | ||
690 | } | ||
691 | |||
692 | if (curr_cpu == i) { | ||
693 | local_irq_save(flags); | ||
694 | relay_set_buf_dentry(chan->buf[i], dentry); | ||
695 | local_irq_restore(flags); | ||
696 | } else { | ||
697 | disp.buf = chan->buf[i]; | ||
698 | disp.dentry = dentry; | ||
699 | smp_mb(); | ||
700 | /* relay_channels_mutex must be held, so wait. */ | ||
701 | err = smp_call_function_single(i, | ||
702 | __relay_set_buf_dentry, | ||
703 | &disp, 1); | ||
704 | } | ||
705 | if (unlikely(err)) | ||
706 | break; | ||
707 | } | ||
708 | put_cpu(); | ||
709 | mutex_unlock(&relay_channels_mutex); | ||
710 | |||
711 | return err; | ||
712 | } | ||
713 | |||
607 | /** | 714 | /** |
608 | * relay_switch_subbuf - switch to a new sub-buffer | 715 | * relay_switch_subbuf - switch to a new sub-buffer |
609 | * @buf: channel buffer | 716 | * @buf: channel buffer |
@@ -627,8 +734,13 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) | |||
627 | old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; | 734 | old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs; |
628 | buf->padding[old_subbuf] = buf->prev_padding; | 735 | buf->padding[old_subbuf] = buf->prev_padding; |
629 | buf->subbufs_produced++; | 736 | buf->subbufs_produced++; |
630 | buf->dentry->d_inode->i_size += buf->chan->subbuf_size - | 737 | if (buf->dentry) |
631 | buf->padding[old_subbuf]; | 738 | buf->dentry->d_inode->i_size += |
739 | buf->chan->subbuf_size - | ||
740 | buf->padding[old_subbuf]; | ||
741 | else | ||
742 | buf->early_bytes += buf->chan->subbuf_size - | ||
743 | buf->padding[old_subbuf]; | ||
632 | smp_mb(); | 744 | smp_mb(); |
633 | if (waitqueue_active(&buf->read_wait)) | 745 | if (waitqueue_active(&buf->read_wait)) |
634 | /* | 746 | /* |
@@ -832,6 +944,10 @@ static void relay_file_read_consume(struct rchan_buf *buf, | |||
832 | size_t n_subbufs = buf->chan->n_subbufs; | 944 | size_t n_subbufs = buf->chan->n_subbufs; |
833 | size_t read_subbuf; | 945 | size_t read_subbuf; |
834 | 946 | ||
947 | if (buf->subbufs_produced == buf->subbufs_consumed && | ||
948 | buf->offset == buf->bytes_consumed) | ||
949 | return; | ||
950 | |||
835 | if (buf->bytes_consumed + bytes_consumed > subbuf_size) { | 951 | if (buf->bytes_consumed + bytes_consumed > subbuf_size) { |
836 | relay_subbufs_consumed(buf->chan, buf->cpu, 1); | 952 | relay_subbufs_consumed(buf->chan, buf->cpu, 1); |
837 | buf->bytes_consumed = 0; | 953 | buf->bytes_consumed = 0; |
@@ -863,6 +979,8 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos) | |||
863 | 979 | ||
864 | relay_file_read_consume(buf, read_pos, 0); | 980 | relay_file_read_consume(buf, read_pos, 0); |
865 | 981 | ||
982 | consumed = buf->subbufs_consumed; | ||
983 | |||
866 | if (unlikely(buf->offset > subbuf_size)) { | 984 | if (unlikely(buf->offset > subbuf_size)) { |
867 | if (produced == consumed) | 985 | if (produced == consumed) |
868 | return 0; | 986 | return 0; |
@@ -881,8 +999,12 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos) | |||
881 | if (consumed > produced) | 999 | if (consumed > produced) |
882 | produced += n_subbufs * subbuf_size; | 1000 | produced += n_subbufs * subbuf_size; |
883 | 1001 | ||
884 | if (consumed == produced) | 1002 | if (consumed == produced) { |
1003 | if (buf->offset == subbuf_size && | ||
1004 | buf->subbufs_produced > buf->subbufs_consumed) | ||
1005 | return 1; | ||
885 | return 0; | 1006 | return 0; |
1007 | } | ||
886 | 1008 | ||
887 | return 1; | 1009 | return 1; |
888 | } | 1010 | } |
@@ -1237,4 +1359,4 @@ static __init int relay_init(void) | |||
1237 | return 0; | 1359 | return 0; |
1238 | } | 1360 | } |
1239 | 1361 | ||
1240 | module_init(relay_init); | 1362 | early_initcall(relay_init); |
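
relay_open() may now be handed a NULL base_filename to get a buffer-only channel, which is what makes very early tracing possible; the debugfs files are attached later with relay_late_setup_files(). A hedged usage sketch (channel sizes, directory name and the callback struct are made up):

static struct rchan *early_chan;

static int __init early_trace_buffers(void)
{
        /* buffer-only: no filename, no parent, usable before VFS/debugfs are up */
        early_chan = relay_open(NULL, NULL, 4096, 16, &mytrace_callbacks, NULL);
        return early_chan ? 0 : -ENOMEM;
}

static int __init early_trace_files(void)
{
        struct dentry *dir = debugfs_create_dir("mytrace", NULL);

        /* creates one file per online cpu and binds it to the existing buffers */
        return relay_late_setup_files(early_chan, "cpu", dir);
}
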
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index d3c61b4ebef2..f275c8eca772 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/res_counter.h> | 14 | #include <linux/res_counter.h> |
15 | #include <linux/uaccess.h> | 15 | #include <linux/uaccess.h> |
16 | #include <linux/mm.h> | ||
16 | 17 | ||
17 | void res_counter_init(struct res_counter *counter) | 18 | void res_counter_init(struct res_counter *counter) |
18 | { | 19 | { |
@@ -102,44 +103,37 @@ u64 res_counter_read_u64(struct res_counter *counter, int member) | |||
102 | return *res_counter_member(counter, member); | 103 | return *res_counter_member(counter, member); |
103 | } | 104 | } |
104 | 105 | ||
105 | ssize_t res_counter_write(struct res_counter *counter, int member, | 106 | int res_counter_memparse_write_strategy(const char *buf, |
106 | const char __user *userbuf, size_t nbytes, loff_t *pos, | 107 | unsigned long long *res) |
107 | int (*write_strategy)(char *st_buf, unsigned long long *val)) | ||
108 | { | 108 | { |
109 | int ret; | 109 | char *end; |
110 | char *buf, *end; | 110 | /* FIXME - make memparse() take const char* args */ |
111 | unsigned long flags; | 111 | *res = memparse((char *)buf, &end); |
112 | unsigned long long tmp, *val; | 112 | if (*end != '\0') |
113 | 113 | return -EINVAL; | |
114 | buf = kmalloc(nbytes + 1, GFP_KERNEL); | ||
115 | ret = -ENOMEM; | ||
116 | if (buf == NULL) | ||
117 | goto out; | ||
118 | 114 | ||
119 | buf[nbytes] = '\0'; | 115 | *res = PAGE_ALIGN(*res); |
120 | ret = -EFAULT; | 116 | return 0; |
121 | if (copy_from_user(buf, userbuf, nbytes)) | 117 | } |
122 | goto out_free; | ||
123 | 118 | ||
124 | ret = -EINVAL; | 119 | int res_counter_write(struct res_counter *counter, int member, |
120 | const char *buf, write_strategy_fn write_strategy) | ||
121 | { | ||
122 | char *end; | ||
123 | unsigned long flags; | ||
124 | unsigned long long tmp, *val; | ||
125 | 125 | ||
126 | strstrip(buf); | ||
127 | if (write_strategy) { | 126 | if (write_strategy) { |
128 | if (write_strategy(buf, &tmp)) { | 127 | if (write_strategy(buf, &tmp)) |
129 | goto out_free; | 128 | return -EINVAL; |
130 | } | ||
131 | } else { | 129 | } else { |
132 | tmp = simple_strtoull(buf, &end, 10); | 130 | tmp = simple_strtoull(buf, &end, 10); |
133 | if (*end != '\0') | 131 | if (*end != '\0') |
134 | goto out_free; | 132 | return -EINVAL; |
135 | } | 133 | } |
136 | spin_lock_irqsave(&counter->lock, flags); | 134 | spin_lock_irqsave(&counter->lock, flags); |
137 | val = res_counter_member(counter, member); | 135 | val = res_counter_member(counter, member); |
138 | *val = tmp; | 136 | *val = tmp; |
139 | spin_unlock_irqrestore(&counter->lock, flags); | 137 | spin_unlock_irqrestore(&counter->lock, flags); |
140 | ret = nbytes; | 138 | return 0; |
141 | out_free: | ||
142 | kfree(buf); | ||
143 | out: | ||
144 | return ret; | ||
145 | } | 139 | } |
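
res_counter_write() now takes an already-copied, stripped kernel string plus an optional parsing strategy, and res_counter_memparse_write_strategy() supplies the usual "memparse, then page-align" behavior for memory-style limits. A sketch of a cgroup write_string handler built on it (the controller and its container-of accessor are hypothetical):

static int mycg_write_limit(struct cgroup *cgrp, struct cftype *cft,
                            const char *buffer)
{
        struct res_counter *cnt = &cgroup_to_mycg(cgrp)->res;   /* hypothetical accessor */

        /* accepts plain numbers as well as suffixed values such as "64M" or "1G" */
        return res_counter_write(cnt, RES_LIMIT, buffer,
                                 res_counter_memparse_write_strategy);
}
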
diff --git a/kernel/resource.c b/kernel/resource.c index 74af2d7cb5a1..03d796c1b2e9 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -362,35 +362,21 @@ int allocate_resource(struct resource *root, struct resource *new, | |||
362 | 362 | ||
363 | EXPORT_SYMBOL(allocate_resource); | 363 | EXPORT_SYMBOL(allocate_resource); |
364 | 364 | ||
365 | /** | 365 | /* |
366 | * insert_resource - Inserts a resource in the resource tree | 366 | * Insert a resource into the resource tree. If successful, return NULL, |
367 | * @parent: parent of the new resource | 367 | * otherwise return the conflicting resource (compare to __request_resource()) |
368 | * @new: new resource to insert | ||
369 | * | ||
370 | * Returns 0 on success, -EBUSY if the resource can't be inserted. | ||
371 | * | ||
372 | * This function is equivalent to request_resource when no conflict | ||
373 | * happens. If a conflict happens, and the conflicting resources | ||
374 | * entirely fit within the range of the new resource, then the new | ||
375 | * resource is inserted and the conflicting resources become children of | ||
376 | * the new resource. | ||
377 | */ | 368 | */ |
378 | int insert_resource(struct resource *parent, struct resource *new) | 369 | static struct resource * __insert_resource(struct resource *parent, struct resource *new) |
379 | { | 370 | { |
380 | int result; | ||
381 | struct resource *first, *next; | 371 | struct resource *first, *next; |
382 | 372 | ||
383 | write_lock(&resource_lock); | ||
384 | |||
385 | for (;; parent = first) { | 373 | for (;; parent = first) { |
386 | result = 0; | ||
387 | first = __request_resource(parent, new); | 374 | first = __request_resource(parent, new); |
388 | if (!first) | 375 | if (!first) |
389 | goto out; | 376 | return first; |
390 | 377 | ||
391 | result = -EBUSY; | ||
392 | if (first == parent) | 378 | if (first == parent) |
393 | goto out; | 379 | return first; |
394 | 380 | ||
395 | if ((first->start > new->start) || (first->end < new->end)) | 381 | if ((first->start > new->start) || (first->end < new->end)) |
396 | break; | 382 | break; |
@@ -401,15 +387,13 @@ int insert_resource(struct resource *parent, struct resource *new) | |||
401 | for (next = first; ; next = next->sibling) { | 387 | for (next = first; ; next = next->sibling) { |
402 | /* Partial overlap? Bad, and unfixable */ | 388 | /* Partial overlap? Bad, and unfixable */ |
403 | if (next->start < new->start || next->end > new->end) | 389 | if (next->start < new->start || next->end > new->end) |
404 | goto out; | 390 | return next; |
405 | if (!next->sibling) | 391 | if (!next->sibling) |
406 | break; | 392 | break; |
407 | if (next->sibling->start > new->end) | 393 | if (next->sibling->start > new->end) |
408 | break; | 394 | break; |
409 | } | 395 | } |
410 | 396 | ||
411 | result = 0; | ||
412 | |||
413 | new->parent = parent; | 397 | new->parent = parent; |
414 | new->sibling = next->sibling; | 398 | new->sibling = next->sibling; |
415 | new->child = first; | 399 | new->child = first; |
@@ -426,10 +410,64 @@ int insert_resource(struct resource *parent, struct resource *new) | |||
426 | next = next->sibling; | 410 | next = next->sibling; |
427 | next->sibling = new; | 411 | next->sibling = new; |
428 | } | 412 | } |
413 | return NULL; | ||
414 | } | ||
429 | 415 | ||
430 | out: | 416 | /** |
417 | * insert_resource - Inserts a resource in the resource tree | ||
418 | * @parent: parent of the new resource | ||
419 | * @new: new resource to insert | ||
420 | * | ||
421 | * Returns 0 on success, -EBUSY if the resource can't be inserted. | ||
422 | * | ||
423 | * This function is equivalent to request_resource when no conflict | ||
424 | * happens. If a conflict happens, and the conflicting resources | ||
425 | * entirely fit within the range of the new resource, then the new | ||
426 | * resource is inserted and the conflicting resources become children of | ||
427 | * the new resource. | ||
428 | */ | ||
429 | int insert_resource(struct resource *parent, struct resource *new) | ||
430 | { | ||
431 | struct resource *conflict; | ||
432 | |||
433 | write_lock(&resource_lock); | ||
434 | conflict = __insert_resource(parent, new); | ||
435 | write_unlock(&resource_lock); | ||
436 | return conflict ? -EBUSY : 0; | ||
437 | } | ||
438 | |||
439 | /** | ||
440 | * insert_resource_expand_to_fit - Insert a resource into the resource tree | ||
441 | * @root: root resource descriptor | ||
442 | * @new: new resource to insert | ||
443 | * | ||
444 | * Insert a resource into the resource tree, possibly expanding it in order | ||
445 | * to make it encompass any conflicting resources. | ||
446 | */ | ||
447 | void insert_resource_expand_to_fit(struct resource *root, struct resource *new) | ||
448 | { | ||
449 | if (new->parent) | ||
450 | return; | ||
451 | |||
452 | write_lock(&resource_lock); | ||
453 | for (;;) { | ||
454 | struct resource *conflict; | ||
455 | |||
456 | conflict = __insert_resource(root, new); | ||
457 | if (!conflict) | ||
458 | break; | ||
459 | if (conflict == root) | ||
460 | break; | ||
461 | |||
462 | /* Ok, expand resource to cover the conflict, then try again .. */ | ||
463 | if (conflict->start < new->start) | ||
464 | new->start = conflict->start; | ||
465 | if (conflict->end > new->end) | ||
466 | new->end = conflict->end; | ||
467 | |||
468 | printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name); | ||
469 | } | ||
431 | write_unlock(&resource_lock); | 470 | write_unlock(&resource_lock); |
432 | return result; | ||
433 | } | 471 | } |
434 | 472 | ||
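[Editorial aside, not part of the patch] The two helpers above are meant to be combined: insert_resource() refuses partial overlaps with -EBUSY, while insert_resource_expand_to_fit() grows the new resource until it covers every conflict. A firmware-reservation path could therefore try the strict variant first and fall back to the expanding one. A minimal sketch, with a hypothetical resource name and range:

#include <linux/ioport.h>
#include <linux/init.h>

static struct resource fw_res = {
	.name  = "Firmware reserved",	/* hypothetical */
	.flags = IORESOURCE_MEM,
};

static void __init claim_fw_range(resource_size_t start, resource_size_t end)
{
	fw_res.start = start;
	fw_res.end   = end;

	/* strict insert first; expand over any conflicts if that fails */
	if (insert_resource(&iomem_resource, &fw_res))
		insert_resource_expand_to_fit(&iomem_resource, &fw_res);
}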
435 | /** | 473 | /** |
@@ -490,7 +528,7 @@ resource_size_t resource_alignment(struct resource *res) | |||
490 | { | 528 | { |
491 | switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { | 529 | switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { |
492 | case IORESOURCE_SIZEALIGN: | 530 | case IORESOURCE_SIZEALIGN: |
493 | return res->end - res->start + 1; | 531 | return resource_size(res); |
494 | case IORESOURCE_STARTALIGN: | 532 | case IORESOURCE_STARTALIGN: |
495 | return res->start; | 533 | return res->start; |
496 | default: | 534 | default: |
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index 092e4c620af9..a56f629b057a 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c | |||
@@ -297,8 +297,8 @@ static int test_func(void *data) | |||
297 | * | 297 | * |
298 | * opcode:data | 298 | * opcode:data |
299 | */ | 299 | */ |
300 | static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, | 300 | static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr, |
301 | size_t count) | 301 | const char *buf, size_t count) |
302 | { | 302 | { |
303 | struct sched_param schedpar; | 303 | struct sched_param schedpar; |
304 | struct test_thread_data *td; | 304 | struct test_thread_data *td; |
@@ -360,7 +360,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf, | |||
360 | * @dev: thread to query | 360 | * @dev: thread to query |
361 | * @buf: char buffer to be filled with thread status info | 361 | * @buf: char buffer to be filled with thread status info |
362 | */ | 362 | */ |
363 | static ssize_t sysfs_test_status(struct sys_device *dev, char *buf) | 363 | static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr, |
364 | char *buf) | ||
364 | { | 365 | { |
365 | struct test_thread_data *td; | 366 | struct test_thread_data *td; |
366 | struct task_struct *tsk; | 367 | struct task_struct *tsk; |
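[Editorial aside, not part of the patch] For reference, the prototypes this file is being converted to look roughly like the sketch below; show/store callbacks now also receive the struct sysdev_attribute being accessed. The attribute and function names here are invented for illustration:

#include <linux/sysdev.h>

static ssize_t example_show(struct sys_device *dev,
			    struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", attr->attr.name);
}

static ssize_t example_store(struct sys_device *dev,
			     struct sysdev_attribute *attr,
			     const char *buf, size_t count)
{
	/* parse 'buf' and update per-device state here */
	return count;
}

static SYSDEV_ATTR(example, 0644, example_show, example_store);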
diff --git a/kernel/sched.c b/kernel/sched.c index 99e6d850ecab..13dd2db9fb2d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -571,8 +571,10 @@ struct rq { | |||
571 | #endif | 571 | #endif |
572 | 572 | ||
573 | #ifdef CONFIG_SCHED_HRTICK | 573 | #ifdef CONFIG_SCHED_HRTICK |
574 | unsigned long hrtick_flags; | 574 | #ifdef CONFIG_SMP |
575 | ktime_t hrtick_expire; | 575 | int hrtick_csd_pending; |
576 | struct call_single_data hrtick_csd; | ||
577 | #endif | ||
576 | struct hrtimer hrtick_timer; | 578 | struct hrtimer hrtick_timer; |
577 | #endif | 579 | #endif |
578 | 580 | ||
@@ -598,7 +600,6 @@ struct rq { | |||
598 | /* BKL stats */ | 600 | /* BKL stats */ |
599 | unsigned int bkl_count; | 601 | unsigned int bkl_count; |
600 | #endif | 602 | #endif |
601 | struct lock_class_key rq_lock_key; | ||
602 | }; | 603 | }; |
603 | 604 | ||
604 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); | 605 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
@@ -807,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32; | |||
807 | 808 | ||
808 | /* | 809 | /* |
809 | * ratelimit for updating the group shares. | 810 | * ratelimit for updating the group shares. |
810 | * default: 0.5ms | 811 | * default: 0.25ms |
811 | */ | 812 | */ |
812 | const_debug unsigned int sysctl_sched_shares_ratelimit = 500000; | 813 | unsigned int sysctl_sched_shares_ratelimit = 250000; |
813 | 814 | ||
814 | /* | 815 | /* |
815 | * period over which we measure -rt task cpu usage in us. | 816 | * period over which we measure -rt task cpu usage in us. |
@@ -832,7 +833,7 @@ static inline u64 global_rt_period(void) | |||
832 | 833 | ||
833 | static inline u64 global_rt_runtime(void) | 834 | static inline u64 global_rt_runtime(void) |
834 | { | 835 | { |
835 | if (sysctl_sched_rt_period < 0) | 836 | if (sysctl_sched_rt_runtime < 0) |
836 | return RUNTIME_INF; | 837 | return RUNTIME_INF; |
837 | 838 | ||
838 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; | 839 | return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC; |
@@ -983,13 +984,6 @@ static struct rq *this_rq_lock(void) | |||
983 | return rq; | 984 | return rq; |
984 | } | 985 | } |
985 | 986 | ||
986 | static void __resched_task(struct task_struct *p, int tif_bit); | ||
987 | |||
988 | static inline void resched_task(struct task_struct *p) | ||
989 | { | ||
990 | __resched_task(p, TIF_NEED_RESCHED); | ||
991 | } | ||
992 | |||
993 | #ifdef CONFIG_SCHED_HRTICK | 987 | #ifdef CONFIG_SCHED_HRTICK |
994 | /* | 988 | /* |
995 | * Use HR-timers to deliver accurate preemption points. | 989 | * Use HR-timers to deliver accurate preemption points. |
@@ -1001,25 +995,6 @@ static inline void resched_task(struct task_struct *p) | |||
1001 | * When we get rescheduled we reprogram the hrtick_timer outside of the | 995 | * When we get rescheduled we reprogram the hrtick_timer outside of the |
1002 | * rq->lock. | 996 | * rq->lock. |
1003 | */ | 997 | */ |
1004 | static inline void resched_hrt(struct task_struct *p) | ||
1005 | { | ||
1006 | __resched_task(p, TIF_HRTICK_RESCHED); | ||
1007 | } | ||
1008 | |||
1009 | static inline void resched_rq(struct rq *rq) | ||
1010 | { | ||
1011 | unsigned long flags; | ||
1012 | |||
1013 | spin_lock_irqsave(&rq->lock, flags); | ||
1014 | resched_task(rq->curr); | ||
1015 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1016 | } | ||
1017 | |||
1018 | enum { | ||
1019 | HRTICK_SET, /* re-programm hrtick_timer */ | ||
1020 | HRTICK_RESET, /* not a new slice */ | ||
1021 | HRTICK_BLOCK, /* stop hrtick operations */ | ||
1022 | }; | ||
1023 | 998 | ||
1024 | /* | 999 | /* |
1025 | * Use hrtick when: | 1000 | * Use hrtick when: |
@@ -1030,40 +1005,11 @@ static inline int hrtick_enabled(struct rq *rq) | |||
1030 | { | 1005 | { |
1031 | if (!sched_feat(HRTICK)) | 1006 | if (!sched_feat(HRTICK)) |
1032 | return 0; | 1007 | return 0; |
1033 | if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags))) | 1008 | if (!cpu_active(cpu_of(rq))) |
1034 | return 0; | 1009 | return 0; |
1035 | return hrtimer_is_hres_active(&rq->hrtick_timer); | 1010 | return hrtimer_is_hres_active(&rq->hrtick_timer); |
1036 | } | 1011 | } |
1037 | 1012 | ||
1038 | /* | ||
1039 | * Called to set the hrtick timer state. | ||
1040 | * | ||
1041 | * called with rq->lock held and irqs disabled | ||
1042 | */ | ||
1043 | static void hrtick_start(struct rq *rq, u64 delay, int reset) | ||
1044 | { | ||
1045 | assert_spin_locked(&rq->lock); | ||
1046 | |||
1047 | /* | ||
1048 | * preempt at: now + delay | ||
1049 | */ | ||
1050 | rq->hrtick_expire = | ||
1051 | ktime_add_ns(rq->hrtick_timer.base->get_time(), delay); | ||
1052 | /* | ||
1053 | * indicate we need to program the timer | ||
1054 | */ | ||
1055 | __set_bit(HRTICK_SET, &rq->hrtick_flags); | ||
1056 | if (reset) | ||
1057 | __set_bit(HRTICK_RESET, &rq->hrtick_flags); | ||
1058 | |||
1059 | /* | ||
1060 | * New slices are called from the schedule path and don't need a | ||
1061 | * forced reschedule. | ||
1062 | */ | ||
1063 | if (reset) | ||
1064 | resched_hrt(rq->curr); | ||
1065 | } | ||
1066 | |||
1067 | static void hrtick_clear(struct rq *rq) | 1013 | static void hrtick_clear(struct rq *rq) |
1068 | { | 1014 | { |
1069 | if (hrtimer_active(&rq->hrtick_timer)) | 1015 | if (hrtimer_active(&rq->hrtick_timer)) |
@@ -1071,32 +1017,6 @@ static void hrtick_clear(struct rq *rq) | |||
1071 | } | 1017 | } |
1072 | 1018 | ||
1073 | /* | 1019 | /* |
1074 | * Update the timer from the possible pending state. | ||
1075 | */ | ||
1076 | static void hrtick_set(struct rq *rq) | ||
1077 | { | ||
1078 | ktime_t time; | ||
1079 | int set, reset; | ||
1080 | unsigned long flags; | ||
1081 | |||
1082 | WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); | ||
1083 | |||
1084 | spin_lock_irqsave(&rq->lock, flags); | ||
1085 | set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags); | ||
1086 | reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags); | ||
1087 | time = rq->hrtick_expire; | ||
1088 | clear_thread_flag(TIF_HRTICK_RESCHED); | ||
1089 | spin_unlock_irqrestore(&rq->lock, flags); | ||
1090 | |||
1091 | if (set) { | ||
1092 | hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS); | ||
1093 | if (reset && !hrtimer_active(&rq->hrtick_timer)) | ||
1094 | resched_rq(rq); | ||
1095 | } else | ||
1096 | hrtick_clear(rq); | ||
1097 | } | ||
1098 | |||
1099 | /* | ||
1100 | * High-resolution timer tick. | 1020 | * High-resolution timer tick. |
1101 | * Runs from hardirq context with interrupts disabled. | 1021 | * Runs from hardirq context with interrupts disabled. |
1102 | */ | 1022 | */ |
@@ -1115,27 +1035,37 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) | |||
1115 | } | 1035 | } |
1116 | 1036 | ||
1117 | #ifdef CONFIG_SMP | 1037 | #ifdef CONFIG_SMP |
1118 | static void hotplug_hrtick_disable(int cpu) | 1038 | /* |
1039 | * called from hardirq (IPI) context | ||
1040 | */ | ||
1041 | static void __hrtick_start(void *arg) | ||
1119 | { | 1042 | { |
1120 | struct rq *rq = cpu_rq(cpu); | 1043 | struct rq *rq = arg; |
1121 | unsigned long flags; | ||
1122 | 1044 | ||
1123 | spin_lock_irqsave(&rq->lock, flags); | 1045 | spin_lock(&rq->lock); |
1124 | rq->hrtick_flags = 0; | 1046 | hrtimer_restart(&rq->hrtick_timer); |
1125 | __set_bit(HRTICK_BLOCK, &rq->hrtick_flags); | 1047 | rq->hrtick_csd_pending = 0; |
1126 | spin_unlock_irqrestore(&rq->lock, flags); | 1048 | spin_unlock(&rq->lock); |
1127 | |||
1128 | hrtick_clear(rq); | ||
1129 | } | 1049 | } |
1130 | 1050 | ||
1131 | static void hotplug_hrtick_enable(int cpu) | 1051 | /* |
1052 | * Called to set the hrtick timer state. | ||
1053 | * | ||
1054 | * called with rq->lock held and irqs disabled | ||
1055 | */ | ||
1056 | static void hrtick_start(struct rq *rq, u64 delay) | ||
1132 | { | 1057 | { |
1133 | struct rq *rq = cpu_rq(cpu); | 1058 | struct hrtimer *timer = &rq->hrtick_timer; |
1134 | unsigned long flags; | 1059 | ktime_t time = ktime_add_ns(timer->base->get_time(), delay); |
1135 | 1060 | ||
1136 | spin_lock_irqsave(&rq->lock, flags); | 1061 | timer->expires = time; |
1137 | __clear_bit(HRTICK_BLOCK, &rq->hrtick_flags); | 1062 | |
1138 | spin_unlock_irqrestore(&rq->lock, flags); | 1063 | if (rq == this_rq()) { |
1064 | hrtimer_restart(timer); | ||
1065 | } else if (!rq->hrtick_csd_pending) { | ||
1066 | __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd); | ||
1067 | rq->hrtick_csd_pending = 1; | ||
1068 | } | ||
1139 | } | 1069 | } |
1140 | 1070 | ||
1141 | static int | 1071 | static int |
@@ -1150,66 +1080,56 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
1150 | case CPU_DOWN_PREPARE_FROZEN: | 1080 | case CPU_DOWN_PREPARE_FROZEN: |
1151 | case CPU_DEAD: | 1081 | case CPU_DEAD: |
1152 | case CPU_DEAD_FROZEN: | 1082 | case CPU_DEAD_FROZEN: |
1153 | hotplug_hrtick_disable(cpu); | 1083 | hrtick_clear(cpu_rq(cpu)); |
1154 | return NOTIFY_OK; | ||
1155 | |||
1156 | case CPU_UP_PREPARE: | ||
1157 | case CPU_UP_PREPARE_FROZEN: | ||
1158 | case CPU_DOWN_FAILED: | ||
1159 | case CPU_DOWN_FAILED_FROZEN: | ||
1160 | case CPU_ONLINE: | ||
1161 | case CPU_ONLINE_FROZEN: | ||
1162 | hotplug_hrtick_enable(cpu); | ||
1163 | return NOTIFY_OK; | 1084 | return NOTIFY_OK; |
1164 | } | 1085 | } |
1165 | 1086 | ||
1166 | return NOTIFY_DONE; | 1087 | return NOTIFY_DONE; |
1167 | } | 1088 | } |
1168 | 1089 | ||
1169 | static void init_hrtick(void) | 1090 | static __init void init_hrtick(void) |
1170 | { | 1091 | { |
1171 | hotcpu_notifier(hotplug_hrtick, 0); | 1092 | hotcpu_notifier(hotplug_hrtick, 0); |
1172 | } | 1093 | } |
1173 | #endif /* CONFIG_SMP */ | 1094 | #else |
1095 | /* | ||
1096 | * Called to set the hrtick timer state. | ||
1097 | * | ||
1098 | * called with rq->lock held and irqs disabled | ||
1099 | */ | ||
1100 | static void hrtick_start(struct rq *rq, u64 delay) | ||
1101 | { | ||
1102 | hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); | ||
1103 | } | ||
1174 | 1104 | ||
1175 | static void init_rq_hrtick(struct rq *rq) | 1105 | static void init_hrtick(void) |
1176 | { | 1106 | { |
1177 | rq->hrtick_flags = 0; | ||
1178 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
1179 | rq->hrtick_timer.function = hrtick; | ||
1180 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | ||
1181 | } | 1107 | } |
1108 | #endif /* CONFIG_SMP */ | ||
1182 | 1109 | ||
1183 | void hrtick_resched(void) | 1110 | static void init_rq_hrtick(struct rq *rq) |
1184 | { | 1111 | { |
1185 | struct rq *rq; | 1112 | #ifdef CONFIG_SMP |
1186 | unsigned long flags; | 1113 | rq->hrtick_csd_pending = 0; |
1187 | 1114 | ||
1188 | if (!test_thread_flag(TIF_HRTICK_RESCHED)) | 1115 | rq->hrtick_csd.flags = 0; |
1189 | return; | 1116 | rq->hrtick_csd.func = __hrtick_start; |
1117 | rq->hrtick_csd.info = rq; | ||
1118 | #endif | ||
1190 | 1119 | ||
1191 | local_irq_save(flags); | 1120 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1192 | rq = cpu_rq(smp_processor_id()); | 1121 | rq->hrtick_timer.function = hrtick; |
1193 | hrtick_set(rq); | 1122 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; |
1194 | local_irq_restore(flags); | ||
1195 | } | 1123 | } |
1196 | #else | 1124 | #else |
1197 | static inline void hrtick_clear(struct rq *rq) | 1125 | static inline void hrtick_clear(struct rq *rq) |
1198 | { | 1126 | { |
1199 | } | 1127 | } |
1200 | 1128 | ||
1201 | static inline void hrtick_set(struct rq *rq) | ||
1202 | { | ||
1203 | } | ||
1204 | |||
1205 | static inline void init_rq_hrtick(struct rq *rq) | 1129 | static inline void init_rq_hrtick(struct rq *rq) |
1206 | { | 1130 | { |
1207 | } | 1131 | } |
1208 | 1132 | ||
1209 | void hrtick_resched(void) | ||
1210 | { | ||
1211 | } | ||
1212 | |||
1213 | static inline void init_hrtick(void) | 1133 | static inline void init_hrtick(void) |
1214 | { | 1134 | { |
1215 | } | 1135 | } |
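[Editorial aside, not part of the patch] The remote-arming path above boils down to a generic pattern: package the work in a struct call_single_data and fire it at the target CPU with __smp_call_function_single() whenever the hrtimer must be restarted on another runqueue's CPU. A simplified sketch (hypothetical names; it omits the hrtick_csd_pending guard the real code uses, and assumes the caller runs with irqs disabled as hrtick_start() does):

#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/percpu.h>

static void remote_restart(void *arg)
{
	struct hrtimer *timer = arg;

	/* runs in hardirq (IPI) context on the target CPU */
	hrtimer_restart(timer);
}

static DEFINE_PER_CPU(struct call_single_data, restart_csd);

static void arm_timer_on(int cpu, struct hrtimer *timer)
{
	struct call_single_data *csd = &per_cpu(restart_csd, cpu);

	if (cpu == smp_processor_id()) {
		hrtimer_restart(timer);		/* local: no IPI needed */
	} else {
		csd->func = remote_restart;
		csd->info = timer;
		__smp_call_function_single(cpu, csd);
	}
}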
@@ -1228,16 +1148,16 @@ static inline void init_hrtick(void) | |||
1228 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) | 1148 | #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) |
1229 | #endif | 1149 | #endif |
1230 | 1150 | ||
1231 | static void __resched_task(struct task_struct *p, int tif_bit) | 1151 | static void resched_task(struct task_struct *p) |
1232 | { | 1152 | { |
1233 | int cpu; | 1153 | int cpu; |
1234 | 1154 | ||
1235 | assert_spin_locked(&task_rq(p)->lock); | 1155 | assert_spin_locked(&task_rq(p)->lock); |
1236 | 1156 | ||
1237 | if (unlikely(test_tsk_thread_flag(p, tif_bit))) | 1157 | if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) |
1238 | return; | 1158 | return; |
1239 | 1159 | ||
1240 | set_tsk_thread_flag(p, tif_bit); | 1160 | set_tsk_thread_flag(p, TIF_NEED_RESCHED); |
1241 | 1161 | ||
1242 | cpu = task_cpu(p); | 1162 | cpu = task_cpu(p); |
1243 | if (cpu == smp_processor_id()) | 1163 | if (cpu == smp_processor_id()) |
@@ -1303,10 +1223,10 @@ void wake_up_idle_cpu(int cpu) | |||
1303 | #endif /* CONFIG_NO_HZ */ | 1223 | #endif /* CONFIG_NO_HZ */ |
1304 | 1224 | ||
1305 | #else /* !CONFIG_SMP */ | 1225 | #else /* !CONFIG_SMP */ |
1306 | static void __resched_task(struct task_struct *p, int tif_bit) | 1226 | static void resched_task(struct task_struct *p) |
1307 | { | 1227 | { |
1308 | assert_spin_locked(&task_rq(p)->lock); | 1228 | assert_spin_locked(&task_rq(p)->lock); |
1309 | set_tsk_thread_flag(p, tif_bit); | 1229 | set_tsk_need_resched(p); |
1310 | } | 1230 | } |
1311 | #endif /* CONFIG_SMP */ | 1231 | #endif /* CONFIG_SMP */ |
1312 | 1232 | ||
@@ -1946,16 +1866,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req) | |||
1946 | /* | 1866 | /* |
1947 | * wait_task_inactive - wait for a thread to unschedule. | 1867 | * wait_task_inactive - wait for a thread to unschedule. |
1948 | * | 1868 | * |
1869 | * If @match_state is nonzero, it's the @p->state value just checked and | ||
1870 | * not expected to change. If it changes, i.e. @p might have woken up, | ||
1871 | * then return zero. When we succeed in waiting for @p to be off its CPU, | ||
1872 | * we return a positive number (its total switch count). If a second call | ||
1873 | * a short while later returns the same number, the caller can be sure that | ||
1874 | * @p has remained unscheduled the whole time. | ||
1875 | * | ||
1949 | * The caller must ensure that the task *will* unschedule sometime soon, | 1876 | * The caller must ensure that the task *will* unschedule sometime soon, |
1950 | * else this function might spin for a *long* time. This function can't | 1877 | * else this function might spin for a *long* time. This function can't |
1951 | * be called with interrupts off, or it may introduce deadlock with | 1878 | * be called with interrupts off, or it may introduce deadlock with |
1952 | * smp_call_function() if an IPI is sent by the same process we are | 1879 | * smp_call_function() if an IPI is sent by the same process we are |
1953 | * waiting to become inactive. | 1880 | * waiting to become inactive. |
1954 | */ | 1881 | */ |
1955 | void wait_task_inactive(struct task_struct *p) | 1882 | unsigned long wait_task_inactive(struct task_struct *p, long match_state) |
1956 | { | 1883 | { |
1957 | unsigned long flags; | 1884 | unsigned long flags; |
1958 | int running, on_rq; | 1885 | int running, on_rq; |
1886 | unsigned long ncsw; | ||
1959 | struct rq *rq; | 1887 | struct rq *rq; |
1960 | 1888 | ||
1961 | for (;;) { | 1889 | for (;;) { |
@@ -1978,8 +1906,11 @@ void wait_task_inactive(struct task_struct *p) | |||
1978 | * return false if the runqueue has changed and p | 1906 | * return false if the runqueue has changed and p |
1979 | * is actually now running somewhere else! | 1907 | * is actually now running somewhere else! |
1980 | */ | 1908 | */ |
1981 | while (task_running(rq, p)) | 1909 | while (task_running(rq, p)) { |
1910 | if (match_state && unlikely(p->state != match_state)) | ||
1911 | return 0; | ||
1982 | cpu_relax(); | 1912 | cpu_relax(); |
1913 | } | ||
1983 | 1914 | ||
1984 | /* | 1915 | /* |
1985 | * Ok, time to look more closely! We need the rq | 1916 | * Ok, time to look more closely! We need the rq |
@@ -1989,9 +1920,21 @@ void wait_task_inactive(struct task_struct *p) | |||
1989 | rq = task_rq_lock(p, &flags); | 1920 | rq = task_rq_lock(p, &flags); |
1990 | running = task_running(rq, p); | 1921 | running = task_running(rq, p); |
1991 | on_rq = p->se.on_rq; | 1922 | on_rq = p->se.on_rq; |
1923 | ncsw = 0; | ||
1924 | if (!match_state || p->state == match_state) { | ||
1925 | ncsw = p->nivcsw + p->nvcsw; | ||
1926 | if (unlikely(!ncsw)) | ||
1927 | ncsw = 1; | ||
1928 | } | ||
1992 | task_rq_unlock(rq, &flags); | 1929 | task_rq_unlock(rq, &flags); |
1993 | 1930 | ||
1994 | /* | 1931 | /* |
1932 | * If it changed from the expected state, bail out now. | ||
1933 | */ | ||
1934 | if (unlikely(!ncsw)) | ||
1935 | break; | ||
1936 | |||
1937 | /* | ||
1995 | * Was it really running after all now that we | 1938 | * Was it really running after all now that we |
1996 | * checked with the proper locks actually held? | 1939 | * checked with the proper locks actually held? |
1997 | * | 1940 | * |
@@ -2023,6 +1966,8 @@ void wait_task_inactive(struct task_struct *p) | |||
2023 | */ | 1966 | */ |
2024 | break; | 1967 | break; |
2025 | } | 1968 | } |
1969 | |||
1970 | return ncsw; | ||
2026 | } | 1971 | } |
2027 | 1972 | ||
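[Editorial aside, not part of the patch] A hypothetical caller of the new interface: pass in the state just observed, treat a zero return as "the task changed state, start over", and compare two successive nonzero returns to prove the task never ran in between. Helper name and error codes are illustrative only:

#include <linux/sched.h>
#include <linux/errno.h>

static int ensure_stopped_and_idle(struct task_struct *child)
{
	unsigned long ncsw, ncsw2;

	ncsw = wait_task_inactive(child, TASK_TRACED);
	if (!ncsw)
		return -EAGAIN;		/* woke up or changed state */

	ncsw2 = wait_task_inactive(child, TASK_TRACED);
	if (ncsw2 != ncsw)
		return -EAGAIN;		/* it ran in the meantime */

	return 0;			/* off-CPU the whole time */
}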
2028 | /*** | 1973 | /*** |
@@ -2108,7 +2053,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2108 | /* Tally up the load of all CPUs in the group */ | 2053 | /* Tally up the load of all CPUs in the group */ |
2109 | avg_load = 0; | 2054 | avg_load = 0; |
2110 | 2055 | ||
2111 | for_each_cpu_mask(i, group->cpumask) { | 2056 | for_each_cpu_mask_nr(i, group->cpumask) { |
2112 | /* Bias balancing toward cpus of our domain */ | 2057 | /* Bias balancing toward cpus of our domain */ |
2113 | if (local_group) | 2058 | if (local_group) |
2114 | load = source_load(i, load_idx); | 2059 | load = source_load(i, load_idx); |
@@ -2150,7 +2095,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, | |||
2150 | /* Traverse only the allowed CPUs */ | 2095 | /* Traverse only the allowed CPUs */ |
2151 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 2096 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); |
2152 | 2097 | ||
2153 | for_each_cpu_mask(i, *tmp) { | 2098 | for_each_cpu_mask_nr(i, *tmp) { |
2154 | load = weighted_cpuload(i); | 2099 | load = weighted_cpuload(i); |
2155 | 2100 | ||
2156 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2101 | if (load < min_load || (load == min_load && i == this_cpu)) { |
@@ -2813,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2) | |||
2813 | } else { | 2758 | } else { |
2814 | if (rq1 < rq2) { | 2759 | if (rq1 < rq2) { |
2815 | spin_lock(&rq1->lock); | 2760 | spin_lock(&rq1->lock); |
2816 | spin_lock(&rq2->lock); | 2761 | spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); |
2817 | } else { | 2762 | } else { |
2818 | spin_lock(&rq2->lock); | 2763 | spin_lock(&rq2->lock); |
2819 | spin_lock(&rq1->lock); | 2764 | spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); |
2820 | } | 2765 | } |
2821 | } | 2766 | } |
2822 | update_rq_clock(rq1); | 2767 | update_rq_clock(rq1); |
@@ -2859,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
2859 | if (busiest < this_rq) { | 2804 | if (busiest < this_rq) { |
2860 | spin_unlock(&this_rq->lock); | 2805 | spin_unlock(&this_rq->lock); |
2861 | spin_lock(&busiest->lock); | 2806 | spin_lock(&busiest->lock); |
2862 | spin_lock(&this_rq->lock); | 2807 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); |
2863 | ret = 1; | 2808 | ret = 1; |
2864 | } else | 2809 | } else |
2865 | spin_lock(&busiest->lock); | 2810 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); |
2866 | } | 2811 | } |
2867 | return ret; | 2812 | return ret; |
2868 | } | 2813 | } |
2869 | 2814 | ||
2815 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
2816 | __releases(busiest->lock) | ||
2817 | { | ||
2818 | spin_unlock(&busiest->lock); | ||
2819 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
2820 | } | ||
2821 | |||
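[Editorial aside, not part of the patch] The locking changes above all follow one pattern: when two locks of the same lockdep class must be held at once, take them in a fixed address order and annotate the inner acquisition with spin_lock_nested() so lockdep does not report a false ABBA deadlock; the unlock side then uses lock_set_subclass() to drop the held lock back to subclass 0. A standalone sketch with hypothetical lock names:

#include <linux/spinlock.h>

static void lock_rq_pair(spinlock_t *a, spinlock_t *b)
{
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}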
2870 | /* | 2822 | /* |
2871 | * If dest_cpu is allowed for this process, migrate the task to it. | 2823 | * If dest_cpu is allowed for this process, migrate the task to it. |
2872 | * This is accomplished by forcing the cpu_allowed mask to only | 2824 | * This is accomplished by forcing the cpu_allowed mask to only |
@@ -2881,7 +2833,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2881 | 2833 | ||
2882 | rq = task_rq_lock(p, &flags); | 2834 | rq = task_rq_lock(p, &flags); |
2883 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2835 | if (!cpu_isset(dest_cpu, p->cpus_allowed) |
2884 | || unlikely(cpu_is_offline(dest_cpu))) | 2836 | || unlikely(!cpu_active(dest_cpu))) |
2885 | goto out; | 2837 | goto out; |
2886 | 2838 | ||
2887 | /* force the process onto the specified CPU */ | 2839 | /* force the process onto the specified CPU */ |
@@ -3168,7 +3120,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3168 | max_cpu_load = 0; | 3120 | max_cpu_load = 0; |
3169 | min_cpu_load = ~0UL; | 3121 | min_cpu_load = ~0UL; |
3170 | 3122 | ||
3171 | for_each_cpu_mask(i, group->cpumask) { | 3123 | for_each_cpu_mask_nr(i, group->cpumask) { |
3172 | struct rq *rq; | 3124 | struct rq *rq; |
3173 | 3125 | ||
3174 | if (!cpu_isset(i, *cpus)) | 3126 | if (!cpu_isset(i, *cpus)) |
@@ -3447,7 +3399,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3447 | unsigned long max_load = 0; | 3399 | unsigned long max_load = 0; |
3448 | int i; | 3400 | int i; |
3449 | 3401 | ||
3450 | for_each_cpu_mask(i, group->cpumask) { | 3402 | for_each_cpu_mask_nr(i, group->cpumask) { |
3451 | unsigned long wl; | 3403 | unsigned long wl; |
3452 | 3404 | ||
3453 | if (!cpu_isset(i, *cpus)) | 3405 | if (!cpu_isset(i, *cpus)) |
@@ -3691,7 +3643,7 @@ redo: | |||
3691 | ld_moved = move_tasks(this_rq, this_cpu, busiest, | 3643 | ld_moved = move_tasks(this_rq, this_cpu, busiest, |
3692 | imbalance, sd, CPU_NEWLY_IDLE, | 3644 | imbalance, sd, CPU_NEWLY_IDLE, |
3693 | &all_pinned); | 3645 | &all_pinned); |
3694 | spin_unlock(&busiest->lock); | 3646 | double_unlock_balance(this_rq, busiest); |
3695 | 3647 | ||
3696 | if (unlikely(all_pinned)) { | 3648 | if (unlikely(all_pinned)) { |
3697 | cpu_clear(cpu_of(busiest), *cpus); | 3649 | cpu_clear(cpu_of(busiest), *cpus); |
@@ -3806,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3806 | else | 3758 | else |
3807 | schedstat_inc(sd, alb_failed); | 3759 | schedstat_inc(sd, alb_failed); |
3808 | } | 3760 | } |
3809 | spin_unlock(&target_rq->lock); | 3761 | double_unlock_balance(busiest_rq, target_rq); |
3810 | } | 3762 | } |
3811 | 3763 | ||
3812 | #ifdef CONFIG_NO_HZ | 3764 | #ifdef CONFIG_NO_HZ |
@@ -3849,7 +3801,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3849 | /* | 3801 | /* |
3850 | * If we are going offline and still the leader, give up! | 3802 | * If we are going offline and still the leader, give up! |
3851 | */ | 3803 | */ |
3852 | if (cpu_is_offline(cpu) && | 3804 | if (!cpu_active(cpu) && |
3853 | atomic_read(&nohz.load_balancer) == cpu) { | 3805 | atomic_read(&nohz.load_balancer) == cpu) { |
3854 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3806 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
3855 | BUG(); | 3807 | BUG(); |
@@ -3989,7 +3941,7 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
3989 | int balance_cpu; | 3941 | int balance_cpu; |
3990 | 3942 | ||
3991 | cpu_clear(this_cpu, cpus); | 3943 | cpu_clear(this_cpu, cpus); |
3992 | for_each_cpu_mask(balance_cpu, cpus) { | 3944 | for_each_cpu_mask_nr(balance_cpu, cpus) { |
3993 | /* | 3945 | /* |
3994 | * If this cpu gets work to do, stop the load balancing | 3946 | * If this cpu gets work to do, stop the load balancing |
3995 | * work being done for other cpus. Next load | 3947 | * work being done for other cpus. Next load |
@@ -4125,6 +4077,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime) | |||
4125 | cpustat->nice = cputime64_add(cpustat->nice, tmp); | 4077 | cpustat->nice = cputime64_add(cpustat->nice, tmp); |
4126 | else | 4078 | else |
4127 | cpustat->user = cputime64_add(cpustat->user, tmp); | 4079 | cpustat->user = cputime64_add(cpustat->user, tmp); |
4080 | /* Account for user time used */ | ||
4081 | acct_update_integrals(p); | ||
4128 | } | 4082 | } |
4129 | 4083 | ||
4130 | /* | 4084 | /* |
@@ -4225,6 +4179,65 @@ void account_steal_time(struct task_struct *p, cputime_t steal) | |||
4225 | } | 4179 | } |
4226 | 4180 | ||
4227 | /* | 4181 | /* |
4182 | * Use precise platform statistics if available: | ||
4183 | */ | ||
4184 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
4185 | cputime_t task_utime(struct task_struct *p) | ||
4186 | { | ||
4187 | return p->utime; | ||
4188 | } | ||
4189 | |||
4190 | cputime_t task_stime(struct task_struct *p) | ||
4191 | { | ||
4192 | return p->stime; | ||
4193 | } | ||
4194 | #else | ||
4195 | cputime_t task_utime(struct task_struct *p) | ||
4196 | { | ||
4197 | clock_t utime = cputime_to_clock_t(p->utime), | ||
4198 | total = utime + cputime_to_clock_t(p->stime); | ||
4199 | u64 temp; | ||
4200 | |||
4201 | /* | ||
4202 | * Use CFS's precise accounting: | ||
4203 | */ | ||
4204 | temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime); | ||
4205 | |||
4206 | if (total) { | ||
4207 | temp *= utime; | ||
4208 | do_div(temp, total); | ||
4209 | } | ||
4210 | utime = (clock_t)temp; | ||
4211 | |||
4212 | p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime)); | ||
4213 | return p->prev_utime; | ||
4214 | } | ||
4215 | |||
4216 | cputime_t task_stime(struct task_struct *p) | ||
4217 | { | ||
4218 | clock_t stime; | ||
4219 | |||
4220 | /* | ||
4221 | * Use CFS's precise accounting. (we subtract utime from | ||
4222 | * the total, to make sure the total observed by userspace | ||
4223 | * grows monotonically - apps rely on that): | ||
4224 | */ | ||
4225 | stime = nsec_to_clock_t(p->se.sum_exec_runtime) - | ||
4226 | cputime_to_clock_t(task_utime(p)); | ||
4227 | |||
4228 | if (stime >= 0) | ||
4229 | p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime)); | ||
4230 | |||
4231 | return p->prev_stime; | ||
4232 | } | ||
4233 | #endif | ||
4234 | |||
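[Editorial aside, not part of the patch] A worked example of the scaling above, with made-up numbers: suppose the sampled counters say utime = 30 ticks and stime = 10 ticks (total = 40), while CFS has measured a sum_exec_runtime equivalent to 100 ticks. Then task_utime() reports 100 * 30 / 40 = 75 ticks, task_stime() reports 100 - 75 = 25 ticks, and both results are clamped against prev_utime/prev_stime so the values userspace observes never go backwards.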
4235 | inline cputime_t task_gtime(struct task_struct *p) | ||
4236 | { | ||
4237 | return p->gtime; | ||
4238 | } | ||
4239 | |||
4240 | /* | ||
4228 | * This function gets called by the timer code, with HZ frequency. | 4241 | * This function gets called by the timer code, with HZ frequency. |
4229 | * We call it with interrupts disabled. | 4242 | * We call it with interrupts disabled. |
4230 | * | 4243 | * |
@@ -4395,7 +4408,7 @@ asmlinkage void __sched schedule(void) | |||
4395 | struct task_struct *prev, *next; | 4408 | struct task_struct *prev, *next; |
4396 | unsigned long *switch_count; | 4409 | unsigned long *switch_count; |
4397 | struct rq *rq; | 4410 | struct rq *rq; |
4398 | int cpu, hrtick = sched_feat(HRTICK); | 4411 | int cpu; |
4399 | 4412 | ||
4400 | need_resched: | 4413 | need_resched: |
4401 | preempt_disable(); | 4414 | preempt_disable(); |
@@ -4410,7 +4423,7 @@ need_resched_nonpreemptible: | |||
4410 | 4423 | ||
4411 | schedule_debug(prev); | 4424 | schedule_debug(prev); |
4412 | 4425 | ||
4413 | if (hrtick) | 4426 | if (sched_feat(HRTICK)) |
4414 | hrtick_clear(rq); | 4427 | hrtick_clear(rq); |
4415 | 4428 | ||
4416 | /* | 4429 | /* |
@@ -4457,9 +4470,6 @@ need_resched_nonpreemptible: | |||
4457 | } else | 4470 | } else |
4458 | spin_unlock_irq(&rq->lock); | 4471 | spin_unlock_irq(&rq->lock); |
4459 | 4472 | ||
4460 | if (hrtick) | ||
4461 | hrtick_set(rq); | ||
4462 | |||
4463 | if (unlikely(reacquire_kernel_lock(current) < 0)) | 4473 | if (unlikely(reacquire_kernel_lock(current) < 0)) |
4464 | goto need_resched_nonpreemptible; | 4474 | goto need_resched_nonpreemptible; |
4465 | 4475 | ||
@@ -4718,6 +4728,52 @@ int __sched wait_for_completion_killable(struct completion *x) | |||
4718 | } | 4728 | } |
4719 | EXPORT_SYMBOL(wait_for_completion_killable); | 4729 | EXPORT_SYMBOL(wait_for_completion_killable); |
4720 | 4730 | ||
4731 | /** | ||
4732 | * try_wait_for_completion - try to decrement a completion without blocking | ||
4733 | * @x: completion structure | ||
4734 | * | ||
4735 | * Returns: 0 if a decrement cannot be done without blocking | ||
4736 | * 1 if a decrement succeeded. | ||
4737 | * | ||
4738 | * If a completion is being used as a counting completion, | ||
4739 | * attempt to decrement the counter without blocking. This | ||
4740 | * enables us to avoid waiting if the resource the completion | ||
4741 | * is protecting is not available. | ||
4742 | */ | ||
4743 | bool try_wait_for_completion(struct completion *x) | ||
4744 | { | ||
4745 | int ret = 1; | ||
4746 | |||
4747 | spin_lock_irq(&x->wait.lock); | ||
4748 | if (!x->done) | ||
4749 | ret = 0; | ||
4750 | else | ||
4751 | x->done--; | ||
4752 | spin_unlock_irq(&x->wait.lock); | ||
4753 | return ret; | ||
4754 | } | ||
4755 | EXPORT_SYMBOL(try_wait_for_completion); | ||
4756 | |||
4757 | /** | ||
4758 | * completion_done - Test to see if a completion has any waiters | ||
4759 | * @x: completion structure | ||
4760 | * | ||
4761 | * Returns: 0 if there are waiters (wait_for_completion() in progress) | ||
4762 | * 1 if there are no waiters. | ||
4763 | * | ||
4764 | */ | ||
4765 | bool completion_done(struct completion *x) | ||
4766 | { | ||
4767 | int ret = 1; | ||
4768 | |||
4769 | spin_lock_irq(&x->wait.lock); | ||
4770 | if (!x->done) | ||
4771 | ret = 0; | ||
4772 | spin_unlock_irq(&x->wait.lock); | ||
4773 | return ret; | ||
4774 | } | ||
4775 | EXPORT_SYMBOL(completion_done); | ||
4776 | |||
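[Editorial aside, not part of the patch] One way the new helpers could be used, as a hypothetical sketch: a producer calls complete() once per available item, and a consumer on a fast path claims an item only if one has already been signalled, without ever sleeping:

#include <linux/completion.h>

static bool try_take_item(struct completion *items_ready)
{
	if (!try_wait_for_completion(items_ready))
		return false;	/* nothing signalled yet; caller may retry later */

	/* one completion was consumed; claim the corresponding item here */
	return true;
}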
4721 | static long __sched | 4777 | static long __sched |
4722 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) | 4778 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) |
4723 | { | 4779 | { |
@@ -5059,19 +5115,21 @@ recheck: | |||
5059 | return -EPERM; | 5115 | return -EPERM; |
5060 | } | 5116 | } |
5061 | 5117 | ||
5118 | if (user) { | ||
5062 | #ifdef CONFIG_RT_GROUP_SCHED | 5119 | #ifdef CONFIG_RT_GROUP_SCHED |
5063 | /* | 5120 | /* |
5064 | * Do not allow realtime tasks into groups that have no runtime | 5121 | * Do not allow realtime tasks into groups that have no runtime |
5065 | * assigned. | 5122 | * assigned. |
5066 | */ | 5123 | */ |
5067 | if (user | 5124 | if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) |
5068 | && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) | 5125 | return -EPERM; |
5069 | return -EPERM; | ||
5070 | #endif | 5126 | #endif |
5071 | 5127 | ||
5072 | retval = security_task_setscheduler(p, policy, param); | 5128 | retval = security_task_setscheduler(p, policy, param); |
5073 | if (retval) | 5129 | if (retval) |
5074 | return retval; | 5130 | return retval; |
5131 | } | ||
5132 | |||
5075 | /* | 5133 | /* |
5076 | * make sure no PI-waiters arrive (or leave) while we are | 5134 | * make sure no PI-waiters arrive (or leave) while we are |
5077 | * changing the priority of the task: | 5135 | * changing the priority of the task: |
@@ -5787,6 +5845,8 @@ static inline void sched_init_granularity(void) | |||
5787 | sysctl_sched_latency = limit; | 5845 | sysctl_sched_latency = limit; |
5788 | 5846 | ||
5789 | sysctl_sched_wakeup_granularity *= factor; | 5847 | sysctl_sched_wakeup_granularity *= factor; |
5848 | |||
5849 | sysctl_sched_shares_ratelimit *= factor; | ||
5790 | } | 5850 | } |
5791 | 5851 | ||
5792 | #ifdef CONFIG_SMP | 5852 | #ifdef CONFIG_SMP |
@@ -5876,7 +5936,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
5876 | struct rq *rq_dest, *rq_src; | 5936 | struct rq *rq_dest, *rq_src; |
5877 | int ret = 0, on_rq; | 5937 | int ret = 0, on_rq; |
5878 | 5938 | ||
5879 | if (unlikely(cpu_is_offline(dest_cpu))) | 5939 | if (unlikely(!cpu_active(dest_cpu))) |
5880 | return ret; | 5940 | return ret; |
5881 | 5941 | ||
5882 | rq_src = cpu_rq(src_cpu); | 5942 | rq_src = cpu_rq(src_cpu); |
@@ -6469,7 +6529,7 @@ static struct notifier_block __cpuinitdata migration_notifier = { | |||
6469 | .priority = 10 | 6529 | .priority = 10 |
6470 | }; | 6530 | }; |
6471 | 6531 | ||
6472 | void __init migration_init(void) | 6532 | static int __init migration_init(void) |
6473 | { | 6533 | { |
6474 | void *cpu = (void *)(long)smp_processor_id(); | 6534 | void *cpu = (void *)(long)smp_processor_id(); |
6475 | int err; | 6535 | int err; |
@@ -6479,7 +6539,10 @@ void __init migration_init(void) | |||
6479 | BUG_ON(err == NOTIFY_BAD); | 6539 | BUG_ON(err == NOTIFY_BAD); |
6480 | migration_call(&migration_notifier, CPU_ONLINE, cpu); | 6540 | migration_call(&migration_notifier, CPU_ONLINE, cpu); |
6481 | register_cpu_notifier(&migration_notifier); | 6541 | register_cpu_notifier(&migration_notifier); |
6542 | |||
6543 | return err; | ||
6482 | } | 6544 | } |
6545 | early_initcall(migration_init); | ||
6483 | #endif | 6546 | #endif |
6484 | 6547 | ||
6485 | #ifdef CONFIG_SMP | 6548 | #ifdef CONFIG_SMP |
@@ -6768,7 +6831,8 @@ static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | |||
6768 | /* Setup the mask of cpus configured for isolated domains */ | 6831 | /* Setup the mask of cpus configured for isolated domains */ |
6769 | static int __init isolated_cpu_setup(char *str) | 6832 | static int __init isolated_cpu_setup(char *str) |
6770 | { | 6833 | { |
6771 | int ints[NR_CPUS], i; | 6834 | static int __initdata ints[NR_CPUS]; |
6835 | int i; | ||
6772 | 6836 | ||
6773 | str = get_options(str, ARRAY_SIZE(ints), ints); | 6837 | str = get_options(str, ARRAY_SIZE(ints), ints); |
6774 | cpus_clear(cpu_isolated_map); | 6838 | cpus_clear(cpu_isolated_map); |
@@ -6802,7 +6866,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | |||
6802 | 6866 | ||
6803 | cpus_clear(*covered); | 6867 | cpus_clear(*covered); |
6804 | 6868 | ||
6805 | for_each_cpu_mask(i, *span) { | 6869 | for_each_cpu_mask_nr(i, *span) { |
6806 | struct sched_group *sg; | 6870 | struct sched_group *sg; |
6807 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 6871 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6808 | int j; | 6872 | int j; |
@@ -6813,7 +6877,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | |||
6813 | cpus_clear(sg->cpumask); | 6877 | cpus_clear(sg->cpumask); |
6814 | sg->__cpu_power = 0; | 6878 | sg->__cpu_power = 0; |
6815 | 6879 | ||
6816 | for_each_cpu_mask(j, *span) { | 6880 | for_each_cpu_mask_nr(j, *span) { |
6817 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 6881 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6818 | continue; | 6882 | continue; |
6819 | 6883 | ||
@@ -7013,7 +7077,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7013 | if (!sg) | 7077 | if (!sg) |
7014 | return; | 7078 | return; |
7015 | do { | 7079 | do { |
7016 | for_each_cpu_mask(j, sg->cpumask) { | 7080 | for_each_cpu_mask_nr(j, sg->cpumask) { |
7017 | struct sched_domain *sd; | 7081 | struct sched_domain *sd; |
7018 | 7082 | ||
7019 | sd = &per_cpu(phys_domains, j); | 7083 | sd = &per_cpu(phys_domains, j); |
@@ -7038,7 +7102,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7038 | { | 7102 | { |
7039 | int cpu, i; | 7103 | int cpu, i; |
7040 | 7104 | ||
7041 | for_each_cpu_mask(cpu, *cpu_map) { | 7105 | for_each_cpu_mask_nr(cpu, *cpu_map) { |
7042 | struct sched_group **sched_group_nodes | 7106 | struct sched_group **sched_group_nodes |
7043 | = sched_group_nodes_bycpu[cpu]; | 7107 | = sched_group_nodes_bycpu[cpu]; |
7044 | 7108 | ||
@@ -7277,7 +7341,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7277 | /* | 7341 | /* |
7278 | * Set up domains for cpus specified by the cpu_map. | 7342 | * Set up domains for cpus specified by the cpu_map. |
7279 | */ | 7343 | */ |
7280 | for_each_cpu_mask(i, *cpu_map) { | 7344 | for_each_cpu_mask_nr(i, *cpu_map) { |
7281 | struct sched_domain *sd = NULL, *p; | 7345 | struct sched_domain *sd = NULL, *p; |
7282 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7346 | SCHED_CPUMASK_VAR(nodemask, allmasks); |
7283 | 7347 | ||
@@ -7344,7 +7408,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7344 | 7408 | ||
7345 | #ifdef CONFIG_SCHED_SMT | 7409 | #ifdef CONFIG_SCHED_SMT |
7346 | /* Set up CPU (sibling) groups */ | 7410 | /* Set up CPU (sibling) groups */ |
7347 | for_each_cpu_mask(i, *cpu_map) { | 7411 | for_each_cpu_mask_nr(i, *cpu_map) { |
7348 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7412 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); |
7349 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7413 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
7350 | 7414 | ||
@@ -7361,7 +7425,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7361 | 7425 | ||
7362 | #ifdef CONFIG_SCHED_MC | 7426 | #ifdef CONFIG_SCHED_MC |
7363 | /* Set up multi-core groups */ | 7427 | /* Set up multi-core groups */ |
7364 | for_each_cpu_mask(i, *cpu_map) { | 7428 | for_each_cpu_mask_nr(i, *cpu_map) { |
7365 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7429 | SCHED_CPUMASK_VAR(this_core_map, allmasks); |
7366 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7430 | SCHED_CPUMASK_VAR(send_covered, allmasks); |
7367 | 7431 | ||
@@ -7428,7 +7492,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7428 | goto error; | 7492 | goto error; |
7429 | } | 7493 | } |
7430 | sched_group_nodes[i] = sg; | 7494 | sched_group_nodes[i] = sg; |
7431 | for_each_cpu_mask(j, *nodemask) { | 7495 | for_each_cpu_mask_nr(j, *nodemask) { |
7432 | struct sched_domain *sd; | 7496 | struct sched_domain *sd; |
7433 | 7497 | ||
7434 | sd = &per_cpu(node_domains, j); | 7498 | sd = &per_cpu(node_domains, j); |
@@ -7474,21 +7538,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7474 | 7538 | ||
7475 | /* Calculate CPU power for physical packages and nodes */ | 7539 | /* Calculate CPU power for physical packages and nodes */ |
7476 | #ifdef CONFIG_SCHED_SMT | 7540 | #ifdef CONFIG_SCHED_SMT |
7477 | for_each_cpu_mask(i, *cpu_map) { | 7541 | for_each_cpu_mask_nr(i, *cpu_map) { |
7478 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7542 | struct sched_domain *sd = &per_cpu(cpu_domains, i); |
7479 | 7543 | ||
7480 | init_sched_groups_power(i, sd); | 7544 | init_sched_groups_power(i, sd); |
7481 | } | 7545 | } |
7482 | #endif | 7546 | #endif |
7483 | #ifdef CONFIG_SCHED_MC | 7547 | #ifdef CONFIG_SCHED_MC |
7484 | for_each_cpu_mask(i, *cpu_map) { | 7548 | for_each_cpu_mask_nr(i, *cpu_map) { |
7485 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7549 | struct sched_domain *sd = &per_cpu(core_domains, i); |
7486 | 7550 | ||
7487 | init_sched_groups_power(i, sd); | 7551 | init_sched_groups_power(i, sd); |
7488 | } | 7552 | } |
7489 | #endif | 7553 | #endif |
7490 | 7554 | ||
7491 | for_each_cpu_mask(i, *cpu_map) { | 7555 | for_each_cpu_mask_nr(i, *cpu_map) { |
7492 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7556 | struct sched_domain *sd = &per_cpu(phys_domains, i); |
7493 | 7557 | ||
7494 | init_sched_groups_power(i, sd); | 7558 | init_sched_groups_power(i, sd); |
@@ -7508,7 +7572,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7508 | #endif | 7572 | #endif |
7509 | 7573 | ||
7510 | /* Attach the domains */ | 7574 | /* Attach the domains */ |
7511 | for_each_cpu_mask(i, *cpu_map) { | 7575 | for_each_cpu_mask_nr(i, *cpu_map) { |
7512 | struct sched_domain *sd; | 7576 | struct sched_domain *sd; |
7513 | #ifdef CONFIG_SCHED_SMT | 7577 | #ifdef CONFIG_SCHED_SMT |
7514 | sd = &per_cpu(cpu_domains, i); | 7578 | sd = &per_cpu(cpu_domains, i); |
@@ -7553,18 +7617,6 @@ void __attribute__((weak)) arch_update_cpu_topology(void) | |||
7553 | } | 7617 | } |
7554 | 7618 | ||
7555 | /* | 7619 | /* |
7556 | * Free current domain masks. | ||
7557 | * Called after all cpus are attached to NULL domain. | ||
7558 | */ | ||
7559 | static void free_sched_domains(void) | ||
7560 | { | ||
7561 | ndoms_cur = 0; | ||
7562 | if (doms_cur != &fallback_doms) | ||
7563 | kfree(doms_cur); | ||
7564 | doms_cur = &fallback_doms; | ||
7565 | } | ||
7566 | |||
7567 | /* | ||
7568 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. | 7620 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
7569 | * For now this just excludes isolated cpus, but could be used to | 7621 | * For now this just excludes isolated cpus, but could be used to |
7570 | * exclude other special cases in the future. | 7622 | * exclude other special cases in the future. |
@@ -7603,7 +7655,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
7603 | 7655 | ||
7604 | unregister_sched_domain_sysctl(); | 7656 | unregister_sched_domain_sysctl(); |
7605 | 7657 | ||
7606 | for_each_cpu_mask(i, *cpu_map) | 7658 | for_each_cpu_mask_nr(i, *cpu_map) |
7607 | cpu_attach_domain(NULL, &def_root_domain, i); | 7659 | cpu_attach_domain(NULL, &def_root_domain, i); |
7608 | synchronize_sched(); | 7660 | synchronize_sched(); |
7609 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7661 | arch_destroy_sched_domains(cpu_map, &tmpmask); |
@@ -7642,30 +7694,29 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7642 | * ownership of it and will kfree it when done with it. If the caller | 7694 | * ownership of it and will kfree it when done with it. If the caller |
7643 | * failed the kmalloc call, then it can pass in doms_new == NULL, | 7695 | * failed the kmalloc call, then it can pass in doms_new == NULL, |
7644 | * and partition_sched_domains() will fallback to the single partition | 7696 | * and partition_sched_domains() will fallback to the single partition |
7645 | * 'fallback_doms'. | 7697 | * 'fallback_doms', it also forces the domains to be rebuilt. |
7698 | * | ||
7699 | * If doms_new==NULL it will be replaced with cpu_online_map. | ||
7700 | * ndoms_new==0 is a special case for destroying existing domains. | ||
7701 | * It will not create the default domain. | ||
7646 | * | 7702 | * |
7647 | * Call with hotplug lock held | 7703 | * Call with hotplug lock held |
7648 | */ | 7704 | */ |
7649 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7705 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, |
7650 | struct sched_domain_attr *dattr_new) | 7706 | struct sched_domain_attr *dattr_new) |
7651 | { | 7707 | { |
7652 | int i, j; | 7708 | int i, j, n; |
7653 | 7709 | ||
7654 | mutex_lock(&sched_domains_mutex); | 7710 | mutex_lock(&sched_domains_mutex); |
7655 | 7711 | ||
7656 | /* always unregister in case we don't destroy any domains */ | 7712 | /* always unregister in case we don't destroy any domains */ |
7657 | unregister_sched_domain_sysctl(); | 7713 | unregister_sched_domain_sysctl(); |
7658 | 7714 | ||
7659 | if (doms_new == NULL) { | 7715 | n = doms_new ? ndoms_new : 0; |
7660 | ndoms_new = 1; | ||
7661 | doms_new = &fallback_doms; | ||
7662 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | ||
7663 | dattr_new = NULL; | ||
7664 | } | ||
7665 | 7716 | ||
7666 | /* Destroy deleted domains */ | 7717 | /* Destroy deleted domains */ |
7667 | for (i = 0; i < ndoms_cur; i++) { | 7718 | for (i = 0; i < ndoms_cur; i++) { |
7668 | for (j = 0; j < ndoms_new; j++) { | 7719 | for (j = 0; j < n; j++) { |
7669 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7720 | if (cpus_equal(doms_cur[i], doms_new[j]) |
7670 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7721 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7671 | goto match1; | 7722 | goto match1; |
@@ -7676,6 +7727,13 @@ match1: | |||
7676 | ; | 7727 | ; |
7677 | } | 7728 | } |
7678 | 7729 | ||
7730 | if (doms_new == NULL) { | ||
7731 | ndoms_cur = 0; | ||
7732 | doms_new = &fallback_doms; | ||
7733 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | ||
7734 | dattr_new = NULL; | ||
7735 | } | ||
7736 | |||
7679 | /* Build new domains */ | 7737 | /* Build new domains */ |
7680 | for (i = 0; i < ndoms_new; i++) { | 7738 | for (i = 0; i < ndoms_new; i++) { |
7681 | for (j = 0; j < ndoms_cur; j++) { | 7739 | for (j = 0; j < ndoms_cur; j++) { |
@@ -7706,17 +7764,15 @@ match2: | |||
7706 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 7764 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
7707 | int arch_reinit_sched_domains(void) | 7765 | int arch_reinit_sched_domains(void) |
7708 | { | 7766 | { |
7709 | int err; | ||
7710 | |||
7711 | get_online_cpus(); | 7767 | get_online_cpus(); |
7712 | mutex_lock(&sched_domains_mutex); | 7768 | |
7713 | detach_destroy_domains(&cpu_online_map); | 7769 | /* Destroy domains first to force the rebuild */ |
7714 | free_sched_domains(); | 7770 | partition_sched_domains(0, NULL, NULL); |
7715 | err = arch_init_sched_domains(&cpu_online_map); | 7771 | |
7716 | mutex_unlock(&sched_domains_mutex); | 7772 | rebuild_sched_domains(); |
7717 | put_online_cpus(); | 7773 | put_online_cpus(); |
7718 | 7774 | ||
7719 | return err; | 7775 | return 0; |
7720 | } | 7776 | } |
7721 | 7777 | ||
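[Editorial aside, not part of the patch] The rewritten arch_reinit_sched_domains() relies on the convention documented earlier: ndoms_new == 0 tears down all current domains without building the default one, so the follow-up rebuild starts from scratch rather than matching and keeping old partitions. A caller without cpusets could get the same effect with this illustrative sketch:

#include <linux/cpu.h>
#include <linux/sched.h>

static void force_default_domain_rebuild(void)
{
	get_online_cpus();
	partition_sched_domains(0, NULL, NULL);	/* destroy, build nothing */
	partition_sched_domains(1, NULL, NULL);	/* rebuild fallback_doms   */
	put_online_cpus();
}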
7722 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | 7778 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
@@ -7737,30 +7793,34 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | |||
7737 | } | 7793 | } |
7738 | 7794 | ||
7739 | #ifdef CONFIG_SCHED_MC | 7795 | #ifdef CONFIG_SCHED_MC |
7740 | static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page) | 7796 | static ssize_t sched_mc_power_savings_show(struct sysdev_class *class, |
7797 | char *page) | ||
7741 | { | 7798 | { |
7742 | return sprintf(page, "%u\n", sched_mc_power_savings); | 7799 | return sprintf(page, "%u\n", sched_mc_power_savings); |
7743 | } | 7800 | } |
7744 | static ssize_t sched_mc_power_savings_store(struct sys_device *dev, | 7801 | static ssize_t sched_mc_power_savings_store(struct sysdev_class *class, |
7745 | const char *buf, size_t count) | 7802 | const char *buf, size_t count) |
7746 | { | 7803 | { |
7747 | return sched_power_savings_store(buf, count, 0); | 7804 | return sched_power_savings_store(buf, count, 0); |
7748 | } | 7805 | } |
7749 | static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show, | 7806 | static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644, |
7750 | sched_mc_power_savings_store); | 7807 | sched_mc_power_savings_show, |
7808 | sched_mc_power_savings_store); | ||
7751 | #endif | 7809 | #endif |
7752 | 7810 | ||
7753 | #ifdef CONFIG_SCHED_SMT | 7811 | #ifdef CONFIG_SCHED_SMT |
7754 | static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page) | 7812 | static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev, |
7813 | char *page) | ||
7755 | { | 7814 | { |
7756 | return sprintf(page, "%u\n", sched_smt_power_savings); | 7815 | return sprintf(page, "%u\n", sched_smt_power_savings); |
7757 | } | 7816 | } |
7758 | static ssize_t sched_smt_power_savings_store(struct sys_device *dev, | 7817 | static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev, |
7759 | const char *buf, size_t count) | 7818 | const char *buf, size_t count) |
7760 | { | 7819 | { |
7761 | return sched_power_savings_store(buf, count, 1); | 7820 | return sched_power_savings_store(buf, count, 1); |
7762 | } | 7821 | } |
7763 | static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show, | 7822 | static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, |
7823 | sched_smt_power_savings_show, | ||
7764 | sched_smt_power_savings_store); | 7824 | sched_smt_power_savings_store); |
7765 | #endif | 7825 | #endif |
7766 | 7826 | ||
@@ -7782,59 +7842,49 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | |||
7782 | } | 7842 | } |
7783 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ | 7843 | #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */ |
7784 | 7844 | ||
7845 | #ifndef CONFIG_CPUSETS | ||
7785 | /* | 7846 | /* |
7786 | * Force a reinitialization of the sched domains hierarchy. The domains | 7847 | * Add online and remove offline CPUs from the scheduler domains. |
7787 | * and groups cannot be updated in place without racing with the balancing | 7848 | * When cpusets are enabled they take over this function. |
7788 | * code, so we temporarily attach all running cpus to the NULL domain | ||
7789 | * which will prevent rebalancing while the sched domains are recalculated. | ||
7790 | */ | 7849 | */ |
7791 | static int update_sched_domains(struct notifier_block *nfb, | 7850 | static int update_sched_domains(struct notifier_block *nfb, |
7792 | unsigned long action, void *hcpu) | 7851 | unsigned long action, void *hcpu) |
7793 | { | 7852 | { |
7853 | switch (action) { | ||
7854 | case CPU_ONLINE: | ||
7855 | case CPU_ONLINE_FROZEN: | ||
7856 | case CPU_DEAD: | ||
7857 | case CPU_DEAD_FROZEN: | ||
7858 | partition_sched_domains(1, NULL, NULL); | ||
7859 | return NOTIFY_OK; | ||
7860 | |||
7861 | default: | ||
7862 | return NOTIFY_DONE; | ||
7863 | } | ||
7864 | } | ||
7865 | #endif | ||
7866 | |||
7867 | static int update_runtime(struct notifier_block *nfb, | ||
7868 | unsigned long action, void *hcpu) | ||
7869 | { | ||
7794 | int cpu = (int)(long)hcpu; | 7870 | int cpu = (int)(long)hcpu; |
7795 | 7871 | ||
7796 | switch (action) { | 7872 | switch (action) { |
7797 | case CPU_DOWN_PREPARE: | 7873 | case CPU_DOWN_PREPARE: |
7798 | case CPU_DOWN_PREPARE_FROZEN: | 7874 | case CPU_DOWN_PREPARE_FROZEN: |
7799 | disable_runtime(cpu_rq(cpu)); | 7875 | disable_runtime(cpu_rq(cpu)); |
7800 | /* fall-through */ | ||
7801 | case CPU_UP_PREPARE: | ||
7802 | case CPU_UP_PREPARE_FROZEN: | ||
7803 | detach_destroy_domains(&cpu_online_map); | ||
7804 | free_sched_domains(); | ||
7805 | return NOTIFY_OK; | 7876 | return NOTIFY_OK; |
7806 | 7877 | ||
7807 | |||
7808 | case CPU_DOWN_FAILED: | 7878 | case CPU_DOWN_FAILED: |
7809 | case CPU_DOWN_FAILED_FROZEN: | 7879 | case CPU_DOWN_FAILED_FROZEN: |
7810 | case CPU_ONLINE: | 7880 | case CPU_ONLINE: |
7811 | case CPU_ONLINE_FROZEN: | 7881 | case CPU_ONLINE_FROZEN: |
7812 | enable_runtime(cpu_rq(cpu)); | 7882 | enable_runtime(cpu_rq(cpu)); |
7813 | /* fall-through */ | 7883 | return NOTIFY_OK; |
7814 | case CPU_UP_CANCELED: | 7884 | |
7815 | case CPU_UP_CANCELED_FROZEN: | ||
7816 | case CPU_DEAD: | ||
7817 | case CPU_DEAD_FROZEN: | ||
7818 | /* | ||
7819 | * Fall through and re-initialise the domains. | ||
7820 | */ | ||
7821 | break; | ||
7822 | default: | 7885 | default: |
7823 | return NOTIFY_DONE; | 7886 | return NOTIFY_DONE; |
7824 | } | 7887 | } |
7825 | |||
7826 | #ifndef CONFIG_CPUSETS | ||
7827 | /* | ||
7828 | * Create default domain partitioning if cpusets are disabled. | ||
7829 | * Otherwise we let cpusets rebuild the domains based on the | ||
7830 | * current setup. | ||
7831 | */ | ||
7832 | |||
7833 | /* The hotplug lock is already held by cpu_up/cpu_down */ | ||
7834 | arch_init_sched_domains(&cpu_online_map); | ||
7835 | #endif | ||
7836 | |||
7837 | return NOTIFY_OK; | ||
7838 | } | 7888 | } |
7839 | 7889 | ||
7840 | void __init sched_init_smp(void) | 7890 | void __init sched_init_smp(void) |
@@ -7854,8 +7904,15 @@ void __init sched_init_smp(void) | |||
7854 | cpu_set(smp_processor_id(), non_isolated_cpus); | 7904 | cpu_set(smp_processor_id(), non_isolated_cpus); |
7855 | mutex_unlock(&sched_domains_mutex); | 7905 | mutex_unlock(&sched_domains_mutex); |
7856 | put_online_cpus(); | 7906 | put_online_cpus(); |
7907 | |||
7908 | #ifndef CONFIG_CPUSETS | ||
7857 | /* XXX: Theoretical race here - CPU may be hotplugged now */ | 7909 | /* XXX: Theoretical race here - CPU may be hotplugged now */ |
7858 | hotcpu_notifier(update_sched_domains, 0); | 7910 | hotcpu_notifier(update_sched_domains, 0); |
7911 | #endif | ||
7912 | |||
7913 | /* RT runtime code needs to handle some hotplug events */ | ||
7914 | hotcpu_notifier(update_runtime, 0); | ||
7915 | |||
7859 | init_hrtick(); | 7916 | init_hrtick(); |
7860 | 7917 | ||
7861 | /* Move init over to a non-isolated CPU */ | 7918 | /* Move init over to a non-isolated CPU */ |
@@ -8063,7 +8120,6 @@ void __init sched_init(void) | |||
8063 | 8120 | ||
8064 | rq = cpu_rq(i); | 8121 | rq = cpu_rq(i); |
8065 | spin_lock_init(&rq->lock); | 8122 | spin_lock_init(&rq->lock); |
8066 | lockdep_set_class(&rq->lock, &rq->rq_lock_key); | ||
8067 | rq->nr_running = 0; | 8123 | rq->nr_running = 0; |
8068 | init_cfs_rq(&rq->cfs, rq); | 8124 | init_cfs_rq(&rq->cfs, rq); |
8069 | init_rt_rq(&rq->rt, rq); | 8125 | init_rt_rq(&rq->rt, rq); |
@@ -8520,8 +8576,8 @@ struct task_group *sched_create_group(struct task_group *parent) | |||
8520 | WARN_ON(!parent); /* root should already exist */ | 8576 | WARN_ON(!parent); /* root should already exist */ |
8521 | 8577 | ||
8522 | tg->parent = parent; | 8578 | tg->parent = parent; |
8523 | list_add_rcu(&tg->siblings, &parent->children); | ||
8524 | INIT_LIST_HEAD(&tg->children); | 8579 | INIT_LIST_HEAD(&tg->children); |
8580 | list_add_rcu(&tg->siblings, &parent->children); | ||
8525 | spin_unlock_irqrestore(&task_group_lock, flags); | 8581 | spin_unlock_irqrestore(&task_group_lock, flags); |
8526 | 8582 | ||
8527 | return tg; | 8583 | return tg; |
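[Editorial aside, not part of the patch] The swap of the two lines above is an instance of the usual RCU publish rule: every field a reader may touch must be initialized before list_add_rcu() makes the object reachable. A generic sketch with a hypothetical structure:

#include <linux/list.h>
#include <linux/rculist.h>

struct example_group {
	struct list_head children;	/* RCU readers may walk this */
	struct list_head siblings;	/* linkage into the parent   */
};

static void publish_child(struct example_group *g, struct list_head *parent_children)
{
	INIT_LIST_HEAD(&g->children);			/* initialize first ...  */
	list_add_rcu(&g->siblings, parent_children);	/* ... then make visible */
}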
@@ -8853,6 +8909,9 @@ static int sched_rt_global_constraints(void) | |||
8853 | u64 rt_runtime, rt_period; | 8909 | u64 rt_runtime, rt_period; |
8854 | int ret = 0; | 8910 | int ret = 0; |
8855 | 8911 | ||
8912 | if (sysctl_sched_rt_period <= 0) | ||
8913 | return -EINVAL; | ||
8914 | |||
8856 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); | 8915 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
8857 | rt_runtime = tg->rt_bandwidth.rt_runtime; | 8916 | rt_runtime = tg->rt_bandwidth.rt_runtime; |
8858 | 8917 | ||
@@ -8869,6 +8928,9 @@ static int sched_rt_global_constraints(void) | |||
8869 | unsigned long flags; | 8928 | unsigned long flags; |
8870 | int i; | 8929 | int i; |
8871 | 8930 | ||
8931 | if (sysctl_sched_rt_period <= 0) | ||
8932 | return -EINVAL; | ||
8933 | |||
8872 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); | 8934 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
8873 | for_each_possible_cpu(i) { | 8935 | for_each_possible_cpu(i) { |
8874 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; | 8936 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 22ed55d1167f..e8ab096ddfe3 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c | |||
@@ -12,19 +12,17 @@ | |||
12 | * | 12 | * |
13 | * Create a semi stable clock from a mixture of other events, including: | 13 | * Create a semi stable clock from a mixture of other events, including: |
14 | * - gtod | 14 | * - gtod |
15 | * - jiffies | ||
16 | * - sched_clock() | 15 | * - sched_clock() |
17 | * - explicit idle events | 16 | * - explicit idle events |
18 | * | 17 | * |
19 | * We use gtod as base and the unstable clock deltas. The deltas are filtered, | 18 | * We use gtod as base and the unstable clock deltas. The deltas are filtered, |
20 | * making it monotonic and keeping it within an expected window. This window | 19 | * making it monotonic and keeping it within an expected window. |
21 | * is set up using jiffies. | ||
22 | * | 20 | * |
23 | * Furthermore, explicit sleep and wakeup hooks allow us to account for time | 21 | * Furthermore, explicit sleep and wakeup hooks allow us to account for time |
24 | * that is otherwise invisible (TSC gets stopped). | 22 | * that is otherwise invisible (TSC gets stopped). |
25 | * | 23 | * |
26 | * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat | 24 | * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat |
27 | * consistent between cpus (never more than 1 jiffies difference). | 25 | * consistent between cpus (never more than 2 jiffies difference). |
28 | */ | 26 | */ |
29 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
30 | #include <linux/percpu.h> | 28 | #include <linux/percpu.h> |
@@ -32,13 +30,19 @@ | |||
32 | #include <linux/ktime.h> | 30 | #include <linux/ktime.h> |
33 | #include <linux/module.h> | 31 | #include <linux/module.h> |
34 | 32 | ||
33 | /* | ||
34 | * Scheduler clock - returns current time in nanosec units. | ||
34 | * This is the default implementation. | ||
36 | * Architectures and sub-architectures can override this. | ||
37 | */ | ||
38 | unsigned long long __attribute__((weak)) sched_clock(void) | ||
39 | { | ||
40 | return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); | ||
41 | } | ||
35 | 42 | ||
36 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 43 | static __read_mostly int sched_clock_running; |
37 | 44 | ||
38 | #define MULTI_SHIFT 15 | 45 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK |
39 | /* Max is double, Min is 1/2 */ | ||
40 | #define MAX_MULTI (2LL << MULTI_SHIFT) | ||
41 | #define MIN_MULTI (1LL << (MULTI_SHIFT-1)) | ||
42 | 46 | ||
43 | struct sched_clock_data { | 47 | struct sched_clock_data { |
44 | /* | 48 | /* |
@@ -48,15 +52,9 @@ struct sched_clock_data { | |||
48 | */ | 52 | */ |
49 | raw_spinlock_t lock; | 53 | raw_spinlock_t lock; |
50 | 54 | ||
51 | unsigned long tick_jiffies; | ||
52 | u64 prev_raw; | ||
53 | u64 tick_raw; | 55 | u64 tick_raw; |
54 | u64 tick_gtod; | 56 | u64 tick_gtod; |
55 | u64 clock; | 57 | u64 clock; |
56 | s64 multi; | ||
57 | #ifdef CONFIG_NO_HZ | ||
58 | int check_max; | ||
59 | #endif | ||
60 | }; | 58 | }; |
61 | 59 | ||
62 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); | 60 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data); |
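
The weak `sched_clock()` fallback added earlier in this file simply scales the jiffies counter to nanoseconds, so on architectures without a better clock time only advances once per tick. A standalone model of that conversion (the HZ value here is assumed for the sketch):

#include <stdio.h>

#define HZ           250            /* assumed tick rate for the sketch */
#define NSEC_PER_SEC 1000000000ULL

/* Model of the weak default: the clock only moves in whole-tick steps. */
static unsigned long long sketch_sched_clock(unsigned long long jiffies)
{
	return jiffies * (NSEC_PER_SEC / HZ);
}

int main(void)
{
	/* 250 ticks at HZ=250 is exactly one second worth of nanoseconds. */
	printf("%llu\n", sketch_sched_clock(250));
	return 0;
}
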
@@ -71,121 +69,69 @@ static inline struct sched_clock_data *cpu_sdc(int cpu) | |||
71 | return &per_cpu(sched_clock_data, cpu); | 69 | return &per_cpu(sched_clock_data, cpu); |
72 | } | 70 | } |
73 | 71 | ||
74 | static __read_mostly int sched_clock_running; | ||
75 | |||
76 | void sched_clock_init(void) | 72 | void sched_clock_init(void) |
77 | { | 73 | { |
78 | u64 ktime_now = ktime_to_ns(ktime_get()); | 74 | u64 ktime_now = ktime_to_ns(ktime_get()); |
79 | unsigned long now_jiffies = jiffies; | ||
80 | int cpu; | 75 | int cpu; |
81 | 76 | ||
82 | for_each_possible_cpu(cpu) { | 77 | for_each_possible_cpu(cpu) { |
83 | struct sched_clock_data *scd = cpu_sdc(cpu); | 78 | struct sched_clock_data *scd = cpu_sdc(cpu); |
84 | 79 | ||
85 | scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 80 | scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
86 | scd->tick_jiffies = now_jiffies; | ||
87 | scd->prev_raw = 0; | ||
88 | scd->tick_raw = 0; | 81 | scd->tick_raw = 0; |
89 | scd->tick_gtod = ktime_now; | 82 | scd->tick_gtod = ktime_now; |
90 | scd->clock = ktime_now; | 83 | scd->clock = ktime_now; |
91 | scd->multi = 1 << MULTI_SHIFT; | ||
92 | #ifdef CONFIG_NO_HZ | ||
93 | scd->check_max = 1; | ||
94 | #endif | ||
95 | } | 84 | } |
96 | 85 | ||
97 | sched_clock_running = 1; | 86 | sched_clock_running = 1; |
98 | } | 87 | } |
99 | 88 | ||
100 | #ifdef CONFIG_NO_HZ | ||
101 | /* | 89 | /* |
102 | * The dynamic ticks makes the delta jiffies inaccurate. This | 90 | * min,max except they take wrapping into account |
103 | * prevents us from checking the maximum time update. | ||
104 | * Disable the maximum check during stopped ticks. | ||
105 | */ | 91 | */ |
106 | void sched_clock_tick_stop(int cpu) | ||
107 | { | ||
108 | struct sched_clock_data *scd = cpu_sdc(cpu); | ||
109 | |||
110 | scd->check_max = 0; | ||
111 | } | ||
112 | 92 | ||
113 | void sched_clock_tick_start(int cpu) | 93 | static inline u64 wrap_min(u64 x, u64 y) |
114 | { | 94 | { |
115 | struct sched_clock_data *scd = cpu_sdc(cpu); | 95 | return (s64)(x - y) < 0 ? x : y; |
116 | |||
117 | scd->check_max = 1; | ||
118 | } | 96 | } |
119 | 97 | ||
120 | static int check_max(struct sched_clock_data *scd) | 98 | static inline u64 wrap_max(u64 x, u64 y) |
121 | { | 99 | { |
122 | return scd->check_max; | 100 | return (s64)(x - y) > 0 ? x : y; |
123 | } | 101 | } |
124 | #else | ||
125 | static int check_max(struct sched_clock_data *scd) | ||
126 | { | ||
127 | return 1; | ||
128 | } | ||
129 | #endif /* CONFIG_NO_HZ */ | ||
130 | 102 | ||
131 | /* | 103 | /* |
132 | * update the percpu scd from the raw @now value | 104 | * update the percpu scd from the raw @now value |
133 | * | 105 | * |
134 | * - filter out backward motion | 106 | * - filter out backward motion |
135 | * - use jiffies to generate a min,max window to clip the raw values | 107 | * - use the GTOD tick value to create a window to filter crazy TSC values |
136 | */ | 108 | */ |
137 | static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time) | 109 | static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) |
138 | { | 110 | { |
139 | unsigned long now_jiffies = jiffies; | 111 | s64 delta = now - scd->tick_raw; |
140 | long delta_jiffies = now_jiffies - scd->tick_jiffies; | 112 | u64 clock, min_clock, max_clock; |
141 | u64 clock = scd->clock; | ||
142 | u64 min_clock, max_clock; | ||
143 | s64 delta = now - scd->prev_raw; | ||
144 | 113 | ||
145 | WARN_ON_ONCE(!irqs_disabled()); | 114 | WARN_ON_ONCE(!irqs_disabled()); |
146 | 115 | ||
147 | /* | 116 | if (unlikely(delta < 0)) |
148 | * At schedule tick the clock can be just under the gtod. We don't | 117 | delta = 0; |
149 | * want to push it too prematurely. | ||
150 | */ | ||
151 | min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC); | ||
152 | if (min_clock > TICK_NSEC) | ||
153 | min_clock -= TICK_NSEC / 2; | ||
154 | |||
155 | if (unlikely(delta < 0)) { | ||
156 | clock++; | ||
157 | goto out; | ||
158 | } | ||
159 | 118 | ||
160 | /* | 119 | /* |
161 | * The clock must stay within a jiffie of the gtod. | 120 | * scd->clock = clamp(scd->tick_gtod + delta, |
162 | * But since we may be at the start of a jiffy or the end of one | 121 | * max(scd->tick_gtod, scd->clock), |
163 | * we add another jiffy buffer. | 122 | * scd->tick_gtod + TICK_NSEC); |
164 | */ | 123 | */ |
165 | max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC; | ||
166 | 124 | ||
167 | delta *= scd->multi; | 125 | clock = scd->tick_gtod + delta; |
168 | delta >>= MULTI_SHIFT; | 126 | min_clock = wrap_max(scd->tick_gtod, scd->clock); |
127 | max_clock = scd->tick_gtod + TICK_NSEC; | ||
169 | 128 | ||
170 | if (unlikely(clock + delta > max_clock) && check_max(scd)) { | 129 | clock = wrap_max(clock, min_clock); |
171 | if (clock < max_clock) | 130 | clock = wrap_min(clock, max_clock); |
172 | clock = max_clock; | ||
173 | else | ||
174 | clock++; | ||
175 | } else { | ||
176 | clock += delta; | ||
177 | } | ||
178 | 131 | ||
179 | out: | 132 | scd->clock = clock; |
180 | if (unlikely(clock < min_clock)) | ||
181 | clock = min_clock; | ||
182 | 133 | ||
183 | if (time) | 134 | return scd->clock; |
184 | *time = clock; | ||
185 | else { | ||
186 | scd->prev_raw = now; | ||
187 | scd->clock = clock; | ||
188 | } | ||
189 | } | 135 | } |
190 | 136 | ||
191 | static void lock_double_clock(struct sched_clock_data *data1, | 137 | static void lock_double_clock(struct sched_clock_data *data1, |
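
The comment inside `__update_sched_clock()` above spells the new filter out as a clamp: the per-cpu clock is pinned between `max(tick_gtod, clock)` and `tick_gtod + TICK_NSEC`, with `wrap_min()`/`wrap_max()` keeping the comparisons correct across u64 wraparound. A self-contained sketch of that window (the TICK_NSEC value is an assumption, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 4000000ULL   /* assumed 4ms tick for the sketch */

/* min/max that stay correct even if the u64 counters wrap around */
static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

/* Clip a raw (possibly crazy) clock delta into a window anchored at the
 * last GTOD tick: never behind max(tick_gtod, clock), never more than
 * one tick ahead of tick_gtod. */
static uint64_t filter_clock(uint64_t tick_gtod, uint64_t clock, int64_t delta)
{
	uint64_t c, min_clock, max_clock;

	if (delta < 0)
		delta = 0;                       /* filter out backward motion */

	c         = tick_gtod + (uint64_t)delta;
	min_clock = wrap_max(tick_gtod, clock);
	max_clock = tick_gtod + TICK_NSEC;

	c = wrap_max(c, min_clock);
	c = wrap_min(c, max_clock);
	return c;
}

int main(void)
{
	/* A wild one-second jump in the raw clock is clipped to one tick. */
	printf("%llu\n", (unsigned long long)filter_clock(1000, 1000, 1000000000LL));
	return 0;
}
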
@@ -203,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1, | |||
203 | u64 sched_clock_cpu(int cpu) | 149 | u64 sched_clock_cpu(int cpu) |
204 | { | 150 | { |
205 | struct sched_clock_data *scd = cpu_sdc(cpu); | 151 | struct sched_clock_data *scd = cpu_sdc(cpu); |
206 | u64 now, clock; | 152 | u64 now, clock, this_clock, remote_clock; |
207 | 153 | ||
208 | if (unlikely(!sched_clock_running)) | 154 | if (unlikely(!sched_clock_running)) |
209 | return 0ull; | 155 | return 0ull; |
@@ -212,43 +158,44 @@ u64 sched_clock_cpu(int cpu) | |||
212 | now = sched_clock(); | 158 | now = sched_clock(); |
213 | 159 | ||
214 | if (cpu != raw_smp_processor_id()) { | 160 | if (cpu != raw_smp_processor_id()) { |
215 | /* | ||
216 | * in order to update a remote cpu's clock based on our | ||
217 | * unstable raw time rebase it against: | ||
218 | * tick_raw (offset between raw counters) | ||
219 | * tick_gtod (tick offset between cpus) | ||
220 | */ | ||
221 | struct sched_clock_data *my_scd = this_scd(); | 161 | struct sched_clock_data *my_scd = this_scd(); |
222 | 162 | ||
223 | lock_double_clock(scd, my_scd); | 163 | lock_double_clock(scd, my_scd); |
224 | 164 | ||
225 | now -= my_scd->tick_raw; | 165 | this_clock = __update_sched_clock(my_scd, now); |
226 | now += scd->tick_raw; | 166 | remote_clock = scd->clock; |
227 | 167 | ||
228 | now += my_scd->tick_gtod; | 168 | /* |
229 | now -= scd->tick_gtod; | 169 | * Use the opportunity that we have both locks |
170 | * taken to couple the two clocks: we take the | ||
171 | * larger time as the latest time for both | ||
172 | * runqueues. (this creates monotonic movement) | ||
173 | */ | ||
174 | if (likely((s64)(remote_clock - this_clock) < 0)) { | ||
175 | clock = this_clock; | ||
176 | scd->clock = clock; | ||
177 | } else { | ||
178 | /* | ||
179 | * Should be rare, but possible: | ||
180 | */ | ||
181 | clock = remote_clock; | ||
182 | my_scd->clock = remote_clock; | ||
183 | } | ||
230 | 184 | ||
231 | __raw_spin_unlock(&my_scd->lock); | 185 | __raw_spin_unlock(&my_scd->lock); |
232 | |||
233 | __update_sched_clock(scd, now, &clock); | ||
234 | |||
235 | __raw_spin_unlock(&scd->lock); | ||
236 | |||
237 | } else { | 186 | } else { |
238 | __raw_spin_lock(&scd->lock); | 187 | __raw_spin_lock(&scd->lock); |
239 | __update_sched_clock(scd, now, NULL); | 188 | clock = __update_sched_clock(scd, now); |
240 | clock = scd->clock; | ||
241 | __raw_spin_unlock(&scd->lock); | ||
242 | } | 189 | } |
243 | 190 | ||
191 | __raw_spin_unlock(&scd->lock); | ||
192 | |||
244 | return clock; | 193 | return clock; |
245 | } | 194 | } |
246 | 195 | ||
247 | void sched_clock_tick(void) | 196 | void sched_clock_tick(void) |
248 | { | 197 | { |
249 | struct sched_clock_data *scd = this_scd(); | 198 | struct sched_clock_data *scd = this_scd(); |
250 | unsigned long now_jiffies = jiffies; | ||
251 | s64 mult, delta_gtod, delta_raw; | ||
252 | u64 now, now_gtod; | 199 | u64 now, now_gtod; |
253 | 200 | ||
254 | if (unlikely(!sched_clock_running)) | 201 | if (unlikely(!sched_clock_running)) |
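
The remote-cpu branch of `sched_clock_cpu()` above now updates the local clock first and then couples the two values by keeping the larger one, so an observer hopping between CPUs never sees time move backwards. A small sketch of just that coupling step, with bare u64 values standing in for the locked per-cpu structures:

#include <stdint.h>
#include <stdio.h>

/* Couple two per-cpu clock values: both ends adopt the larger one.
 * The signed cast keeps the comparison meaningful across wraparound. */
static uint64_t couple_clocks(uint64_t *this_clock, uint64_t *remote_clock)
{
	if ((int64_t)(*remote_clock - *this_clock) < 0)
		*remote_clock = *this_clock;   /* common case: remote lags behind */
	else
		*this_clock = *remote_clock;   /* rare: remote is ahead of us */

	return *remote_clock;
}

int main(void)
{
	uint64_t this_cpu = 5000, remote_cpu = 4700;

	printf("coupled to %llu\n",
	       (unsigned long long)couple_clocks(&this_cpu, &remote_cpu));
	printf("this=%llu remote=%llu\n",
	       (unsigned long long)this_cpu, (unsigned long long)remote_cpu);
	return 0;
}
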
@@ -260,29 +207,9 @@ void sched_clock_tick(void) | |||
260 | now = sched_clock(); | 207 | now = sched_clock(); |
261 | 208 | ||
262 | __raw_spin_lock(&scd->lock); | 209 | __raw_spin_lock(&scd->lock); |
263 | __update_sched_clock(scd, now, NULL); | ||
264 | /* | ||
265 | * update tick_gtod after __update_sched_clock() because that will | ||
266 | * already observe 1 new jiffy; adding a new tick_gtod to that would | ||
267 | * increase the clock 2 jiffies. | ||
268 | */ | ||
269 | delta_gtod = now_gtod - scd->tick_gtod; | ||
270 | delta_raw = now - scd->tick_raw; | ||
271 | |||
272 | if ((long)delta_raw > 0) { | ||
273 | mult = delta_gtod << MULTI_SHIFT; | ||
274 | do_div(mult, delta_raw); | ||
275 | scd->multi = mult; | ||
276 | if (scd->multi > MAX_MULTI) | ||
277 | scd->multi = MAX_MULTI; | ||
278 | else if (scd->multi < MIN_MULTI) | ||
279 | scd->multi = MIN_MULTI; | ||
280 | } else | ||
281 | scd->multi = 1 << MULTI_SHIFT; | ||
282 | |||
283 | scd->tick_raw = now; | 210 | scd->tick_raw = now; |
284 | scd->tick_gtod = now_gtod; | 211 | scd->tick_gtod = now_gtod; |
285 | scd->tick_jiffies = now_jiffies; | 212 | __update_sched_clock(scd, now); |
286 | __raw_spin_unlock(&scd->lock); | 213 | __raw_spin_unlock(&scd->lock); |
287 | } | 214 | } |
288 | 215 | ||
@@ -300,37 +227,28 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event); | |||
300 | */ | 227 | */ |
301 | void sched_clock_idle_wakeup_event(u64 delta_ns) | 228 | void sched_clock_idle_wakeup_event(u64 delta_ns) |
302 | { | 229 | { |
303 | struct sched_clock_data *scd = this_scd(); | 230 | sched_clock_tick(); |
304 | u64 now = sched_clock(); | ||
305 | |||
306 | /* | ||
307 | * Override the previous timestamp and ignore all | ||
308 | * sched_clock() deltas that occurred while we idled, | ||
309 | * and use the PM-provided delta_ns to advance the | ||
310 | * rq clock: | ||
311 | */ | ||
312 | __raw_spin_lock(&scd->lock); | ||
313 | scd->prev_raw = now; | ||
314 | scd->clock += delta_ns; | ||
315 | scd->multi = 1 << MULTI_SHIFT; | ||
316 | __raw_spin_unlock(&scd->lock); | ||
317 | |||
318 | touch_softlockup_watchdog(); | 231 | touch_softlockup_watchdog(); |
319 | } | 232 | } |
320 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); | 233 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); |
321 | 234 | ||
322 | #endif | 235 | #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ |
323 | 236 | ||
324 | /* | 237 | void sched_clock_init(void) |
325 | * Scheduler clock - returns current time in nanosec units. | ||
326 | * This is default implementation. | ||
327 | * Architectures and sub-architectures can override this. | ||
328 | */ | ||
329 | unsigned long long __attribute__((weak)) sched_clock(void) | ||
330 | { | 238 | { |
331 | return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ); | 239 | sched_clock_running = 1; |
332 | } | 240 | } |
333 | 241 | ||
242 | u64 sched_clock_cpu(int cpu) | ||
243 | { | ||
244 | if (unlikely(!sched_clock_running)) | ||
245 | return 0; | ||
246 | |||
247 | return sched_clock(); | ||
248 | } | ||
249 | |||
250 | #endif | ||
251 | |||
334 | unsigned long long cpu_clock(int cpu) | 252 | unsigned long long cpu_clock(int cpu) |
335 | { | 253 | { |
336 | unsigned long long clock; | 254 | unsigned long long clock; |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f2aa987027d6..fb8994c6d4bb 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -878,7 +878,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | |||
878 | #ifdef CONFIG_SCHED_HRTICK | 878 | #ifdef CONFIG_SCHED_HRTICK |
879 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | 879 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) |
880 | { | 880 | { |
881 | int requeue = rq->curr == p; | ||
882 | struct sched_entity *se = &p->se; | 881 | struct sched_entity *se = &p->se; |
883 | struct cfs_rq *cfs_rq = cfs_rq_of(se); | 882 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
884 | 883 | ||
@@ -899,10 +898,10 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) | |||
899 | * Don't schedule slices shorter than 10000ns, that just | 898 | * Don't schedule slices shorter than 10000ns, that just |
900 | * doesn't make sense. Rely on vruntime for fairness. | 899 | * doesn't make sense. Rely on vruntime for fairness. |
901 | */ | 900 | */ |
902 | if (!requeue) | 901 | if (rq->curr != p) |
903 | delta = max(10000LL, delta); | 902 | delta = max_t(s64, 10000LL, delta); |
904 | 903 | ||
905 | hrtick_start(rq, delta, requeue); | 904 | hrtick_start(rq, delta); |
906 | } | 905 | } |
907 | } | 906 | } |
908 | #else /* !CONFIG_SCHED_HRTICK */ | 907 | #else /* !CONFIG_SCHED_HRTICK */ |
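
`hrtick_start_fair()` now floors the remaining slice with `max_t(s64, 10000LL, delta)`: `delta` is signed here, so the comparison has to happen in a signed type or a task that already overran its slice (negative delta) would look like a huge unsigned value. A tiny sketch of a `max_t`-style helper (simplified: unlike the kernel macro it evaluates its arguments twice):

#include <stdint.h>
#include <stdio.h>

/* Sketch of max_t: cast both operands to one explicit type before comparing. */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	int64_t delta = -250;                        /* task already overran its slice */
	int64_t slice = max_t(int64_t, 10000LL, delta);

	printf("%lld\n", (long long)slice);          /* floored to 10000 ns */
	return 0;
}
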
@@ -1004,6 +1003,8 @@ static void yield_task_fair(struct rq *rq) | |||
1004 | * not idle and an idle cpu is available. The span of cpus to | 1003 | * not idle and an idle cpu is available. The span of cpus to |
1005 | * search starts with cpus closest then further out as needed, | 1004 | * search starts with cpus closest then further out as needed, |
1006 | * so we always favor a closer, idle cpu. | 1005 | * so we always favor a closer, idle cpu. |
1006 | * Domains may include CPUs that are not usable for migration, | ||
1007 | * hence we need to mask them out (cpu_active_map) | ||
1007 | * | 1008 | * |
1008 | * Returns the CPU we should wake onto. | 1009 | * Returns the CPU we should wake onto. |
1009 | */ | 1010 | */ |
@@ -1031,7 +1032,8 @@ static int wake_idle(int cpu, struct task_struct *p) | |||
1031 | || ((sd->flags & SD_WAKE_IDLE_FAR) | 1032 | || ((sd->flags & SD_WAKE_IDLE_FAR) |
1032 | && !task_hot(p, task_rq(p)->clock, sd))) { | 1033 | && !task_hot(p, task_rq(p)->clock, sd))) { |
1033 | cpus_and(tmp, sd->span, p->cpus_allowed); | 1034 | cpus_and(tmp, sd->span, p->cpus_allowed); |
1034 | for_each_cpu_mask(i, tmp) { | 1035 | cpus_and(tmp, tmp, cpu_active_map); |
1036 | for_each_cpu_mask_nr(i, tmp) { | ||
1035 | if (idle_cpu(i)) { | 1037 | if (idle_cpu(i)) { |
1036 | if (i != task_cpu(p)) { | 1038 | if (i != task_cpu(p)) { |
1037 | schedstat_inc(p, | 1039 | schedstat_inc(p, |
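
`wake_idle()` now additionally ANDs the candidate set with `cpu_active_map`, so a CPU that is still in the domain span but on its way offline is never chosen as the wake target. A bitmask sketch of that selection, with plain unsigned long masks standing in for `cpumask_t`:

#include <stdio.h>

/* Sketch: pick the first idle CPU out of (domain span & allowed & active). */
static int pick_idle_cpu(unsigned long span, unsigned long allowed,
			 unsigned long active, unsigned long idle)
{
	unsigned long cand = span & allowed & active;
	int cpu;

	for (cpu = 0; cpu < (int)(8 * sizeof(cand)); cpu++) {
		if ((cand & (1UL << cpu)) && (idle & (1UL << cpu)))
			return cpu;
	}
	return -1;
}

int main(void)
{
	/* CPU 2 is idle but inactive (being hot-unplugged), so CPU 3 wins. */
	printf("%d\n", pick_idle_cpu(0xf, 0xf, 0xb /* no cpu2 */, 0xc /* 2,3 idle */));
	return 0;
}
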
@@ -1440,18 +1442,23 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) | |||
1440 | struct task_struct *p = NULL; | 1442 | struct task_struct *p = NULL; |
1441 | struct sched_entity *se; | 1443 | struct sched_entity *se; |
1442 | 1444 | ||
1443 | while (next != &cfs_rq->tasks) { | 1445 | if (next == &cfs_rq->tasks) |
1446 | return NULL; | ||
1447 | |||
1448 | /* Skip over entities that are not tasks */ | ||
1449 | do { | ||
1444 | se = list_entry(next, struct sched_entity, group_node); | 1450 | se = list_entry(next, struct sched_entity, group_node); |
1445 | next = next->next; | 1451 | next = next->next; |
1452 | } while (next != &cfs_rq->tasks && !entity_is_task(se)); | ||
1446 | 1453 | ||
1447 | /* Skip over entities that are not tasks */ | 1454 | if (next == &cfs_rq->tasks) |
1448 | if (entity_is_task(se)) { | 1455 | return NULL; |
1449 | p = task_of(se); | ||
1450 | break; | ||
1451 | } | ||
1452 | } | ||
1453 | 1456 | ||
1454 | cfs_rq->balance_iterator = next; | 1457 | cfs_rq->balance_iterator = next; |
1458 | |||
1459 | if (entity_is_task(se)) | ||
1460 | p = task_of(se); | ||
1461 | |||
1455 | return p; | 1462 | return p; |
1456 | } | 1463 | } |
1457 | 1464 | ||
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 862b06bd560a..9353ca78154e 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
@@ -8,6 +8,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1) | |||
8 | SCHED_FEAT(HRTICK, 1) | 8 | SCHED_FEAT(HRTICK, 1) |
9 | SCHED_FEAT(DOUBLE_TICK, 0) | 9 | SCHED_FEAT(DOUBLE_TICK, 0) |
10 | SCHED_FEAT(ASYM_GRAN, 1) | 10 | SCHED_FEAT(ASYM_GRAN, 1) |
11 | SCHED_FEAT(LB_BIAS, 0) | 11 | SCHED_FEAT(LB_BIAS, 1) |
12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) | 12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) |
13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) | 13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 47ceac9e8552..1113157b2058 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -199,6 +199,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) | |||
199 | 199 | ||
200 | static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) | 200 | static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) |
201 | { | 201 | { |
202 | if (rt_rq->rt_nr_running) | ||
203 | resched_task(rq_of_rt_rq(rt_rq)->curr); | ||
202 | } | 204 | } |
203 | 205 | ||
204 | static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) | 206 | static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) |
@@ -240,7 +242,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq) | |||
240 | 242 | ||
241 | spin_lock(&rt_b->rt_runtime_lock); | 243 | spin_lock(&rt_b->rt_runtime_lock); |
242 | rt_period = ktime_to_ns(rt_b->rt_period); | 244 | rt_period = ktime_to_ns(rt_b->rt_period); |
243 | for_each_cpu_mask(i, rd->span) { | 245 | for_each_cpu_mask_nr(i, rd->span) { |
244 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 246 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
245 | s64 diff; | 247 | s64 diff; |
246 | 248 | ||
@@ -253,7 +255,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq) | |||
253 | 255 | ||
254 | diff = iter->rt_runtime - iter->rt_time; | 256 | diff = iter->rt_runtime - iter->rt_time; |
255 | if (diff > 0) { | 257 | if (diff > 0) { |
256 | do_div(diff, weight); | 258 | diff = div_u64((u64)diff, weight); |
257 | if (rt_rq->rt_runtime + diff > rt_period) | 259 | if (rt_rq->rt_runtime + diff > rt_period) |
258 | diff = rt_period - rt_rq->rt_runtime; | 260 | diff = rt_period - rt_rq->rt_runtime; |
259 | iter->rt_runtime -= diff; | 261 | iter->rt_runtime -= diff; |
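
The `do_div()` call became `diff = div_u64((u64)diff, weight)`: `do_div()` divides a u64 lvalue in place and returns the remainder, while `div_u64()` simply returns the quotient, which is what this surplus-sharing expression wants. A sketch of lending an equal share of one runqueue's spare runtime to a borrower, capped at the period (names and values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Sketch: a donor with "surplus" spare runtime lends an equal share to
 * each of "weight" borrowers, never pushing the borrower past its period.
 * All values are nanoseconds. */
static uint64_t borrow_share(int64_t surplus, unsigned int weight,
			     uint64_t have, uint64_t period)
{
	uint64_t diff;

	if (surplus <= 0 || weight == 0)
		return 0;

	diff = (uint64_t)surplus / weight;      /* the div_u64()-style division */
	if (have + diff > period)
		diff = period - have;           /* cap at the period */
	return diff;
}

int main(void)
{
	/* 300us spare split 4 ways is 75us, but only 50us of headroom is left. */
	printf("%llu ns\n", (unsigned long long)borrow_share(300000, 4, 950000, 1000000));
	return 0;
}
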
@@ -298,7 +300,7 @@ static void __disable_runtime(struct rq *rq) | |||
298 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 300 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
299 | s64 diff; | 301 | s64 diff; |
300 | 302 | ||
301 | if (iter == rt_rq) | 303 | if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) |
302 | continue; | 304 | continue; |
303 | 305 | ||
304 | spin_lock(&iter->rt_runtime_lock); | 306 | spin_lock(&iter->rt_runtime_lock); |
@@ -348,6 +350,7 @@ static void __enable_runtime(struct rq *rq) | |||
348 | spin_lock(&rt_rq->rt_runtime_lock); | 350 | spin_lock(&rt_rq->rt_runtime_lock); |
349 | rt_rq->rt_runtime = rt_b->rt_runtime; | 351 | rt_rq->rt_runtime = rt_b->rt_runtime; |
350 | rt_rq->rt_time = 0; | 352 | rt_rq->rt_time = 0; |
353 | rt_rq->rt_throttled = 0; | ||
351 | spin_unlock(&rt_rq->rt_runtime_lock); | 354 | spin_unlock(&rt_rq->rt_runtime_lock); |
352 | spin_unlock(&rt_b->rt_runtime_lock); | 355 | spin_unlock(&rt_b->rt_runtime_lock); |
353 | } | 356 | } |
@@ -438,9 +441,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) | |||
438 | { | 441 | { |
439 | u64 runtime = sched_rt_runtime(rt_rq); | 442 | u64 runtime = sched_rt_runtime(rt_rq); |
440 | 443 | ||
441 | if (runtime == RUNTIME_INF) | ||
442 | return 0; | ||
443 | |||
444 | if (rt_rq->rt_throttled) | 444 | if (rt_rq->rt_throttled) |
445 | return rt_rq_throttled(rt_rq); | 445 | return rt_rq_throttled(rt_rq); |
446 | 446 | ||
@@ -491,9 +491,11 @@ static void update_curr_rt(struct rq *rq) | |||
491 | rt_rq = rt_rq_of_se(rt_se); | 491 | rt_rq = rt_rq_of_se(rt_se); |
492 | 492 | ||
493 | spin_lock(&rt_rq->rt_runtime_lock); | 493 | spin_lock(&rt_rq->rt_runtime_lock); |
494 | rt_rq->rt_time += delta_exec; | 494 | if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { |
495 | if (sched_rt_runtime_exceeded(rt_rq)) | 495 | rt_rq->rt_time += delta_exec; |
496 | resched_task(curr); | 496 | if (sched_rt_runtime_exceeded(rt_rq)) |
497 | resched_task(curr); | ||
498 | } | ||
497 | spin_unlock(&rt_rq->rt_runtime_lock); | 499 | spin_unlock(&rt_rq->rt_runtime_lock); |
498 | } | 500 | } |
499 | } | 501 | } |
@@ -505,7 +507,9 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | |||
505 | rt_rq->rt_nr_running++; | 507 | rt_rq->rt_nr_running++; |
506 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED | 508 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
507 | if (rt_se_prio(rt_se) < rt_rq->highest_prio) { | 509 | if (rt_se_prio(rt_se) < rt_rq->highest_prio) { |
510 | #ifdef CONFIG_SMP | ||
508 | struct rq *rq = rq_of_rt_rq(rt_rq); | 511 | struct rq *rq = rq_of_rt_rq(rt_rq); |
512 | #endif | ||
509 | 513 | ||
510 | rt_rq->highest_prio = rt_se_prio(rt_se); | 514 | rt_rq->highest_prio = rt_se_prio(rt_se); |
511 | #ifdef CONFIG_SMP | 515 | #ifdef CONFIG_SMP |
@@ -599,11 +603,7 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) | |||
599 | if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) | 603 | if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) |
600 | return; | 604 | return; |
601 | 605 | ||
602 | if (rt_se->nr_cpus_allowed == 1) | 606 | list_add_tail(&rt_se->run_list, queue); |
603 | list_add(&rt_se->run_list, queue); | ||
604 | else | ||
605 | list_add_tail(&rt_se->run_list, queue); | ||
606 | |||
607 | __set_bit(rt_se_prio(rt_se), array->bitmap); | 607 | __set_bit(rt_se_prio(rt_se), array->bitmap); |
608 | 608 | ||
609 | inc_rt_tasks(rt_se, rt_rq); | 609 | inc_rt_tasks(rt_se, rt_rq); |
@@ -688,32 +688,34 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) | |||
688 | * Put task to the end of the run list without the overhead of dequeue | 688 | * Put task to the end of the run list without the overhead of dequeue |
689 | * followed by enqueue. | 689 | * followed by enqueue. |
690 | */ | 690 | */ |
691 | static | 691 | static void |
692 | void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) | 692 | requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) |
693 | { | 693 | { |
694 | struct rt_prio_array *array = &rt_rq->active; | ||
695 | |||
696 | if (on_rt_rq(rt_se)) { | 694 | if (on_rt_rq(rt_se)) { |
697 | list_del_init(&rt_se->run_list); | 695 | struct rt_prio_array *array = &rt_rq->active; |
698 | list_add_tail(&rt_se->run_list, | 696 | struct list_head *queue = array->queue + rt_se_prio(rt_se); |
699 | array->queue + rt_se_prio(rt_se)); | 697 | |
698 | if (head) | ||
699 | list_move(&rt_se->run_list, queue); | ||
700 | else | ||
701 | list_move_tail(&rt_se->run_list, queue); | ||
700 | } | 702 | } |
701 | } | 703 | } |
702 | 704 | ||
703 | static void requeue_task_rt(struct rq *rq, struct task_struct *p) | 705 | static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) |
704 | { | 706 | { |
705 | struct sched_rt_entity *rt_se = &p->rt; | 707 | struct sched_rt_entity *rt_se = &p->rt; |
706 | struct rt_rq *rt_rq; | 708 | struct rt_rq *rt_rq; |
707 | 709 | ||
708 | for_each_sched_rt_entity(rt_se) { | 710 | for_each_sched_rt_entity(rt_se) { |
709 | rt_rq = rt_rq_of_se(rt_se); | 711 | rt_rq = rt_rq_of_se(rt_se); |
710 | requeue_rt_entity(rt_rq, rt_se); | 712 | requeue_rt_entity(rt_rq, rt_se, head); |
711 | } | 713 | } |
712 | } | 714 | } |
713 | 715 | ||
714 | static void yield_task_rt(struct rq *rq) | 716 | static void yield_task_rt(struct rq *rq) |
715 | { | 717 | { |
716 | requeue_task_rt(rq, rq->curr); | 718 | requeue_task_rt(rq, rq->curr, 0); |
717 | } | 719 | } |
718 | 720 | ||
719 | #ifdef CONFIG_SMP | 721 | #ifdef CONFIG_SMP |
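
`requeue_rt_entity()` now takes a `head` flag and uses `list_move()`/`list_move_tail()`, i.e. unlink-and-reinsert in one call, instead of the open-coded `list_del_init()` plus `list_add_tail()`. A self-contained sketch of those helpers and of the head/tail requeue, on a minimal circular doubly-linked list:

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *after)
{
	e->next = after->next;
	e->prev = after;
	after->next->prev = e;
	after->next = e;
}

/* Unlink and re-insert at the head or the tail of a queue in one call,
 * mirroring the semantics of the kernel's list_move()/list_move_tail(). */
static void list_move(struct list_head *e, struct list_head *q)      { list_del(e); list_add(e, q); }
static void list_move_tail(struct list_head *e, struct list_head *q) { list_del(e); list_add(e, q->prev); }

/* Sketch of the new requeue: one entity, one priority queue, and a flag
 * deciding whether it goes to the front or the back. */
static void requeue(struct list_head *queue, struct list_head *se, int head)
{
	if (head)
		list_move(se, queue);
	else
		list_move_tail(se, queue);
}

int main(void)
{
	struct list_head q, a, b;

	list_init(&q);
	list_add(&a, &q);        /* q: a   */
	list_add(&b, &q);        /* q: b a */

	requeue(&q, &a, 1);      /* move a to the head -> q: a b */
	printf("head is %s\n", q.next == &a ? "a" : "b");

	requeue(&q, &a, 0);      /* move a to the tail -> q: b a */
	printf("head is %s\n", q.next == &a ? "a" : "b");
	return 0;
}
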
@@ -753,6 +755,30 @@ static int select_task_rq_rt(struct task_struct *p, int sync) | |||
753 | */ | 755 | */ |
754 | return task_cpu(p); | 756 | return task_cpu(p); |
755 | } | 757 | } |
758 | |||
759 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | ||
760 | { | ||
761 | cpumask_t mask; | ||
762 | |||
763 | if (rq->curr->rt.nr_cpus_allowed == 1) | ||
764 | return; | ||
765 | |||
766 | if (p->rt.nr_cpus_allowed != 1 | ||
767 | && cpupri_find(&rq->rd->cpupri, p, &mask)) | ||
768 | return; | ||
769 | |||
770 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask)) | ||
771 | return; | ||
772 | |||
773 | /* | ||
774 | * There appears to be other cpus that can accept | ||
775 | * current and none to run 'p', so lets reschedule | ||
776 | * to try and push current away: | ||
777 | */ | ||
778 | requeue_task_rt(rq, p, 1); | ||
779 | resched_task(rq->curr); | ||
780 | } | ||
781 | |||
756 | #endif /* CONFIG_SMP */ | 782 | #endif /* CONFIG_SMP */ |
757 | 783 | ||
758 | /* | 784 | /* |
@@ -778,18 +804,8 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) | |||
778 | * to move current somewhere else, making room for our non-migratable | 804 | * to move current somewhere else, making room for our non-migratable |
779 | * task. | 805 | * task. |
780 | */ | 806 | */ |
781 | if((p->prio == rq->curr->prio) | 807 | if (p->prio == rq->curr->prio && !need_resched()) |
782 | && p->rt.nr_cpus_allowed == 1 | 808 | check_preempt_equal_prio(rq, p); |
783 | && rq->curr->rt.nr_cpus_allowed != 1) { | ||
784 | cpumask_t mask; | ||
785 | |||
786 | if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask)) | ||
787 | /* | ||
788 | * There appear to be other cpus that can accept | ||
789 | * current, so let's reschedule to try and push it away | ||
790 | */ | ||
791 | resched_task(rq->curr); | ||
792 | } | ||
793 | #endif | 809 | #endif |
794 | } | 810 | } |
795 | 811 | ||
@@ -847,6 +863,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
847 | #define RT_MAX_TRIES 3 | 863 | #define RT_MAX_TRIES 3 |
848 | 864 | ||
849 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest); | 865 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest); |
866 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest); | ||
867 | |||
850 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); | 868 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); |
851 | 869 | ||
852 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | 870 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) |
@@ -922,6 +940,13 @@ static int find_lowest_rq(struct task_struct *task) | |||
922 | return -1; /* No targets found */ | 940 | return -1; /* No targets found */ |
923 | 941 | ||
924 | /* | 942 | /* |
943 | * Only consider CPUs that are usable for migration. | ||
944 | * I guess we might want to change cpupri_find() to ignore those | ||
945 | * in the first place. | ||
946 | */ | ||
947 | cpus_and(*lowest_mask, *lowest_mask, cpu_active_map); | ||
948 | |||
949 | /* | ||
925 | * At this point we have built a mask of cpus representing the | 950 | * At this point we have built a mask of cpus representing the |
926 | * lowest priority tasks in the system. Now we want to elect | 951 | * lowest priority tasks in the system. Now we want to elect |
927 | * the best one based on our affinity and topology. | 952 | * the best one based on our affinity and topology. |
@@ -1001,7 +1026,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1001 | break; | 1026 | break; |
1002 | 1027 | ||
1003 | /* try again */ | 1028 | /* try again */ |
1004 | spin_unlock(&lowest_rq->lock); | 1029 | double_unlock_balance(rq, lowest_rq); |
1005 | lowest_rq = NULL; | 1030 | lowest_rq = NULL; |
1006 | } | 1031 | } |
1007 | 1032 | ||
@@ -1070,7 +1095,7 @@ static int push_rt_task(struct rq *rq) | |||
1070 | 1095 | ||
1071 | resched_task(lowest_rq->curr); | 1096 | resched_task(lowest_rq->curr); |
1072 | 1097 | ||
1073 | spin_unlock(&lowest_rq->lock); | 1098 | double_unlock_balance(rq, lowest_rq); |
1074 | 1099 | ||
1075 | ret = 1; | 1100 | ret = 1; |
1076 | out: | 1101 | out: |
@@ -1107,7 +1132,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1107 | 1132 | ||
1108 | next = pick_next_task_rt(this_rq); | 1133 | next = pick_next_task_rt(this_rq); |
1109 | 1134 | ||
1110 | for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { | 1135 | for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) { |
1111 | if (this_cpu == cpu) | 1136 | if (this_cpu == cpu) |
1112 | continue; | 1137 | continue; |
1113 | 1138 | ||
@@ -1176,7 +1201,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1176 | 1201 | ||
1177 | } | 1202 | } |
1178 | skip: | 1203 | skip: |
1179 | spin_unlock(&src_rq->lock); | 1204 | double_unlock_balance(this_rq, src_rq); |
1180 | } | 1205 | } |
1181 | 1206 | ||
1182 | return ret; | 1207 | return ret; |
@@ -1415,7 +1440,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) | |||
1415 | * on the queue: | 1440 | * on the queue: |
1416 | */ | 1441 | */ |
1417 | if (p->rt.run_list.prev != p->rt.run_list.next) { | 1442 | if (p->rt.run_list.prev != p->rt.run_list.next) { |
1418 | requeue_task_rt(rq, p); | 1443 | requeue_task_rt(rq, p, 0); |
1419 | set_tsk_need_resched(p); | 1444 | set_tsk_need_resched(p); |
1420 | } | 1445 | } |
1421 | } | 1446 | } |
diff --git a/kernel/semaphore.c b/kernel/semaphore.c index aaaeae8244e7..94a62c0d4ade 100644 --- a/kernel/semaphore.c +++ b/kernel/semaphore.c | |||
@@ -212,9 +212,7 @@ static inline int __sched __down_common(struct semaphore *sem, long state, | |||
212 | waiter.up = 0; | 212 | waiter.up = 0; |
213 | 213 | ||
214 | for (;;) { | 214 | for (;;) { |
215 | if (state == TASK_INTERRUPTIBLE && signal_pending(task)) | 215 | if (signal_pending_state(state, task)) |
216 | goto interrupted; | ||
217 | if (state == TASK_KILLABLE && fatal_signal_pending(task)) | ||
218 | goto interrupted; | 216 | goto interrupted; |
219 | if (timeout <= 0) | 217 | if (timeout <= 0) |
220 | goto timed_out; | 218 | goto timed_out; |
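
The semaphore slow path now asks `signal_pending_state()` instead of open-coding the two cases, folding "interruptible sleeper, any signal" and "killable sleeper, fatal signal only" into one predicate. A sketch of what such a helper does; the flag values and the task fields here are assumptions made for the sketch, not the kernel's definitions:

#include <stdio.h>

/* Assumed stand-ins for the sketch; the real flags live in <linux/sched.h>. */
#define TASK_INTERRUPTIBLE 0x01
#define TASK_WAKEKILL      0x80
#define TASK_KILLABLE      (TASK_WAKEKILL | 0x02)

struct task { int sigpending; int fatal_sigpending; };

/* Sketch of signal_pending_state(): an interruptible sleeper wakes for
 * any pending signal, a killable sleeper only for a fatal one. */
static int signal_pending_state_sketch(long state, const struct task *t)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!t->sigpending)
		return 0;
	return (state & TASK_INTERRUPTIBLE) || t->fatal_sigpending;
}

int main(void)
{
	struct task t = { .sigpending = 1, .fatal_sigpending = 0 };

	printf("%d\n", signal_pending_state_sketch(TASK_INTERRUPTIBLE, &t)); /* 1 */
	printf("%d\n", signal_pending_state_sketch(TASK_KILLABLE, &t));      /* 0 */
	return 0;
}
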
diff --git a/kernel/signal.c b/kernel/signal.c index 6c0958e52ea7..e661b01d340f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/ptrace.h> | 22 | #include <linux/ptrace.h> |
23 | #include <linux/signal.h> | 23 | #include <linux/signal.h> |
24 | #include <linux/signalfd.h> | 24 | #include <linux/signalfd.h> |
25 | #include <linux/tracehook.h> | ||
25 | #include <linux/capability.h> | 26 | #include <linux/capability.h> |
26 | #include <linux/freezer.h> | 27 | #include <linux/freezer.h> |
27 | #include <linux/pid_namespace.h> | 28 | #include <linux/pid_namespace.h> |
@@ -39,24 +40,21 @@ | |||
39 | 40 | ||
40 | static struct kmem_cache *sigqueue_cachep; | 41 | static struct kmem_cache *sigqueue_cachep; |
41 | 42 | ||
42 | static int __sig_ignored(struct task_struct *t, int sig) | 43 | static void __user *sig_handler(struct task_struct *t, int sig) |
43 | { | 44 | { |
44 | void __user *handler; | 45 | return t->sighand->action[sig - 1].sa.sa_handler; |
46 | } | ||
45 | 47 | ||
48 | static int sig_handler_ignored(void __user *handler, int sig) | ||
49 | { | ||
46 | /* Is it explicitly or implicitly ignored? */ | 50 | /* Is it explicitly or implicitly ignored? */ |
47 | |||
48 | handler = t->sighand->action[sig - 1].sa.sa_handler; | ||
49 | return handler == SIG_IGN || | 51 | return handler == SIG_IGN || |
50 | (handler == SIG_DFL && sig_kernel_ignore(sig)); | 52 | (handler == SIG_DFL && sig_kernel_ignore(sig)); |
51 | } | 53 | } |
52 | 54 | ||
53 | static int sig_ignored(struct task_struct *t, int sig) | 55 | static int sig_ignored(struct task_struct *t, int sig) |
54 | { | 56 | { |
55 | /* | 57 | void __user *handler; |
56 | * Tracers always want to know about signals.. | ||
57 | */ | ||
58 | if (t->ptrace & PT_PTRACED) | ||
59 | return 0; | ||
60 | 58 | ||
61 | /* | 59 | /* |
62 | * Blocked signals are never ignored, since the | 60 | * Blocked signals are never ignored, since the |
@@ -66,7 +64,14 @@ static int sig_ignored(struct task_struct *t, int sig) | |||
66 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) | 64 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) |
67 | return 0; | 65 | return 0; |
68 | 66 | ||
69 | return __sig_ignored(t, sig); | 67 | handler = sig_handler(t, sig); |
68 | if (!sig_handler_ignored(handler, sig)) | ||
69 | return 0; | ||
70 | |||
71 | /* | ||
72 | * Tracers may want to know about even ignored signals. | ||
73 | */ | ||
74 | return !tracehook_consider_ignored_signal(t, sig, handler); | ||
70 | } | 75 | } |
71 | 76 | ||
72 | /* | 77 | /* |
@@ -129,7 +134,9 @@ void recalc_sigpending_and_wake(struct task_struct *t) | |||
129 | 134 | ||
130 | void recalc_sigpending(void) | 135 | void recalc_sigpending(void) |
131 | { | 136 | { |
132 | if (!recalc_sigpending_tsk(current) && !freezing(current)) | 137 | if (unlikely(tracehook_force_sigpending())) |
138 | set_thread_flag(TIF_SIGPENDING); | ||
139 | else if (!recalc_sigpending_tsk(current) && !freezing(current)) | ||
133 | clear_thread_flag(TIF_SIGPENDING); | 140 | clear_thread_flag(TIF_SIGPENDING); |
134 | 141 | ||
135 | } | 142 | } |
@@ -295,12 +302,12 @@ flush_signal_handlers(struct task_struct *t, int force_default) | |||
295 | 302 | ||
296 | int unhandled_signal(struct task_struct *tsk, int sig) | 303 | int unhandled_signal(struct task_struct *tsk, int sig) |
297 | { | 304 | { |
305 | void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; | ||
298 | if (is_global_init(tsk)) | 306 | if (is_global_init(tsk)) |
299 | return 1; | 307 | return 1; |
300 | if (tsk->ptrace & PT_PTRACED) | 308 | if (handler != SIG_IGN && handler != SIG_DFL) |
301 | return 0; | 309 | return 0; |
302 | return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) || | 310 | return !tracehook_consider_fatal_signal(tsk, sig, handler); |
303 | (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL); | ||
304 | } | 311 | } |
305 | 312 | ||
306 | 313 | ||
@@ -338,13 +345,9 @@ unblock_all_signals(void) | |||
338 | spin_unlock_irqrestore(&current->sighand->siglock, flags); | 345 | spin_unlock_irqrestore(&current->sighand->siglock, flags); |
339 | } | 346 | } |
340 | 347 | ||
341 | static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) | 348 | static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) |
342 | { | 349 | { |
343 | struct sigqueue *q, *first = NULL; | 350 | struct sigqueue *q, *first = NULL; |
344 | int still_pending = 0; | ||
345 | |||
346 | if (unlikely(!sigismember(&list->signal, sig))) | ||
347 | return 0; | ||
348 | 351 | ||
349 | /* | 352 | /* |
350 | * Collect the siginfo appropriate to this signal. Check if | 353 | * Collect the siginfo appropriate to this signal. Check if |
@@ -352,33 +355,30 @@ static int collect_signal(int sig, struct sigpending *list, siginfo_t *info) | |||
352 | */ | 355 | */ |
353 | list_for_each_entry(q, &list->list, list) { | 356 | list_for_each_entry(q, &list->list, list) { |
354 | if (q->info.si_signo == sig) { | 357 | if (q->info.si_signo == sig) { |
355 | if (first) { | 358 | if (first) |
356 | still_pending = 1; | 359 | goto still_pending; |
357 | break; | ||
358 | } | ||
359 | first = q; | 360 | first = q; |
360 | } | 361 | } |
361 | } | 362 | } |
363 | |||
364 | sigdelset(&list->signal, sig); | ||
365 | |||
362 | if (first) { | 366 | if (first) { |
367 | still_pending: | ||
363 | list_del_init(&first->list); | 368 | list_del_init(&first->list); |
364 | copy_siginfo(info, &first->info); | 369 | copy_siginfo(info, &first->info); |
365 | __sigqueue_free(first); | 370 | __sigqueue_free(first); |
366 | if (!still_pending) | ||
367 | sigdelset(&list->signal, sig); | ||
368 | } else { | 371 | } else { |
369 | |||
370 | /* Ok, it wasn't in the queue. This must be | 372 | /* Ok, it wasn't in the queue. This must be |
371 | a fast-pathed signal or we must have been | 373 | a fast-pathed signal or we must have been |
372 | out of queue space. So zero out the info. | 374 | out of queue space. So zero out the info. |
373 | */ | 375 | */ |
374 | sigdelset(&list->signal, sig); | ||
375 | info->si_signo = sig; | 376 | info->si_signo = sig; |
376 | info->si_errno = 0; | 377 | info->si_errno = 0; |
377 | info->si_code = 0; | 378 | info->si_code = 0; |
378 | info->si_pid = 0; | 379 | info->si_pid = 0; |
379 | info->si_uid = 0; | 380 | info->si_uid = 0; |
380 | } | 381 | } |
381 | return 1; | ||
382 | } | 382 | } |
383 | 383 | ||
384 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | 384 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, |
@@ -396,8 +396,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | |||
396 | } | 396 | } |
397 | } | 397 | } |
398 | 398 | ||
399 | if (!collect_signal(sig, pending, info)) | 399 | collect_signal(sig, pending, info); |
400 | sig = 0; | ||
401 | } | 400 | } |
402 | 401 | ||
403 | return sig; | 402 | return sig; |
@@ -462,8 +461,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) | |||
462 | * is to alert stop-signal processing code when another | 461 | * is to alert stop-signal processing code when another |
463 | * processor has come along and cleared the flag. | 462 | * processor has come along and cleared the flag. |
464 | */ | 463 | */ |
465 | if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) | 464 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; |
466 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; | ||
467 | } | 465 | } |
468 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { | 466 | if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { |
469 | /* | 467 | /* |
@@ -600,9 +598,6 @@ static int check_kill_permission(int sig, struct siginfo *info, | |||
600 | return security_task_kill(t, info, sig, 0); | 598 | return security_task_kill(t, info, sig, 0); |
601 | } | 599 | } |
602 | 600 | ||
603 | /* forward decl */ | ||
604 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why); | ||
605 | |||
606 | /* | 601 | /* |
607 | * Handle magic process-wide effects of stop/continue signals. Unlike | 602 | * Handle magic process-wide effects of stop/continue signals. Unlike |
608 | * the signal actions, these happen immediately at signal-generation | 603 | * the signal actions, these happen immediately at signal-generation |
@@ -765,7 +760,8 @@ static void complete_signal(int sig, struct task_struct *p, int group) | |||
765 | if (sig_fatal(p, sig) && | 760 | if (sig_fatal(p, sig) && |
766 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && | 761 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && |
767 | !sigismember(&t->real_blocked, sig) && | 762 | !sigismember(&t->real_blocked, sig) && |
768 | (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { | 763 | (sig == SIGKILL || |
764 | !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) { | ||
769 | /* | 765 | /* |
770 | * This signal will be fatal to the whole group. | 766 | * This signal will be fatal to the whole group. |
771 | */ | 767 | */ |
@@ -1125,7 +1121,7 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); | |||
1125 | * is probably wrong. Should make it like BSD or SYSV. | 1121 | * is probably wrong. Should make it like BSD or SYSV. |
1126 | */ | 1122 | */ |
1127 | 1123 | ||
1128 | static int kill_something_info(int sig, struct siginfo *info, int pid) | 1124 | static int kill_something_info(int sig, struct siginfo *info, pid_t pid) |
1129 | { | 1125 | { |
1130 | int ret; | 1126 | int ret; |
1131 | 1127 | ||
@@ -1237,17 +1233,6 @@ int kill_pid(struct pid *pid, int sig, int priv) | |||
1237 | } | 1233 | } |
1238 | EXPORT_SYMBOL(kill_pid); | 1234 | EXPORT_SYMBOL(kill_pid); |
1239 | 1235 | ||
1240 | int | ||
1241 | kill_proc(pid_t pid, int sig, int priv) | ||
1242 | { | ||
1243 | int ret; | ||
1244 | |||
1245 | rcu_read_lock(); | ||
1246 | ret = kill_pid_info(sig, __si_special(priv), find_pid(pid)); | ||
1247 | rcu_read_unlock(); | ||
1248 | return ret; | ||
1249 | } | ||
1250 | |||
1251 | /* | 1236 | /* |
1252 | * These functions support sending signals using preallocated sigqueue | 1237 | * These functions support sending signals using preallocated sigqueue |
1253 | * structures. This is needed "because realtime applications cannot | 1238 | * structures. This is needed "because realtime applications cannot |
@@ -1319,6 +1304,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) | |||
1319 | q->info.si_overrun++; | 1304 | q->info.si_overrun++; |
1320 | goto out; | 1305 | goto out; |
1321 | } | 1306 | } |
1307 | q->info.si_overrun = 0; | ||
1322 | 1308 | ||
1323 | signalfd_notify(t, sig); | 1309 | signalfd_notify(t, sig); |
1324 | pending = group ? &t->signal->shared_pending : &t->pending; | 1310 | pending = group ? &t->signal->shared_pending : &t->pending; |
@@ -1343,13 +1329,16 @@ static inline void __wake_up_parent(struct task_struct *p, | |||
1343 | /* | 1329 | /* |
1344 | * Let a parent know about the death of a child. | 1330 | * Let a parent know about the death of a child. |
1345 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | 1331 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. |
1332 | * | ||
1333 | * Returns -1 if our parent ignored us and so we've switched to | ||
1334 | * self-reaping, or else @sig. | ||
1346 | */ | 1335 | */ |
1347 | 1336 | int do_notify_parent(struct task_struct *tsk, int sig) | |
1348 | void do_notify_parent(struct task_struct *tsk, int sig) | ||
1349 | { | 1337 | { |
1350 | struct siginfo info; | 1338 | struct siginfo info; |
1351 | unsigned long flags; | 1339 | unsigned long flags; |
1352 | struct sighand_struct *psig; | 1340 | struct sighand_struct *psig; |
1341 | int ret = sig; | ||
1353 | 1342 | ||
1354 | BUG_ON(sig == -1); | 1343 | BUG_ON(sig == -1); |
1355 | 1344 | ||
@@ -1379,10 +1368,9 @@ void do_notify_parent(struct task_struct *tsk, int sig) | |||
1379 | 1368 | ||
1380 | info.si_uid = tsk->uid; | 1369 | info.si_uid = tsk->uid; |
1381 | 1370 | ||
1382 | /* FIXME: find out whether or not this is supposed to be c*time. */ | 1371 | info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, |
1383 | info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime, | ||
1384 | tsk->signal->utime)); | 1372 | tsk->signal->utime)); |
1385 | info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, | 1373 | info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, |
1386 | tsk->signal->stime)); | 1374 | tsk->signal->stime)); |
1387 | 1375 | ||
1388 | info.si_status = tsk->exit_code & 0x7f; | 1376 | info.si_status = tsk->exit_code & 0x7f; |
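
`si_utime`/`si_stime` switch from `cputime_to_jiffies()` to `cputime_to_clock_t()`: the SIGCHLD siginfo fields are defined in clock_t (USER_HZ) units, so reporting raw jiffies was only correct when HZ happened to equal USER_HZ. A minimal conversion sketch (it assumes HZ is a whole multiple of USER_HZ, which the real helper does not require):

#include <stdio.h>

#define HZ      1000   /* assumed kernel tick rate for the sketch */
#define USER_HZ 100    /* the rate clock_t values are defined in */

/* Sketch: rescale a jiffies count to clock_t ticks before reporting it to
 * userspace; without this a HZ=1000 kernel reports values 10x too large. */
static long long jiffies_to_clock_t_sketch(unsigned long long j)
{
	return (long long)(j / (HZ / USER_HZ));
}

int main(void)
{
	printf("%lld\n", jiffies_to_clock_t_sketch(2500)); /* 2.5 s -> 250 clock_t ticks */
	return 0;
}
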
@@ -1415,14 +1403,16 @@ void do_notify_parent(struct task_struct *tsk, int sig) | |||
1415 | * is implementation-defined: we do (if you don't want | 1403 | * is implementation-defined: we do (if you don't want |
1416 | * it, just use SIG_IGN instead). | 1404 | * it, just use SIG_IGN instead). |
1417 | */ | 1405 | */ |
1418 | tsk->exit_signal = -1; | 1406 | ret = tsk->exit_signal = -1; |
1419 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) | 1407 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
1420 | sig = 0; | 1408 | sig = -1; |
1421 | } | 1409 | } |
1422 | if (valid_signal(sig) && sig > 0) | 1410 | if (valid_signal(sig) && sig > 0) |
1423 | __group_send_sig_info(sig, &info, tsk->parent); | 1411 | __group_send_sig_info(sig, &info, tsk->parent); |
1424 | __wake_up_parent(tsk, tsk->parent); | 1412 | __wake_up_parent(tsk, tsk->parent); |
1425 | spin_unlock_irqrestore(&psig->siglock, flags); | 1413 | spin_unlock_irqrestore(&psig->siglock, flags); |
1414 | |||
1415 | return ret; | ||
1426 | } | 1416 | } |
1427 | 1417 | ||
1428 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | 1418 | static void do_notify_parent_cldstop(struct task_struct *tsk, int why) |
@@ -1450,9 +1440,8 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | |||
1450 | 1440 | ||
1451 | info.si_uid = tsk->uid; | 1441 | info.si_uid = tsk->uid; |
1452 | 1442 | ||
1453 | /* FIXME: find out whether or not this is supposed to be c*time. */ | 1443 | info.si_utime = cputime_to_clock_t(tsk->utime); |
1454 | info.si_utime = cputime_to_jiffies(tsk->utime); | 1444 | info.si_stime = cputime_to_clock_t(tsk->stime); |
1455 | info.si_stime = cputime_to_jiffies(tsk->stime); | ||
1456 | 1445 | ||
1457 | info.si_code = why; | 1446 | info.si_code = why; |
1458 | switch (why) { | 1447 | switch (why) { |
@@ -1491,10 +1480,10 @@ static inline int may_ptrace_stop(void) | |||
1491 | * is a deadlock situation, and pointless because our tracer | 1480 | * is a deadlock situation, and pointless because our tracer |
1492 | * is dead so don't allow us to stop. | 1481 | * is dead so don't allow us to stop. |
1493 | * If SIGKILL was already sent before the caller unlocked | 1482 | * If SIGKILL was already sent before the caller unlocked |
1494 | * ->siglock we must see ->core_waiters != 0. Otherwise it | 1483 | * ->siglock we must see ->core_state != NULL. Otherwise it |
1495 | * is safe to enter schedule(). | 1484 | * is safe to enter schedule(). |
1496 | */ | 1485 | */ |
1497 | if (unlikely(current->mm->core_waiters) && | 1486 | if (unlikely(current->mm->core_state) && |
1498 | unlikely(current->mm == current->parent->mm)) | 1487 | unlikely(current->mm == current->parent->mm)) |
1499 | return 0; | 1488 | return 0; |
1500 | 1489 | ||
@@ -1507,9 +1496,8 @@ static inline int may_ptrace_stop(void) | |||
1507 | */ | 1496 | */ |
1508 | static int sigkill_pending(struct task_struct *tsk) | 1497 | static int sigkill_pending(struct task_struct *tsk) |
1509 | { | 1498 | { |
1510 | return ((sigismember(&tsk->pending.signal, SIGKILL) || | 1499 | return sigismember(&tsk->pending.signal, SIGKILL) || |
1511 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) && | 1500 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); |
1512 | !unlikely(sigismember(&tsk->blocked, SIGKILL))); | ||
1513 | } | 1501 | } |
1514 | 1502 | ||
1515 | /* | 1503 | /* |
@@ -1525,8 +1513,6 @@ static int sigkill_pending(struct task_struct *tsk) | |||
1525 | */ | 1513 | */ |
1526 | static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | 1514 | static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) |
1527 | { | 1515 | { |
1528 | int killed = 0; | ||
1529 | |||
1530 | if (arch_ptrace_stop_needed(exit_code, info)) { | 1516 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1531 | /* | 1517 | /* |
1532 | * The arch code has something special to do before a | 1518 | * The arch code has something special to do before a |
@@ -1542,7 +1528,8 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1542 | spin_unlock_irq(&current->sighand->siglock); | 1528 | spin_unlock_irq(&current->sighand->siglock); |
1543 | arch_ptrace_stop(exit_code, info); | 1529 | arch_ptrace_stop(exit_code, info); |
1544 | spin_lock_irq(&current->sighand->siglock); | 1530 | spin_lock_irq(&current->sighand->siglock); |
1545 | killed = sigkill_pending(current); | 1531 | if (sigkill_pending(current)) |
1532 | return; | ||
1546 | } | 1533 | } |
1547 | 1534 | ||
1548 | /* | 1535 | /* |
@@ -1559,7 +1546,7 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info) | |||
1559 | __set_current_state(TASK_TRACED); | 1546 | __set_current_state(TASK_TRACED); |
1560 | spin_unlock_irq(&current->sighand->siglock); | 1547 | spin_unlock_irq(&current->sighand->siglock); |
1561 | read_lock(&tasklist_lock); | 1548 | read_lock(&tasklist_lock); |
1562 | if (!unlikely(killed) && may_ptrace_stop()) { | 1549 | if (may_ptrace_stop()) { |
1563 | do_notify_parent_cldstop(current, CLD_TRAPPED); | 1550 | do_notify_parent_cldstop(current, CLD_TRAPPED); |
1564 | read_unlock(&tasklist_lock); | 1551 | read_unlock(&tasklist_lock); |
1565 | schedule(); | 1552 | schedule(); |
@@ -1623,7 +1610,7 @@ finish_stop(int stop_count) | |||
1623 | * a group stop in progress and we are the last to stop, | 1610 | * a group stop in progress and we are the last to stop, |
1624 | * report to the parent. When ptraced, every thread reports itself. | 1611 | * report to the parent. When ptraced, every thread reports itself. |
1625 | */ | 1612 | */ |
1626 | if (stop_count == 0 || (current->ptrace & PT_PTRACED)) { | 1613 | if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) { |
1627 | read_lock(&tasklist_lock); | 1614 | read_lock(&tasklist_lock); |
1628 | do_notify_parent_cldstop(current, CLD_STOPPED); | 1615 | do_notify_parent_cldstop(current, CLD_STOPPED); |
1629 | read_unlock(&tasklist_lock); | 1616 | read_unlock(&tasklist_lock); |
@@ -1658,8 +1645,7 @@ static int do_signal_stop(int signr) | |||
1658 | } else { | 1645 | } else { |
1659 | struct task_struct *t; | 1646 | struct task_struct *t; |
1660 | 1647 | ||
1661 | if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE)) | 1648 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || |
1662 | != SIGNAL_STOP_DEQUEUED) || | ||
1663 | unlikely(signal_group_exit(sig))) | 1649 | unlikely(signal_group_exit(sig))) |
1664 | return 0; | 1650 | return 0; |
1665 | /* | 1651 | /* |
@@ -1760,6 +1746,9 @@ relock: | |||
1760 | signal->flags &= ~SIGNAL_CLD_MASK; | 1746 | signal->flags &= ~SIGNAL_CLD_MASK; |
1761 | spin_unlock_irq(&sighand->siglock); | 1747 | spin_unlock_irq(&sighand->siglock); |
1762 | 1748 | ||
1749 | if (unlikely(!tracehook_notify_jctl(1, why))) | ||
1750 | goto relock; | ||
1751 | |||
1763 | read_lock(&tasklist_lock); | 1752 | read_lock(&tasklist_lock); |
1764 | do_notify_parent_cldstop(current->group_leader, why); | 1753 | do_notify_parent_cldstop(current->group_leader, why); |
1765 | read_unlock(&tasklist_lock); | 1754 | read_unlock(&tasklist_lock); |
@@ -1773,17 +1762,33 @@ relock: | |||
1773 | do_signal_stop(0)) | 1762 | do_signal_stop(0)) |
1774 | goto relock; | 1763 | goto relock; |
1775 | 1764 | ||
1776 | signr = dequeue_signal(current, &current->blocked, info); | 1765 | /* |
1777 | if (!signr) | 1766 | * Tracing can induce an artificial signal and choose sigaction. |
1778 | break; /* will return 0 */ | 1767 | * The return value in @signr determines the default action, |
1768 | * but @info->si_signo is the signal number we will report. | ||
1769 | */ | ||
1770 | signr = tracehook_get_signal(current, regs, info, return_ka); | ||
1771 | if (unlikely(signr < 0)) | ||
1772 | goto relock; | ||
1773 | if (unlikely(signr != 0)) | ||
1774 | ka = return_ka; | ||
1775 | else { | ||
1776 | signr = dequeue_signal(current, &current->blocked, | ||
1777 | info); | ||
1779 | 1778 | ||
1780 | if (signr != SIGKILL) { | ||
1781 | signr = ptrace_signal(signr, info, regs, cookie); | ||
1782 | if (!signr) | 1779 | if (!signr) |
1783 | continue; | 1780 | break; /* will return 0 */ |
1781 | |||
1782 | if (signr != SIGKILL) { | ||
1783 | signr = ptrace_signal(signr, info, | ||
1784 | regs, cookie); | ||
1785 | if (!signr) | ||
1786 | continue; | ||
1787 | } | ||
1788 | |||
1789 | ka = &sighand->action[signr-1]; | ||
1784 | } | 1790 | } |
1785 | 1791 | ||
1786 | ka = &sighand->action[signr-1]; | ||
1787 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ | 1792 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
1788 | continue; | 1793 | continue; |
1789 | if (ka->sa.sa_handler != SIG_DFL) { | 1794 | if (ka->sa.sa_handler != SIG_DFL) { |
@@ -1831,7 +1836,7 @@ relock: | |||
1831 | spin_lock_irq(&sighand->siglock); | 1836 | spin_lock_irq(&sighand->siglock); |
1832 | } | 1837 | } |
1833 | 1838 | ||
1834 | if (likely(do_signal_stop(signr))) { | 1839 | if (likely(do_signal_stop(info->si_signo))) { |
1835 | /* It released the siglock. */ | 1840 | /* It released the siglock. */ |
1836 | goto relock; | 1841 | goto relock; |
1837 | } | 1842 | } |
@@ -1852,7 +1857,7 @@ relock: | |||
1852 | 1857 | ||
1853 | if (sig_kernel_coredump(signr)) { | 1858 | if (sig_kernel_coredump(signr)) { |
1854 | if (print_fatal_signals) | 1859 | if (print_fatal_signals) |
1855 | print_fatal_signal(regs, signr); | 1860 | print_fatal_signal(regs, info->si_signo); |
1856 | /* | 1861 | /* |
1857 | * If it was able to dump core, this kills all | 1862 | * If it was able to dump core, this kills all |
1858 | * other threads in the group and synchronizes with | 1863 | * other threads in the group and synchronizes with |
@@ -1861,13 +1866,13 @@ relock: | |||
1861 | * first and our do_group_exit call below will use | 1866 | * first and our do_group_exit call below will use |
1862 | * that value and ignore the one we pass it. | 1867 | * that value and ignore the one we pass it. |
1863 | */ | 1868 | */ |
1864 | do_coredump((long)signr, signr, regs); | 1869 | do_coredump(info->si_signo, info->si_signo, regs); |
1865 | } | 1870 | } |
1866 | 1871 | ||
1867 | /* | 1872 | /* |
1868 | * Death signals, no core dump. | 1873 | * Death signals, no core dump. |
1869 | */ | 1874 | */ |
1870 | do_group_exit(signr); | 1875 | do_group_exit(info->si_signo); |
1871 | /* NOTREACHED */ | 1876 | /* NOTREACHED */ |
1872 | } | 1877 | } |
1873 | spin_unlock_irq(&sighand->siglock); | 1878 | spin_unlock_irq(&sighand->siglock); |
@@ -1909,7 +1914,7 @@ void exit_signals(struct task_struct *tsk) | |||
1909 | out: | 1914 | out: |
1910 | spin_unlock_irq(&tsk->sighand->siglock); | 1915 | spin_unlock_irq(&tsk->sighand->siglock); |
1911 | 1916 | ||
1912 | if (unlikely(group_stop)) { | 1917 | if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) { |
1913 | read_lock(&tasklist_lock); | 1918 | read_lock(&tasklist_lock); |
1914 | do_notify_parent_cldstop(tsk, CLD_STOPPED); | 1919 | do_notify_parent_cldstop(tsk, CLD_STOPPED); |
1915 | read_unlock(&tasklist_lock); | 1920 | read_unlock(&tasklist_lock); |
@@ -1920,8 +1925,6 @@ EXPORT_SYMBOL(recalc_sigpending); | |||
1920 | EXPORT_SYMBOL_GPL(dequeue_signal); | 1925 | EXPORT_SYMBOL_GPL(dequeue_signal); |
1921 | EXPORT_SYMBOL(flush_signals); | 1926 | EXPORT_SYMBOL(flush_signals); |
1922 | EXPORT_SYMBOL(force_sig); | 1927 | EXPORT_SYMBOL(force_sig); |
1923 | EXPORT_SYMBOL(kill_proc); | ||
1924 | EXPORT_SYMBOL(ptrace_notify); | ||
1925 | EXPORT_SYMBOL(send_sig); | 1928 | EXPORT_SYMBOL(send_sig); |
1926 | EXPORT_SYMBOL(send_sig_info); | 1929 | EXPORT_SYMBOL(send_sig_info); |
1927 | EXPORT_SYMBOL(sigprocmask); | 1930 | EXPORT_SYMBOL(sigprocmask); |
@@ -2196,7 +2199,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese, | |||
2196 | } | 2199 | } |
2197 | 2200 | ||
2198 | asmlinkage long | 2201 | asmlinkage long |
2199 | sys_kill(int pid, int sig) | 2202 | sys_kill(pid_t pid, int sig) |
2200 | { | 2203 | { |
2201 | struct siginfo info; | 2204 | struct siginfo info; |
2202 | 2205 | ||
@@ -2209,7 +2212,7 @@ sys_kill(int pid, int sig) | |||
2209 | return kill_something_info(sig, &info, pid); | 2212 | return kill_something_info(sig, &info, pid); |
2210 | } | 2213 | } |
2211 | 2214 | ||
2212 | static int do_tkill(int tgid, int pid, int sig) | 2215 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
2213 | { | 2216 | { |
2214 | int error; | 2217 | int error; |
2215 | struct siginfo info; | 2218 | struct siginfo info; |
@@ -2255,7 +2258,7 @@ static int do_tkill(int tgid, int pid, int sig) | |||
2255 | * exists but it's not belonging to the target process anymore. This | 2258 | * exists but it's not belonging to the target process anymore. This |
2256 | * method solves the problem of threads exiting and PIDs getting reused. | 2259 | * method solves the problem of threads exiting and PIDs getting reused. |
2257 | */ | 2260 | */ |
2258 | asmlinkage long sys_tgkill(int tgid, int pid, int sig) | 2261 | asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig) |
2259 | { | 2262 | { |
2260 | /* This is only valid for single tasks */ | 2263 | /* This is only valid for single tasks */ |
2261 | if (pid <= 0 || tgid <= 0) | 2264 | if (pid <= 0 || tgid <= 0) |
@@ -2268,7 +2271,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig) | |||
2268 | * Send a signal to only one task, even if it's a CLONE_THREAD task. | 2271 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
2269 | */ | 2272 | */ |
2270 | asmlinkage long | 2273 | asmlinkage long |
2271 | sys_tkill(int pid, int sig) | 2274 | sys_tkill(pid_t pid, int sig) |
2272 | { | 2275 | { |
2273 | /* This is only valid for single tasks */ | 2276 | /* This is only valid for single tasks */ |
2274 | if (pid <= 0) | 2277 | if (pid <= 0) |
@@ -2278,7 +2281,7 @@ sys_tkill(int pid, int sig) | |||
2278 | } | 2281 | } |
2279 | 2282 | ||
2280 | asmlinkage long | 2283 | asmlinkage long |
2281 | sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) | 2284 | sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo) |
2282 | { | 2285 | { |
2283 | siginfo_t info; | 2286 | siginfo_t info; |
2284 | 2287 | ||
@@ -2325,7 +2328,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) | |||
2325 | * (for example, SIGCHLD), shall cause the pending signal to | 2328 | * (for example, SIGCHLD), shall cause the pending signal to |
2326 | * be discarded, whether or not it is blocked" | 2329 | * be discarded, whether or not it is blocked" |
2327 | */ | 2330 | */ |
2328 | if (__sig_ignored(t, sig)) { | 2331 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { |
2329 | sigemptyset(&mask); | 2332 | sigemptyset(&mask); |
2330 | sigaddset(&mask, sig); | 2333 | sigaddset(&mask, sig); |
2331 | rm_from_queue_full(&mask, &t->signal->shared_pending); | 2334 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
diff --git a/kernel/smp.c b/kernel/smp.c index 462c785ca1ee..f362a8553777 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -33,7 +33,7 @@ struct call_single_queue { | |||
33 | spinlock_t lock; | 33 | spinlock_t lock; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | void __cpuinit init_call_single_data(void) | 36 | static int __cpuinit init_call_single_data(void) |
37 | { | 37 | { |
38 | int i; | 38 | int i; |
39 | 39 | ||
@@ -43,7 +43,9 @@ void __cpuinit init_call_single_data(void) | |||
43 | spin_lock_init(&q->lock); | 43 | spin_lock_init(&q->lock); |
44 | INIT_LIST_HEAD(&q->list); | 44 | INIT_LIST_HEAD(&q->list); |
45 | } | 45 | } |
46 | return 0; | ||
46 | } | 47 | } |
48 | early_initcall(init_call_single_data); | ||
47 | 49 | ||
48 | static void csd_flag_wait(struct call_single_data *data) | 50 | static void csd_flag_wait(struct call_single_data *data) |
49 | { | 51 | { |
@@ -133,7 +135,8 @@ void generic_smp_call_function_interrupt(void) | |||
133 | */ | 135 | */ |
134 | smp_wmb(); | 136 | smp_wmb(); |
135 | data->csd.flags &= ~CSD_FLAG_WAIT; | 137 | data->csd.flags &= ~CSD_FLAG_WAIT; |
136 | } else | 138 | } |
139 | if (data->csd.flags & CSD_FLAG_ALLOC) | ||
137 | call_rcu(&data->rcu_head, rcu_free_call_data); | 140 | call_rcu(&data->rcu_head, rcu_free_call_data); |
138 | } | 141 | } |
139 | rcu_read_unlock(); | 142 | rcu_read_unlock(); |
@@ -207,8 +210,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
207 | { | 210 | { |
208 | struct call_single_data d; | 211 | struct call_single_data d; |
209 | unsigned long flags; | 212 | unsigned long flags; |
210 | /* prevent preemption and reschedule on another processor */ | 213 | /* prevent preemption and reschedule on another processor, |
214 | as well as CPU removal */ | ||
211 | int me = get_cpu(); | 215 | int me = get_cpu(); |
216 | int err = 0; | ||
212 | 217 | ||
213 | /* Can deadlock when called with interrupts disabled */ | 218 | /* Can deadlock when called with interrupts disabled */ |
214 | WARN_ON(irqs_disabled()); | 219 | WARN_ON(irqs_disabled()); |
@@ -217,7 +222,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
217 | local_irq_save(flags); | 222 | local_irq_save(flags); |
218 | func(info); | 223 | func(info); |
219 | local_irq_restore(flags); | 224 | local_irq_restore(flags); |
220 | } else { | 225 | } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) { |
221 | struct call_single_data *data = NULL; | 226 | struct call_single_data *data = NULL; |
222 | 227 | ||
223 | if (!wait) { | 228 | if (!wait) { |
@@ -233,10 +238,12 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
233 | data->func = func; | 238 | data->func = func; |
234 | data->info = info; | 239 | data->info = info; |
235 | generic_exec_single(cpu, data); | 240 | generic_exec_single(cpu, data); |
241 | } else { | ||
242 | err = -ENXIO; /* CPU not online */ | ||
236 | } | 243 | } |
237 | 244 | ||
238 | put_cpu(); | 245 | put_cpu(); |
239 | return 0; | 246 | return err; |
240 | } | 247 | } |
241 | EXPORT_SYMBOL(smp_call_function_single); | 248 | EXPORT_SYMBOL(smp_call_function_single); |
242 | 249 | ||
@@ -258,6 +265,42 @@ void __smp_call_function_single(int cpu, struct call_single_data *data) | |||
258 | generic_exec_single(cpu, data); | 265 | generic_exec_single(cpu, data); |
259 | } | 266 | } |
260 | 267 | ||
268 | /* Dummy function */ | ||
269 | static void quiesce_dummy(void *unused) | ||
270 | { | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Ensure stack based data used in call function mask is safe to free. | ||
275 | * | ||
276 | * This is needed by smp_call_function_mask when using on-stack data, because | ||
277 | * a single call function queue is shared by all CPUs, and any CPU may pick up | ||
278 | * the data item on the queue at any time before it is deleted. So we need to | ||
279 | * ensure that all CPUs have transitioned through a quiescent state after | ||
280 | * this call. | ||
281 | * | ||
282 | * This is a very slow function, implemented by sending synchronous IPIs to | ||
283 | * all possible CPUs. For this reason, we have to alloc data rather than use | ||
284 | * stack based data even in the case of synchronous calls. The stack based | ||
285 | * data is then just used for deadlock/oom fallback which will be very rare. | ||
286 | * | ||
287 | * If a faster scheme can be made, we could go back to preferring stack based | ||
288 | * data -- the data allocation/free is non-zero cost. | ||
289 | */ | ||
290 | static void smp_call_function_mask_quiesce_stack(cpumask_t mask) | ||
291 | { | ||
292 | struct call_single_data data; | ||
293 | int cpu; | ||
294 | |||
295 | data.func = quiesce_dummy; | ||
296 | data.info = NULL; | ||
297 | |||
298 | for_each_cpu_mask(cpu, mask) { | ||
299 | data.flags = CSD_FLAG_WAIT; | ||
300 | generic_exec_single(cpu, &data); | ||
301 | } | ||
302 | } | ||
303 | |||
261 | /** | 304 | /** |
262 | * smp_call_function_mask(): Run a function on a set of other CPUs. | 305 | * smp_call_function_mask(): Run a function on a set of other CPUs. |
263 | * @mask: The set of cpus to run on. | 306 | * @mask: The set of cpus to run on. |
@@ -283,6 +326,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
283 | cpumask_t allbutself; | 326 | cpumask_t allbutself; |
284 | unsigned long flags; | 327 | unsigned long flags; |
285 | int cpu, num_cpus; | 328 | int cpu, num_cpus; |
329 | int slowpath = 0; | ||
286 | 330 | ||
287 | /* Can deadlock when called with interrupts disabled */ | 331 | /* Can deadlock when called with interrupts disabled */ |
288 | WARN_ON(irqs_disabled()); | 332 | WARN_ON(irqs_disabled()); |
@@ -304,15 +348,16 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
304 | return smp_call_function_single(cpu, func, info, wait); | 348 | return smp_call_function_single(cpu, func, info, wait); |
305 | } | 349 | } |
306 | 350 | ||
307 | if (!wait) { | 351 | data = kmalloc(sizeof(*data), GFP_ATOMIC); |
308 | data = kmalloc(sizeof(*data), GFP_ATOMIC); | 352 | if (data) { |
309 | if (data) | 353 | data->csd.flags = CSD_FLAG_ALLOC; |
310 | data->csd.flags = CSD_FLAG_ALLOC; | 354 | if (wait) |
311 | } | 355 | data->csd.flags |= CSD_FLAG_WAIT; |
312 | if (!data) { | 356 | } else { |
313 | data = &d; | 357 | data = &d; |
314 | data->csd.flags = CSD_FLAG_WAIT; | 358 | data->csd.flags = CSD_FLAG_WAIT; |
315 | wait = 1; | 359 | wait = 1; |
360 | slowpath = 1; | ||
316 | } | 361 | } |
317 | 362 | ||
318 | spin_lock_init(&data->lock); | 363 | spin_lock_init(&data->lock); |
@@ -329,8 +374,11 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
329 | arch_send_call_function_ipi(mask); | 374 | arch_send_call_function_ipi(mask); |
330 | 375 | ||
331 | /* optionally wait for the CPUs to complete */ | 376 | /* optionally wait for the CPUs to complete */ |
332 | if (wait) | 377 | if (wait) { |
333 | csd_flag_wait(&data->csd); | 378 | csd_flag_wait(&data->csd); |
379 | if (unlikely(slowpath)) | ||
380 | smp_call_function_mask_quiesce_stack(mask); | ||
381 | } | ||
334 | 382 | ||
335 | return 0; | 383 | return 0; |
336 | } | 384 | } |
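Illustrative sketch, not from this patch: with the kernel/smp.c change above, smp_call_function_single() now returns -ENXIO instead of silently succeeding when the target CPU is offline, so callers can act on the return value. The function and variable names below (poke_cpu, poke_one_cpu, info) are made up for illustration; only the smp_call_function_single() signature comes from the diff.

    #include <linux/smp.h>
    #include <linux/kernel.h>

    /* Runs on the chosen CPU, in IPI context with interrupts disabled. */
    static void poke_cpu(void *info)
    {
            /* ... per-CPU work on "info" ... */
    }

    static int poke_one_cpu(int cpu, void *info)
    {
            /* wait=1: block until poke_cpu() has finished on the remote CPU */
            int err = smp_call_function_single(cpu, poke_cpu, info, 1);

            if (err)        /* now -ENXIO when "cpu" is not online */
                    printk(KERN_WARNING "cpu %d not online: %d\n", cpu, err);
            return err;
    }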
diff --git a/kernel/softirq.c b/kernel/softirq.c index 81e2fe0f983a..c506f266a6b9 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -286,7 +286,7 @@ void irq_exit(void) | |||
286 | #ifdef CONFIG_NO_HZ | 286 | #ifdef CONFIG_NO_HZ |
287 | /* Make sure that timer wheel updates are propagated */ | 287 | /* Make sure that timer wheel updates are propagated */ |
288 | if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) | 288 | if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched()) |
289 | tick_nohz_stop_sched_tick(); | 289 | tick_nohz_stop_sched_tick(0); |
290 | rcu_irq_exit(); | 290 | rcu_irq_exit(); |
291 | #endif | 291 | #endif |
292 | preempt_enable_no_resched(); | 292 | preempt_enable_no_resched(); |
@@ -630,7 +630,7 @@ static struct notifier_block __cpuinitdata cpu_nfb = { | |||
630 | .notifier_call = cpu_callback | 630 | .notifier_call = cpu_callback |
631 | }; | 631 | }; |
632 | 632 | ||
633 | __init int spawn_ksoftirqd(void) | 633 | static __init int spawn_ksoftirqd(void) |
634 | { | 634 | { |
635 | void *cpu = (void *)(long)smp_processor_id(); | 635 | void *cpu = (void *)(long)smp_processor_id(); |
636 | int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 636 | int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
@@ -640,6 +640,7 @@ __init int spawn_ksoftirqd(void) | |||
640 | register_cpu_notifier(&cpu_nfb); | 640 | register_cpu_notifier(&cpu_nfb); |
641 | return 0; | 641 | return 0; |
642 | } | 642 | } |
643 | early_initcall(spawn_ksoftirqd); | ||
643 | 644 | ||
644 | #ifdef CONFIG_SMP | 645 | #ifdef CONFIG_SMP |
645 | /* | 646 | /* |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index a272d78185eb..cb838ee93a82 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/freezer.h> | 14 | #include <linux/freezer.h> |
15 | #include <linux/kthread.h> | 15 | #include <linux/kthread.h> |
16 | #include <linux/lockdep.h> | ||
16 | #include <linux/notifier.h> | 17 | #include <linux/notifier.h> |
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
18 | 19 | ||
@@ -25,7 +26,22 @@ static DEFINE_PER_CPU(unsigned long, print_timestamp); | |||
25 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); | 26 | static DEFINE_PER_CPU(struct task_struct *, watchdog_task); |
26 | 27 | ||
27 | static int __read_mostly did_panic; | 28 | static int __read_mostly did_panic; |
28 | unsigned long __read_mostly softlockup_thresh = 60; | 29 | int __read_mostly softlockup_thresh = 60; |
30 | |||
31 | /* | ||
32 | * Should we panic (and reboot, if panic_timeout= is set) when a | ||
33 | * soft-lockup occurs: | ||
34 | */ | ||
35 | unsigned int __read_mostly softlockup_panic = | ||
36 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | ||
37 | |||
38 | static int __init softlockup_panic_setup(char *str) | ||
39 | { | ||
40 | softlockup_panic = simple_strtoul(str, NULL, 0); | ||
41 | |||
42 | return 1; | ||
43 | } | ||
44 | __setup("softlockup_panic=", softlockup_panic_setup); | ||
29 | 45 | ||
30 | static int | 46 | static int |
31 | softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) | 47 | softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) |
@@ -84,6 +100,14 @@ void softlockup_tick(void) | |||
84 | struct pt_regs *regs = get_irq_regs(); | 100 | struct pt_regs *regs = get_irq_regs(); |
85 | unsigned long now; | 101 | unsigned long now; |
86 | 102 | ||
103 | /* Is detection switched off? */ | ||
104 | if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) { | ||
105 | /* Be sure we don't false trigger if switched back on */ | ||
106 | if (touch_timestamp) | ||
107 | per_cpu(touch_timestamp, this_cpu) = 0; | ||
108 | return; | ||
109 | } | ||
110 | |||
87 | if (touch_timestamp == 0) { | 111 | if (touch_timestamp == 0) { |
88 | __touch_softlockup_watchdog(); | 112 | __touch_softlockup_watchdog(); |
89 | return; | 113 | return; |
@@ -92,11 +116,8 @@ void softlockup_tick(void) | |||
92 | print_timestamp = per_cpu(print_timestamp, this_cpu); | 116 | print_timestamp = per_cpu(print_timestamp, this_cpu); |
93 | 117 | ||
94 | /* report at most once a second */ | 118 | /* report at most once a second */ |
95 | if ((print_timestamp >= touch_timestamp && | 119 | if (print_timestamp == touch_timestamp || did_panic) |
96 | print_timestamp < (touch_timestamp + 1)) || | ||
97 | did_panic || !per_cpu(watchdog_task, this_cpu)) { | ||
98 | return; | 120 | return; |
99 | } | ||
100 | 121 | ||
101 | /* do not print during early bootup: */ | 122 | /* do not print during early bootup: */ |
102 | if (unlikely(system_state != SYSTEM_RUNNING)) { | 123 | if (unlikely(system_state != SYSTEM_RUNNING)) { |
@@ -106,8 +127,11 @@ void softlockup_tick(void) | |||
106 | 127 | ||
107 | now = get_timestamp(this_cpu); | 128 | now = get_timestamp(this_cpu); |
108 | 129 | ||
109 | /* Wake up the high-prio watchdog task every second: */ | 130 | /* |
110 | if (now > (touch_timestamp + 1)) | 131 | * Wake up the high-prio watchdog task twice per |
132 | * threshold timespan. | ||
133 | */ | ||
134 | if (now > touch_timestamp + softlockup_thresh/2) | ||
111 | wake_up_process(per_cpu(watchdog_task, this_cpu)); | 135 | wake_up_process(per_cpu(watchdog_task, this_cpu)); |
112 | 136 | ||
113 | /* Warn about unreasonable delays: */ | 137 | /* Warn about unreasonable delays: */ |
@@ -121,11 +145,15 @@ void softlockup_tick(void) | |||
121 | this_cpu, now - touch_timestamp, | 145 | this_cpu, now - touch_timestamp, |
122 | current->comm, task_pid_nr(current)); | 146 | current->comm, task_pid_nr(current)); |
123 | print_modules(); | 147 | print_modules(); |
148 | print_irqtrace_events(current); | ||
124 | if (regs) | 149 | if (regs) |
125 | show_regs(regs); | 150 | show_regs(regs); |
126 | else | 151 | else |
127 | dump_stack(); | 152 | dump_stack(); |
128 | spin_unlock(&print_lock); | 153 | spin_unlock(&print_lock); |
154 | |||
155 | if (softlockup_panic) | ||
156 | panic("softlockup: hung tasks"); | ||
129 | } | 157 | } |
130 | 158 | ||
131 | /* | 159 | /* |
@@ -178,6 +206,9 @@ static void check_hung_task(struct task_struct *t, unsigned long now) | |||
178 | 206 | ||
179 | t->last_switch_timestamp = now; | 207 | t->last_switch_timestamp = now; |
180 | touch_nmi_watchdog(); | 208 | touch_nmi_watchdog(); |
209 | |||
210 | if (softlockup_panic) | ||
211 | panic("softlockup: blocked tasks"); | ||
181 | } | 212 | } |
182 | 213 | ||
183 | /* | 214 | /* |
@@ -202,7 +233,8 @@ static void check_hung_uninterruptible_tasks(int this_cpu) | |||
202 | do_each_thread(g, t) { | 233 | do_each_thread(g, t) { |
203 | if (!--max_count) | 234 | if (!--max_count) |
204 | goto unlock; | 235 | goto unlock; |
205 | if (t->state & TASK_UNINTERRUPTIBLE) | 236 | /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ |
237 | if (t->state == TASK_UNINTERRUPTIBLE) | ||
206 | check_hung_task(t, now); | 238 | check_hung_task(t, now); |
207 | } while_each_thread(g, t); | 239 | } while_each_thread(g, t); |
208 | unlock: | 240 | unlock: |
@@ -307,14 +339,33 @@ static struct notifier_block __cpuinitdata cpu_nfb = { | |||
307 | .notifier_call = cpu_callback | 339 | .notifier_call = cpu_callback |
308 | }; | 340 | }; |
309 | 341 | ||
310 | __init void spawn_softlockup_task(void) | 342 | static int __initdata nosoftlockup; |
343 | |||
344 | static int __init nosoftlockup_setup(char *str) | ||
345 | { | ||
346 | nosoftlockup = 1; | ||
347 | return 1; | ||
348 | } | ||
349 | __setup("nosoftlockup", nosoftlockup_setup); | ||
350 | |||
351 | static int __init spawn_softlockup_task(void) | ||
311 | { | 352 | { |
312 | void *cpu = (void *)(long)smp_processor_id(); | 353 | void *cpu = (void *)(long)smp_processor_id(); |
313 | int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 354 | int err; |
355 | |||
356 | if (nosoftlockup) | ||
357 | return 0; | ||
314 | 358 | ||
315 | BUG_ON(err == NOTIFY_BAD); | 359 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
360 | if (err == NOTIFY_BAD) { | ||
361 | BUG(); | ||
362 | return 1; | ||
363 | } | ||
316 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | 364 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); |
317 | register_cpu_notifier(&cpu_nfb); | 365 | register_cpu_notifier(&cpu_nfb); |
318 | 366 | ||
319 | atomic_notifier_chain_register(&panic_notifier_list, &panic_block); | 367 | atomic_notifier_chain_register(&panic_notifier_list, &panic_block); |
368 | |||
369 | return 0; | ||
320 | } | 370 | } |
371 | early_initcall(spawn_softlockup_task); | ||
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index a1fb54c93cdd..29ab20749dd3 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
@@ -290,8 +290,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | |||
290 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | 290 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); |
291 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 291 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); |
292 | } | 292 | } |
293 | |||
294 | EXPORT_SYMBOL(_spin_lock_nested); | 293 | EXPORT_SYMBOL(_spin_lock_nested); |
294 | |||
295 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 295 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) |
296 | { | 296 | { |
297 | unsigned long flags; | 297 | unsigned long flags; |
@@ -311,9 +311,17 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas | |||
311 | #endif | 311 | #endif |
312 | return flags; | 312 | return flags; |
313 | } | 313 | } |
314 | |||
315 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); | 314 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); |
316 | 315 | ||
316 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, | ||
317 | struct lockdep_map *nest_lock) | ||
318 | { | ||
319 | preempt_disable(); | ||
320 | spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); | ||
321 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
322 | } | ||
323 | EXPORT_SYMBOL(_spin_lock_nest_lock); | ||
324 | |||
317 | #endif | 325 | #endif |
318 | 326 | ||
319 | void __lockfunc _spin_unlock(spinlock_t *lock) | 327 | void __lockfunc _spin_unlock(spinlock_t *lock) |
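Illustrative sketch, not from this patch: the new _spin_lock_nest_lock() lets lockdep accept taking many spinlocks of the same class when that nesting is serialized by one outer lock. This assumes the usual spin_lock_nest_lock() wrapper macro over the primitive added above; the structures and names (parent, child, lock_all_children) are invented for the example.

    #include <linux/spinlock.h>
    #include <linux/mutex.h>
    #include <linux/list.h>

    struct parent {
            struct mutex            outer;    /* serializes "lock all" */
            struct list_head        children;
    };

    struct child {
            spinlock_t              lock;
            struct list_head        node;
    };

    static void lock_all_children(struct parent *p)
    {
            struct child *c;

            mutex_lock(&p->outer);
            list_for_each_entry(c, &p->children, node)
                    /* tells lockdep the nesting is guarded by p->outer;
                     * each lock is released later with plain spin_unlock() */
                    spin_lock_nest_lock(&c->lock, &p->outer);
    }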
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index ba9b2054ecbd..af3c7cea258b 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation. | 1 | /* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation. |
2 | * GPL v2 and any later version. | 2 | * GPL v2 and any later version. |
3 | */ | 3 | */ |
4 | #include <linux/cpu.h> | 4 | #include <linux/cpu.h> |
@@ -13,203 +13,177 @@ | |||
13 | #include <asm/atomic.h> | 13 | #include <asm/atomic.h> |
14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
15 | 15 | ||
16 | /* Since we effect priority and affinity (both of which are visible | 16 | /* This controls the threads on each CPU. */ |
17 | * to, and settable by outside processes) we do indirection via a | ||
18 | * kthread. */ | ||
19 | |||
20 | /* Thread to stop each CPU in user context. */ | ||
21 | enum stopmachine_state { | 17 | enum stopmachine_state { |
22 | STOPMACHINE_WAIT, | 18 | /* Dummy starting state for thread. */ |
19 | STOPMACHINE_NONE, | ||
20 | /* Awaiting everyone to be scheduled. */ | ||
23 | STOPMACHINE_PREPARE, | 21 | STOPMACHINE_PREPARE, |
22 | /* Disable interrupts. */ | ||
24 | STOPMACHINE_DISABLE_IRQ, | 23 | STOPMACHINE_DISABLE_IRQ, |
24 | /* Run the function */ | ||
25 | STOPMACHINE_RUN, | ||
26 | /* Exit */ | ||
25 | STOPMACHINE_EXIT, | 27 | STOPMACHINE_EXIT, |
26 | }; | 28 | }; |
29 | static enum stopmachine_state state; | ||
27 | 30 | ||
28 | static enum stopmachine_state stopmachine_state; | 31 | struct stop_machine_data { |
29 | static unsigned int stopmachine_num_threads; | 32 | int (*fn)(void *); |
30 | static atomic_t stopmachine_thread_ack; | 33 | void *data; |
31 | 34 | int fnret; | |
32 | static int stopmachine(void *cpu) | 35 | }; |
33 | { | ||
34 | int irqs_disabled = 0; | ||
35 | int prepared = 0; | ||
36 | |||
37 | set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); | ||
38 | |||
39 | /* Ack: we are alive */ | ||
40 | smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ | ||
41 | atomic_inc(&stopmachine_thread_ack); | ||
42 | |||
43 | /* Simple state machine */ | ||
44 | while (stopmachine_state != STOPMACHINE_EXIT) { | ||
45 | if (stopmachine_state == STOPMACHINE_DISABLE_IRQ | ||
46 | && !irqs_disabled) { | ||
47 | local_irq_disable(); | ||
48 | hard_irq_disable(); | ||
49 | irqs_disabled = 1; | ||
50 | /* Ack: irqs disabled. */ | ||
51 | smp_mb(); /* Must read state first. */ | ||
52 | atomic_inc(&stopmachine_thread_ack); | ||
53 | } else if (stopmachine_state == STOPMACHINE_PREPARE | ||
54 | && !prepared) { | ||
55 | /* Everyone is in place, hold CPU. */ | ||
56 | preempt_disable(); | ||
57 | prepared = 1; | ||
58 | smp_mb(); /* Must read state first. */ | ||
59 | atomic_inc(&stopmachine_thread_ack); | ||
60 | } | ||
61 | /* Yield in first stage: migration threads need to | ||
62 | * help our sisters onto their CPUs. */ | ||
63 | if (!prepared && !irqs_disabled) | ||
64 | yield(); | ||
65 | cpu_relax(); | ||
66 | } | ||
67 | |||
68 | /* Ack: we are exiting. */ | ||
69 | smp_mb(); /* Must read state first. */ | ||
70 | atomic_inc(&stopmachine_thread_ack); | ||
71 | |||
72 | if (irqs_disabled) | ||
73 | local_irq_enable(); | ||
74 | if (prepared) | ||
75 | preempt_enable(); | ||
76 | 36 | ||
77 | return 0; | 37 | /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ |
78 | } | 38 | static unsigned int num_threads; |
39 | static atomic_t thread_ack; | ||
40 | static struct completion finished; | ||
41 | static DEFINE_MUTEX(lock); | ||
79 | 42 | ||
80 | /* Change the thread state */ | 43 | static void set_state(enum stopmachine_state newstate) |
81 | static void stopmachine_set_state(enum stopmachine_state state) | ||
82 | { | 44 | { |
83 | atomic_set(&stopmachine_thread_ack, 0); | 45 | /* Reset ack counter. */ |
46 | atomic_set(&thread_ack, num_threads); | ||
84 | smp_wmb(); | 47 | smp_wmb(); |
85 | stopmachine_state = state; | 48 | state = newstate; |
86 | while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) | ||
87 | cpu_relax(); | ||
88 | } | 49 | } |
89 | 50 | ||
90 | static int stop_machine(void) | 51 | /* Last one to ack a state moves to the next state. */ |
52 | static void ack_state(void) | ||
91 | { | 53 | { |
92 | int i, ret = 0; | 54 | if (atomic_dec_and_test(&thread_ack)) { |
93 | 55 | /* If we're the last one to ack the EXIT, we're finished. */ | |
94 | atomic_set(&stopmachine_thread_ack, 0); | 56 | if (state == STOPMACHINE_EXIT) |
95 | stopmachine_num_threads = 0; | 57 | complete(&finished); |
96 | stopmachine_state = STOPMACHINE_WAIT; | 58 | else |
97 | 59 | set_state(state + 1); | |
98 | for_each_online_cpu(i) { | ||
99 | if (i == raw_smp_processor_id()) | ||
100 | continue; | ||
101 | ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL); | ||
102 | if (ret < 0) | ||
103 | break; | ||
104 | stopmachine_num_threads++; | ||
105 | } | ||
106 | |||
107 | /* Wait for them all to come to life. */ | ||
108 | while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) { | ||
109 | yield(); | ||
110 | cpu_relax(); | ||
111 | } | 60 | } |
61 | } | ||
112 | 62 | ||
113 | /* If some failed, kill them all. */ | 63 | /* This is the actual thread which stops the CPU. It exits by itself rather |
114 | if (ret < 0) { | 64 | * than waiting for kthread_stop(), because it's easier for hotplug CPU. */ |
115 | stopmachine_set_state(STOPMACHINE_EXIT); | 65 | static int stop_cpu(struct stop_machine_data *smdata) |
116 | return ret; | 66 | { |
117 | } | 67 | enum stopmachine_state curstate = STOPMACHINE_NONE; |
118 | 68 | ||
119 | /* Now they are all started, make them hold the CPUs, ready. */ | 69 | /* Simple state machine */ |
120 | preempt_disable(); | 70 | do { |
121 | stopmachine_set_state(STOPMACHINE_PREPARE); | 71 | /* Chill out and ensure we re-read stopmachine_state. */ |
72 | cpu_relax(); | ||
73 | if (state != curstate) { | ||
74 | curstate = state; | ||
75 | switch (curstate) { | ||
76 | case STOPMACHINE_DISABLE_IRQ: | ||
77 | local_irq_disable(); | ||
78 | hard_irq_disable(); | ||
79 | break; | ||
80 | case STOPMACHINE_RUN: | ||
81 | /* |= allows error detection if functions on | ||
82 | * multiple CPUs. */ | ||
83 | smdata->fnret |= smdata->fn(smdata->data); | ||
84 | break; | ||
85 | default: | ||
86 | break; | ||
87 | } | ||
88 | ack_state(); | ||
89 | } | ||
90 | } while (curstate != STOPMACHINE_EXIT); | ||
122 | 91 | ||
123 | /* Make them disable irqs. */ | 92 | local_irq_enable(); |
124 | local_irq_disable(); | 93 | do_exit(0); |
125 | hard_irq_disable(); | 94 | } |
126 | stopmachine_set_state(STOPMACHINE_DISABLE_IRQ); | ||
127 | 95 | ||
96 | /* Callback for CPUs which aren't supposed to do anything. */ | ||
97 | static int chill(void *unused) | ||
98 | { | ||
128 | return 0; | 99 | return 0; |
129 | } | 100 | } |
130 | 101 | ||
131 | static void restart_machine(void) | 102 | int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) |
132 | { | 103 | { |
133 | stopmachine_set_state(STOPMACHINE_EXIT); | 104 | int i, err; |
134 | local_irq_enable(); | 105 | struct stop_machine_data active, idle; |
135 | preempt_enable_no_resched(); | 106 | struct task_struct **threads; |
136 | } | 107 | |
108 | active.fn = fn; | ||
109 | active.data = data; | ||
110 | active.fnret = 0; | ||
111 | idle.fn = chill; | ||
112 | idle.data = NULL; | ||
113 | |||
114 | /* This could be too big for stack on large machines. */ | ||
115 | threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL); | ||
116 | if (!threads) | ||
117 | return -ENOMEM; | ||
118 | |||
119 | /* Set up initial state. */ | ||
120 | mutex_lock(&lock); | ||
121 | init_completion(&finished); | ||
122 | num_threads = num_online_cpus(); | ||
123 | set_state(STOPMACHINE_PREPARE); | ||
137 | 124 | ||
138 | struct stop_machine_data { | 125 | for_each_online_cpu(i) { |
139 | int (*fn)(void *); | 126 | struct stop_machine_data *smdata = &idle; |
140 | void *data; | 127 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; |
141 | struct completion done; | ||
142 | }; | ||
143 | 128 | ||
144 | static int do_stop(void *_smdata) | 129 | if (!cpus) { |
145 | { | 130 | if (i == first_cpu(cpu_online_map)) |
146 | struct stop_machine_data *smdata = _smdata; | 131 | smdata = &active; |
147 | int ret; | 132 | } else { |
133 | if (cpu_isset(i, *cpus)) | ||
134 | smdata = &active; | ||
135 | } | ||
148 | 136 | ||
149 | ret = stop_machine(); | 137 | threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u", |
150 | if (ret == 0) { | 138 | i); |
151 | ret = smdata->fn(smdata->data); | 139 | if (IS_ERR(threads[i])) { |
152 | restart_machine(); | 140 | err = PTR_ERR(threads[i]); |
153 | } | 141 | threads[i] = NULL; |
142 | goto kill_threads; | ||
143 | } | ||
154 | 144 | ||
155 | /* We're done: you can kthread_stop us now */ | 145 | /* Place it onto correct cpu. */ |
156 | complete(&smdata->done); | 146 | kthread_bind(threads[i], i); |
157 | 147 | ||
158 | /* Wait for kthread_stop */ | 148 | /* Make it highest prio. */ |
159 | set_current_state(TASK_INTERRUPTIBLE); | 149 | if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, ¶m)) |
160 | while (!kthread_should_stop()) { | 150 | BUG(); |
161 | schedule(); | ||
162 | set_current_state(TASK_INTERRUPTIBLE); | ||
163 | } | 151 | } |
164 | __set_current_state(TASK_RUNNING); | ||
165 | return ret; | ||
166 | } | ||
167 | 152 | ||
168 | struct task_struct *__stop_machine_run(int (*fn)(void *), void *data, | 153 | /* We've created all the threads. Wake them all: hold this CPU so one |
169 | unsigned int cpu) | 154 | * doesn't hit this CPU until we're ready. */ |
170 | { | 155 | get_cpu(); |
171 | static DEFINE_MUTEX(stopmachine_mutex); | 156 | for_each_online_cpu(i) |
172 | struct stop_machine_data smdata; | 157 | wake_up_process(threads[i]); |
173 | struct task_struct *p; | ||
174 | 158 | ||
175 | smdata.fn = fn; | 159 | /* This will release the thread on our CPU. */ |
176 | smdata.data = data; | 160 | put_cpu(); |
177 | init_completion(&smdata.done); | 161 | wait_for_completion(&finished); |
162 | mutex_unlock(&lock); | ||
178 | 163 | ||
179 | mutex_lock(&stopmachine_mutex); | 164 | kfree(threads); |
180 | 165 | ||
181 | /* If they don't care which CPU fn runs on, bind to any online one. */ | 166 | return active.fnret; |
182 | if (cpu == NR_CPUS) | ||
183 | cpu = raw_smp_processor_id(); | ||
184 | 167 | ||
185 | p = kthread_create(do_stop, &smdata, "kstopmachine"); | 168 | kill_threads: |
186 | if (!IS_ERR(p)) { | 169 | for_each_online_cpu(i) |
187 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | 170 | if (threads[i]) |
171 | kthread_stop(threads[i]); | ||
172 | mutex_unlock(&lock); | ||
188 | 173 | ||
189 | /* One high-prio thread per cpu. We'll do this one. */ | 174 | kfree(threads); |
190 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); | 175 | return err; |
191 | kthread_bind(p, cpu); | ||
192 | wake_up_process(p); | ||
193 | wait_for_completion(&smdata.done); | ||
194 | } | ||
195 | mutex_unlock(&stopmachine_mutex); | ||
196 | return p; | ||
197 | } | 176 | } |
198 | 177 | ||
199 | int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) | 178 | int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus) |
200 | { | 179 | { |
201 | struct task_struct *p; | ||
202 | int ret; | 180 | int ret; |
203 | 181 | ||
204 | /* No CPUs can come up or down during this. */ | 182 | /* No CPUs can come up or down during this. */ |
205 | get_online_cpus(); | 183 | get_online_cpus(); |
206 | p = __stop_machine_run(fn, data, cpu); | 184 | ret = __stop_machine(fn, data, cpus); |
207 | if (!IS_ERR(p)) | ||
208 | ret = kthread_stop(p); | ||
209 | else | ||
210 | ret = PTR_ERR(p); | ||
211 | put_online_cpus(); | 185 | put_online_cpus(); |
212 | 186 | ||
213 | return ret; | 187 | return ret; |
214 | } | 188 | } |
215 | EXPORT_SYMBOL_GPL(stop_machine_run); | 189 | EXPORT_SYMBOL_GPL(stop_machine); |
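Illustrative sketch, not from this patch: the rework above replaces stop_machine_run()/__stop_machine_run() with stop_machine()/__stop_machine(), which take a cpumask instead of a single CPU number; a NULL mask runs the function on the first online CPU while the other CPUs spin with interrupts disabled. The caller names (apply_patch, do_apply) are hypothetical; the signature and NULL-mask behaviour come from the diff.

    #include <linux/stop_machine.h>

    /* Runs with every other online CPU stopped, interrupts disabled. */
    static int apply_patch(void *data)
    {
            /* ... modify code/data that must not be observed mid-update ... */
            return 0;
    }

    static int do_apply(void)
    {
            /* cpus == NULL: run apply_patch() on the first online CPU,
             * idle the rest; the return value is apply_patch()'s result
             * or a negative errno if the stop threads could not be set up */
            return stop_machine(apply_patch, NULL, NULL);
    }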
diff --git a/kernel/sys.c b/kernel/sys.c index 14e97282eb6c..038a7bc0901d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -169,9 +169,9 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) | |||
169 | pgrp = find_vpid(who); | 169 | pgrp = find_vpid(who); |
170 | else | 170 | else |
171 | pgrp = task_pgrp(current); | 171 | pgrp = task_pgrp(current); |
172 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 172 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
173 | error = set_one_prio(p, niceval, error); | 173 | error = set_one_prio(p, niceval, error); |
174 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 174 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
175 | break; | 175 | break; |
176 | case PRIO_USER: | 176 | case PRIO_USER: |
177 | user = current->user; | 177 | user = current->user; |
@@ -229,11 +229,11 @@ asmlinkage long sys_getpriority(int which, int who) | |||
229 | pgrp = find_vpid(who); | 229 | pgrp = find_vpid(who); |
230 | else | 230 | else |
231 | pgrp = task_pgrp(current); | 231 | pgrp = task_pgrp(current); |
232 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { | 232 | do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { |
233 | niceval = 20 - task_nice(p); | 233 | niceval = 20 - task_nice(p); |
234 | if (niceval > retval) | 234 | if (niceval > retval) |
235 | retval = niceval; | 235 | retval = niceval; |
236 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); | 236 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
237 | break; | 237 | break; |
238 | case PRIO_USER: | 238 | case PRIO_USER: |
239 | user = current->user; | 239 | user = current->user; |
@@ -274,7 +274,7 @@ void emergency_restart(void) | |||
274 | } | 274 | } |
275 | EXPORT_SYMBOL_GPL(emergency_restart); | 275 | EXPORT_SYMBOL_GPL(emergency_restart); |
276 | 276 | ||
277 | static void kernel_restart_prepare(char *cmd) | 277 | void kernel_restart_prepare(char *cmd) |
278 | { | 278 | { |
279 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); | 279 | blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); |
280 | system_state = SYSTEM_RESTART; | 280 | system_state = SYSTEM_RESTART; |
@@ -301,26 +301,6 @@ void kernel_restart(char *cmd) | |||
301 | } | 301 | } |
302 | EXPORT_SYMBOL_GPL(kernel_restart); | 302 | EXPORT_SYMBOL_GPL(kernel_restart); |
303 | 303 | ||
304 | /** | ||
305 | * kernel_kexec - reboot the system | ||
306 | * | ||
307 | * Move into place and start executing a preloaded standalone | ||
308 | * executable. If nothing was preloaded return an error. | ||
309 | */ | ||
310 | static void kernel_kexec(void) | ||
311 | { | ||
312 | #ifdef CONFIG_KEXEC | ||
313 | struct kimage *image; | ||
314 | image = xchg(&kexec_image, NULL); | ||
315 | if (!image) | ||
316 | return; | ||
317 | kernel_restart_prepare(NULL); | ||
318 | printk(KERN_EMERG "Starting new kernel\n"); | ||
319 | machine_shutdown(); | ||
320 | machine_kexec(image); | ||
321 | #endif | ||
322 | } | ||
323 | |||
324 | static void kernel_shutdown_prepare(enum system_states state) | 304 | static void kernel_shutdown_prepare(enum system_states state) |
325 | { | 305 | { |
326 | blocking_notifier_call_chain(&reboot_notifier_list, | 306 | blocking_notifier_call_chain(&reboot_notifier_list, |
@@ -425,10 +405,15 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user | |||
425 | kernel_restart(buffer); | 405 | kernel_restart(buffer); |
426 | break; | 406 | break; |
427 | 407 | ||
408 | #ifdef CONFIG_KEXEC | ||
428 | case LINUX_REBOOT_CMD_KEXEC: | 409 | case LINUX_REBOOT_CMD_KEXEC: |
429 | kernel_kexec(); | 410 | { |
430 | unlock_kernel(); | 411 | int ret; |
431 | return -EINVAL; | 412 | ret = kernel_kexec(); |
413 | unlock_kernel(); | ||
414 | return ret; | ||
415 | } | ||
416 | #endif | ||
432 | 417 | ||
433 | #ifdef CONFIG_HIBERNATION | 418 | #ifdef CONFIG_HIBERNATION |
434 | case LINUX_REBOOT_CMD_SW_SUSPEND: | 419 | case LINUX_REBOOT_CMD_SW_SUSPEND: |
@@ -1343,8 +1328,6 @@ EXPORT_SYMBOL(in_egroup_p); | |||
1343 | 1328 | ||
1344 | DECLARE_RWSEM(uts_sem); | 1329 | DECLARE_RWSEM(uts_sem); |
1345 | 1330 | ||
1346 | EXPORT_SYMBOL(uts_sem); | ||
1347 | |||
1348 | asmlinkage long sys_newuname(struct new_utsname __user * name) | 1331 | asmlinkage long sys_newuname(struct new_utsname __user * name) |
1349 | { | 1332 | { |
1350 | int errno = 0; | 1333 | int errno = 0; |
@@ -1795,7 +1778,7 @@ int orderly_poweroff(bool force) | |||
1795 | goto out; | 1778 | goto out; |
1796 | } | 1779 | } |
1797 | 1780 | ||
1798 | info = call_usermodehelper_setup(argv[0], argv, envp); | 1781 | info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC); |
1799 | if (info == NULL) { | 1782 | if (info == NULL) { |
1800 | argv_free(argv); | 1783 | argv_free(argv); |
1801 | goto out; | 1784 | goto out; |
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 5b9b467de070..08d6e1bb99ac 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
@@ -31,6 +31,7 @@ cond_syscall(sys_socketpair); | |||
31 | cond_syscall(sys_bind); | 31 | cond_syscall(sys_bind); |
32 | cond_syscall(sys_listen); | 32 | cond_syscall(sys_listen); |
33 | cond_syscall(sys_accept); | 33 | cond_syscall(sys_accept); |
34 | cond_syscall(sys_paccept); | ||
34 | cond_syscall(sys_connect); | 35 | cond_syscall(sys_connect); |
35 | cond_syscall(sys_getsockname); | 36 | cond_syscall(sys_getsockname); |
36 | cond_syscall(sys_getpeername); | 37 | cond_syscall(sys_getpeername); |
@@ -56,9 +57,11 @@ cond_syscall(compat_sys_set_robust_list); | |||
56 | cond_syscall(sys_get_robust_list); | 57 | cond_syscall(sys_get_robust_list); |
57 | cond_syscall(compat_sys_get_robust_list); | 58 | cond_syscall(compat_sys_get_robust_list); |
58 | cond_syscall(sys_epoll_create); | 59 | cond_syscall(sys_epoll_create); |
60 | cond_syscall(sys_epoll_create1); | ||
59 | cond_syscall(sys_epoll_ctl); | 61 | cond_syscall(sys_epoll_ctl); |
60 | cond_syscall(sys_epoll_wait); | 62 | cond_syscall(sys_epoll_wait); |
61 | cond_syscall(sys_epoll_pwait); | 63 | cond_syscall(sys_epoll_pwait); |
64 | cond_syscall(compat_sys_epoll_pwait); | ||
62 | cond_syscall(sys_semget); | 65 | cond_syscall(sys_semget); |
63 | cond_syscall(sys_semop); | 66 | cond_syscall(sys_semop); |
64 | cond_syscall(sys_semtimedop); | 67 | cond_syscall(sys_semtimedop); |
@@ -94,6 +97,7 @@ cond_syscall(sys_keyctl); | |||
94 | cond_syscall(compat_sys_keyctl); | 97 | cond_syscall(compat_sys_keyctl); |
95 | cond_syscall(compat_sys_socketcall); | 98 | cond_syscall(compat_sys_socketcall); |
96 | cond_syscall(sys_inotify_init); | 99 | cond_syscall(sys_inotify_init); |
100 | cond_syscall(sys_inotify_init1); | ||
97 | cond_syscall(sys_inotify_add_watch); | 101 | cond_syscall(sys_inotify_add_watch); |
98 | cond_syscall(sys_inotify_rm_watch); | 102 | cond_syscall(sys_inotify_rm_watch); |
99 | cond_syscall(sys_migrate_pages); | 103 | cond_syscall(sys_migrate_pages); |
@@ -154,10 +158,13 @@ cond_syscall(sys_ioprio_get); | |||
154 | 158 | ||
155 | /* New file descriptors */ | 159 | /* New file descriptors */ |
156 | cond_syscall(sys_signalfd); | 160 | cond_syscall(sys_signalfd); |
161 | cond_syscall(sys_signalfd4); | ||
157 | cond_syscall(compat_sys_signalfd); | 162 | cond_syscall(compat_sys_signalfd); |
163 | cond_syscall(compat_sys_signalfd4); | ||
158 | cond_syscall(sys_timerfd_create); | 164 | cond_syscall(sys_timerfd_create); |
159 | cond_syscall(sys_timerfd_settime); | 165 | cond_syscall(sys_timerfd_settime); |
160 | cond_syscall(sys_timerfd_gettime); | 166 | cond_syscall(sys_timerfd_gettime); |
161 | cond_syscall(compat_sys_timerfd_settime); | 167 | cond_syscall(compat_sys_timerfd_settime); |
162 | cond_syscall(compat_sys_timerfd_gettime); | 168 | cond_syscall(compat_sys_timerfd_gettime); |
163 | cond_syscall(sys_eventfd); | 169 | cond_syscall(sys_eventfd); |
170 | cond_syscall(sys_eventfd2); | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6b16e16428d8..50ec0886fa3d 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/limits.h> | 43 | #include <linux/limits.h> |
44 | #include <linux/dcache.h> | 44 | #include <linux/dcache.h> |
45 | #include <linux/syscalls.h> | 45 | #include <linux/syscalls.h> |
46 | #include <linux/vmstat.h> | ||
46 | #include <linux/nfs_fs.h> | 47 | #include <linux/nfs_fs.h> |
47 | #include <linux/acpi.h> | 48 | #include <linux/acpi.h> |
48 | #include <linux/reboot.h> | 49 | #include <linux/reboot.h> |
@@ -80,7 +81,6 @@ extern int sysctl_drop_caches; | |||
80 | extern int percpu_pagelist_fraction; | 81 | extern int percpu_pagelist_fraction; |
81 | extern int compat_log; | 82 | extern int compat_log; |
82 | extern int maps_protect; | 83 | extern int maps_protect; |
83 | extern int sysctl_stat_interval; | ||
84 | extern int latencytop_enabled; | 84 | extern int latencytop_enabled; |
85 | extern int sysctl_nr_open_min, sysctl_nr_open_max; | 85 | extern int sysctl_nr_open_min, sysctl_nr_open_max; |
86 | #ifdef CONFIG_RCU_TORTURE_TEST | 86 | #ifdef CONFIG_RCU_TORTURE_TEST |
@@ -88,12 +88,13 @@ extern int rcutorture_runnable; | |||
88 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 88 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
89 | 89 | ||
90 | /* Constants used for minimum and maximum */ | 90 | /* Constants used for minimum and maximum */ |
91 | #if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM) | 91 | #if defined(CONFIG_HIGHMEM) || defined(CONFIG_DETECT_SOFTLOCKUP) |
92 | static int one = 1; | 92 | static int one = 1; |
93 | #endif | 93 | #endif |
94 | 94 | ||
95 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 95 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
96 | static int sixty = 60; | 96 | static int sixty = 60; |
97 | static int neg_one = -1; | ||
97 | #endif | 98 | #endif |
98 | 99 | ||
99 | #ifdef CONFIG_MMU | 100 | #ifdef CONFIG_MMU |
@@ -110,7 +111,7 @@ static int min_percpu_pagelist_fract = 8; | |||
110 | 111 | ||
111 | static int ngroups_max = NGROUPS_MAX; | 112 | static int ngroups_max = NGROUPS_MAX; |
112 | 113 | ||
113 | #ifdef CONFIG_KMOD | 114 | #ifdef CONFIG_MODULES |
114 | extern char modprobe_path[]; | 115 | extern char modprobe_path[]; |
115 | #endif | 116 | #endif |
116 | #ifdef CONFIG_CHR_DEV_SG | 117 | #ifdef CONFIG_CHR_DEV_SG |
@@ -158,13 +159,15 @@ static int proc_dointvec_taint(struct ctl_table *table, int write, struct file * | |||
158 | static struct ctl_table root_table[]; | 159 | static struct ctl_table root_table[]; |
159 | static struct ctl_table_root sysctl_table_root; | 160 | static struct ctl_table_root sysctl_table_root; |
160 | static struct ctl_table_header root_table_header = { | 161 | static struct ctl_table_header root_table_header = { |
162 | .count = 1, | ||
161 | .ctl_table = root_table, | 163 | .ctl_table = root_table, |
162 | .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.header_list), | 164 | .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list), |
163 | .root = &sysctl_table_root, | 165 | .root = &sysctl_table_root, |
166 | .set = &sysctl_table_root.default_set, | ||
164 | }; | 167 | }; |
165 | static struct ctl_table_root sysctl_table_root = { | 168 | static struct ctl_table_root sysctl_table_root = { |
166 | .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), | 169 | .root_list = LIST_HEAD_INIT(sysctl_table_root.root_list), |
167 | .header_list = LIST_HEAD_INIT(root_table_header.ctl_entry), | 170 | .default_set.list = LIST_HEAD_INIT(root_table_header.ctl_entry), |
168 | }; | 171 | }; |
169 | 172 | ||
170 | static struct ctl_table kern_table[]; | 173 | static struct ctl_table kern_table[]; |
@@ -475,7 +478,7 @@ static struct ctl_table kern_table[] = { | |||
475 | .proc_handler = &ftrace_enable_sysctl, | 478 | .proc_handler = &ftrace_enable_sysctl, |
476 | }, | 479 | }, |
477 | #endif | 480 | #endif |
478 | #ifdef CONFIG_KMOD | 481 | #ifdef CONFIG_MODULES |
479 | { | 482 | { |
480 | .ctl_name = KERN_MODPROBE, | 483 | .ctl_name = KERN_MODPROBE, |
481 | .procname = "modprobe", | 484 | .procname = "modprobe", |
@@ -623,7 +626,7 @@ static struct ctl_table kern_table[] = { | |||
623 | { | 626 | { |
624 | .ctl_name = KERN_PRINTK_RATELIMIT, | 627 | .ctl_name = KERN_PRINTK_RATELIMIT, |
625 | .procname = "printk_ratelimit", | 628 | .procname = "printk_ratelimit", |
626 | .data = &printk_ratelimit_jiffies, | 629 | .data = &printk_ratelimit_state.interval, |
627 | .maxlen = sizeof(int), | 630 | .maxlen = sizeof(int), |
628 | .mode = 0644, | 631 | .mode = 0644, |
629 | .proc_handler = &proc_dointvec_jiffies, | 632 | .proc_handler = &proc_dointvec_jiffies, |
@@ -632,7 +635,7 @@ static struct ctl_table kern_table[] = { | |||
632 | { | 635 | { |
633 | .ctl_name = KERN_PRINTK_RATELIMIT_BURST, | 636 | .ctl_name = KERN_PRINTK_RATELIMIT_BURST, |
634 | .procname = "printk_ratelimit_burst", | 637 | .procname = "printk_ratelimit_burst", |
635 | .data = &printk_ratelimit_burst, | 638 | .data = &printk_ratelimit_state.burst, |
636 | .maxlen = sizeof(int), | 639 | .maxlen = sizeof(int), |
637 | .mode = 0644, | 640 | .mode = 0644, |
638 | .proc_handler = &proc_dointvec, | 641 | .proc_handler = &proc_dointvec, |
@@ -739,13 +742,24 @@ static struct ctl_table kern_table[] = { | |||
739 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 742 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
740 | { | 743 | { |
741 | .ctl_name = CTL_UNNUMBERED, | 744 | .ctl_name = CTL_UNNUMBERED, |
745 | .procname = "softlockup_panic", | ||
746 | .data = &softlockup_panic, | ||
747 | .maxlen = sizeof(int), | ||
748 | .mode = 0644, | ||
749 | .proc_handler = &proc_dointvec_minmax, | ||
750 | .strategy = &sysctl_intvec, | ||
751 | .extra1 = &zero, | ||
752 | .extra2 = &one, | ||
753 | }, | ||
754 | { | ||
755 | .ctl_name = CTL_UNNUMBERED, | ||
742 | .procname = "softlockup_thresh", | 756 | .procname = "softlockup_thresh", |
743 | .data = &softlockup_thresh, | 757 | .data = &softlockup_thresh, |
744 | .maxlen = sizeof(unsigned long), | 758 | .maxlen = sizeof(int), |
745 | .mode = 0644, | 759 | .mode = 0644, |
746 | .proc_handler = &proc_doulongvec_minmax, | 760 | .proc_handler = &proc_dointvec_minmax, |
747 | .strategy = &sysctl_intvec, | 761 | .strategy = &sysctl_intvec, |
748 | .extra1 = &one, | 762 | .extra1 = &neg_one, |
749 | .extra2 = &sixty, | 763 | .extra2 = &sixty, |
750 | }, | 764 | }, |
751 | { | 765 | { |
@@ -947,7 +961,7 @@ static struct ctl_table vm_table[] = { | |||
947 | #ifdef CONFIG_HUGETLB_PAGE | 961 | #ifdef CONFIG_HUGETLB_PAGE |
948 | { | 962 | { |
949 | .procname = "nr_hugepages", | 963 | .procname = "nr_hugepages", |
950 | .data = &max_huge_pages, | 964 | .data = NULL, |
951 | .maxlen = sizeof(unsigned long), | 965 | .maxlen = sizeof(unsigned long), |
952 | .mode = 0644, | 966 | .mode = 0644, |
953 | .proc_handler = &hugetlb_sysctl_handler, | 967 | .proc_handler = &hugetlb_sysctl_handler, |
@@ -973,10 +987,12 @@ static struct ctl_table vm_table[] = { | |||
973 | { | 987 | { |
974 | .ctl_name = CTL_UNNUMBERED, | 988 | .ctl_name = CTL_UNNUMBERED, |
975 | .procname = "nr_overcommit_hugepages", | 989 | .procname = "nr_overcommit_hugepages", |
976 | .data = &sysctl_overcommit_huge_pages, | 990 | .data = NULL, |
977 | .maxlen = sizeof(sysctl_overcommit_huge_pages), | 991 | .maxlen = sizeof(unsigned long), |
978 | .mode = 0644, | 992 | .mode = 0644, |
979 | .proc_handler = &hugetlb_overcommit_handler, | 993 | .proc_handler = &hugetlb_overcommit_handler, |
994 | .extra1 = (void *)&hugetlb_zero, | ||
995 | .extra2 = (void *)&hugetlb_infinity, | ||
980 | }, | 996 | }, |
981 | #endif | 997 | #endif |
982 | { | 998 | { |
@@ -1372,6 +1388,9 @@ static void start_unregistering(struct ctl_table_header *p) | |||
1372 | spin_unlock(&sysctl_lock); | 1388 | spin_unlock(&sysctl_lock); |
1373 | wait_for_completion(&wait); | 1389 | wait_for_completion(&wait); |
1374 | spin_lock(&sysctl_lock); | 1390 | spin_lock(&sysctl_lock); |
1391 | } else { | ||
1392 | /* anything non-NULL; we'll never dereference it */ | ||
1393 | p->unregistering = ERR_PTR(-EINVAL); | ||
1375 | } | 1394 | } |
1376 | /* | 1395 | /* |
1377 | * do not remove from the list until nobody holds it; walking the | 1396 | * do not remove from the list until nobody holds it; walking the |
@@ -1380,6 +1399,32 @@ static void start_unregistering(struct ctl_table_header *p) | |||
1380 | list_del_init(&p->ctl_entry); | 1399 | list_del_init(&p->ctl_entry); |
1381 | } | 1400 | } |
1382 | 1401 | ||
1402 | void sysctl_head_get(struct ctl_table_header *head) | ||
1403 | { | ||
1404 | spin_lock(&sysctl_lock); | ||
1405 | head->count++; | ||
1406 | spin_unlock(&sysctl_lock); | ||
1407 | } | ||
1408 | |||
1409 | void sysctl_head_put(struct ctl_table_header *head) | ||
1410 | { | ||
1411 | spin_lock(&sysctl_lock); | ||
1412 | if (!--head->count) | ||
1413 | kfree(head); | ||
1414 | spin_unlock(&sysctl_lock); | ||
1415 | } | ||
1416 | |||
1417 | struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) | ||
1418 | { | ||
1419 | if (!head) | ||
1420 | BUG(); | ||
1421 | spin_lock(&sysctl_lock); | ||
1422 | if (!use_table(head)) | ||
1423 | head = ERR_PTR(-ENOENT); | ||
1424 | spin_unlock(&sysctl_lock); | ||
1425 | return head; | ||
1426 | } | ||
1427 | |||
1383 | void sysctl_head_finish(struct ctl_table_header *head) | 1428 | void sysctl_head_finish(struct ctl_table_header *head) |
1384 | { | 1429 | { |
1385 | if (!head) | 1430 | if (!head) |
@@ -1389,14 +1434,20 @@ void sysctl_head_finish(struct ctl_table_header *head) | |||
1389 | spin_unlock(&sysctl_lock); | 1434 | spin_unlock(&sysctl_lock); |
1390 | } | 1435 | } |
1391 | 1436 | ||
1437 | static struct ctl_table_set * | ||
1438 | lookup_header_set(struct ctl_table_root *root, struct nsproxy *namespaces) | ||
1439 | { | ||
1440 | struct ctl_table_set *set = &root->default_set; | ||
1441 | if (root->lookup) | ||
1442 | set = root->lookup(root, namespaces); | ||
1443 | return set; | ||
1444 | } | ||
1445 | |||
1392 | static struct list_head * | 1446 | static struct list_head * |
1393 | lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) | 1447 | lookup_header_list(struct ctl_table_root *root, struct nsproxy *namespaces) |
1394 | { | 1448 | { |
1395 | struct list_head *header_list; | 1449 | struct ctl_table_set *set = lookup_header_set(root, namespaces); |
1396 | header_list = &root->header_list; | 1450 | return &set->list; |
1397 | if (root->lookup) | ||
1398 | header_list = root->lookup(root, namespaces); | ||
1399 | return header_list; | ||
1400 | } | 1451 | } |
1401 | 1452 | ||
1402 | struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, | 1453 | struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, |
@@ -1466,9 +1517,9 @@ static int do_sysctl_strategy(struct ctl_table_root *root, | |||
1466 | int op = 0, rc; | 1517 | int op = 0, rc; |
1467 | 1518 | ||
1468 | if (oldval) | 1519 | if (oldval) |
1469 | op |= 004; | 1520 | op |= MAY_READ; |
1470 | if (newval) | 1521 | if (newval) |
1471 | op |= 002; | 1522 | op |= MAY_WRITE; |
1472 | if (sysctl_perm(root, table, op)) | 1523 | if (sysctl_perm(root, table, op)) |
1473 | return -EPERM; | 1524 | return -EPERM; |
1474 | 1525 | ||
@@ -1510,7 +1561,7 @@ repeat: | |||
1510 | if (n == table->ctl_name) { | 1561 | if (n == table->ctl_name) { |
1511 | int error; | 1562 | int error; |
1512 | if (table->child) { | 1563 | if (table->child) { |
1513 | if (sysctl_perm(root, table, 001)) | 1564 | if (sysctl_perm(root, table, MAY_EXEC)) |
1514 | return -EPERM; | 1565 | return -EPERM; |
1515 | name++; | 1566 | name++; |
1516 | nlen--; | 1567 | nlen--; |
@@ -1585,7 +1636,7 @@ static int test_perm(int mode, int op) | |||
1585 | mode >>= 6; | 1636 | mode >>= 6; |
1586 | else if (in_egroup_p(0)) | 1637 | else if (in_egroup_p(0)) |
1587 | mode >>= 3; | 1638 | mode >>= 3; |
1588 | if ((mode & op & 0007) == op) | 1639 | if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0) |
1589 | return 0; | 1640 | return 0; |
1590 | return -EACCES; | 1641 | return -EACCES; |
1591 | } | 1642 | } |
@@ -1595,7 +1646,7 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op) | |||
1595 | int error; | 1646 | int error; |
1596 | int mode; | 1647 | int mode; |
1597 | 1648 | ||
1598 | error = security_sysctl(table, op); | 1649 | error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC)); |
1599 | if (error) | 1650 | if (error) |
1600 | return error; | 1651 | return error; |
1601 | 1652 | ||
@@ -1630,6 +1681,54 @@ static __init int sysctl_init(void) | |||
1630 | 1681 | ||
1631 | core_initcall(sysctl_init); | 1682 | core_initcall(sysctl_init); |
1632 | 1683 | ||
1684 | static struct ctl_table *is_branch_in(struct ctl_table *branch, | ||
1685 | struct ctl_table *table) | ||
1686 | { | ||
1687 | struct ctl_table *p; | ||
1688 | const char *s = branch->procname; | ||
1689 | |||
1690 | /* branch should have named subdirectory as its first element */ | ||
1691 | if (!s || !branch->child) | ||
1692 | return NULL; | ||
1693 | |||
1694 | /* ... and nothing else */ | ||
1695 | if (branch[1].procname || branch[1].ctl_name) | ||
1696 | return NULL; | ||
1697 | |||
1698 | /* table should contain subdirectory with the same name */ | ||
1699 | for (p = table; p->procname || p->ctl_name; p++) { | ||
1700 | if (!p->child) | ||
1701 | continue; | ||
1702 | if (p->procname && strcmp(p->procname, s) == 0) | ||
1703 | return p; | ||
1704 | } | ||
1705 | return NULL; | ||
1706 | } | ||
1707 | |||
1708 | /* see if attaching q to p would be an improvement */ | ||
1709 | static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q) | ||
1710 | { | ||
1711 | struct ctl_table *to = p->ctl_table, *by = q->ctl_table; | ||
1712 | struct ctl_table *next; | ||
1713 | int is_better = 0; | ||
1714 | int not_in_parent = !p->attached_by; | ||
1715 | |||
1716 | while ((next = is_branch_in(by, to)) != NULL) { | ||
1717 | if (by == q->attached_by) | ||
1718 | is_better = 1; | ||
1719 | if (to == p->attached_by) | ||
1720 | not_in_parent = 1; | ||
1721 | by = by->child; | ||
1722 | to = next->child; | ||
1723 | } | ||
1724 | |||
1725 | if (is_better && not_in_parent) { | ||
1726 | q->attached_by = by; | ||
1727 | q->attached_to = to; | ||
1728 | q->parent = p; | ||
1729 | } | ||
1730 | } | ||
1731 | |||
1633 | /** | 1732 | /** |
1634 | * __register_sysctl_paths - register a sysctl hierarchy | 1733 | * __register_sysctl_paths - register a sysctl hierarchy |
1635 | * @root: List of sysctl headers to register on | 1734 | * @root: List of sysctl headers to register on |
@@ -1706,10 +1805,10 @@ struct ctl_table_header *__register_sysctl_paths( | |||
1706 | struct nsproxy *namespaces, | 1805 | struct nsproxy *namespaces, |
1707 | const struct ctl_path *path, struct ctl_table *table) | 1806 | const struct ctl_path *path, struct ctl_table *table) |
1708 | { | 1807 | { |
1709 | struct list_head *header_list; | ||
1710 | struct ctl_table_header *header; | 1808 | struct ctl_table_header *header; |
1711 | struct ctl_table *new, **prevp; | 1809 | struct ctl_table *new, **prevp; |
1712 | unsigned int n, npath; | 1810 | unsigned int n, npath; |
1811 | struct ctl_table_set *set; | ||
1713 | 1812 | ||
1714 | /* Count the path components */ | 1813 | /* Count the path components */ |
1715 | for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath) | 1814 | for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath) |
@@ -1751,6 +1850,7 @@ struct ctl_table_header *__register_sysctl_paths( | |||
1751 | header->unregistering = NULL; | 1850 | header->unregistering = NULL; |
1752 | header->root = root; | 1851 | header->root = root; |
1753 | sysctl_set_parent(NULL, header->ctl_table); | 1852 | sysctl_set_parent(NULL, header->ctl_table); |
1853 | header->count = 1; | ||
1754 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK | 1854 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK |
1755 | if (sysctl_check_table(namespaces, header->ctl_table)) { | 1855 | if (sysctl_check_table(namespaces, header->ctl_table)) { |
1756 | kfree(header); | 1856 | kfree(header); |
@@ -1758,8 +1858,20 @@ struct ctl_table_header *__register_sysctl_paths( | |||
1758 | } | 1858 | } |
1759 | #endif | 1859 | #endif |
1760 | spin_lock(&sysctl_lock); | 1860 | spin_lock(&sysctl_lock); |
1761 | header_list = lookup_header_list(root, namespaces); | 1861 | header->set = lookup_header_set(root, namespaces); |
1762 | list_add_tail(&header->ctl_entry, header_list); | 1862 | header->attached_by = header->ctl_table; |
1863 | header->attached_to = root_table; | ||
1864 | header->parent = &root_table_header; | ||
1865 | for (set = header->set; set; set = set->parent) { | ||
1866 | struct ctl_table_header *p; | ||
1867 | list_for_each_entry(p, &set->list, ctl_entry) { | ||
1868 | if (p->unregistering) | ||
1869 | continue; | ||
1870 | try_attach(p, header); | ||
1871 | } | ||
1872 | } | ||
1873 | header->parent->count++; | ||
1874 | list_add_tail(&header->ctl_entry, &header->set->list); | ||
1763 | spin_unlock(&sysctl_lock); | 1875 | spin_unlock(&sysctl_lock); |
1764 | 1876 | ||
1765 | return header; | 1877 | return header; |
@@ -1814,8 +1926,37 @@ void unregister_sysctl_table(struct ctl_table_header * header) | |||
1814 | 1926 | ||
1815 | spin_lock(&sysctl_lock); | 1927 | spin_lock(&sysctl_lock); |
1816 | start_unregistering(header); | 1928 | start_unregistering(header); |
1929 | if (!--header->parent->count) { | ||
1930 | WARN_ON(1); | ||
1931 | kfree(header->parent); | ||
1932 | } | ||
1933 | if (!--header->count) | ||
1934 | kfree(header); | ||
1935 | spin_unlock(&sysctl_lock); | ||
1936 | } | ||
1937 | |||
1938 | int sysctl_is_seen(struct ctl_table_header *p) | ||
1939 | { | ||
1940 | struct ctl_table_set *set = p->set; | ||
1941 | int res; | ||
1942 | spin_lock(&sysctl_lock); | ||
1943 | if (p->unregistering) | ||
1944 | res = 0; | ||
1945 | else if (!set->is_seen) | ||
1946 | res = 1; | ||
1947 | else | ||
1948 | res = set->is_seen(set); | ||
1817 | spin_unlock(&sysctl_lock); | 1949 | spin_unlock(&sysctl_lock); |
1818 | kfree(header); | 1950 | return res; |
1951 | } | ||
1952 | |||
1953 | void setup_sysctl_set(struct ctl_table_set *p, | ||
1954 | struct ctl_table_set *parent, | ||
1955 | int (*is_seen)(struct ctl_table_set *)) | ||
1956 | { | ||
1957 | INIT_LIST_HEAD(&p->list); | ||
1958 | p->parent = parent ? parent : &sysctl_table_root.default_set; | ||
1959 | p->is_seen = is_seen; | ||
1819 | } | 1960 | } |
1820 | 1961 | ||
1821 | #else /* !CONFIG_SYSCTL */ | 1962 | #else /* !CONFIG_SYSCTL */ |
@@ -1834,6 +1975,16 @@ void unregister_sysctl_table(struct ctl_table_header * table) | |||
1834 | { | 1975 | { |
1835 | } | 1976 | } |
1836 | 1977 | ||
1978 | void setup_sysctl_set(struct ctl_table_set *p, | ||
1979 | struct ctl_table_set *parent, | ||
1980 | int (*is_seen)(struct ctl_table_set *)) | ||
1981 | { | ||
1982 | } | ||
1983 | |||
1984 | void sysctl_head_put(struct ctl_table_header *head) | ||
1985 | { | ||
1986 | } | ||
1987 | |||
1837 | #endif /* CONFIG_SYSCTL */ | 1988 | #endif /* CONFIG_SYSCTL */ |
1838 | 1989 | ||
1839 | /* | 1990 | /* |
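Note: the sysctl.c changes above add per-set visibility (struct ctl_table_set, sysctl_is_seen) and header refcounting on top of the new attach logic. A minimal sketch of how a namespace-aware subsystem could use the new setup_sysctl_set() hook follows; the my_subsys_* names and the is_seen policy are illustrative assumptions, only setup_sysctl_set() and struct ctl_table_set come from the patch, and the extra registration plumbing a real user needs is omitted.

static struct ctl_table_set my_subsys_set;	/* hypothetical private set */

/* Show this set's entries only when our (hypothetical) policy says so. */
static int my_subsys_is_seen(struct ctl_table_set *set)
{
	return set == &my_subsys_set;
}

static int __init my_subsys_sysctl_init(void)
{
	/* parent == NULL chains the set to sysctl_table_root.default_set */
	setup_sysctl_set(&my_subsys_set, NULL, my_subsys_is_seen);
	return 0;
}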
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index c09350d564f2..c35da23ab8fb 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c | |||
@@ -1532,6 +1532,8 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) | |||
1532 | sysctl_check_leaf(namespaces, table, &fail); | 1532 | sysctl_check_leaf(namespaces, table, &fail); |
1533 | } | 1533 | } |
1534 | sysctl_check_bin_path(table, &fail); | 1534 | sysctl_check_bin_path(table, &fail); |
1535 | if (table->mode > 0777) | ||
1536 | set_fail(&fail, table, "bogus .mode"); | ||
1535 | if (fail) { | 1537 | if (fail) { |
1536 | set_fail(&fail, table, NULL); | 1538 | set_fail(&fail, table, NULL); |
1537 | error = -EINVAL; | 1539 | error = -EINVAL; |
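Note: the new sysctl_check_table() test above rejects any .mode with bits outside 0777. A small illustrative table entry (names and values are made up for the example):

static int example_value;	/* illustrative */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_ok",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,		/* passes: within 0777 */
		.proc_handler	= &proc_dointvec,
	},
	/* .mode = 01644 (or any setuid/sticky-style bits) would now be
	 * flagged as "bogus .mode" by sysctl_check_table(). */
	{ .ctl_name = 0 }
};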
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 4a23517169a6..bd6be76303cf 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -35,7 +35,7 @@ | |||
35 | */ | 35 | */ |
36 | #define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS) | 36 | #define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS) |
37 | 37 | ||
38 | static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 }; | 38 | static DEFINE_PER_CPU(__u32, taskstats_seqnum); |
39 | static int family_registered; | 39 | static int family_registered; |
40 | struct kmem_cache *taskstats_cache; | 40 | struct kmem_cache *taskstats_cache; |
41 | 41 | ||
@@ -301,7 +301,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | |||
301 | return -EINVAL; | 301 | return -EINVAL; |
302 | 302 | ||
303 | if (isadd == REGISTER) { | 303 | if (isadd == REGISTER) { |
304 | for_each_cpu_mask(cpu, mask) { | 304 | for_each_cpu_mask_nr(cpu, mask) { |
305 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, | 305 | s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, |
306 | cpu_to_node(cpu)); | 306 | cpu_to_node(cpu)); |
307 | if (!s) | 307 | if (!s) |
@@ -320,7 +320,7 @@ static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd) | |||
320 | 320 | ||
321 | /* Deregister or cleanup */ | 321 | /* Deregister or cleanup */ |
322 | cleanup: | 322 | cleanup: |
323 | for_each_cpu_mask(cpu, mask) { | 323 | for_each_cpu_mask_nr(cpu, mask) { |
324 | listeners = &per_cpu(listener_array, cpu); | 324 | listeners = &per_cpu(listener_array, cpu); |
325 | down_write(&listeners->sem); | 325 | down_write(&listeners->sem); |
326 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { | 326 | list_for_each_entry_safe(s, tmp, &listeners->list, list) { |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 3d1e3e1a1971..f8d968063cea 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -72,6 +72,16 @@ void clockevents_set_mode(struct clock_event_device *dev, | |||
72 | } | 72 | } |
73 | 73 | ||
74 | /** | 74 | /** |
75 | * clockevents_shutdown - shutdown the device and clear next_event | ||
76 | * @dev: device to shutdown | ||
77 | */ | ||
78 | void clockevents_shutdown(struct clock_event_device *dev) | ||
79 | { | ||
80 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | ||
81 | dev->next_event.tv64 = KTIME_MAX; | ||
82 | } | ||
83 | |||
84 | /** | ||
75 | * clockevents_program_event - Reprogram the clock event device. | 85 | * clockevents_program_event - Reprogram the clock event device. |
76 | * @expires: absolute expiry time (monotonic clock) | 86 | * @expires: absolute expiry time (monotonic clock) |
77 | * | 87 | * |
@@ -177,7 +187,7 @@ void clockevents_register_device(struct clock_event_device *dev) | |||
177 | /* | 187 | /* |
178 | * Noop handler when we shut down an event device | 188 | * Noop handler when we shut down an event device |
179 | */ | 189 | */ |
180 | static void clockevents_handle_noop(struct clock_event_device *dev) | 190 | void clockevents_handle_noop(struct clock_event_device *dev) |
181 | { | 191 | { |
182 | } | 192 | } |
183 | 193 | ||
@@ -199,7 +209,6 @@ void clockevents_exchange_device(struct clock_event_device *old, | |||
199 | * released list and do a notify add later. | 209 | * released list and do a notify add later. |
200 | */ | 210 | */ |
201 | if (old) { | 211 | if (old) { |
202 | old->event_handler = clockevents_handle_noop; | ||
203 | clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); | 212 | clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); |
204 | list_del(&old->list); | 213 | list_del(&old->list); |
205 | list_add(&old->list, &clockevents_released); | 214 | list_add(&old->list, &clockevents_released); |
@@ -207,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old, | |||
207 | 216 | ||
208 | if (new) { | 217 | if (new) { |
209 | BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); | 218 | BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); |
210 | clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN); | 219 | clockevents_shutdown(new); |
211 | } | 220 | } |
212 | local_irq_restore(flags); | 221 | local_irq_restore(flags); |
213 | } | 222 | } |
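Note: clockevents_shutdown() above bundles the mode switch with resetting next_event, which the open-coded call sites did not do. Schematic call-site change (not a literal hunk from this patch):

/* Before: only the mode changed; dev->next_event kept a stale value. */
clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);

/* After: one helper keeps the two in sync at every shutdown point. */
clockevents_shutdown(dev);	/* also sets dev->next_event.tv64 = KTIME_MAX */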
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index dadde5361f32..093d4acf993b 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -145,9 +145,9 @@ static void clocksource_watchdog(unsigned long data) | |||
145 | * Cycle through CPUs to check if the CPUs stay | 145 | * Cycle through CPUs to check if the CPUs stay |
146 | * synchronized to each other. | 146 | * synchronized to each other. |
147 | */ | 147 | */ |
148 | int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); | 148 | int next_cpu = next_cpu_nr(raw_smp_processor_id(), cpu_online_map); |
149 | 149 | ||
150 | if (next_cpu >= NR_CPUS) | 150 | if (next_cpu >= nr_cpu_ids) |
151 | next_cpu = first_cpu(cpu_online_map); | 151 | next_cpu = first_cpu(cpu_online_map); |
152 | watchdog_timer.expires += WATCHDOG_INTERVAL; | 152 | watchdog_timer.expires += WATCHDOG_INTERVAL; |
153 | add_timer_on(&watchdog_timer, next_cpu); | 153 | add_timer_on(&watchdog_timer, next_cpu); |
@@ -376,7 +376,8 @@ void clocksource_unregister(struct clocksource *cs) | |||
376 | * Provides sysfs interface for listing current clocksource. | 376 | * Provides sysfs interface for listing current clocksource. |
377 | */ | 377 | */ |
378 | static ssize_t | 378 | static ssize_t |
379 | sysfs_show_current_clocksources(struct sys_device *dev, char *buf) | 379 | sysfs_show_current_clocksources(struct sys_device *dev, |
380 | struct sysdev_attribute *attr, char *buf) | ||
380 | { | 381 | { |
381 | ssize_t count = 0; | 382 | ssize_t count = 0; |
382 | 383 | ||
@@ -397,6 +398,7 @@ sysfs_show_current_clocksources(struct sys_device *dev, char *buf) | |||
397 | * clocksource selection. | 398 | * clocksource selection. |
398 | */ | 399 | */ |
399 | static ssize_t sysfs_override_clocksource(struct sys_device *dev, | 400 | static ssize_t sysfs_override_clocksource(struct sys_device *dev, |
401 | struct sysdev_attribute *attr, | ||
400 | const char *buf, size_t count) | 402 | const char *buf, size_t count) |
401 | { | 403 | { |
402 | struct clocksource *ovr = NULL; | 404 | struct clocksource *ovr = NULL; |
@@ -449,7 +451,9 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, | |||
449 | * Provides sysfs interface for listing registered clocksources | 451 | * Provides sysfs interface for listing registered clocksources |
450 | */ | 452 | */ |
451 | static ssize_t | 453 | static ssize_t |
452 | sysfs_show_available_clocksources(struct sys_device *dev, char *buf) | 454 | sysfs_show_available_clocksources(struct sys_device *dev, |
455 | struct sysdev_attribute *attr, | ||
456 | char *buf) | ||
453 | { | 457 | { |
454 | struct clocksource *src; | 458 | struct clocksource *src; |
455 | ssize_t count = 0; | 459 | ssize_t count = 0; |
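Note: the clocksource sysfs callbacks above gain a struct sysdev_attribute * parameter because the sysdev core now passes the attribute to show/store. A hypothetical driver attribute written against the new prototypes would look like this (my_show, my_store and the placeholder value are assumptions, not part of the patch):

#include <linux/sysdev.h>

static ssize_t my_show(struct sys_device *dev,
		       struct sysdev_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);	/* placeholder value */
}

static ssize_t my_store(struct sys_device *dev,
			struct sysdev_attribute *attr,
			const char *buf, size_t count)
{
	return count;		/* accept and ignore, for the sketch */
}

static SYSDEV_ATTR(example, 0644, my_show, my_store);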
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 5125ddd8196b..1ad46f3df6e7 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -245,7 +245,7 @@ static void sync_cmos_clock(unsigned long dummy) | |||
245 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) | 245 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) |
246 | fail = update_persistent_clock(now); | 246 | fail = update_persistent_clock(now); |
247 | 247 | ||
248 | next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec; | 248 | next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2); |
249 | if (next.tv_nsec <= 0) | 249 | if (next.tv_nsec <= 0) |
250 | next.tv_nsec += NSEC_PER_SEC; | 250 | next.tv_nsec += NSEC_PER_SEC; |
251 | 251 | ||
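Note: the extra TICK_NSEC / 2 above makes the timer aim half a tick early, so after tick-granular rounding the expiry is centred on the half-second mark that the abs(now.tv_nsec - NSEC_PER_SEC/2) <= tick_nsec/2 test requires. A worked example (HZ = 1000, so TICK_NSEC is roughly 1000000 ns; the numbers are illustrative):

/*
 * now.tv_nsec = 600000000:
 *   old delay = 500000000 - 600000000 = -100000000 -> +1s = 900000000 ns
 *   new delay = 900000000 - 500000 = 899500000 ns
 * The old value lands, on average, half a tick after the 500 ms point;
 * the new one straddles it, keeping update_persistent_clock() eligible.
 */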
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f48d0f09d32f..bd7034542399 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -175,6 +175,8 @@ static void tick_do_periodic_broadcast(void) | |||
175 | */ | 175 | */ |
176 | static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | 176 | static void tick_handle_periodic_broadcast(struct clock_event_device *dev) |
177 | { | 177 | { |
178 | ktime_t next; | ||
179 | |||
178 | tick_do_periodic_broadcast(); | 180 | tick_do_periodic_broadcast(); |
179 | 181 | ||
180 | /* | 182 | /* |
@@ -185,10 +187,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev) | |||
185 | 187 | ||
186 | /* | 188 | /* |
187 | * Setup the next period for devices, which do not have | 189 | * Setup the next period for devices, which do not have |
188 | * periodic mode: | 190 | * periodic mode. We read dev->next_event first and add to it |
191 | * when the event already expired. clockevents_program_event() | ||
192 | * sets dev->next_event only when the event is really | ||
193 | * programmed to the device. | ||
189 | */ | 194 | */ |
190 | for (;;) { | 195 | for (next = dev->next_event; ;) { |
191 | ktime_t next = ktime_add(dev->next_event, tick_period); | 196 | next = ktime_add(next, tick_period); |
192 | 197 | ||
193 | if (!clockevents_program_event(dev, next, ktime_get())) | 198 | if (!clockevents_program_event(dev, next, ktime_get())) |
194 | return; | 199 | return; |
@@ -205,7 +210,7 @@ static void tick_do_broadcast_on_off(void *why) | |||
205 | struct clock_event_device *bc, *dev; | 210 | struct clock_event_device *bc, *dev; |
206 | struct tick_device *td; | 211 | struct tick_device *td; |
207 | unsigned long flags, *reason = why; | 212 | unsigned long flags, *reason = why; |
208 | int cpu; | 213 | int cpu, bc_stopped; |
209 | 214 | ||
210 | spin_lock_irqsave(&tick_broadcast_lock, flags); | 215 | spin_lock_irqsave(&tick_broadcast_lock, flags); |
211 | 216 | ||
@@ -223,14 +228,15 @@ static void tick_do_broadcast_on_off(void *why) | |||
223 | if (!tick_device_is_functional(dev)) | 228 | if (!tick_device_is_functional(dev)) |
224 | goto out; | 229 | goto out; |
225 | 230 | ||
231 | bc_stopped = cpus_empty(tick_broadcast_mask); | ||
232 | |||
226 | switch (*reason) { | 233 | switch (*reason) { |
227 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: | 234 | case CLOCK_EVT_NOTIFY_BROADCAST_ON: |
228 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
229 | if (!cpu_isset(cpu, tick_broadcast_mask)) { | 236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { |
230 | cpu_set(cpu, tick_broadcast_mask); | 237 | cpu_set(cpu, tick_broadcast_mask); |
231 | if (td->mode == TICKDEV_MODE_PERIODIC) | 238 | if (bc->mode == TICKDEV_MODE_PERIODIC) |
232 | clockevents_set_mode(dev, | 239 | clockevents_shutdown(dev); |
233 | CLOCK_EVT_MODE_SHUTDOWN); | ||
234 | } | 240 | } |
235 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) | 241 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) |
236 | tick_broadcast_force = 1; | 242 | tick_broadcast_force = 1; |
@@ -239,15 +245,16 @@ static void tick_do_broadcast_on_off(void *why) | |||
239 | if (!tick_broadcast_force && | 245 | if (!tick_broadcast_force && |
240 | cpu_isset(cpu, tick_broadcast_mask)) { | 246 | cpu_isset(cpu, tick_broadcast_mask)) { |
241 | cpu_clear(cpu, tick_broadcast_mask); | 247 | cpu_clear(cpu, tick_broadcast_mask); |
242 | if (td->mode == TICKDEV_MODE_PERIODIC) | 248 | if (bc->mode == TICKDEV_MODE_PERIODIC) |
243 | tick_setup_periodic(dev, 0); | 249 | tick_setup_periodic(dev, 0); |
244 | } | 250 | } |
245 | break; | 251 | break; |
246 | } | 252 | } |
247 | 253 | ||
248 | if (cpus_empty(tick_broadcast_mask)) | 254 | if (cpus_empty(tick_broadcast_mask)) { |
249 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 255 | if (!bc_stopped) |
250 | else { | 256 | clockevents_shutdown(bc); |
257 | } else if (bc_stopped) { | ||
251 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 258 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
252 | tick_broadcast_start_periodic(bc); | 259 | tick_broadcast_start_periodic(bc); |
253 | else | 260 | else |
@@ -298,7 +305,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
298 | 305 | ||
299 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { | 306 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { |
300 | if (bc && cpus_empty(tick_broadcast_mask)) | 307 | if (bc && cpus_empty(tick_broadcast_mask)) |
301 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 308 | clockevents_shutdown(bc); |
302 | } | 309 | } |
303 | 310 | ||
304 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 311 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
@@ -313,7 +320,7 @@ void tick_suspend_broadcast(void) | |||
313 | 320 | ||
314 | bc = tick_broadcast_device.evtdev; | 321 | bc = tick_broadcast_device.evtdev; |
315 | if (bc) | 322 | if (bc) |
316 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 323 | clockevents_shutdown(bc); |
317 | 324 | ||
318 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 325 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
319 | } | 326 | } |
@@ -364,16 +371,8 @@ cpumask_t *tick_get_broadcast_oneshot_mask(void) | |||
364 | static int tick_broadcast_set_event(ktime_t expires, int force) | 371 | static int tick_broadcast_set_event(ktime_t expires, int force) |
365 | { | 372 | { |
366 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | 373 | struct clock_event_device *bc = tick_broadcast_device.evtdev; |
367 | ktime_t now = ktime_get(); | 374 | |
368 | int res; | 375 | return tick_dev_program_event(bc, expires, force); |
369 | |||
370 | for(;;) { | ||
371 | res = clockevents_program_event(bc, expires, now); | ||
372 | if (!res || !force) | ||
373 | return res; | ||
374 | now = ktime_get(); | ||
375 | expires = ktime_add(now, ktime_set(0, bc->min_delta_ns)); | ||
376 | } | ||
377 | } | 376 | } |
378 | 377 | ||
379 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | 378 | int tick_resume_broadcast_oneshot(struct clock_event_device *bc) |
@@ -399,8 +398,7 @@ again: | |||
399 | mask = CPU_MASK_NONE; | 398 | mask = CPU_MASK_NONE; |
400 | now = ktime_get(); | 399 | now = ktime_get(); |
401 | /* Find all expired events */ | 400 | /* Find all expired events */ |
402 | for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS; | 401 | for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) { |
403 | cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) { | ||
404 | td = &per_cpu(tick_cpu_device, cpu); | 402 | td = &per_cpu(tick_cpu_device, cpu); |
405 | if (td->evtdev->next_event.tv64 <= now.tv64) | 403 | if (td->evtdev->next_event.tv64 <= now.tv64) |
406 | cpu_set(cpu, mask); | 404 | cpu_set(cpu, mask); |
@@ -492,14 +490,52 @@ static void tick_broadcast_clear_oneshot(int cpu) | |||
492 | cpu_clear(cpu, tick_broadcast_oneshot_mask); | 490 | cpu_clear(cpu, tick_broadcast_oneshot_mask); |
493 | } | 491 | } |
494 | 492 | ||
493 | static void tick_broadcast_init_next_event(cpumask_t *mask, ktime_t expires) | ||
494 | { | ||
495 | struct tick_device *td; | ||
496 | int cpu; | ||
497 | |||
498 | for_each_cpu_mask_nr(cpu, *mask) { | ||
499 | td = &per_cpu(tick_cpu_device, cpu); | ||
500 | if (td->evtdev) | ||
501 | td->evtdev->next_event = expires; | ||
502 | } | ||
503 | } | ||
504 | |||
495 | /** | 505 | /** |
496 | * tick_broadcast_setup_oneshot - setup the broadcast device | 506 | * tick_broadcast_setup_oneshot - setup the broadcast device |
497 | */ | 507 | */ |
498 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 508 | void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
499 | { | 509 | { |
500 | bc->event_handler = tick_handle_oneshot_broadcast; | 510 | /* Set it up only once ! */ |
501 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | 511 | if (bc->event_handler != tick_handle_oneshot_broadcast) { |
502 | bc->next_event.tv64 = KTIME_MAX; | 512 | int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; |
513 | int cpu = smp_processor_id(); | ||
514 | cpumask_t mask; | ||
515 | |||
516 | bc->event_handler = tick_handle_oneshot_broadcast; | ||
517 | clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); | ||
518 | |||
519 | /* Take the do_timer update */ | ||
520 | tick_do_timer_cpu = cpu; | ||
521 | |||
522 | /* | ||
523 | * We must be careful here. There might be other CPUs | ||
524 | * waiting for periodic broadcast. We need to set the | ||
525 | * oneshot_mask bits for those and program the | ||
526 | * broadcast device to fire. | ||
527 | */ | ||
528 | mask = tick_broadcast_mask; | ||
529 | cpu_clear(cpu, mask); | ||
530 | cpus_or(tick_broadcast_oneshot_mask, | ||
531 | tick_broadcast_oneshot_mask, mask); | ||
532 | |||
533 | if (was_periodic && !cpus_empty(mask)) { | ||
534 | tick_broadcast_init_next_event(&mask, tick_next_period); | ||
535 | tick_broadcast_set_event(tick_next_period, 1); | ||
536 | } else | ||
537 | bc->next_event.tv64 = KTIME_MAX; | ||
538 | } | ||
503 | } | 539 | } |
504 | 540 | ||
505 | /* | 541 | /* |
@@ -539,4 +575,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
539 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 575 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
540 | } | 576 | } |
541 | 577 | ||
578 | /* | ||
579 | * Check, whether the broadcast device is in one shot mode | ||
580 | */ | ||
581 | int tick_broadcast_oneshot_active(void) | ||
582 | { | ||
583 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; | ||
584 | } | ||
585 | |||
542 | #endif | 586 | #endif |
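Note: tick_broadcast_oneshot_active() is the predicate the rest of the tick code now keys off; tick_setup_periodic() in tick-common.c below is its first user, presumably so a device brought up after the switch to oneshot is not left in periodic hardware mode behind the broadcast logic's back. Schematic of that call site:

if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
    !tick_broadcast_oneshot_active()) {
	/* true periodic hardware mode is still fine */
	clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
} else {
	/* fall back to programming one event per tick period */
}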
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 4f3886562b8c..df12434b43ca 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); | |||
33 | */ | 33 | */ |
34 | ktime_t tick_next_period; | 34 | ktime_t tick_next_period; |
35 | ktime_t tick_period; | 35 | ktime_t tick_period; |
36 | int tick_do_timer_cpu __read_mostly = -1; | 36 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; |
37 | DEFINE_SPINLOCK(tick_device_lock); | 37 | DEFINE_SPINLOCK(tick_device_lock); |
38 | 38 | ||
39 | /* | 39 | /* |
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
109 | if (!tick_device_is_functional(dev)) | 109 | if (!tick_device_is_functional(dev)) |
110 | return; | 110 | return; |
111 | 111 | ||
112 | if (dev->features & CLOCK_EVT_FEAT_PERIODIC) { | 112 | if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && |
113 | !tick_broadcast_oneshot_active()) { | ||
113 | clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); | 114 | clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); |
114 | } else { | 115 | } else { |
115 | unsigned long seq; | 116 | unsigned long seq; |
@@ -135,7 +136,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
135 | */ | 136 | */ |
136 | static void tick_setup_device(struct tick_device *td, | 137 | static void tick_setup_device(struct tick_device *td, |
137 | struct clock_event_device *newdev, int cpu, | 138 | struct clock_event_device *newdev, int cpu, |
138 | cpumask_t cpumask) | 139 | const cpumask_t *cpumask) |
139 | { | 140 | { |
140 | ktime_t next_event; | 141 | ktime_t next_event; |
141 | void (*handler)(struct clock_event_device *) = NULL; | 142 | void (*handler)(struct clock_event_device *) = NULL; |
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td, | |||
148 | * If no cpu took the do_timer update, assign it to | 149 | * If no cpu took the do_timer update, assign it to |
149 | * this cpu: | 150 | * this cpu: |
150 | */ | 151 | */ |
151 | if (tick_do_timer_cpu == -1) { | 152 | if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) { |
152 | tick_do_timer_cpu = cpu; | 153 | tick_do_timer_cpu = cpu; |
153 | tick_next_period = ktime_get(); | 154 | tick_next_period = ktime_get(); |
154 | tick_period = ktime_set(0, NSEC_PER_SEC / HZ); | 155 | tick_period = ktime_set(0, NSEC_PER_SEC / HZ); |
@@ -161,6 +162,7 @@ static void tick_setup_device(struct tick_device *td, | |||
161 | } else { | 162 | } else { |
162 | handler = td->evtdev->event_handler; | 163 | handler = td->evtdev->event_handler; |
163 | next_event = td->evtdev->next_event; | 164 | next_event = td->evtdev->next_event; |
165 | td->evtdev->event_handler = clockevents_handle_noop; | ||
164 | } | 166 | } |
165 | 167 | ||
166 | td->evtdev = newdev; | 168 | td->evtdev = newdev; |
@@ -169,8 +171,8 @@ static void tick_setup_device(struct tick_device *td, | |||
169 | * When the device is not per cpu, pin the interrupt to the | 171 | * When the device is not per cpu, pin the interrupt to the |
170 | * current cpu: | 172 | * current cpu: |
171 | */ | 173 | */ |
172 | if (!cpus_equal(newdev->cpumask, cpumask)) | 174 | if (!cpus_equal(newdev->cpumask, *cpumask)) |
173 | irq_set_affinity(newdev->irq, cpumask); | 175 | irq_set_affinity(newdev->irq, *cpumask); |
174 | 176 | ||
175 | /* | 177 | /* |
176 | * When global broadcasting is active, check if the current | 178 | * When global broadcasting is active, check if the current |
@@ -196,7 +198,6 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
196 | struct tick_device *td; | 198 | struct tick_device *td; |
197 | int cpu, ret = NOTIFY_OK; | 199 | int cpu, ret = NOTIFY_OK; |
198 | unsigned long flags; | 200 | unsigned long flags; |
199 | cpumask_t cpumask; | ||
200 | 201 | ||
201 | spin_lock_irqsave(&tick_device_lock, flags); | 202 | spin_lock_irqsave(&tick_device_lock, flags); |
202 | 203 | ||
@@ -206,10 +207,9 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
206 | 207 | ||
207 | td = &per_cpu(tick_cpu_device, cpu); | 208 | td = &per_cpu(tick_cpu_device, cpu); |
208 | curdev = td->evtdev; | 209 | curdev = td->evtdev; |
209 | cpumask = cpumask_of_cpu(cpu); | ||
210 | 210 | ||
211 | /* cpu local device ? */ | 211 | /* cpu local device ? */ |
212 | if (!cpus_equal(newdev->cpumask, cpumask)) { | 212 | if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) { |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * If the cpu affinity of the device interrupt can not | 215 | * If the cpu affinity of the device interrupt can not |
@@ -222,7 +222,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
222 | * If we have a cpu local device already, do not replace it | 222 | * If we have a cpu local device already, do not replace it |
223 | * by a non cpu local device | 223 | * by a non cpu local device |
224 | */ | 224 | */ |
225 | if (curdev && cpus_equal(curdev->cpumask, cpumask)) | 225 | if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu))) |
226 | goto out_bc; | 226 | goto out_bc; |
227 | } | 227 | } |
228 | 228 | ||
@@ -250,11 +250,11 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
250 | * not give it back to the clockevents layer ! | 250 | * not give it back to the clockevents layer ! |
251 | */ | 251 | */ |
252 | if (tick_is_broadcast_device(curdev)) { | 252 | if (tick_is_broadcast_device(curdev)) { |
253 | clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN); | 253 | clockevents_shutdown(curdev); |
254 | curdev = NULL; | 254 | curdev = NULL; |
255 | } | 255 | } |
256 | clockevents_exchange_device(curdev, newdev); | 256 | clockevents_exchange_device(curdev, newdev); |
257 | tick_setup_device(td, newdev, cpu, cpumask); | 257 | tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); |
258 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) | 258 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) |
259 | tick_oneshot_notify(); | 259 | tick_oneshot_notify(); |
260 | 260 | ||
@@ -301,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup) | |||
301 | if (*cpup == tick_do_timer_cpu) { | 301 | if (*cpup == tick_do_timer_cpu) { |
302 | int cpu = first_cpu(cpu_online_map); | 302 | int cpu = first_cpu(cpu_online_map); |
303 | 303 | ||
304 | tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1; | 304 | tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : |
305 | TICK_DO_TIMER_NONE; | ||
305 | } | 306 | } |
306 | spin_unlock_irqrestore(&tick_device_lock, flags); | 307 | spin_unlock_irqrestore(&tick_device_lock, flags); |
307 | } | 308 | } |
@@ -312,7 +313,7 @@ static void tick_suspend(void) | |||
312 | unsigned long flags; | 313 | unsigned long flags; |
313 | 314 | ||
314 | spin_lock_irqsave(&tick_device_lock, flags); | 315 | spin_lock_irqsave(&tick_device_lock, flags); |
315 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN); | 316 | clockevents_shutdown(td->evtdev); |
316 | spin_unlock_irqrestore(&tick_device_lock, flags); | 317 | spin_unlock_irqrestore(&tick_device_lock, flags); |
317 | } | 318 | } |
318 | 319 | ||
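Note: the -1 magic value for tick_do_timer_cpu is split into two named sentinels (defined in tick-internal.h just below). A condensed composite of the call sites above and in tick-sched.c, showing the lifecycle:

/*
 * TICK_DO_TIMER_BOOT (-2): no CPU has ever owned the do_timer() duty.
 * TICK_DO_TIMER_NONE (-1): the previous owner dropped the duty
 *                          (CPU went offline or NOHZ-idle).
 */

/* tick_setup_device(): boot-time hand-off to the first CPU */
if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT)
	tick_do_timer_cpu = cpu;

/* tick_nohz_handler() / tick_sched_timer(): reclaim a dropped duty */
if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
	tick_do_timer_cpu = cpu;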
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index f13f2b7f4fd4..469248782c23 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -1,6 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * tick internal variable and functions used by low/high res code | 2 | * tick internal variable and functions used by low/high res code |
3 | */ | 3 | */ |
4 | |||
5 | #define TICK_DO_TIMER_NONE -1 | ||
6 | #define TICK_DO_TIMER_BOOT -2 | ||
7 | |||
4 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); | 8 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); |
5 | extern spinlock_t tick_device_lock; | 9 | extern spinlock_t tick_device_lock; |
6 | extern ktime_t tick_next_period; | 10 | extern ktime_t tick_next_period; |
@@ -10,6 +14,8 @@ extern int tick_do_timer_cpu __read_mostly; | |||
10 | extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); | 14 | extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); |
11 | extern void tick_handle_periodic(struct clock_event_device *dev); | 15 | extern void tick_handle_periodic(struct clock_event_device *dev); |
12 | 16 | ||
17 | extern void clockevents_shutdown(struct clock_event_device *dev); | ||
18 | |||
13 | /* | 19 | /* |
14 | * NO_HZ / high resolution timer shared code | 20 | * NO_HZ / high resolution timer shared code |
15 | */ | 21 | */ |
@@ -17,6 +23,8 @@ extern void tick_handle_periodic(struct clock_event_device *dev); | |||
17 | extern void tick_setup_oneshot(struct clock_event_device *newdev, | 23 | extern void tick_setup_oneshot(struct clock_event_device *newdev, |
18 | void (*handler)(struct clock_event_device *), | 24 | void (*handler)(struct clock_event_device *), |
19 | ktime_t nextevt); | 25 | ktime_t nextevt); |
26 | extern int tick_dev_program_event(struct clock_event_device *dev, | ||
27 | ktime_t expires, int force); | ||
20 | extern int tick_program_event(ktime_t expires, int force); | 28 | extern int tick_program_event(ktime_t expires, int force); |
21 | extern void tick_oneshot_notify(void); | 29 | extern void tick_oneshot_notify(void); |
22 | extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); | 30 | extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)); |
@@ -27,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason); | |||
27 | extern void tick_broadcast_switch_to_oneshot(void); | 35 | extern void tick_broadcast_switch_to_oneshot(void); |
28 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | 36 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); |
29 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 37 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
38 | extern int tick_broadcast_oneshot_active(void); | ||
30 | # else /* BROADCAST */ | 39 | # else /* BROADCAST */ |
31 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 40 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
32 | { | 41 | { |
@@ -35,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
35 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 44 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } |
36 | static inline void tick_broadcast_switch_to_oneshot(void) { } | 45 | static inline void tick_broadcast_switch_to_oneshot(void) { } |
37 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 46 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
47 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | ||
38 | # endif /* !BROADCAST */ | 48 | # endif /* !BROADCAST */ |
39 | 49 | ||
40 | #else /* !ONESHOT */ | 50 | #else /* !ONESHOT */ |
@@ -64,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
64 | { | 74 | { |
65 | return 0; | 75 | return 0; |
66 | } | 76 | } |
77 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | ||
67 | #endif /* !TICK_ONESHOT */ | 78 | #endif /* !TICK_ONESHOT */ |
68 | 79 | ||
69 | /* | 80 | /* |
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index 450c04935b66..2e8de678e767 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c | |||
@@ -23,24 +23,56 @@ | |||
23 | #include "tick-internal.h" | 23 | #include "tick-internal.h" |
24 | 24 | ||
25 | /** | 25 | /** |
26 | * tick_program_event | 26 | * tick_program_event internal worker function |
27 | */ | 27 | */ |
28 | int tick_program_event(ktime_t expires, int force) | 28 | int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires, |
29 | int force) | ||
29 | { | 30 | { |
30 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | ||
31 | ktime_t now = ktime_get(); | 31 | ktime_t now = ktime_get(); |
32 | int i; | ||
32 | 33 | ||
33 | while (1) { | 34 | for (i = 0;;) { |
34 | int ret = clockevents_program_event(dev, expires, now); | 35 | int ret = clockevents_program_event(dev, expires, now); |
35 | 36 | ||
36 | if (!ret || !force) | 37 | if (!ret || !force) |
37 | return ret; | 38 | return ret; |
39 | |||
40 | /* | ||
41 | * We tried 2 times to program the device with the given | ||
42 | * min_delta_ns. If that's not working then we double it | ||
43 | * and emit a warning. | ||
44 | */ | ||
45 | if (++i > 2) { | ||
46 | /* Increase the min. delta and try again */ | ||
47 | if (!dev->min_delta_ns) | ||
48 | dev->min_delta_ns = 5000; | ||
49 | else | ||
50 | dev->min_delta_ns += dev->min_delta_ns >> 1; | ||
51 | |||
52 | printk(KERN_WARNING | ||
53 | "CE: %s increasing min_delta_ns to %lu nsec\n", | ||
54 | dev->name ? dev->name : "?", | ||
55 | dev->min_delta_ns << 1); | ||
56 | |||
57 | i = 0; | ||
58 | } | ||
59 | |||
38 | now = ktime_get(); | 60 | now = ktime_get(); |
39 | expires = ktime_add(now, ktime_set(0, dev->min_delta_ns)); | 61 | expires = ktime_add_ns(now, dev->min_delta_ns); |
40 | } | 62 | } |
41 | } | 63 | } |
42 | 64 | ||
43 | /** | 65 | /** |
66 | * tick_program_event | ||
67 | */ | ||
68 | int tick_program_event(ktime_t expires, int force) | ||
69 | { | ||
70 | struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; | ||
71 | |||
72 | return tick_dev_program_event(dev, expires, force); | ||
73 | } | ||
74 | |||
75 | /** | ||
44 | * tick_resume_oneshot - resume oneshot mode | 76 | * tick_resume_oneshot - resume oneshot mode |
45 | */ | 77 | */ |
46 | void tick_resume_oneshot(void) | 78 | void tick_resume_oneshot(void) |
@@ -61,7 +93,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev, | |||
61 | { | 93 | { |
62 | newdev->event_handler = handler; | 94 | newdev->event_handler = handler; |
63 | clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); | 95 | clockevents_set_mode(newdev, CLOCK_EVT_MODE_ONESHOT); |
64 | clockevents_program_event(newdev, next_event, ktime_get()); | 96 | tick_dev_program_event(newdev, next_event, 1); |
65 | } | 97 | } |
66 | 98 | ||
67 | /** | 99 | /** |
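Note: the retry loop in tick_dev_program_event() above grows min_delta_ns instead of spinning forever when a device keeps rejecting the programmed delta. Worked example of the back-off (numbers follow directly from the code above):

/*
 * Starting from min_delta_ns = 0 (e.g. an unset or broken value):
 *   3 consecutive failures: 0    -> 5000 ns   (the floor)
 *   3 more failures:        5000 -> 7500 ns   (+50%)
 *   then 7500 -> 11250 -> 16875 -> ...
 * Each bump is logged with a "CE: ... increasing min_delta_ns" warning,
 * so a device that needs a large minimum delta converges instead of
 * wedging the CPU in the old unconditional retry loop.
 */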
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index beef7ccdf842..39019b3f7621 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now) | |||
75 | incr * ticks); | 75 | incr * ticks); |
76 | } | 76 | } |
77 | do_timer(++ticks); | 77 | do_timer(++ticks); |
78 | |||
79 | /* Keep the tick_next_period variable up to date */ | ||
80 | tick_next_period = ktime_add(last_jiffies_update, tick_period); | ||
78 | } | 81 | } |
79 | write_sequnlock(&xtime_lock); | 82 | write_sequnlock(&xtime_lock); |
80 | } | 83 | } |
@@ -140,8 +143,6 @@ void tick_nohz_update_jiffies(void) | |||
140 | if (!ts->tick_stopped) | 143 | if (!ts->tick_stopped) |
141 | return; | 144 | return; |
142 | 145 | ||
143 | touch_softlockup_watchdog(); | ||
144 | |||
145 | cpu_clear(cpu, nohz_cpu_mask); | 146 | cpu_clear(cpu, nohz_cpu_mask); |
146 | now = ktime_get(); | 147 | now = ktime_get(); |
147 | ts->idle_waketime = now; | 148 | ts->idle_waketime = now; |
@@ -149,6 +150,8 @@ void tick_nohz_update_jiffies(void) | |||
149 | local_irq_save(flags); | 150 | local_irq_save(flags); |
150 | tick_do_update_jiffies64(now); | 151 | tick_do_update_jiffies64(now); |
151 | local_irq_restore(flags); | 152 | local_irq_restore(flags); |
153 | |||
154 | touch_softlockup_watchdog(); | ||
152 | } | 155 | } |
153 | 156 | ||
154 | void tick_nohz_stop_idle(int cpu) | 157 | void tick_nohz_stop_idle(int cpu) |
@@ -162,6 +165,8 @@ void tick_nohz_stop_idle(int cpu) | |||
162 | ts->idle_lastupdate = now; | 165 | ts->idle_lastupdate = now; |
163 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); | 166 | ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta); |
164 | ts->idle_active = 0; | 167 | ts->idle_active = 0; |
168 | |||
169 | sched_clock_idle_wakeup_event(0); | ||
165 | } | 170 | } |
166 | } | 171 | } |
167 | 172 | ||
@@ -177,6 +182,7 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts) | |||
177 | } | 182 | } |
178 | ts->idle_entrytime = now; | 183 | ts->idle_entrytime = now; |
179 | ts->idle_active = 1; | 184 | ts->idle_active = 1; |
185 | sched_clock_idle_sleep_event(); | ||
180 | return now; | 186 | return now; |
181 | } | 187 | } |
182 | 188 | ||
@@ -195,7 +201,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) | |||
195 | * Called either from the idle loop or from irq_exit() when an idle period was | 201 | * Called either from the idle loop or from irq_exit() when an idle period was |
196 | * just interrupted by an interrupt which did not cause a reschedule. | 202 | * just interrupted by an interrupt which did not cause a reschedule. |
197 | */ | 203 | */ |
198 | void tick_nohz_stop_sched_tick(void) | 204 | void tick_nohz_stop_sched_tick(int inidle) |
199 | { | 205 | { |
200 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; | 206 | unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; |
201 | struct tick_sched *ts; | 207 | struct tick_sched *ts; |
@@ -218,12 +224,17 @@ void tick_nohz_stop_sched_tick(void) | |||
218 | */ | 224 | */ |
219 | if (unlikely(!cpu_online(cpu))) { | 225 | if (unlikely(!cpu_online(cpu))) { |
220 | if (cpu == tick_do_timer_cpu) | 226 | if (cpu == tick_do_timer_cpu) |
221 | tick_do_timer_cpu = -1; | 227 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
222 | } | 228 | } |
223 | 229 | ||
224 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | 230 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) |
225 | goto end; | 231 | goto end; |
226 | 232 | ||
233 | if (!inidle && !ts->inidle) | ||
234 | goto end; | ||
235 | |||
236 | ts->inidle = 1; | ||
237 | |||
227 | if (need_resched()) | 238 | if (need_resched()) |
228 | goto end; | 239 | goto end; |
229 | 240 | ||
@@ -284,7 +295,6 @@ void tick_nohz_stop_sched_tick(void) | |||
284 | ts->tick_stopped = 1; | 295 | ts->tick_stopped = 1; |
285 | ts->idle_jiffies = last_jiffies; | 296 | ts->idle_jiffies = last_jiffies; |
286 | rcu_enter_nohz(); | 297 | rcu_enter_nohz(); |
287 | sched_clock_tick_stop(cpu); | ||
288 | } | 298 | } |
289 | 299 | ||
290 | /* | 300 | /* |
@@ -296,7 +306,7 @@ void tick_nohz_stop_sched_tick(void) | |||
296 | * invoked. | 306 | * invoked. |
297 | */ | 307 | */ |
298 | if (cpu == tick_do_timer_cpu) | 308 | if (cpu == tick_do_timer_cpu) |
299 | tick_do_timer_cpu = -1; | 309 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
300 | 310 | ||
301 | ts->idle_sleeps++; | 311 | ts->idle_sleeps++; |
302 | 312 | ||
@@ -373,18 +383,20 @@ void tick_nohz_restart_sched_tick(void) | |||
373 | local_irq_disable(); | 383 | local_irq_disable(); |
374 | tick_nohz_stop_idle(cpu); | 384 | tick_nohz_stop_idle(cpu); |
375 | 385 | ||
376 | if (!ts->tick_stopped) { | 386 | if (!ts->inidle || !ts->tick_stopped) { |
387 | ts->inidle = 0; | ||
377 | local_irq_enable(); | 388 | local_irq_enable(); |
378 | return; | 389 | return; |
379 | } | 390 | } |
380 | 391 | ||
392 | ts->inidle = 0; | ||
393 | |||
381 | rcu_exit_nohz(); | 394 | rcu_exit_nohz(); |
382 | 395 | ||
383 | /* Update jiffies first */ | 396 | /* Update jiffies first */ |
384 | select_nohz_load_balancer(0); | 397 | select_nohz_load_balancer(0); |
385 | now = ktime_get(); | 398 | now = ktime_get(); |
386 | tick_do_update_jiffies64(now); | 399 | tick_do_update_jiffies64(now); |
387 | sched_clock_tick_start(cpu); | ||
388 | cpu_clear(cpu, nohz_cpu_mask); | 400 | cpu_clear(cpu, nohz_cpu_mask); |
389 | 401 | ||
390 | /* | 402 | /* |
@@ -459,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev) | |||
459 | * this duty, then the jiffies update is still serialized by | 471 | * this duty, then the jiffies update is still serialized by |
460 | * xtime_lock. | 472 | * xtime_lock. |
461 | */ | 473 | */ |
462 | if (unlikely(tick_do_timer_cpu == -1)) | 474 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
463 | tick_do_timer_cpu = cpu; | 475 | tick_do_timer_cpu = cpu; |
464 | 476 | ||
465 | /* Check, if the jiffies need an update */ | 477 | /* Check, if the jiffies need an update */ |
@@ -561,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |||
561 | * this duty, then the jiffies update is still serialized by | 573 | * this duty, then the jiffies update is still serialized by |
562 | * xtime_lock. | 574 | * xtime_lock. |
563 | */ | 575 | */ |
564 | if (unlikely(tick_do_timer_cpu == -1)) | 576 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
565 | tick_do_timer_cpu = cpu; | 577 | tick_do_timer_cpu = cpu; |
566 | #endif | 578 | #endif |
567 | 579 | ||
@@ -637,17 +649,21 @@ void tick_setup_sched_timer(void) | |||
637 | ts->nohz_mode = NOHZ_MODE_HIGHRES; | 649 | ts->nohz_mode = NOHZ_MODE_HIGHRES; |
638 | #endif | 650 | #endif |
639 | } | 651 | } |
652 | #endif /* HIGH_RES_TIMERS */ | ||
640 | 653 | ||
654 | #if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS | ||
641 | void tick_cancel_sched_timer(int cpu) | 655 | void tick_cancel_sched_timer(int cpu) |
642 | { | 656 | { |
643 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 657 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
644 | 658 | ||
659 | # ifdef CONFIG_HIGH_RES_TIMERS | ||
645 | if (ts->sched_timer.base) | 660 | if (ts->sched_timer.base) |
646 | hrtimer_cancel(&ts->sched_timer); | 661 | hrtimer_cancel(&ts->sched_timer); |
662 | # endif | ||
647 | 663 | ||
648 | ts->nohz_mode = NOHZ_MODE_INACTIVE; | 664 | ts->nohz_mode = NOHZ_MODE_INACTIVE; |
649 | } | 665 | } |
650 | #endif /* HIGH_RES_TIMERS */ | 666 | #endif |
651 | 667 | ||
652 | /** | 668 | /** |
653 | * Async notification about clocksource changes | 669 | * Async notification about clocksource changes |
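Note: the new ts->inidle flag means only the idle loop can actually stop the tick; calls from irq_exit() pass inidle == 0 and are ignored unless the idle path already set the flag. A sketch of the pairing in an arch idle loop (the loop shape follows the usual cpu_idle() pattern; safe_halt() stands in for whatever the architecture uses to sleep):

while (1) {
	tick_nohz_stop_sched_tick(1);	/* idle loop: sets ts->inidle */
	while (!need_resched())
		safe_halt();		/* irq_exit() may call it with 0 */
	tick_nohz_restart_sched_tick();	/* clears ts->inidle again */
	schedule();
}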
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 4231a3dc224a..f6e3af31b403 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -587,7 +587,7 @@ static int __ftrace_modify_code(void *data) | |||
587 | 587 | ||
588 | static void ftrace_run_update_code(int command) | 588 | static void ftrace_run_update_code(int command) |
589 | { | 589 | { |
590 | stop_machine_run(__ftrace_modify_code, &command, NR_CPUS); | 590 | stop_machine(__ftrace_modify_code, &command, NULL); |
591 | } | 591 | } |
592 | 592 | ||
593 | void ftrace_disable_daemon(void) | 593 | void ftrace_disable_daemon(void) |
@@ -787,7 +787,7 @@ static int ftrace_update_code(void) | |||
787 | !ftrace_enabled || !ftraced_trigger) | 787 | !ftrace_enabled || !ftraced_trigger) |
788 | return 0; | 788 | return 0; |
789 | 789 | ||
790 | stop_machine_run(__ftrace_update_code, NULL, NR_CPUS); | 790 | stop_machine(__ftrace_update_code, NULL, NULL); |
791 | 791 | ||
792 | return 1; | 792 | return 1; |
793 | } | 793 | } |
@@ -1564,7 +1564,7 @@ static int __init ftrace_dynamic_init(void) | |||
1564 | 1564 | ||
1565 | addr = (unsigned long)ftrace_record_ip; | 1565 | addr = (unsigned long)ftrace_record_ip; |
1566 | 1566 | ||
1567 | stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS); | 1567 | stop_machine(ftrace_dyn_arch_init, &addr, NULL); |
1568 | 1568 | ||
1569 | /* ftrace_dyn_arch_init places the return code in addr */ | 1569 | /* ftrace_dyn_arch_init places the return code in addr */ |
1570 | if (addr) { | 1570 | if (addr) { |
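Note: the ftrace changes above are a straight migration from stop_machine_run() to stop_machine(): the third argument is now a cpumask pointer rather than a CPU number, and NULL means "run on any online CPU". Schematic before/after (fn and data stand for the callers above):

/* Old API: third argument was a cpu number, NR_CPUS meaning "any cpu". */
stop_machine_run(fn, data, NR_CPUS);

/* New API: third argument is a cpumask; NULL keeps the "any cpu" meaning. */
stop_machine(fn, data, NULL);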
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 868e121c8e38..8f3fb3db61c3 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1183,7 +1183,6 @@ static void *find_next_entry_inc(struct trace_iterator *iter) | |||
1183 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) | 1183 | static void *s_next(struct seq_file *m, void *v, loff_t *pos) |
1184 | { | 1184 | { |
1185 | struct trace_iterator *iter = m->private; | 1185 | struct trace_iterator *iter = m->private; |
1186 | void *last_ent = iter->ent; | ||
1187 | int i = (int)*pos; | 1186 | int i = (int)*pos; |
1188 | void *ent; | 1187 | void *ent; |
1189 | 1188 | ||
@@ -1203,9 +1202,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1203 | 1202 | ||
1204 | iter->pos = *pos; | 1203 | iter->pos = *pos; |
1205 | 1204 | ||
1206 | if (last_ent && !ent) | ||
1207 | seq_puts(m, "\n\nvim:ft=help\n"); | ||
1208 | |||
1209 | return ent; | 1205 | return ent; |
1210 | } | 1206 | } |
1211 | 1207 | ||
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 421d6fe3650e..ece6cfb649fa 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -253,12 +253,14 @@ void start_critical_timings(void) | |||
253 | if (preempt_trace() || irq_trace()) | 253 | if (preempt_trace() || irq_trace()) |
254 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 254 | start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
255 | } | 255 | } |
256 | EXPORT_SYMBOL_GPL(start_critical_timings); | ||
256 | 257 | ||
257 | void stop_critical_timings(void) | 258 | void stop_critical_timings(void) |
258 | { | 259 | { |
259 | if (preempt_trace() || irq_trace()) | 260 | if (preempt_trace() || irq_trace()) |
260 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); | 261 | stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); |
261 | } | 262 | } |
263 | EXPORT_SYMBOL_GPL(stop_critical_timings); | ||
262 | 264 | ||
263 | #ifdef CONFIG_IRQSOFF_TRACER | 265 | #ifdef CONFIG_IRQSOFF_TRACER |
264 | #ifdef CONFIG_PROVE_LOCKING | 266 | #ifdef CONFIG_PROVE_LOCKING |
@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller); | |||
337 | #ifdef CONFIG_PREEMPT_TRACER | 339 | #ifdef CONFIG_PREEMPT_TRACER |
338 | void trace_preempt_on(unsigned long a0, unsigned long a1) | 340 | void trace_preempt_on(unsigned long a0, unsigned long a1) |
339 | { | 341 | { |
340 | stop_critical_timing(a0, a1); | 342 | if (preempt_trace()) |
343 | stop_critical_timing(a0, a1); | ||
341 | } | 344 | } |
342 | 345 | ||
343 | void trace_preempt_off(unsigned long a0, unsigned long a1) | 346 | void trace_preempt_off(unsigned long a0, unsigned long a1) |
344 | { | 347 | { |
345 | start_critical_timing(a0, a1); | 348 | if (preempt_trace()) |
349 | start_critical_timing(a0, a1); | ||
346 | } | 350 | } |
347 | #endif /* CONFIG_PREEMPT_TRACER */ | 351 | #endif /* CONFIG_PREEMPT_TRACER */ |
348 | 352 | ||
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 3c8d61df4474..e303ccb62cdf 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task; | |||
26 | static int wakeup_cpu; | 26 | static int wakeup_cpu; |
27 | static unsigned wakeup_prio = -1; | 27 | static unsigned wakeup_prio = -1; |
28 | 28 | ||
29 | static DEFINE_SPINLOCK(wakeup_lock); | 29 | static raw_spinlock_t wakeup_lock = |
30 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
30 | 31 | ||
31 | static void __wakeup_reset(struct trace_array *tr); | 32 | static void __wakeup_reset(struct trace_array *tr); |
32 | 33 | ||
@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
56 | if (unlikely(disabled != 1)) | 57 | if (unlikely(disabled != 1)) |
57 | goto out; | 58 | goto out; |
58 | 59 | ||
59 | spin_lock_irqsave(&wakeup_lock, flags); | 60 | local_irq_save(flags); |
61 | __raw_spin_lock(&wakeup_lock); | ||
60 | 62 | ||
61 | if (unlikely(!wakeup_task)) | 63 | if (unlikely(!wakeup_task)) |
62 | goto unlock; | 64 | goto unlock; |
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
71 | trace_function(tr, data, ip, parent_ip, flags); | 73 | trace_function(tr, data, ip, parent_ip, flags); |
72 | 74 | ||
73 | unlock: | 75 | unlock: |
74 | spin_unlock_irqrestore(&wakeup_lock, flags); | 76 | __raw_spin_unlock(&wakeup_lock); |
77 | local_irq_restore(flags); | ||
75 | 78 | ||
76 | out: | 79 | out: |
77 | atomic_dec(&data->disabled); | 80 | atomic_dec(&data->disabled); |
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, | |||
145 | if (likely(disabled != 1)) | 148 | if (likely(disabled != 1)) |
146 | goto out; | 149 | goto out; |
147 | 150 | ||
148 | spin_lock_irqsave(&wakeup_lock, flags); | 151 | local_irq_save(flags); |
152 | __raw_spin_lock(&wakeup_lock); | ||
149 | 153 | ||
150 | /* We could race with grabbing wakeup_lock */ | 154 | /* We could race with grabbing wakeup_lock */ |
151 | if (unlikely(!tracer_enabled || next != wakeup_task)) | 155 | if (unlikely(!tracer_enabled || next != wakeup_task)) |
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, | |||
174 | 178 | ||
175 | out_unlock: | 179 | out_unlock: |
176 | __wakeup_reset(tr); | 180 | __wakeup_reset(tr); |
177 | spin_unlock_irqrestore(&wakeup_lock, flags); | 181 | __raw_spin_unlock(&wakeup_lock); |
182 | local_irq_restore(flags); | ||
178 | out: | 183 | out: |
179 | atomic_dec(&tr->data[cpu]->disabled); | 184 | atomic_dec(&tr->data[cpu]->disabled); |
180 | } | 185 | } |
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr) | |||
209 | struct trace_array_cpu *data; | 214 | struct trace_array_cpu *data; |
210 | int cpu; | 215 | int cpu; |
211 | 216 | ||
212 | assert_spin_locked(&wakeup_lock); | ||
213 | |||
214 | for_each_possible_cpu(cpu) { | 217 | for_each_possible_cpu(cpu) { |
215 | data = tr->data[cpu]; | 218 | data = tr->data[cpu]; |
216 | tracing_reset(data); | 219 | tracing_reset(data); |
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr) | |||
229 | { | 232 | { |
230 | unsigned long flags; | 233 | unsigned long flags; |
231 | 234 | ||
232 | spin_lock_irqsave(&wakeup_lock, flags); | 235 | local_irq_save(flags); |
236 | __raw_spin_lock(&wakeup_lock); | ||
233 | __wakeup_reset(tr); | 237 | __wakeup_reset(tr); |
234 | spin_unlock_irqrestore(&wakeup_lock, flags); | 238 | __raw_spin_unlock(&wakeup_lock); |
239 | local_irq_restore(flags); | ||
235 | } | 240 | } |
236 | 241 | ||
237 | static void | 242 | static void |
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p, | |||
252 | goto out; | 257 | goto out; |
253 | 258 | ||
254 | /* interrupts should be off from try_to_wake_up */ | 259 | /* interrupts should be off from try_to_wake_up */ |
255 | spin_lock(&wakeup_lock); | 260 | __raw_spin_lock(&wakeup_lock); |
256 | 261 | ||
257 | /* check for races. */ | 262 | /* check for races. */ |
258 | if (!tracer_enabled || p->prio >= wakeup_prio) | 263 | if (!tracer_enabled || p->prio >= wakeup_prio) |
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p, | |||
274 | CALLER_ADDR1, CALLER_ADDR2, flags); | 279 | CALLER_ADDR1, CALLER_ADDR2, flags); |
275 | 280 | ||
276 | out_locked: | 281 | out_locked: |
277 | spin_unlock(&wakeup_lock); | 282 | __raw_spin_unlock(&wakeup_lock); |
278 | out: | 283 | out: |
279 | atomic_dec(&tr->data[cpu]->disabled); | 284 | atomic_dec(&tr->data[cpu]->disabled); |
280 | } | 285 | } |
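Note: wakeup_lock above becomes a raw_spinlock_t taken with explicit local_irq_save() plus __raw_spin_lock(), presumably so that neither lockdep nor the function tracer instruments the tracer's own lock and recurses. The resulting pattern at every site (schematic):

local_irq_save(flags);
__raw_spin_lock(&wakeup_lock);	/* invisible to lockdep and to ftrace */
/* ... record or reset the wakeup trace ... */
__raw_spin_unlock(&wakeup_lock);
local_irq_restore(flags);	/* __raw_spin_lock() never touches IRQs */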
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 2301e1e7c606..bb948e52ce20 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -161,7 +161,7 @@ static void timer_notify(struct pt_regs *regs, int cpu) | |||
161 | __trace_special(tr, data, 2, regs->ip, 0); | 161 | __trace_special(tr, data, 2, regs->ip, 0); |
162 | 162 | ||
163 | while (i < sample_max_depth) { | 163 | while (i < sample_max_depth) { |
164 | frame.next_fp = 0; | 164 | frame.next_fp = NULL; |
165 | frame.return_address = 0; | 165 | frame.return_address = 0; |
166 | if (!copy_stack_frame(fp, &frame)) | 166 | if (!copy_stack_frame(fp, &frame)) |
167 | break; | 167 | break; |
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 4ab1b584961b..8ebcd8532dfb 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
@@ -28,14 +28,14 @@ | |||
28 | void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | 28 | void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) |
29 | { | 29 | { |
30 | struct timespec uptime, ts; | 30 | struct timespec uptime, ts; |
31 | s64 ac_etime; | 31 | u64 ac_etime; |
32 | 32 | ||
33 | BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); | 33 | BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); |
34 | 34 | ||
35 | /* calculate task elapsed time in timespec */ | 35 | /* calculate task elapsed time in timespec */ |
36 | do_posix_clock_monotonic_gettime(&uptime); | 36 | do_posix_clock_monotonic_gettime(&uptime); |
37 | ts = timespec_sub(uptime, tsk->start_time); | 37 | ts = timespec_sub(uptime, tsk->start_time); |
38 | /* rebase elapsed time to usec */ | 38 | /* rebase elapsed time to usec (should never be negative) */ |
39 | ac_etime = timespec_to_ns(&ts); | 39 | ac_etime = timespec_to_ns(&ts); |
40 | do_div(ac_etime, NSEC_PER_USEC); | 40 | do_div(ac_etime, NSEC_PER_USEC); |
41 | stats->ac_etime = ac_etime; | 41 | stats->ac_etime = ac_etime; |
@@ -84,9 +84,9 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) | |||
84 | { | 84 | { |
85 | struct mm_struct *mm; | 85 | struct mm_struct *mm; |
86 | 86 | ||
87 | /* convert pages-jiffies to Mbyte-usec */ | 87 | /* convert pages-usec to Mbyte-usec */ |
88 | stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; | 88 | stats->coremem = p->acct_rss_mem1 * PAGE_SIZE / MB; |
89 | stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; | 89 | stats->virtmem = p->acct_vm_mem1 * PAGE_SIZE / MB; |
90 | mm = get_task_mm(p); | 90 | mm = get_task_mm(p); |
91 | if (mm) { | 91 | if (mm) { |
92 | /* adjust to KB unit */ | 92 | /* adjust to KB unit */ |
@@ -94,10 +94,10 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) | |||
94 | stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; | 94 | stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; |
95 | mmput(mm); | 95 | mmput(mm); |
96 | } | 96 | } |
97 | stats->read_char = p->rchar; | 97 | stats->read_char = p->ioac.rchar; |
98 | stats->write_char = p->wchar; | 98 | stats->write_char = p->ioac.wchar; |
99 | stats->read_syscalls = p->syscr; | 99 | stats->read_syscalls = p->ioac.syscr; |
100 | stats->write_syscalls = p->syscw; | 100 | stats->write_syscalls = p->ioac.syscw; |
101 | #ifdef CONFIG_TASK_IO_ACCOUNTING | 101 | #ifdef CONFIG_TASK_IO_ACCOUNTING |
102 | stats->read_bytes = p->ioac.read_bytes; | 102 | stats->read_bytes = p->ioac.read_bytes; |
103 | stats->write_bytes = p->ioac.write_bytes; | 103 | stats->write_bytes = p->ioac.write_bytes; |
@@ -118,12 +118,19 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) | |||
118 | void acct_update_integrals(struct task_struct *tsk) | 118 | void acct_update_integrals(struct task_struct *tsk) |
119 | { | 119 | { |
120 | if (likely(tsk->mm)) { | 120 | if (likely(tsk->mm)) { |
121 | long delta = cputime_to_jiffies( | 121 | cputime_t time, dtime; |
122 | cputime_sub(tsk->stime, tsk->acct_stimexpd)); | 122 | struct timeval value; |
123 | u64 delta; | ||
124 | |||
125 | time = tsk->stime + tsk->utime; | ||
126 | dtime = cputime_sub(time, tsk->acct_timexpd); | ||
127 | jiffies_to_timeval(cputime_to_jiffies(dtime), &value); | ||
128 | delta = value.tv_sec; | ||
129 | delta = delta * USEC_PER_SEC + value.tv_usec; | ||
123 | 130 | ||
124 | if (delta == 0) | 131 | if (delta == 0) |
125 | return; | 132 | return; |
126 | tsk->acct_stimexpd = tsk->stime; | 133 | tsk->acct_timexpd = time; |
127 | tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); | 134 | tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm); |
128 | tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; | 135 | tsk->acct_vm_mem1 += delta * tsk->mm->total_vm; |
129 | } | 136 | } |
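acct_stimexpd becomes acct_timexpd and now tracks stime + utime, and the expired interval is converted to microseconds (rather than jiffies) before being multiplied into the RSS/VM integrals; assigning tv_sec to a u64 first keeps the tv_sec * USEC_PER_SEC multiplication in 64 bits. The conversion in isolation, as a hypothetical helper (assuming the usual jiffies-granularity cputime_t of this era):

    #include <linux/sched.h>
    #include <linux/time.h>
    #include <linux/jiffies.h>

    static u64 cputime_delta_usecs(cputime_t now, cputime_t expired)
    {
            struct timeval tv;
            u64 usecs;

            jiffies_to_timeval(cputime_to_jiffies(cputime_sub(now, expired)), &tv);
            usecs = tv.tv_sec;                      /* promote to 64 bits first */
            return usecs * USEC_PER_SEC + tv.tv_usec;
    }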
@@ -135,7 +142,7 @@ void acct_update_integrals(struct task_struct *tsk) | |||
135 | */ | 142 | */ |
136 | void acct_clear_integrals(struct task_struct *tsk) | 143 | void acct_clear_integrals(struct task_struct *tsk) |
137 | { | 144 | { |
138 | tsk->acct_stimexpd = 0; | 145 | tsk->acct_timexpd = 0; |
139 | tsk->acct_rss_mem1 = 0; | 146 | tsk->acct_rss_mem1 = 0; |
140 | tsk->acct_vm_mem1 = 0; | 147 | tsk->acct_vm_mem1 = 0; |
141 | } | 148 | } |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index a9ab0596de44..532858fa5b88 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -6,7 +6,6 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/version.h> | ||
10 | #include <linux/nsproxy.h> | 9 | #include <linux/nsproxy.h> |
11 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
12 | #include <linux/user_namespace.h> | 11 | #include <linux/user_namespace.h> |
diff --git a/kernel/utsname.c b/kernel/utsname.c index 64d398f12444..815237a55af8 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/uts.h> | 13 | #include <linux/uts.h> |
14 | #include <linux/utsname.h> | 14 | #include <linux/utsname.h> |
15 | #include <linux/version.h> | ||
16 | #include <linux/err.h> | 15 | #include <linux/err.h> |
17 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
18 | 17 | ||
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index fe3a56c2256d..4ab9659d269e 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/uts.h> | 13 | #include <linux/uts.h> |
14 | #include <linux/utsname.h> | 14 | #include <linux/utsname.h> |
15 | #include <linux/version.h> | ||
16 | #include <linux/sysctl.h> | 15 | #include <linux/sysctl.h> |
17 | 16 | ||
18 | static void *get_uts(ctl_table *table, int write) | 17 | static void *get_uts(ctl_table *table, int write) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ce7799540c91..4048e92aa04f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -125,7 +125,7 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) | |||
125 | } | 125 | } |
126 | 126 | ||
127 | static void insert_work(struct cpu_workqueue_struct *cwq, | 127 | static void insert_work(struct cpu_workqueue_struct *cwq, |
128 | struct work_struct *work, int tail) | 128 | struct work_struct *work, struct list_head *head) |
129 | { | 129 | { |
130 | set_wq_data(work, cwq); | 130 | set_wq_data(work, cwq); |
131 | /* | 131 | /* |
@@ -133,21 +133,17 @@ static void insert_work(struct cpu_workqueue_struct *cwq, | |||
133 | * result of list_add() below, see try_to_grab_pending(). | 133 | * result of list_add() below, see try_to_grab_pending(). |
134 | */ | 134 | */ |
135 | smp_wmb(); | 135 | smp_wmb(); |
136 | if (tail) | 136 | list_add_tail(&work->entry, head); |
137 | list_add_tail(&work->entry, &cwq->worklist); | ||
138 | else | ||
139 | list_add(&work->entry, &cwq->worklist); | ||
140 | wake_up(&cwq->more_work); | 137 | wake_up(&cwq->more_work); |
141 | } | 138 | } |
142 | 139 | ||
143 | /* Preempt must be disabled. */ | ||
144 | static void __queue_work(struct cpu_workqueue_struct *cwq, | 140 | static void __queue_work(struct cpu_workqueue_struct *cwq, |
145 | struct work_struct *work) | 141 | struct work_struct *work) |
146 | { | 142 | { |
147 | unsigned long flags; | 143 | unsigned long flags; |
148 | 144 | ||
149 | spin_lock_irqsave(&cwq->lock, flags); | 145 | spin_lock_irqsave(&cwq->lock, flags); |
150 | insert_work(cwq, work, 1); | 146 | insert_work(cwq, work, &cwq->worklist); |
151 | spin_unlock_irqrestore(&cwq->lock, flags); | 147 | spin_unlock_irqrestore(&cwq->lock, flags); |
152 | } | 148 | } |
153 | 149 | ||
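Passing a struct list_head * instead of an int tail flag works because list_add_tail(new, head) links new immediately in front of head, so any element of the list can act as the insertion point. How the callers in this patch use it (names taken from the patch, shown only for illustration):

    insert_work(cwq, work, &cwq->worklist);            /* before the list head: append at the tail */
    insert_wq_barrier(cwq, &barr, cwq->worklist.next); /* before the first entry: head of the queue */
    insert_wq_barrier(cwq, &barr, prev->next);         /* immediately after *prev */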
@@ -163,17 +159,39 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, | |||
163 | */ | 159 | */ |
164 | int queue_work(struct workqueue_struct *wq, struct work_struct *work) | 160 | int queue_work(struct workqueue_struct *wq, struct work_struct *work) |
165 | { | 161 | { |
162 | int ret; | ||
163 | |||
164 | ret = queue_work_on(get_cpu(), wq, work); | ||
165 | put_cpu(); | ||
166 | |||
167 | return ret; | ||
168 | } | ||
169 | EXPORT_SYMBOL_GPL(queue_work); | ||
170 | |||
171 | /** | ||
172 | * queue_work_on - queue work on specific cpu | ||
173 | * @cpu: CPU number to execute work on | ||
174 | * @wq: workqueue to use | ||
175 | * @work: work to queue | ||
176 | * | ||
177 | * Returns 0 if @work was already on a queue, non-zero otherwise. | ||
178 | * | ||
179 | * We queue the work to a specific CPU; the caller must ensure it | ||
180 | * can't go away. | ||
181 | */ | ||
182 | int | ||
183 | queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work) | ||
184 | { | ||
166 | int ret = 0; | 185 | int ret = 0; |
167 | 186 | ||
168 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { | 187 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { |
169 | BUG_ON(!list_empty(&work->entry)); | 188 | BUG_ON(!list_empty(&work->entry)); |
170 | __queue_work(wq_per_cpu(wq, get_cpu()), work); | 189 | __queue_work(wq_per_cpu(wq, cpu), work); |
171 | put_cpu(); | ||
172 | ret = 1; | 190 | ret = 1; |
173 | } | 191 | } |
174 | return ret; | 192 | return ret; |
175 | } | 193 | } |
176 | EXPORT_SYMBOL_GPL(queue_work); | 194 | EXPORT_SYMBOL_GPL(queue_work_on); |
177 | 195 | ||
178 | static void delayed_work_timer_fn(unsigned long __data) | 196 | static void delayed_work_timer_fn(unsigned long __data) |
179 | { | 197 | { |
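queue_work() is now a thin wrapper that pins the caller's CPU with get_cpu()/put_cpu() and forwards to the new queue_work_on(). A sketch of a direct queue_work_on() caller (hypothetical names; per the kerneldoc above, the caller must keep the target CPU from going offline):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *stats_wq;   /* assumed: created elsewhere */
    static struct work_struct stats_work;       /* assumed: INIT_WORK()ed elsewhere */

    static void kick_stats_on(int cpu)
    {
            /* returns 0 if the work was already pending somewhere */
            if (!queue_work_on(cpu, stats_wq, &stats_work))
                    pr_debug("stats_work already queued\n");
    }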
@@ -272,11 +290,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
272 | 290 | ||
273 | BUG_ON(get_wq_data(work) != cwq); | 291 | BUG_ON(get_wq_data(work) != cwq); |
274 | work_clear_pending(work); | 292 | work_clear_pending(work); |
275 | lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 293 | lock_map_acquire(&cwq->wq->lockdep_map); |
276 | lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 294 | lock_map_acquire(&lockdep_map); |
277 | f(work); | 295 | f(work); |
278 | lock_release(&lockdep_map, 1, _THIS_IP_); | 296 | lock_map_release(&lockdep_map); |
279 | lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); | 297 | lock_map_release(&cwq->wq->lockdep_map); |
280 | 298 | ||
281 | if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { | 299 | if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { |
282 | printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " | 300 | printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " |
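lock_map_acquire()/lock_map_release() are lockdep helpers that replace the open-coded lock_acquire()/lock_release() pairs; they mark the workqueue's and the work's lockdep_map as a pseudo-lock held around the callback, so lockdep can flag a deadlock if the callback (or something it waits on) tries to flush the same workqueue. The annotation pattern in isolation (sketch; my_callback_map is a hypothetical map initialised elsewhere with lockdep_init_map()):

    #include <linux/lockdep.h>

    static struct lockdep_map my_callback_map;  /* assumed: lockdep_init_map()ed */

    static void run_one(void (*fn)(void *), void *arg)
    {
            lock_map_acquire(&my_callback_map);  /* "entering" the callback context */
            fn(arg);
            lock_map_release(&my_callback_map);  /* and leaving it again */
    }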
@@ -337,14 +355,14 @@ static void wq_barrier_func(struct work_struct *work) | |||
337 | } | 355 | } |
338 | 356 | ||
339 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, | 357 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, |
340 | struct wq_barrier *barr, int tail) | 358 | struct wq_barrier *barr, struct list_head *head) |
341 | { | 359 | { |
342 | INIT_WORK(&barr->work, wq_barrier_func); | 360 | INIT_WORK(&barr->work, wq_barrier_func); |
343 | __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); | 361 | __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); |
344 | 362 | ||
345 | init_completion(&barr->done); | 363 | init_completion(&barr->done); |
346 | 364 | ||
347 | insert_work(cwq, &barr->work, tail); | 365 | insert_work(cwq, &barr->work, head); |
348 | } | 366 | } |
349 | 367 | ||
350 | static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) | 368 | static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) |
@@ -364,7 +382,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) | |||
364 | active = 0; | 382 | active = 0; |
365 | spin_lock_irq(&cwq->lock); | 383 | spin_lock_irq(&cwq->lock); |
366 | if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { | 384 | if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { |
367 | insert_wq_barrier(cwq, &barr, 1); | 385 | insert_wq_barrier(cwq, &barr, &cwq->worklist); |
368 | active = 1; | 386 | active = 1; |
369 | } | 387 | } |
370 | spin_unlock_irq(&cwq->lock); | 388 | spin_unlock_irq(&cwq->lock); |
@@ -395,13 +413,64 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
395 | int cpu; | 413 | int cpu; |
396 | 414 | ||
397 | might_sleep(); | 415 | might_sleep(); |
398 | lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 416 | lock_map_acquire(&wq->lockdep_map); |
399 | lock_release(&wq->lockdep_map, 1, _THIS_IP_); | 417 | lock_map_release(&wq->lockdep_map); |
400 | for_each_cpu_mask(cpu, *cpu_map) | 418 | for_each_cpu_mask_nr(cpu, *cpu_map) |
401 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); | 419 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); |
402 | } | 420 | } |
403 | EXPORT_SYMBOL_GPL(flush_workqueue); | 421 | EXPORT_SYMBOL_GPL(flush_workqueue); |
404 | 422 | ||
423 | /** | ||
424 | * flush_work - block until a work_struct's callback has terminated | ||
425 | * @work: the work which is to be flushed | ||
426 | * | ||
427 | * Returns false if @work has already terminated. | ||
428 | * | ||
429 | * It is expected that, prior to calling flush_work(), the caller has | ||
430 | * arranged for the work to not be requeued, otherwise it doesn't make | ||
431 | * sense to use this function. | ||
432 | */ | ||
433 | int flush_work(struct work_struct *work) | ||
434 | { | ||
435 | struct cpu_workqueue_struct *cwq; | ||
436 | struct list_head *prev; | ||
437 | struct wq_barrier barr; | ||
438 | |||
439 | might_sleep(); | ||
440 | cwq = get_wq_data(work); | ||
441 | if (!cwq) | ||
442 | return 0; | ||
443 | |||
444 | lock_map_acquire(&cwq->wq->lockdep_map); | ||
445 | lock_map_release(&cwq->wq->lockdep_map); | ||
446 | |||
447 | prev = NULL; | ||
448 | spin_lock_irq(&cwq->lock); | ||
449 | if (!list_empty(&work->entry)) { | ||
450 | /* | ||
451 | * See the comment near try_to_grab_pending()->smp_rmb(). | ||
452 | * If it was re-queued under us we are not going to wait. | ||
453 | */ | ||
454 | smp_rmb(); | ||
455 | if (unlikely(cwq != get_wq_data(work))) | ||
456 | goto out; | ||
457 | prev = &work->entry; | ||
458 | } else { | ||
459 | if (cwq->current_work != work) | ||
460 | goto out; | ||
461 | prev = &cwq->worklist; | ||
462 | } | ||
463 | insert_wq_barrier(cwq, &barr, prev->next); | ||
464 | out: | ||
465 | spin_unlock_irq(&cwq->lock); | ||
466 | if (!prev) | ||
467 | return 0; | ||
468 | |||
469 | wait_for_completion(&barr.done); | ||
470 | return 1; | ||
471 | } | ||
472 | EXPORT_SYMBOL_GPL(flush_work); | ||
473 | |||
405 | /* | 474 | /* |
406 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, | 475 | * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit, |
407 | * so this work can't be re-armed in any way. | 476 | * so this work can't be re-armed in any way. |
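flush_work() waits for one specific work_struct instead of draining a whole workqueue: if the work is still queued, a barrier is inserted directly behind it; if it is already running, the barrier goes to the head of that CPU's worklist; either way the caller then sleeps on the barrier's completion. A sketch of the intended use (hypothetical teardown path; as the kerneldoc says, the caller must already have stopped the work from being requeued):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    static struct work_struct io_work;          /* assumed: INIT_WORK()ed elsewhere */

    static void stop_io(void)
    {
            /* assumption: nothing can requeue io_work past this point */
            if (flush_work(&io_work))
                    pr_debug("waited for io_work to finish\n");
            else
                    pr_debug("io_work was already idle\n");
    }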
@@ -449,7 +518,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, | |||
449 | 518 | ||
450 | spin_lock_irq(&cwq->lock); | 519 | spin_lock_irq(&cwq->lock); |
451 | if (unlikely(cwq->current_work == work)) { | 520 | if (unlikely(cwq->current_work == work)) { |
452 | insert_wq_barrier(cwq, &barr, 0); | 521 | insert_wq_barrier(cwq, &barr, cwq->worklist.next); |
453 | running = 1; | 522 | running = 1; |
454 | } | 523 | } |
455 | spin_unlock_irq(&cwq->lock); | 524 | spin_unlock_irq(&cwq->lock); |
@@ -467,8 +536,8 @@ static void wait_on_work(struct work_struct *work) | |||
467 | 536 | ||
468 | might_sleep(); | 537 | might_sleep(); |
469 | 538 | ||
470 | lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 539 | lock_map_acquire(&work->lockdep_map); |
471 | lock_release(&work->lockdep_map, 1, _THIS_IP_); | 540 | lock_map_release(&work->lockdep_map); |
472 | 541 | ||
473 | cwq = get_wq_data(work); | 542 | cwq = get_wq_data(work); |
474 | if (!cwq) | 543 | if (!cwq) |
@@ -477,7 +546,7 @@ static void wait_on_work(struct work_struct *work) | |||
477 | wq = cwq->wq; | 546 | wq = cwq->wq; |
478 | cpu_map = wq_cpu_map(wq); | 547 | cpu_map = wq_cpu_map(wq); |
479 | 548 | ||
480 | for_each_cpu_mask(cpu, *cpu_map) | 549 | for_each_cpu_mask_nr(cpu, *cpu_map) |
481 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 550 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
482 | } | 551 | } |
483 | 552 | ||
@@ -553,6 +622,19 @@ int schedule_work(struct work_struct *work) | |||
553 | } | 622 | } |
554 | EXPORT_SYMBOL(schedule_work); | 623 | EXPORT_SYMBOL(schedule_work); |
555 | 624 | ||
625 | /* | ||
626 | * schedule_work_on - put work task on a specific cpu | ||
627 | * @cpu: cpu to put the work task on | ||
628 | * @work: job to be done | ||
629 | * | ||
630 | * This puts a job on a specific cpu | ||
631 | */ | ||
632 | int schedule_work_on(int cpu, struct work_struct *work) | ||
633 | { | ||
634 | return queue_work_on(cpu, keventd_wq, work); | ||
635 | } | ||
636 | EXPORT_SYMBOL(schedule_work_on); | ||
637 | |||
556 | /** | 638 | /** |
557 | * schedule_delayed_work - put work task in global workqueue after delay | 639 | * schedule_delayed_work - put work task in global workqueue after delay |
558 | * @dwork: job to be done | 640 | * @dwork: job to be done |
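schedule_work_on() is the keventd counterpart of queue_work_on(). A minimal hypothetical caller (again, the CPU must stay online while the work is pending):

    #include <linux/workqueue.h>

    static struct work_struct refresh_work;     /* assumed: INIT_WORK()ed elsewhere */

    static void refresh_on(int cpu)
    {
            schedule_work_on(cpu, &refresh_work);
    }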
@@ -607,10 +689,10 @@ int schedule_on_each_cpu(work_func_t func) | |||
607 | struct work_struct *work = per_cpu_ptr(works, cpu); | 689 | struct work_struct *work = per_cpu_ptr(works, cpu); |
608 | 690 | ||
609 | INIT_WORK(work, func); | 691 | INIT_WORK(work, func); |
610 | set_bit(WORK_STRUCT_PENDING, work_data_bits(work)); | 692 | schedule_work_on(cpu, work); |
611 | __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work); | ||
612 | } | 693 | } |
613 | flush_workqueue(keventd_wq); | 694 | for_each_online_cpu(cpu) |
695 | flush_work(per_cpu_ptr(works, cpu)); | ||
614 | put_online_cpus(); | 696 | put_online_cpus(); |
615 | free_percpu(works); | 697 | free_percpu(works); |
616 | return 0; | 698 | return 0; |
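With schedule_work_on() and flush_work() available, schedule_on_each_cpu() no longer pokes WORK_STRUCT_PENDING and __queue_work() by hand, and it no longer calls flush_workqueue(keventd_wq), which could also block on unrelated work queued by other keventd users; it now waits for exactly the works it queued. The fan-out/fan-in shape restated as a self-contained sketch (run_on_each_cpu() is hypothetical, error handling trimmed):

    #include <linux/cpu.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    static int run_on_each_cpu(work_func_t func)
    {
            struct work_struct *works = alloc_percpu(struct work_struct);
            int cpu;

            if (!works)
                    return -ENOMEM;

            get_online_cpus();
            for_each_online_cpu(cpu) {
                    struct work_struct *work = per_cpu_ptr(works, cpu);

                    INIT_WORK(work, func);
                    schedule_work_on(cpu, work);            /* fan out */
            }
            for_each_online_cpu(cpu)
                    flush_work(per_cpu_ptr(works, cpu));    /* fan in: wait per CPU */
            put_online_cpus();

            free_percpu(works);
            return 0;
    }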
@@ -747,11 +829,22 @@ struct workqueue_struct *__create_workqueue_key(const char *name, | |||
747 | err = create_workqueue_thread(cwq, singlethread_cpu); | 829 | err = create_workqueue_thread(cwq, singlethread_cpu); |
748 | start_workqueue_thread(cwq, -1); | 830 | start_workqueue_thread(cwq, -1); |
749 | } else { | 831 | } else { |
750 | get_online_cpus(); | 832 | cpu_maps_update_begin(); |
833 | /* | ||
834 | * We must place this wq on list even if the code below fails. | ||
835 | * cpu_down(cpu) can remove cpu from cpu_populated_map before | ||
836 | * destroy_workqueue() takes the lock, in that case we leak | ||
837 | * cwq[cpu]->thread. | ||
838 | */ | ||
751 | spin_lock(&workqueue_lock); | 839 | spin_lock(&workqueue_lock); |
752 | list_add(&wq->list, &workqueues); | 840 | list_add(&wq->list, &workqueues); |
753 | spin_unlock(&workqueue_lock); | 841 | spin_unlock(&workqueue_lock); |
754 | 842 | /* | |
843 | * We must initialize cwqs for each possible cpu even if we | ||
844 | * are going to call destroy_workqueue() finally. Otherwise | ||
845 | * cpu_up() can hit the uninitialized cwq once we drop the | ||
846 | * lock. | ||
847 | */ | ||
755 | for_each_possible_cpu(cpu) { | 848 | for_each_possible_cpu(cpu) { |
756 | cwq = init_cpu_workqueue(wq, cpu); | 849 | cwq = init_cpu_workqueue(wq, cpu); |
757 | if (err || !cpu_online(cpu)) | 850 | if (err || !cpu_online(cpu)) |
@@ -759,7 +852,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, | |||
759 | err = create_workqueue_thread(cwq, cpu); | 852 | err = create_workqueue_thread(cwq, cpu); |
760 | start_workqueue_thread(cwq, cpu); | 853 | start_workqueue_thread(cwq, cpu); |
761 | } | 854 | } |
762 | put_online_cpus(); | 855 | cpu_maps_update_done(); |
763 | } | 856 | } |
764 | 857 | ||
765 | if (err) { | 858 | if (err) { |
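cpu_maps_update_begin()/cpu_maps_update_done() take cpu_add_remove_lock, the mutex cpu_up()/cpu_down() hold for the entire hotplug operation, so while the bracket is held no CPU can come or go and cwq->thread stays stable (the updated comment in cleanup_workqueue_thread() below says exactly that). The bracket in isolation (sketch; the body stands in for whatever must not race with hotplug):

    #include <linux/cpu.h>

    static void update_percpu_threads(void)     /* hypothetical caller */
    {
            cpu_maps_update_begin();            /* excludes cpu_up()/cpu_down() entirely */
            /* ... create or tear down per-cpu threads safely here ... */
            cpu_maps_update_done();
    }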
@@ -773,18 +866,18 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key); | |||
773 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | 866 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) |
774 | { | 867 | { |
775 | /* | 868 | /* |
776 | * Our caller is either destroy_workqueue() or CPU_DEAD, | 869 | * Our caller is either destroy_workqueue() or CPU_POST_DEAD, |
777 | * get_online_cpus() protects cwq->thread. | 870 | * cpu_add_remove_lock protects cwq->thread. |
778 | */ | 871 | */ |
779 | if (cwq->thread == NULL) | 872 | if (cwq->thread == NULL) |
780 | return; | 873 | return; |
781 | 874 | ||
782 | lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_); | 875 | lock_map_acquire(&cwq->wq->lockdep_map); |
783 | lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_); | 876 | lock_map_release(&cwq->wq->lockdep_map); |
784 | 877 | ||
785 | flush_cpu_workqueue(cwq); | 878 | flush_cpu_workqueue(cwq); |
786 | /* | 879 | /* |
787 | * If the caller is CPU_DEAD and cwq->worklist was not empty, | 880 | * If the caller is CPU_POST_DEAD and cwq->worklist was not empty, |
788 | * a concurrent flush_workqueue() can insert a barrier after us. | 881 | * a concurrent flush_workqueue() can insert a barrier after us. |
789 | * However, in that case run_workqueue() won't return and check | 882 | * However, in that case run_workqueue() won't return and check |
790 | * kthread_should_stop() until it flushes all work_struct's. | 883 | * kthread_should_stop() until it flushes all work_struct's. |
@@ -808,14 +901,14 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
808 | const cpumask_t *cpu_map = wq_cpu_map(wq); | 901 | const cpumask_t *cpu_map = wq_cpu_map(wq); |
809 | int cpu; | 902 | int cpu; |
810 | 903 | ||
811 | get_online_cpus(); | 904 | cpu_maps_update_begin(); |
812 | spin_lock(&workqueue_lock); | 905 | spin_lock(&workqueue_lock); |
813 | list_del(&wq->list); | 906 | list_del(&wq->list); |
814 | spin_unlock(&workqueue_lock); | 907 | spin_unlock(&workqueue_lock); |
815 | 908 | ||
816 | for_each_cpu_mask(cpu, *cpu_map) | 909 | for_each_cpu_mask_nr(cpu, *cpu_map) |
817 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); | 910 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); |
818 | put_online_cpus(); | 911 | cpu_maps_update_done(); |
819 | 912 | ||
820 | free_percpu(wq->cpu_wq); | 913 | free_percpu(wq->cpu_wq); |
821 | kfree(wq); | 914 | kfree(wq); |
@@ -829,6 +922,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
829 | unsigned int cpu = (unsigned long)hcpu; | 922 | unsigned int cpu = (unsigned long)hcpu; |
830 | struct cpu_workqueue_struct *cwq; | 923 | struct cpu_workqueue_struct *cwq; |
831 | struct workqueue_struct *wq; | 924 | struct workqueue_struct *wq; |
925 | int ret = NOTIFY_OK; | ||
832 | 926 | ||
833 | action &= ~CPU_TASKS_FROZEN; | 927 | action &= ~CPU_TASKS_FROZEN; |
834 | 928 | ||
@@ -836,7 +930,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
836 | case CPU_UP_PREPARE: | 930 | case CPU_UP_PREPARE: |
837 | cpu_set(cpu, cpu_populated_map); | 931 | cpu_set(cpu, cpu_populated_map); |
838 | } | 932 | } |
839 | 933 | undo: | |
840 | list_for_each_entry(wq, &workqueues, list) { | 934 | list_for_each_entry(wq, &workqueues, list) { |
841 | cwq = per_cpu_ptr(wq->cpu_wq, cpu); | 935 | cwq = per_cpu_ptr(wq->cpu_wq, cpu); |
842 | 936 | ||
@@ -846,7 +940,9 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
846 | break; | 940 | break; |
847 | printk(KERN_ERR "workqueue [%s] for %i failed\n", | 941 | printk(KERN_ERR "workqueue [%s] for %i failed\n", |
848 | wq->name, cpu); | 942 | wq->name, cpu); |
849 | return NOTIFY_BAD; | 943 | action = CPU_UP_CANCELED; |
944 | ret = NOTIFY_BAD; | ||
945 | goto undo; | ||
850 | 946 | ||
851 | case CPU_ONLINE: | 947 | case CPU_ONLINE: |
852 | start_workqueue_thread(cwq, cpu); | 948 | start_workqueue_thread(cwq, cpu); |
@@ -854,7 +950,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
854 | 950 | ||
855 | case CPU_UP_CANCELED: | 951 | case CPU_UP_CANCELED: |
856 | start_workqueue_thread(cwq, -1); | 952 | start_workqueue_thread(cwq, -1); |
857 | case CPU_DEAD: | 953 | case CPU_POST_DEAD: |
858 | cleanup_workqueue_thread(cwq); | 954 | cleanup_workqueue_thread(cwq); |
859 | break; | 955 | break; |
860 | } | 956 | } |
@@ -862,11 +958,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
862 | 958 | ||
863 | switch (action) { | 959 | switch (action) { |
864 | case CPU_UP_CANCELED: | 960 | case CPU_UP_CANCELED: |
865 | case CPU_DEAD: | 961 | case CPU_POST_DEAD: |
866 | cpu_clear(cpu, cpu_populated_map); | 962 | cpu_clear(cpu, cpu_populated_map); |
867 | } | 963 | } |
868 | 964 | ||
869 | return NOTIFY_OK; | 965 | return ret; |
870 | } | 966 | } |
871 | 967 | ||
872 | void __init init_workqueues(void) | 968 | void __init init_workqueues(void) |
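The CPU_UP_PREPARE failure path no longer returns NOTIFY_BAD outright, which leaked the threads already created for earlier workqueues on the list; it now flips action to CPU_UP_CANCELED and jumps back to the new undo: label so the same list walk tears down whatever was set up. The idiom in isolation (generic sketch, not kernel code; setup()/teardown() are hypothetical, and teardown() tolerates items that were never set up, just as cleanup_workqueue_thread() bails out when cwq->thread is NULL):

    #include <errno.h>

    struct item { int ready; };

    static int setup(struct item *it)     { it->ready = 1; return 0; /* or -errno */ }
    static void teardown(struct item *it) { it->ready = 0; }          /* no-op if never set up */

    static int apply_all(struct item *items, int n)
    {
            int setting_up = 1, ret = 0, i;
    undo:
            for (i = 0; i < n; i++) {
                    if (setting_up) {
                            ret = setup(&items[i]);
                            if (!ret)
                                    continue;
                            setting_up = 0;     /* partial failure: redo the walk as teardown */
                            goto undo;
                    }
                    teardown(&items[i]);        /* safe on items that were never set up */
            }
            return ret;
    }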