commit da935c66bacb3ed9ada984b053297f87c2dff63a
tree   46278da2b312c73f1375b830d7e5912bf23abd78 /kernel
parent 9435eb1cf0b76b323019cebf8d16762a50a12a19
parent 2205a6ea93fea76f88b43727fea53f3ce3790d6f
author    David S. Miller <davem@davemloft.net>  2011-02-19 22:17:35 -0500
committer David S. Miller <davem@davemloft.net>  2011-02-19 22:17:35 -0500
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
        Documentation/feature-removal-schedule.txt
        drivers/net/e1000e/netdev.c
        net/xfrm/xfrm_policy.c
Diffstat (limited to 'kernel'):

 kernel/capability.c           |   2
 kernel/cred.c                 |  16
 kernel/irq/migration.c        |  14
 kernel/module.c               |  16
 kernel/perf_event.c           |  10
 kernel/power/main.c           |   2
 kernel/power/process.c        |   6
 kernel/power/snapshot.c       |   7
 kernel/printk.c               | 154
 kernel/ptrace.c               |   2
 kernel/sched_fair.c           |  13
 kernel/sched_rt.c             |   2
 kernel/sys.c                  |   3
 kernel/sysctl.c               |   3
 kernel/time/timer_list.c      |   4
 kernel/timer.c                |   8
 kernel/trace/blktrace.c       |   7
 kernel/trace/trace_events.c   |  12
 kernel/trace/trace_export.c   |   6
 kernel/trace/trace_syscalls.c |  19
 kernel/tracepoint.c           |  31
 kernel/watchdog.c             |  53
 kernel/workqueue.c            |  37
 23 files changed, 241 insertions(+), 186 deletions(-)
diff --git a/kernel/capability.c b/kernel/capability.c
index 2f05303715a5..9e9385f132c8 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -306,7 +306,7 @@ int capable(int cap)
 		BUG();
 	}
 
-	if (security_capable(cap) == 0) {
+	if (security_capable(current_cred(), cap) == 0) {
 		current->flags |= PF_SUPERPRIV;
 		return 1;
 	}
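
Note on the interface change above: security_capable() now takes the credential set to test as an explicit first argument instead of implicitly checking current. A minimal sketch of the new calling convention; may_admin() is a hypothetical helper, shown only to illustrate the call shape (security_capable() returns 0 when the LSM allows the capability):

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/security.h>

/* hypothetical helper: does the current task's credential set
 * carry CAP_SYS_ADMIN according to the LSM? */
static bool may_admin(void)
{
	return security_capable(current_cred(), CAP_SYS_ADMIN) == 0;
}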
diff --git a/kernel/cred.c b/kernel/cred.c
index 6a1aa004e376..3a9d6dd53a6c 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void)
 #endif
 
 	atomic_set(&new->usage, 1);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	new->magic = CRED_MAGIC;
+#endif
 
 	if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
 		goto error;
 
-#ifdef CONFIG_DEBUG_CREDENTIALS
-	new->magic = CRED_MAGIC;
-#endif
 	return new;
 
 error:
@@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 	validate_creds(old);
 
 	*new = *old;
+	atomic_set(&new->usage, 1);
+	set_cred_subscribers(new, 0);
 	get_uid(new->user);
 	get_group_info(new->group_info);
 
@@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
 		goto error;
 
-	atomic_set(&new->usage, 1);
-	set_cred_subscribers(new, 0);
 	put_cred(old);
 	validate_creds(new);
 	return new;
@@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred)
 	if (cred->magic != CRED_MAGIC)
 		return true;
 #ifdef CONFIG_SECURITY_SELINUX
-	if (selinux_is_enabled()) {
+	/*
+	 * cred->security == NULL if security_cred_alloc_blank() or
+	 * security_prepare_creds() returned an error.
+	 */
+	if (selinux_is_enabled() && cred->security) {
 		if ((unsigned long) cred->security < PAGE_SIZE)
 			return true;
 		if ((*(u32 *)cred->security & 0xffffff00) ==
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 1d2541940480..441fd629ff04 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -56,6 +56,7 @@ void move_masked_irq(int irq)
 void move_native_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	bool masked;
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
@@ -63,8 +64,15 @@ void move_native_irq(int irq)
 	if (unlikely(desc->status & IRQ_DISABLED))
 		return;
 
-	desc->irq_data.chip->irq_mask(&desc->irq_data);
+	/*
+	 * Be careful vs. already masked interrupts. If this is a
+	 * threaded interrupt with ONESHOT set, we can end up with an
+	 * interrupt storm.
+	 */
+	masked = desc->status & IRQ_MASKED;
+	if (!masked)
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
 	move_masked_irq(irq);
-	desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	if (!masked)
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
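Note on the guard above: it is the general "remember what you changed, undo only that" idiom. If the ONESHOT threaded-interrupt path already masked the line, unconditionally unmasking it at the end of the migration would re-enable the interrupt behind that path's back and can cause a storm. A standalone userspace sketch of the idiom, with fake mask()/unmask() helpers standing in for the irq_chip callbacks:

#include <stdbool.h>
#include <stdio.h>

#define IRQ_MASKED 0x1

struct fake_desc { unsigned int status; };

static void mask(struct fake_desc *d)   { d->status |= IRQ_MASKED; }
static void unmask(struct fake_desc *d) { d->status &= ~IRQ_MASKED; }

static void move_irq(struct fake_desc *desc)
{
	/* remember whether someone else already masked the line */
	bool masked = desc->status & IRQ_MASKED;

	if (!masked)
		mask(desc);
	/* ... migrate the interrupt while it cannot fire ... */
	if (!masked)
		unmask(desc);	/* undo only our own masking */
}

int main(void)
{
	struct fake_desc d = { .status = IRQ_MASKED }; /* already masked */

	move_irq(&d);
	printf("still masked: %d\n", !!(d.status & IRQ_MASKED)); /* prints 1 */
	return 0;
}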
diff --git a/kernel/module.c b/kernel/module.c
index 34e00b708fad..efa290ea94bf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2460,9 +2460,9 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
-	mod->tracepoints = section_objs(info, "__tracepoints",
-					sizeof(*mod->tracepoints),
-					&mod->num_tracepoints);
+	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
+					     sizeof(*mod->tracepoints_ptrs),
+					     &mod->num_tracepoints);
 #endif
 #ifdef HAVE_JUMP_LABEL
 	mod->jump_entries = section_objs(info, "__jump_table",
@@ -3393,7 +3393,7 @@ void module_layout(struct module *mod,
 		   struct modversion_info *ver,
 		   struct kernel_param *kp,
 		   struct kernel_symbol *ks,
-		   struct tracepoint *tp)
+		   struct tracepoint * const *tp)
 {
 }
 EXPORT_SYMBOL(module_layout);
@@ -3407,8 +3407,8 @@ void module_update_tracepoints(void)
 	mutex_lock(&module_mutex);
 	list_for_each_entry(mod, &modules, list)
 		if (!mod->taints)
-			tracepoint_update_probe_range(mod->tracepoints,
-				mod->tracepoints + mod->num_tracepoints);
+			tracepoint_update_probe_range(mod->tracepoints_ptrs,
+				mod->tracepoints_ptrs + mod->num_tracepoints);
 	mutex_unlock(&module_mutex);
 }
 
@@ -3432,8 +3432,8 @@ int module_get_iter_tracepoints(struct tracepoint_iter *iter)
 		else if (iter_mod > iter->module)
 			iter->tracepoint = NULL;
 		found = tracepoint_get_iter_range(&iter->tracepoint,
-			iter_mod->tracepoints,
-			iter_mod->tracepoints
+			iter_mod->tracepoints_ptrs,
+			iter_mod->tracepoints_ptrs
 				+ iter_mod->num_tracepoints);
 		if (found) {
 			iter->module = iter_mod;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 126a302c481c..999835b6112b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	update_context_time(ctx);
+	if (ctx->is_active)
+		update_context_time(ctx);
 	update_event_times(event);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
 	raw_spin_unlock(&ctx->lock);
-
-	event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -1999,8 +2000,7 @@ static int alloc_callchain_buffers(void)
 	 * accessed from NMI. Use a temporary manual per cpu allocation
 	 * until that gets sorted out.
 	 */
-	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-		num_possible_cpus();
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
 	entries = kzalloc(size, GFP_KERNEL);
 	if (!entries)
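
Note on the sizing change above: num_possible_cpus() counts possible CPUs, but per-CPU indices run up to nr_cpu_ids, which is larger when the possible map is sparse, so the old formula could undersize the array. offsetof(type, array[n]) yields the header size plus exactly n trailing slots; GCC's __builtin_offsetof accepts the runtime index, which this relies on. A userspace sketch of the same calculation, with struct entries standing in for callchain_cpus_entries:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct entries {
	int refcount;
	void *cpu_entries[];		/* one slot per CPU id */
};

int main(void)
{
	int nr_cpu_ids = 8;		/* stand-in for the kernel's nr_cpu_ids */
	/* header plus exactly nr_cpu_ids pointer slots, padding included */
	size_t size = offsetof(struct entries, cpu_entries[nr_cpu_ids]);
	struct entries *e = calloc(1, size);

	if (!e)
		return 1;
	printf("%zu bytes for %d CPU ids\n", size, nr_cpu_ids);
	free(e);
	return 0;
}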
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 7b5db6a8561e..701853042c28 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
 
 static int __init pm_start_workqueue(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
 	return pm_wq ? 0 : -ENOMEM;
 }
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d6d2a10320e0..0cf3a27a6c9d 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
 	if ((p == current) ||
 	    (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezeable(p))
+			if (frozen(p) || !freezable(p))
 				continue;
 
 			if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only)
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		if (!freezeable(p))
+		if (!freezable(p))
 			continue;
 
 		if (nosig_only && should_send_signal(p))
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0dac75ea4456..64db648ff911 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1519,11 +1519,8 @@ static int
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 		unsigned int nr_pages, unsigned int nr_highmem)
 {
-	int error = 0;
-
 	if (nr_highmem > 0) {
-		error = get_highmem_buffer(PG_ANY);
-		if (error)
+		if (get_highmem_buffer(PG_ANY))
 			goto err_out;
 		if (nr_highmem > alloc_highmem) {
 			nr_highmem -= alloc_highmem;
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 
  err_out:
 	swsusp_free();
-	return error;
+	return -ENOMEM;
 }
 
 asmlinkage int swsusp_save(void)
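
Note on the cleanup above: once the only way swsusp_alloc() can fail is an allocation failure, the error local is dead weight and every failure can funnel through one label that returns -ENOMEM. A userspace sketch of that single-error-code cleanup idiom (all names here are illustrative, not from the kernel):

#include <errno.h>
#include <stdlib.h>

static void *buf_a, *buf_b;

static void release_all(void)
{
	free(buf_a);
	free(buf_b);
	buf_a = buf_b = NULL;
}

static int setup_buffers(size_t na, size_t nb)
{
	buf_a = malloc(na);
	if (!buf_a)
		goto err_out;
	buf_b = malloc(nb);
	if (!buf_b)
		goto err_out;
	return 0;

 err_out:
	release_all();		/* frees whatever was allocated so far */
	return -ENOMEM;		/* the only possible failure */
}

int main(void)
{
	return setup_buffers(16, 32) ? 1 : 0;
}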
diff --git a/kernel/printk.c b/kernel/printk.c
index 53d9a9ec88e6..36231525e22f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -97,7 +97,7 @@ static int console_locked, console_suspended;
 /*
  * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars
  * It is also used in interesting ways to provide interlocking in
- * release_console_sem().
+ * console_unlock();.
  */
 static DEFINE_SPINLOCK(logbuf_lock);
 
@@ -262,25 +262,47 @@ int dmesg_restrict = 1;
 int dmesg_restrict;
 #endif
 
+static int syslog_action_restricted(int type)
+{
+	if (dmesg_restrict)
+		return 1;
+	/* Unless restricted, we allow "read all" and "get buffer size" for everybody */
+	return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
+}
+
+static int check_syslog_permissions(int type, bool from_file)
+{
+	/*
+	 * If this is from /proc/kmsg and we've already opened it, then we've
+	 * already done the capabilities checks at open time.
+	 */
+	if (from_file && type != SYSLOG_ACTION_OPEN)
+		return 0;
+
+	if (syslog_action_restricted(type)) {
+		if (capable(CAP_SYSLOG))
+			return 0;
+		/* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+		if (capable(CAP_SYS_ADMIN)) {
+			WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+				 "but no CAP_SYSLOG (deprecated).\n");
+			return 0;
+		}
+		return -EPERM;
+	}
+	return 0;
+}
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	unsigned i, j, limit, count;
 	int do_clear = 0;
 	char c;
-	int error = 0;
+	int error;
 
-	/*
-	 * If this is from /proc/kmsg we only do the capabilities checks
-	 * at open time.
-	 */
-	if (type == SYSLOG_ACTION_OPEN || !from_file) {
-		if (dmesg_restrict && !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-		if ((type != SYSLOG_ACTION_READ_ALL &&
-		     type != SYSLOG_ACTION_SIZE_BUFFER) &&
-		    !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-	}
+	error = check_syslog_permissions(type, from_file);
+	if (error)
+		goto out;
 
 	error = security_syslog(type);
 	if (error)
@@ -423,12 +445,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 	}
 out:
 	return error;
-warn:
-	/* remove after 2.6.39 */
-	if (capable(CAP_SYS_ADMIN))
-		WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
-			 "but no CAP_SYSLOG (deprecated and denied).\n");
-	return -EPERM;
 }
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
@@ -501,7 +517,7 @@ static void _call_console_drivers(unsigned start,
 /*
  * Call the console drivers, asking them to write out
  * log_buf[start] to log_buf[end - 1].
- * The console_sem must be held.
+ * The console_lock must be held.
  */
 static void call_console_drivers(unsigned start, unsigned end)
 {
@@ -604,11 +620,11 @@ static int have_callable_console(void)
  *
  * This is printk(). It can be called from any context. We want it to work.
  *
- * We try to grab the console_sem. If we succeed, it's easy - we log the output and
+ * We try to grab the console_lock. If we succeed, it's easy - we log the output and
  * call the console drivers. If we fail to get the semaphore we place the output
  * into the log buffer and return. The current holder of the console_sem will
- * notice the new output in release_console_sem() and will send it to the
- * consoles before releasing the semaphore.
+ * notice the new output in console_unlock(); and will send it to the
+ * consoles before releasing the lock.
  *
  * One effect of this deferred printing is that code which calls printk() and
  * then changes console_loglevel may break. This is because console_loglevel
@@ -659,19 +675,19 @@ static inline int can_use_console(unsigned int cpu)
 /*
  * Try to get console ownership to actually show the kernel
  * messages from a 'printk'. Return true (and with the
- * console_semaphore held, and 'console_locked' set) if it
+ * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  *
  * This gets called with the 'logbuf_lock' spinlock held and
  * interrupts disabled. It should return with 'lockbuf_lock'
  * released but interrupts still disabled.
  */
-static int acquire_console_semaphore_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu)
 	__releases(&logbuf_lock)
 {
 	int retval = 0;
 
-	if (!try_acquire_console_sem()) {
+	if (console_trylock()) {
 		retval = 1;
 
 		/*
@@ -827,12 +843,12 @@ asmlinkage int vprintk(const char *fmt, va_list args)
 	 * actual magic (print out buffers, wake up klogd,
 	 * etc).
 	 *
-	 * The acquire_console_semaphore_for_printk() function
+	 * The console_trylock_for_printk() function
 	 * will release 'logbuf_lock' regardless of whether it
 	 * actually gets the semaphore or not.
 	 */
-	if (acquire_console_semaphore_for_printk(this_cpu))
-		release_console_sem();
+	if (console_trylock_for_printk(this_cpu))
+		console_unlock();
 
 	lockdep_on();
 out_restore_irqs:
@@ -993,7 +1009,7 @@ void suspend_console(void)
 	if (!console_suspend_enabled)
 		return;
 	printk("Suspending console(s) (use no_console_suspend to debug)\n");
-	acquire_console_sem();
+	console_lock();
 	console_suspended = 1;
 	up(&console_sem);
 }
@@ -1004,7 +1020,7 @@
 		return;
 	down(&console_sem);
 	console_suspended = 0;
-	release_console_sem();
+	console_unlock();
 }
 
 /**
@@ -1027,21 +1043,21 @@ static int __cpuinit console_cpu_notify(struct notifier_block *self,
 	case CPU_DYING:
 	case CPU_DOWN_FAILED:
 	case CPU_UP_CANCELED:
-		acquire_console_sem();
-		release_console_sem();
+		console_lock();
+		console_unlock();
 	}
 	return NOTIFY_OK;
 }
 
 /**
- * acquire_console_sem - lock the console system for exclusive use.
+ * console_lock - lock the console system for exclusive use.
  *
- * Acquires a semaphore which guarantees that the caller has
+ * Acquires a lock which guarantees that the caller has
  * exclusive access to the console system and the console_drivers list.
  *
  * Can sleep, returns nothing.
  */
-void acquire_console_sem(void)
+void console_lock(void)
 {
 	BUG_ON(in_interrupt());
 	down(&console_sem);
@@ -1050,21 +1066,29 @@ void acquire_console_sem(void)
 	console_locked = 1;
 	console_may_schedule = 1;
 }
-EXPORT_SYMBOL(acquire_console_sem);
+EXPORT_SYMBOL(console_lock);
 
-int try_acquire_console_sem(void)
+/**
+ * console_trylock - try to lock the console system for exclusive use.
+ *
+ * Tried to acquire a lock which guarantees that the caller has
+ * exclusive access to the console system and the console_drivers list.
+ *
+ * returns 1 on success, and 0 on failure to acquire the lock.
+ */
+int console_trylock(void)
 {
 	if (down_trylock(&console_sem))
-		return -1;
+		return 0;
 	if (console_suspended) {
 		up(&console_sem);
-		return -1;
+		return 0;
 	}
 	console_locked = 1;
 	console_may_schedule = 0;
-	return 0;
+	return 1;
 }
-EXPORT_SYMBOL(try_acquire_console_sem);
+EXPORT_SYMBOL(console_trylock);
 
 int is_console_locked(void)
 {
@@ -1095,20 +1119,20 @@ void wake_up_klogd(void)
 }
 
 /**
- * release_console_sem - unlock the console system
+ * console_unlock - unlock the console system
  *
- * Releases the semaphore which the caller holds on the console system
+ * Releases the console_lock which the caller holds on the console system
  * and the console driver list.
  *
- * While the semaphore was held, console output may have been buffered
- * by printk(). If this is the case, release_console_sem() emits
- * the output prior to releasing the semaphore.
+ * While the console_lock was held, console output may have been buffered
+ * by printk(). If this is the case, console_unlock(); emits
+ * the output prior to releasing the lock.
  *
  * If there is output waiting for klogd, we wake it up.
  *
- * release_console_sem() may be called from any context.
+ * console_unlock(); may be called from any context.
  */
-void release_console_sem(void)
+void console_unlock(void)
 {
 	unsigned long flags;
 	unsigned _con_start, _log_end;
@@ -1141,7 +1165,7 @@ void release_console_sem(void)
 	if (wake_klogd)
 		wake_up_klogd();
 }
-EXPORT_SYMBOL(release_console_sem);
+EXPORT_SYMBOL(console_unlock);
 
 /**
  * console_conditional_schedule - yield the CPU if required
@@ -1150,7 +1174,7 @@ EXPORT_SYMBOL(release_console_sem);
  * if this CPU should yield the CPU to another task, do
  * so here.
  *
- * Must be called within acquire_console_sem().
+ * Must be called within console_lock();.
  */
 void __sched console_conditional_schedule(void)
 {
@@ -1171,14 +1195,14 @@ void console_unblank(void)
 		if (down_trylock(&console_sem) != 0)
 			return;
 	} else
-		acquire_console_sem();
+		console_lock();
 
 	console_locked = 1;
 	console_may_schedule = 0;
 	for_each_console(c)
 		if ((c->flags & CON_ENABLED) && c->unblank)
 			c->unblank();
-	release_console_sem();
+	console_unlock();
 }
 
 /*
@@ -1189,7 +1213,7 @@ struct tty_driver *console_device(int *index)
 	struct console *c;
 	struct tty_driver *driver = NULL;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(c) {
 		if (!c->device)
 			continue;
@@ -1197,7 +1221,7 @@ struct tty_driver *console_device(int *index)
 		if (driver)
 			break;
 	}
-	release_console_sem();
+	console_unlock();
 	return driver;
 }
 
@@ -1208,17 +1232,17 @@ struct tty_driver *console_device(int *index)
  */
 void console_stop(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags &= ~CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_stop);
 
 void console_start(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags |= CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_start);
 
@@ -1340,7 +1364,7 @@ void register_console(struct console *newcon)
 	 * Put this console in the list - keep the
 	 * preferred driver at the head of the list.
 	 */
-	acquire_console_sem();
+	console_lock();
 	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
 		newcon->next = console_drivers;
 		console_drivers = newcon;
@@ -1352,14 +1376,14 @@ void register_console(struct console *newcon)
 	}
 	if (newcon->flags & CON_PRINTBUFFER) {
 		/*
-		 * release_console_sem() will print out the buffered messages
+		 * console_unlock(); will print out the buffered messages
 		 * for us.
 		 */
 		spin_lock_irqsave(&logbuf_lock, flags);
 		con_start = log_start;
 		spin_unlock_irqrestore(&logbuf_lock, flags);
 	}
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 
 	/*
@@ -1396,7 +1420,7 @@ int unregister_console(struct console *console)
 		return braille_unregister_console(console);
 #endif
 
-	acquire_console_sem();
+	console_lock();
 	if (console_drivers == console) {
 		console_drivers=console->next;
 		res = 0;
@@ -1418,7 +1442,7 @@ int unregister_console(struct console *console)
 	if (console_drivers != NULL && console->flags & CON_CONSDEV)
 		console_drivers->flags |= CON_CONSDEV;
 
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 	return res;
 }
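
Note for code chasing the rename above: the mapping is mechanical. acquire_console_sem() becomes console_lock(), release_console_sem() becomes console_unlock(), and try_acquire_console_sem() becomes console_trylock() with an inverted return convention (1 on success, 0 on failure, where the old function returned 0 and -1). A sketch of a caller written against the new API; example_flush() and example_poll() are hypothetical names:

#include <linux/console.h>

static void example_flush(void)
{
	console_lock();			/* was: acquire_console_sem() */
	/* ... walk console_drivers or drive a console directly ... */
	console_unlock();		/* was: release_console_sem() */
}

static void example_poll(void)
{
	/* was: if (!try_acquire_console_sem()) { ... } */
	if (console_trylock()) {	/* nonzero now means "got it" */
		/* ... */
		console_unlock();
	}
}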
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 99bbaa3e5b0d..1708b1e2972d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 		child->exit_code = data;
 		dead = __ptrace_detach(current, child);
 		if (!child->exit_state)
-			wake_up_process(child);
+			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
 	}
 	write_unlock_irq(&tasklist_lock);
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 354769979c02..0c26e2df450e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -722,10 +722,10 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
-	if (!cfs_rq)
+	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	now = rq_of(cfs_rq)->clock;
+	now = rq_of(cfs_rq)->clock_task;
 	delta = now - cfs_rq->load_stamp;
 
 	/* truncate load history at 4 idle periods */
@@ -830,9 +830,6 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	struct sched_entity *se;
 	long shares;
 
-	if (!cfs_rq)
-		return;
-
 	tg = cfs_rq->tg;
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
@@ -1432,7 +1429,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	unsigned long this_load, load;
+	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
 	struct task_group *tg;
@@ -1471,8 +1468,8 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load) {
-		unsigned long this_eff_load, prev_eff_load;
+	if (this_load > 0) {
+		s64 this_eff_load, prev_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec747ca6..ad6267714c84 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -625,7 +625,7 @@ static void update_curr_rt(struct rq *rq)
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	u64 delta_exec;
 
-	if (!task_has_rt_policy(curr))
+	if (curr->sched_class != &rt_sched_class)
 		return;
 
 	delta_exec = rq->clock_task - curr->se.exec_start;
diff --git a/kernel/sys.c b/kernel/sys.c
index 31b71a276b40..18da702ec813 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1385,7 +1385,8 @@ static int check_prlimit_permission(struct task_struct *task)
 	const struct cred *cred = current_cred(), *tcred;
 
 	tcred = __task_cred(task);
-	if ((cred->uid != tcred->euid ||
+	if (current != task &&
+	    (cred->uid != tcred->euid ||
 	     cred->uid != tcred->suid ||
 	     cred->uid != tcred->uid  ||
 	     cred->gid != tcred->egid ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index bc86bb32e126..0f1bd83db985 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -170,7 +170,8 @@ static int proc_taint(struct ctl_table *table, int write,
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static int __sysrq_enabled; /* Note: sysrq code ises it's own private copy */
+/* Note: sysrq code uses it's own private copy */
+static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 
 static int sysrq_sysctl_handler(ctl_table *table, int write,
 				void __user *buffer, size_t *lenp,
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 32a19f9397fc..3258455549f4 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -41,7 +41,7 @@ static void print_name_offset(struct seq_file *m, void *sym)
 	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name((unsigned long)sym, symname) < 0)
-		SEQ_printf(m, "<%p>", sym);
+		SEQ_printf(m, "<%pK>", sym);
 	else
 		SEQ_printf(m, "%s", symname);
 }
@@ -112,7 +112,7 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
-	SEQ_printf(m, " .base:       %p\n", base);
+	SEQ_printf(m, " .base:       %pK\n", base);
 	SEQ_printf(m, " .index:      %d\n",
 			base->index);
 	SEQ_printf(m, " .resolution: %Lu nsecs\n",
diff --git a/kernel/timer.c b/kernel/timer.c
index 43ca9936f2d0..d6459923d245 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -959,7 +959,7 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  *
  * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
- * hardirq contexts. The caller must not hold locks which would prevent
+ * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
  * add_timer_on(). Upon exit the timer is not queued and the handler is
  * not running on any CPU.
@@ -969,10 +969,12 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
 int del_timer_sync(struct timer_list *timer)
 {
 #ifdef CONFIG_LOCKDEP
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	lock_map_acquire(&timer->lockdep_map);
 	lock_map_release(&timer->lockdep_map);
-	local_bh_enable();
+	local_irq_restore(flags);
 #endif
 	/*
 	 * don't use it in hardirq context, because it
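
Note on the lockdep change above: the annotation now runs with interrupts disabled rather than only bottom halves off, matching the tightened documentation that del_timer_sync() must not be called from any interrupt context. A sketch of safe teardown from process context (my_timer and my_teardown() are illustrative names):

#include <linux/timer.h>

static struct timer_list my_timer;

static void my_teardown(void)
{
	/*
	 * Process context only, with no locks held that my_timer's
	 * handler might take. On return the timer is not queued and
	 * its handler is not running on any CPU.
	 */
	del_timer_sync(&my_timer);
}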
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 153562d0b93c..d95721f33702 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -138,6 +138,13 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
 		     !blk_tracer_enabled))
 		return;
 
+	/*
+	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
+	 * message to the trace.
+	 */
+	if (!(bt->act_mask & BLK_TC_NOTIFY))
+		return;
+
 	local_irq_save(flags);
 	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
 	va_start(args, fmt);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 35fde09b81de..5f499e0438a4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1284,7 +1284,7 @@ trace_create_file_ops(struct module *mod)
 static void trace_module_add_events(struct module *mod)
 {
 	struct ftrace_module_file_ops *file_ops = NULL;
-	struct ftrace_event_call *call, *start, *end;
+	struct ftrace_event_call **call, **start, **end;
 
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
@@ -1297,7 +1297,7 @@ static void trace_module_add_events(struct module *mod)
 		return;
 
 	for_each_event(call, start, end) {
-		__trace_add_event_call(call, mod,
+		__trace_add_event_call(*call, mod,
 				       &file_ops->id, &file_ops->enable,
 				       &file_ops->filter, &file_ops->format);
 	}
@@ -1367,8 +1367,8 @@ static struct notifier_block trace_module_nb = {
 	.priority = 0,
 };
 
-extern struct ftrace_event_call __start_ftrace_events[];
-extern struct ftrace_event_call __stop_ftrace_events[];
+extern struct ftrace_event_call *__start_ftrace_events[];
+extern struct ftrace_event_call *__stop_ftrace_events[];
 
 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
 
@@ -1384,7 +1384,7 @@ __setup("trace_event=", setup_trace_event);
 
 static __init int event_trace_init(void)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_call **call;
 	struct dentry *d_tracer;
 	struct dentry *entry;
 	struct dentry *d_events;
@@ -1430,7 +1430,7 @@ static __init int event_trace_init(void)
 		pr_warning("tracing: Failed to allocate common fields");
 
 	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
-		__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
+		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
 				       &ftrace_enable_fops,
 				       &ftrace_event_filter_fops,
 				       &ftrace_event_format_fops);
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4b74d71705c0..bbeec31e0ae3 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -161,13 +161,13 @@ struct ftrace_event_class event_class_ftrace_##call = {	\
 	.fields		= LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
 };								\
 								\
-struct ftrace_event_call __used					\
-__attribute__((__aligned__(4)))					\
-__attribute__((section("_ftrace_events"))) event_##call = {	\
+struct ftrace_event_call __used event_##call = {		\
 	.name			= #call,			\
 	.event.type		= etype,			\
 	.class			= &event_class_ftrace_##call,	\
 	.print_fmt		= print,			\
 };								\
+struct ftrace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
 #include "trace_entries.h"
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index b706529b4fc7..5c9fe08d2093 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -55,20 +55,21 @@ struct ftrace_event_class event_class_syscall_exit = {
 	.raw_init	= init_syscall_trace,
 };
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
+extern struct syscall_metadata *__start_syscalls_metadata[];
+extern struct syscall_metadata *__stop_syscalls_metadata[];
 
 static struct syscall_metadata **syscalls_metadata;
 
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+static __init struct syscall_metadata *
+find_syscall_meta(unsigned long syscall)
 {
-	struct syscall_metadata *start;
-	struct syscall_metadata *stop;
+	struct syscall_metadata **start;
+	struct syscall_metadata **stop;
 	char str[KSYM_SYMBOL_LEN];
 
 
-	start = (struct syscall_metadata *)__start_syscalls_metadata;
-	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	start = __start_syscalls_metadata;
+	stop = __stop_syscalls_metadata;
 	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 
 	for ( ; start < stop; start++) {
@@ -78,8 +79,8 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
 		 * with "SyS" instead of "sys", leading to an unwanted
 		 * mismatch.
 		 */
-		if (start->name && !strcmp(start->name + 3, str + 3))
-			return start;
+		if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
+			return *start;
 	}
 	return NULL;
 }
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index e95ee7f31d43..68187af4889e 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -27,8 +27,8 @@
 #include <linux/sched.h>
 #include <linux/jump_label.h>
 
-extern struct tracepoint __start___tracepoints[];
-extern struct tracepoint __stop___tracepoints[];
+extern struct tracepoint * const __start___tracepoints_ptrs[];
+extern struct tracepoint * const __stop___tracepoints_ptrs[];
 
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
@@ -298,10 +298,10 @@ static void disable_tracepoint(struct tracepoint *elem)
  *
  * Updates the probe callback corresponding to a range of tracepoints.
  */
-void
-tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+				   struct tracepoint * const *end)
 {
-	struct tracepoint *iter;
+	struct tracepoint * const *iter;
 	struct tracepoint_entry *mark_entry;
 
 	if (!begin)
@@ -309,12 +309,12 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
 
 	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
-		mark_entry = get_tracepoint(iter->name);
+		mark_entry = get_tracepoint((*iter)->name);
 		if (mark_entry) {
-			set_tracepoint(&mark_entry, iter,
+			set_tracepoint(&mark_entry, *iter,
 					!!mark_entry->refcount);
 		} else {
-			disable_tracepoint(iter);
+			disable_tracepoint(*iter);
 		}
 	}
 	mutex_unlock(&tracepoints_mutex);
@@ -326,8 +326,8 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
 static void tracepoint_update_probes(void)
 {
 	/* Core kernel tracepoints */
-	tracepoint_update_probe_range(__start___tracepoints,
-		__stop___tracepoints);
+	tracepoint_update_probe_range(__start___tracepoints_ptrs,
+		__stop___tracepoints_ptrs);
 	/* tracepoints in modules. */
 	module_update_tracepoints();
 }
@@ -514,8 +514,8 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
-int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end)
+int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+	struct tracepoint * const *begin, struct tracepoint * const *end)
 {
 	if (!*tracepoint && begin != end) {
 		*tracepoint = begin;
@@ -534,7 +534,8 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter)
 	/* Core kernel tracepoints */
 	if (!iter->module) {
 		found = tracepoint_get_iter_range(&iter->tracepoint,
-				__start___tracepoints, __stop___tracepoints);
+				__start___tracepoints_ptrs,
+				__stop___tracepoints_ptrs);
 		if (found)
 			goto end;
 	}
@@ -585,8 +586,8 @@ int tracepoint_module_notify(struct notifier_block *self,
 	switch (val) {
 	case MODULE_STATE_COMING:
 	case MODULE_STATE_GOING:
-		tracepoint_update_probe_range(mod->tracepoints,
-			mod->tracepoints + mod->num_tracepoints);
+		tracepoint_update_probe_range(mod->tracepoints_ptrs,
+			mod->tracepoints_ptrs + mod->num_tracepoints);
 		break;
 	}
 	return 0;
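
Note on the pattern behind this conversion (shared with the trace_events.c, trace_export.c and trace_syscalls.c hunks above): emit a pointer to each object into the named section rather than the object itself, then walk the linker-delimited pointer array with one extra level of indirection; the objects themselves can then live in read-only or specially aligned storage. On ELF targets the linker synthesizes __start_<section> and __stop_<section> symbols for any user-defined section, so the idiom can be sketched in plain userspace C (builds with GCC; the section name my_ptrs and struct tp are arbitrary stand-ins):

#include <stdio.h>

struct tp { const char *name; };

/* linker-provided bounds of the custom "my_ptrs" section */
extern struct tp * const __start_my_ptrs[];
extern struct tp * const __stop_my_ptrs[];

/* place a pointer to the object in the section, not the object */
#define REGISTER_TP(tpobj)					\
	static struct tp * const __ptr_##tpobj			\
	__attribute__((used, section("my_ptrs"))) = &tpobj

static struct tp tp_alpha = { "alpha" };
static struct tp tp_beta  = { "beta" };
REGISTER_TP(tp_alpha);
REGISTER_TP(tp_beta);

int main(void)
{
	struct tp * const *iter;

	/* mirrors tracepoint_update_probe_range()'s double indirection */
	for (iter = __start_my_ptrs; iter < __stop_my_ptrs; iter++)
		printf("%s\n", (*iter)->name);
	return 0;
}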
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d7ebdf4cea98..18bb15776c57 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <asm/irq_regs.h> | 27 | #include <asm/irq_regs.h> |
28 | #include <linux/perf_event.h> | 28 | #include <linux/perf_event.h> |
29 | 29 | ||
30 | int watchdog_enabled; | 30 | int watchdog_enabled = 1; |
31 | int __read_mostly softlockup_thresh = 60; | 31 | int __read_mostly softlockup_thresh = 60; |
32 | 32 | ||
33 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); | 33 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); |
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | |||
43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | 43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | static int no_watchdog; | ||
47 | |||
48 | |||
49 | /* boot commands */ | 46 | /* boot commands */ |
50 | /* | 47 | /* |
51 | * Should we panic when a soft-lockup or hard-lockup occurs: | 48 | * Should we panic when a soft-lockup or hard-lockup occurs: |
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str) | |||
58 | if (!strncmp(str, "panic", 5)) | 55 | if (!strncmp(str, "panic", 5)) |
59 | hardlockup_panic = 1; | 56 | hardlockup_panic = 1; |
60 | else if (!strncmp(str, "0", 1)) | 57 | else if (!strncmp(str, "0", 1)) |
61 | no_watchdog = 1; | 58 | watchdog_enabled = 0; |
62 | return 1; | 59 | return 1; |
63 | } | 60 | } |
64 | __setup("nmi_watchdog=", hardlockup_panic_setup); | 61 | __setup("nmi_watchdog=", hardlockup_panic_setup); |
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup); | |||
77 | 74 | ||
78 | static int __init nowatchdog_setup(char *str) | 75 | static int __init nowatchdog_setup(char *str) |
79 | { | 76 | { |
80 | no_watchdog = 1; | 77 | watchdog_enabled = 0; |
81 | return 1; | 78 | return 1; |
82 | } | 79 | } |
83 | __setup("nowatchdog", nowatchdog_setup); | 80 | __setup("nowatchdog", nowatchdog_setup); |
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup); | |||
85 | /* deprecated */ | 82 | /* deprecated */ |
86 | static int __init nosoftlockup_setup(char *str) | 83 | static int __init nosoftlockup_setup(char *str) |
87 | { | 84 | { |
88 | no_watchdog = 1; | 85 | watchdog_enabled = 0; |
89 | return 1; | 86 | return 1; |
90 | } | 87 | } |
91 | __setup("nosoftlockup", nosoftlockup_setup); | 88 | __setup("nosoftlockup", nosoftlockup_setup); |
@@ -366,8 +363,14 @@ static int watchdog_nmi_enable(int cpu) | |||
366 | goto out_save; | 363 | goto out_save; |
367 | } | 364 | } |
368 | 365 | ||
369 | printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", | 366 | |
370 | cpu, PTR_ERR(event)); | 367 | /* vary the KERN level based on the returned errno */ |
368 | if (PTR_ERR(event) == -EOPNOTSUPP) | ||
369 | printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | ||
370 | else if (PTR_ERR(event) == -ENOENT) | ||
371 | printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu); | ||
372 | else | ||
373 | printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); | ||
371 | return PTR_ERR(event); | 374 | return PTR_ERR(event); |
372 | 375 | ||
373 | /* success path */ | 376 | /* success path */ |
@@ -432,9 +435,6 @@ static int watchdog_enable(int cpu) | |||
432 | wake_up_process(p); | 435 | wake_up_process(p); |
433 | } | 436 | } |
434 | 437 | ||
435 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
436 | watchdog_enabled = 1; | ||
437 | |||
438 | return 0; | 438 | return 0; |
439 | } | 439 | } |
440 | 440 | ||
@@ -462,12 +462,16 @@ static void watchdog_disable(int cpu) | |||
462 | static void watchdog_enable_all_cpus(void) | 462 | static void watchdog_enable_all_cpus(void) |
463 | { | 463 | { |
464 | int cpu; | 464 | int cpu; |
465 | int result = 0; | 465 | |
466 | watchdog_enabled = 0; | ||
466 | 467 | ||
467 | for_each_online_cpu(cpu) | 468 | for_each_online_cpu(cpu) |
468 | result += watchdog_enable(cpu); | 469 | if (!watchdog_enable(cpu)) |
470 | /* if any cpu succeeds, watchdog is considered | ||
471 | enabled for the system */ | ||
472 | watchdog_enabled = 1; | ||
469 | 473 | ||
470 | if (result) | 474 | if (!watchdog_enabled) |
471 | printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); | 475 | printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); |
472 | 476 | ||
473 | } | 477 | } |
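
Editor's note: notice the inversion across the last two hunks. watchdog_enable() no longer sets the global flag itself; instead watchdog_enable_all_cpus() clears watchdog_enabled up front and re-asserts it whenever any per-CPU enable returns 0, so one healthy CPU is enough for the system to count as enabled. A sketch of that any-success aggregation, with a fake enable_one() that fails on one CPU:

#include <stdio.h>

#define NR_CPUS 4

static int watchdog_enabled;

/* placeholder: pretend cpu 2 fails to enable */
static int enable_one(int cpu) { return cpu == 2 ? -1 : 0; }

static void watchdog_enable_all_cpus(void)
{
	int cpu;

	watchdog_enabled = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (!enable_one(cpu))
			/* if any cpu succeeds, the system counts as enabled */
			watchdog_enabled = 1;

	if (!watchdog_enabled)
		fprintf(stderr, "watchdog: failed to be enabled on some cpus\n");
}

int main(void)
{
	watchdog_enable_all_cpus();
	printf("watchdog_enabled = %d\n", watchdog_enabled);
	return 0;
}
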
@@ -476,9 +480,6 @@ static void watchdog_disable_all_cpus(void) | |||
476 | { | 480 | { |
477 | int cpu; | 481 | int cpu; |
478 | 482 | ||
479 | if (no_watchdog) | ||
480 | return; | ||
481 | |||
482 | for_each_online_cpu(cpu) | 483 | for_each_online_cpu(cpu) |
483 | watchdog_disable(cpu); | 484 | watchdog_disable(cpu); |
484 | 485 | ||
@@ -498,10 +499,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write, | |||
498 | { | 499 | { |
499 | proc_dointvec(table, write, buffer, length, ppos); | 500 | proc_dointvec(table, write, buffer, length, ppos); |
500 | 501 | ||
501 | if (watchdog_enabled) | 502 | if (write) { |
502 | watchdog_enable_all_cpus(); | 503 | if (watchdog_enabled) |
503 | else | 504 | watchdog_enable_all_cpus(); |
504 | watchdog_disable_all_cpus(); | 505 | else |
506 | watchdog_disable_all_cpus(); | ||
507 | } | ||
505 | return 0; | 508 | return 0; |
506 | } | 509 | } |
507 | 510 | ||
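
Editor's note: the proc handler fix is purely about reads. Previously, merely reading /proc/sys/kernel/watchdog re-ran the enable/disable machinery; now those side effects are gated on write. A condensed model, with the integer assignment standing in for proc_dointvec():

#include <stdio.h>

static int watchdog_enabled = 1;

static void enable_all(void)  { puts("enable watchdog on all cpus"); }
static void disable_all(void) { puts("disable watchdog on all cpus"); }

/* models proc_dowatchdog_enabled(); 'write' is nonzero for a write(2) */
static int dowatchdog_enabled(int write, int value)
{
	if (write) {
		watchdog_enabled = value;	/* proc_dointvec() stand-in */
		if (watchdog_enabled)		/* the fix: act only on writes */
			enable_all();
		else
			disable_all();
	}
	return 0;
}

int main(void)
{
	dowatchdog_enabled(0, 0);	/* read: no side effects */
	dowatchdog_enabled(1, 0);	/* write 0: disables */
	return 0;
}
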
@@ -530,7 +533,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
530 | break; | 533 | break; |
531 | case CPU_ONLINE: | 534 | case CPU_ONLINE: |
532 | case CPU_ONLINE_FROZEN: | 535 | case CPU_ONLINE_FROZEN: |
533 | err = watchdog_enable(hotcpu); | 536 | if (watchdog_enabled) |
537 | err = watchdog_enable(hotcpu); | ||
534 | break; | 538 | break; |
535 | #ifdef CONFIG_HOTPLUG_CPU | 539 | #ifdef CONFIG_HOTPLUG_CPU |
536 | case CPU_UP_CANCELED: | 540 | case CPU_UP_CANCELED: |
@@ -555,9 +559,6 @@ void __init lockup_detector_init(void) | |||
555 | void *cpu = (void *)(long)smp_processor_id(); | 559 | void *cpu = (void *)(long)smp_processor_id(); |
556 | int err; | 560 | int err; |
557 | 561 | ||
558 | if (no_watchdog) | ||
559 | return; | ||
560 | |||
561 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | 562 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); |
562 | WARN_ON(notifier_to_errno(err)); | 563 | WARN_ON(notifier_to_errno(err)); |
563 | 564 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 11869faa6819..ee6578b578ad 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -79,7 +79,9 @@ enum { | |||
79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ | 79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ |
80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ | 80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ |
81 | 81 | ||
82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ | 82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, |
83 | /* call for help after 10ms | ||
84 | (min two ticks) */ | ||
83 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ | 85 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ |
84 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ | 86 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ |
85 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ | 87 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ |
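
Editor's note: the MAYDAY_INITIAL_TIMEOUT change guards against coarse HZ. At HZ=100, HZ/100 is a single jiffy, and a one-jiffy timer can expire almost immediately because the current tick may already be mostly elapsed, so the constant is clamped to at least two ticks. A quick check of the arithmetic at a few common HZ values (written as a function here; the kernel computes it as an enum constant):

#include <stdio.h>

static int mayday_initial_timeout(int hz)
{
	return hz / 100 >= 2 ? hz / 100 : 2;	/* ~10ms, min two ticks */
}

int main(void)
{
	int hzs[] = { 100, 250, 300, 1000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("HZ=%4d -> MAYDAY_INITIAL_TIMEOUT=%d ticks\n",
		       hzs[i], mayday_initial_timeout(hzs[i]));
	return 0;
}
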
@@ -2047,6 +2049,15 @@ repeat: | |||
2047 | move_linked_works(work, scheduled, &n); | 2049 | move_linked_works(work, scheduled, &n); |
2048 | 2050 | ||
2049 | process_scheduled_works(rescuer); | 2051 | process_scheduled_works(rescuer); |
2052 | |||
2053 | /* | ||
2054 | * Leave this gcwq. If keep_working() is %true, notify a | ||
2055 | * regular worker; otherwise, we end up with 0 concurrency | ||
2056 | * and stalling the execution. | ||
2057 | */ | ||
2058 | if (keep_working(gcwq)) | ||
2059 | wake_up_worker(gcwq); | ||
2060 | |||
2050 | spin_unlock_irq(&gcwq->lock); | 2061 | spin_unlock_irq(&gcwq->lock); |
2051 | } | 2062 | } |
2052 | 2063 | ||
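
Editor's note: the added wake-up covers the case where the rescuer finishes its scheduled works and leaves while the gcwq still has pending work but no running workers, which would stall execution at zero concurrency. A toy model of the exit check; keep_working() here is a guess at the spirit of the real predicate (pending work, too few running workers), not its exact definition, and rescuer_leave() is an invented name:

#include <stdbool.h>
#include <stdio.h>

struct gcwq { int nr_pending; int nr_running; };

/* stand-in for the kernel's keep_working() predicate */
static bool keep_working(const struct gcwq *g)
{
	return g->nr_pending > 0 && g->nr_running <= 1;
}

static void wake_up_worker(const struct gcwq *g)
{
	printf("wake a regular worker (%d works pending)\n", g->nr_pending);
}

static void rescuer_leave(const struct gcwq *g)
{
	/* without this, the rescuer departs and concurrency drops to 0 */
	if (keep_working(g))
		wake_up_worker(g);
}

int main(void)
{
	struct gcwq g = { 3, 1 };

	rescuer_leave(&g);
	return 0;
}
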
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
2956 | */ | 2967 | */ |
2957 | spin_lock(&workqueue_lock); | 2968 | spin_lock(&workqueue_lock); |
2958 | 2969 | ||
2959 | if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) | 2970 | if (workqueue_freezing && wq->flags & WQ_FREEZABLE) |
2960 | for_each_cwq_cpu(cpu, wq) | 2971 | for_each_cwq_cpu(cpu, wq) |
2961 | get_cwq(cpu, wq)->max_active = 0; | 2972 | get_cwq(cpu, wq)->max_active = 0; |
2962 | 2973 | ||
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | |||
3068 | 3079 | ||
3069 | spin_lock_irq(&gcwq->lock); | 3080 | spin_lock_irq(&gcwq->lock); |
3070 | 3081 | ||
3071 | if (!(wq->flags & WQ_FREEZEABLE) || | 3082 | if (!(wq->flags & WQ_FREEZABLE) || |
3072 | !(gcwq->flags & GCWQ_FREEZING)) | 3083 | !(gcwq->flags & GCWQ_FREEZING)) |
3073 | get_cwq(gcwq->cpu, wq)->max_active = max_active; | 3084 | get_cwq(gcwq->cpu, wq)->max_active = max_active; |
3074 | 3085 | ||
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3318 | * want to get it over with ASAP - spam rescuers, wake up as | 3329 | * want to get it over with ASAP - spam rescuers, wake up as |
3319 | * many idlers as necessary and create new ones till the | 3330 | * many idlers as necessary and create new ones till the |
3320 | * worklist is empty. Note that if the gcwq is frozen, there | 3331 | * worklist is empty. Note that if the gcwq is frozen, there |
3321 | * may be frozen works in freezeable cwqs. Don't declare | 3332 | * may be frozen works in freezable cwqs. Don't declare |
3322 | * completion while frozen. | 3333 | * completion while frozen. |
3323 | */ | 3334 | */ |
3324 | while (gcwq->nr_workers != gcwq->nr_idle || | 3335 | while (gcwq->nr_workers != gcwq->nr_idle || |
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
3576 | /** | 3587 | /** |
3577 | * freeze_workqueues_begin - begin freezing workqueues | 3588 | * freeze_workqueues_begin - begin freezing workqueues |
3578 | * | 3589 | * |
3579 | * Start freezing workqueues. After this function returns, all | 3590 | * Start freezing workqueues. After this function returns, all freezable |
3580 | * freezeable workqueues will queue new works to their frozen_works | 3591 | * workqueues will queue new works to their frozen_works list instead of |
3581 | * list instead of gcwq->worklist. | 3592 | * gcwq->worklist. |
3582 | * | 3593 | * |
3583 | * CONTEXT: | 3594 | * CONTEXT: |
3584 | * Grabs and releases workqueue_lock and gcwq->lock's. | 3595 | * Grabs and releases workqueue_lock and gcwq->lock's. |
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void) | |||
3604 | list_for_each_entry(wq, &workqueues, list) { | 3615 | list_for_each_entry(wq, &workqueues, list) { |
3605 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3616 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3606 | 3617 | ||
3607 | if (cwq && wq->flags & WQ_FREEZEABLE) | 3618 | if (cwq && wq->flags & WQ_FREEZABLE) |
3608 | cwq->max_active = 0; | 3619 | cwq->max_active = 0; |
3609 | } | 3620 | } |
3610 | 3621 | ||
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void) | |||
3615 | } | 3626 | } |
3616 | 3627 | ||
3617 | /** | 3628 | /** |
3618 | * freeze_workqueues_busy - are freezeable workqueues still busy? | 3629 | * freeze_workqueues_busy - are freezable workqueues still busy? |
3619 | * | 3630 | * |
3620 | * Check whether freezing is complete. This function must be called | 3631 | * Check whether freezing is complete. This function must be called |
3621 | * between freeze_workqueues_begin() and thaw_workqueues(). | 3632 | * between freeze_workqueues_begin() and thaw_workqueues(). |
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void) | |||
3624 | * Grabs and releases workqueue_lock. | 3635 | * Grabs and releases workqueue_lock. |
3625 | * | 3636 | * |
3626 | * RETURNS: | 3637 | * RETURNS: |
3627 | * %true if some freezeable workqueues are still busy. %false if | 3638 | * %true if some freezable workqueues are still busy. %false if freezing |
3628 | * freezing is complete. | 3639 | * is complete. |
3629 | */ | 3640 | */ |
3630 | bool freeze_workqueues_busy(void) | 3641 | bool freeze_workqueues_busy(void) |
3631 | { | 3642 | { |
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void) | |||
3645 | list_for_each_entry(wq, &workqueues, list) { | 3656 | list_for_each_entry(wq, &workqueues, list) { |
3646 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3657 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3647 | 3658 | ||
3648 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3659 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3649 | continue; | 3660 | continue; |
3650 | 3661 | ||
3651 | BUG_ON(cwq->nr_active < 0); | 3662 | BUG_ON(cwq->nr_active < 0); |
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void) | |||
3690 | list_for_each_entry(wq, &workqueues, list) { | 3701 | list_for_each_entry(wq, &workqueues, list) { |
3691 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3702 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3692 | 3703 | ||
3693 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3704 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3694 | continue; | 3705 | continue; |
3695 | 3706 | ||
3696 | /* restore max_active and repopulate worklist */ | 3707 | /* restore max_active and repopulate worklist */ |
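
Editor's note: taken together, the workqueue hunks from __alloc_workqueue_key() onward are a mechanical spelling rename, WQ_FREEZEABLE to WQ_FREEZABLE, with matching comment fixes; the freeze protocol itself is unchanged. freeze_workqueues_begin() zeroes max_active on every freezable cwq, freeze_workqueues_busy() reports whether any still has active works draining, and thaw_workqueues() restores max_active. A compact model of that three-phase protocol, with a simplified stand-in for struct cpu_workqueue_struct:

#include <stdbool.h>
#include <stdio.h>

#define WQ_FREEZABLE 0x1	/* renamed from WQ_FREEZEABLE in this commit */

struct cwq {
	int flags;
	int max_active;		/* 0 while frozen: new works queue as frozen */
	int saved_max_active;
	int nr_active;		/* works started before the freeze */
};

static void freeze_begin(struct cwq *c)
{
	if (c->flags & WQ_FREEZABLE)
		c->max_active = 0;
}

static bool freeze_busy(const struct cwq *c)
{
	/* still draining previously started works? */
	return (c->flags & WQ_FREEZABLE) && c->nr_active > 0;
}

static void thaw(struct cwq *c)
{
	if (c->flags & WQ_FREEZABLE)
		c->max_active = c->saved_max_active;	/* repopulate worklist */
}

int main(void)
{
	struct cwq c = { WQ_FREEZABLE, 4, 4, 1 };

	freeze_begin(&c);
	printf("busy after begin: %d\n", freeze_busy(&c));
	c.nr_active = 0;	/* pretend the in-flight work completed */
	printf("busy after drain: %d\n", freeze_busy(&c));
	thaw(&c);
	printf("max_active restored: %d\n", c.max_active);
	return 0;
}
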