author	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:46:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 08:47:05 -0400
commit	e7fd5d4b3d240f42c30a9e3d20a4689c4d3a795a (patch)
tree	4ba588631dd8189a818a91c9e3976526071178b6 /kernel
parent	1130b0296184bc21806225fd06d533515a99d2db (diff)
parent	56a50adda49b2020156616c4eb15353e0f9ad7de (diff)
Merge branch 'linus' into perfcounters/core
Merge reason: This branch was on -rc1, refresh it to almost-rc4 to pick up
the latest upstream fixes.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
41 files changed, 441 insertions(+), 265 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab9525568..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
 		dentry = dget(path.dentry);
 		path_put(&path);
 
-		if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-			follow_up(&mnt, &dentry);
-
 		list_add_tail(&list, &tagged->mnt_list);
 
 		mutex_lock(&audit_filter_mutex);
diff --git a/kernel/fork.c b/kernel/fork.c
index 89c1efb3ccf4..d32fef4d38e5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -800,6 +800,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 	sig->cputime_expires.virt_exp = cputime_zero;
 	sig->cputime_expires.sched_exp = 0;
 
+	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+		sig->cputime_expires.prof_exp =
+			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+		sig->cputimer.running = 1;
+	}
+
 	/* The timer lists. */
 	INIT_LIST_HEAD(&sig->cpu_timers[0]);
 	INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -815,11 +821,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 		atomic_inc(&current->signal->live);
 		return 0;
 	}
-	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
-
-	if (sig)
-		posix_cpu_timers_init_group(sig);
 
+	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
 	if (!sig)
 		return -ENOMEM;
@@ -859,6 +862,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
 
+	posix_cpu_timers_init_group(sig);
+
 	acct_init_pacct(&sig->pacct);
 
 	tty_audit_fork(sig);
diff --git a/kernel/futex.c b/kernel/futex.c
index 6b50a024bca2..eef8cd26b5e5 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -883,7 +883,12 @@ retry_private:
 out_unlock:
 	double_unlock_hb(hb1, hb2);
 
-	/* drop_futex_key_refs() must be called outside the spinlocks. */
+	/*
+	 * drop_futex_key_refs() must be called outside the spinlocks. During
+	 * the requeue we moved futex_q's from the hash bucket at key1 to the
+	 * one at key2 and updated their key pointer. We no longer need to
+	 * hold the references to key1.
+	 */
 	while (--drop_count >= 0)
 		drop_futex_key_refs(&key1);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd4cd2f..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT)
 		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 243d6121e50e..44bbdcbaf8d2 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -54,6 +54,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
 {
 	free_kstat_irqs(old_desc, desc);
+	free_desc_masks(old_desc, desc);
 	arch_free_chip_data(old_desc, desc);
 }
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 84bbadd4d021..4ebaf8519abf 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -76,6 +76,7 @@ static int kthread(void *_create)
 
 	/* OK, tell user we're spawned, wait for stop or wakeup */
 	__set_current_state(TASK_UNINTERRUPTIBLE);
+	create->result = current;
 	complete(&create->started);
 	schedule();
 
@@ -96,22 +97,10 @@ static void create_kthread(struct kthread_create_info *create)
 
 	/* We want our own signal handler (we take no signals by default). */
 	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
-	if (pid < 0) {
+	if (pid < 0)
 		create->result = ERR_PTR(pid);
-	} else {
-		struct sched_param param = { .sched_priority = 0 };
+	else
 		wait_for_completion(&create->started);
-		read_lock(&tasklist_lock);
-		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
-		read_unlock(&tasklist_lock);
-		/*
-		 * root may have changed our (kthreadd's) priority or CPU mask.
-		 * The kernel thread should not inherit these properties.
-		 */
-		sched_setscheduler(create->result, SCHED_NORMAL, &param);
-		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
-		set_cpus_allowed_ptr(create->result, cpu_all_mask);
-	}
 	complete(&create->done);
 }
 
@@ -154,11 +143,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
+		struct sched_param param = { .sched_priority = 0 };
 		va_list args;
+
 		va_start(args, namefmt);
 		vsnprintf(create.result->comm, sizeof(create.result->comm),
 			  namefmt, args);
 		va_end(args);
+		/*
+		 * root may have changed our (kthreadd's) priority or CPU mask.
+		 * The kernel thread should not inherit these properties.
+		 */
+		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
+		set_user_nice(create.result, KTHREAD_NICE_LEVEL);
+		set_cpus_allowed_ptr(create.result, cpu_all_mask);
 	}
 	return create.result;
 }
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b0f011866969..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
diff --git a/kernel/module.c b/kernel/module.c
index 05f014efa32c..e797812a4d95 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2388,6 +2388,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_LIVE, mod);
 
+	/* We need to finish all async code before the module init sequence is done */
+	async_synchronize_full();
+
 	mutex_lock(&module_mutex);
 	/* Drop initial reference. */
 	module_put(mod);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index fd95eaa672e6..f415e80a9119 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,7 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	preempt_disable();
 	mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
+    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
 	/*
 	 * Optimistic spinning.
 	 *
diff --git a/kernel/panic.c b/kernel/panic.c
index 3fd8c5bf8b39..3dcaa1661357 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -213,8 +213,16 @@ unsigned long get_taint(void)
 
 void add_taint(unsigned flag)
 {
-	/* can't trust the integrity of the kernel anymore: */
-	debug_locks = 0;
+	/*
+	 * Can't trust the integrity of the kernel anymore.
+	 * We don't call directly debug_locks_off() because the issue
+	 * is not necessarily serious enough to set oops_in_progress to 1
+	 * Also we want to keep up lockdep for staging development and
+	 * post-warning case.
+	 */
+	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+
 	set_bit(flag, &tainted_mask);
 }
 EXPORT_SYMBOL(add_taint);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 8e5d9a68b022..c9dcf98b4463 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new)
 
 	cputime = secs_to_cputime(rlim_new);
 	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-	    cputime_lt(current->signal->it_prof_expires, cputime)) {
+	    cputime_gt(current->signal->it_prof_expires, cputime)) {
 		spin_lock_irq(&current->sighand->siglock);
 		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
 		spin_unlock_irq(&current->sighand->siglock);
@@ -224,7 +224,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 		cpu->cpu = virt_ticks(p);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
+		cpu->sched = task_sched_runtime(p);
 		break;
 	}
 	return 0;
@@ -305,18 +305,19 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 {
 	struct task_cputime cputime;
 
-	thread_group_cputime(p, &cputime);
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
+		thread_group_cputime(p, &cputime);
 		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
 		break;
 	case CPUCLOCK_VIRT:
+		thread_group_cputime(p, &cputime);
 		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		cpu->sched = thread_group_sched_runtime(p);
 		break;
 	}
 	return 0;
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 5f21ab2bbcdf..e71ca9cd81b2 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,7 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
+#include <scsi/scsi_scan.h>
 #include <asm/suspend.h>
 
 #include "power.h"
@@ -655,32 +656,42 @@ static int software_resume(void)
 	 * here to avoid lockdep complaining.
 	 */
 	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+	if (swsusp_resume_device)
+		goto Check_image;
+
+	if (!strlen(resume_file)) {
+		error = -ENOENT;
+		goto Unlock;
+	}
+
+	pr_debug("PM: Checking image partition %s\n", resume_file);
+
+	/* Check if the device is there */
+	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
-		if (!strlen(resume_file)) {
-			mutex_unlock(&pm_mutex);
-			return -ENOENT;
-		}
 		/*
 		 * Some device discovery might still be in progress; we need
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
+
 		swsusp_resume_device = name_to_dev_t(resume_file);
-		pr_debug("PM: Resume from partition %s\n", resume_file);
-	} else {
-		pr_debug("PM: Resume from partition %d:%d\n",
-			MAJOR(swsusp_resume_device),
-			MINOR(swsusp_resume_device));
+		if (!swsusp_resume_device) {
+			error = -ENODEV;
+			goto Unlock;
+		}
 	}
 
-	if (noresume) {
-		/**
-		 * FIXME: If noresume is specified, we need to find the
-		 * partition and reset it back to normal swap space.
-		 */
-		mutex_unlock(&pm_mutex);
-		return 0;
-	}
+ Check_image:
+	pr_debug("PM: Resume from partition %d:%d\n",
+		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
 	pr_debug("PM: Checking hibernation image.\n");
 	error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f172f41858bb..f99ed6a75eac 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state)
 
 	device_pm_lock();
 
+	if (suspend_ops->prepare) {
+		error = suspend_ops->prepare();
+		if (error)
+			goto Done;
+	}
+
 	error = device_power_down(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
-		goto Done;
+		goto Platfrom_finish;
 	}
 
-	if (suspend_ops->prepare) {
-		error = suspend_ops->prepare();
+	if (suspend_ops->prepare_late) {
+		error = suspend_ops->prepare_late();
 		if (error)
 			goto Power_up_devices;
 	}
 
 	if (suspend_test(TEST_PLATFORM))
-		goto Platfrom_finish;
+		goto Platform_wake;
 
 	error = disable_nonboot_cpus();
 	if (error || suspend_test(TEST_CPUS))
@@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state)
  Enable_cpus:
 	enable_nonboot_cpus();
 
- Platfrom_finish:
-	if (suspend_ops->finish)
-		suspend_ops->finish();
+ Platform_wake:
+	if (suspend_ops->wake)
+		suspend_ops->wake();
 
  Power_up_devices:
 	device_power_up(PMSG_RESUME);
 
+ Platfrom_finish:
+	if (suspend_ops->finish)
+		suspend_ops->finish();
+
  Done:
 	device_pm_unlock();
 
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319e489c..8ba052c86d48 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	if (!bio)
-		return -ENOMEM;
 	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
 	bio->bi_bdev = resume_bdev;
 	bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6c85359364f2..ed97375daae9 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -24,6 +24,7 @@
 #include <linux/cpu.h>
 #include <linux/freezer.h>
 #include <linux/smp_lock.h>
+#include <scsi/scsi_scan.h>
 
 #include <asm/uaccess.h>
 
@@ -92,6 +93,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	filp->private_data = data;
 	memset(&data->handle, 0, sizeof(struct snapshot_handle));
 	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+		/* Hibernating.  The image device should be accessible. */
 		data->swap = swsusp_resume_device ?
 			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
 		data->mode = O_RDONLY;
@@ -99,6 +101,13 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		if (error)
 			pm_notifier_call_chain(PM_POST_HIBERNATION);
 	} else {
+		/*
+		 * Resuming.  We may need to wait for the image device to
+		 * appear.
+		 */
+		wait_for_device_probe();
+		scsi_complete_async_scans();
+
 		data->swap = -1;
 		data->mode = O_WRONLY;
 		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index aaad0ec34194..0692ab5a0d67 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -21,9 +21,7 @@
 #include <linux/audit.h>
 #include <linux/pid_namespace.h>
 #include <linux/syscalls.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 
 /*
@@ -48,7 +46,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 	list_add(&child->ptrace_entry, &new_parent->ptraced);
 	child->parent = new_parent;
 }
- 
+
 /*
  * Turn a tracing stop into a normal stop now, since with no tracer there
  * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
@@ -173,7 +171,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 	task_lock(task);
 	err = __ptrace_may_access(task, mode);
 	task_unlock(task);
-	return (!err ? true : false);
+	return !err;
 }
 
 int ptrace_attach(struct task_struct *task)
@@ -190,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
 	/* Protect exec's credential calculations against our interference;
 	 * SUID, SGID and LSM creds get determined differently under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
 	if (retval < 0)
 		goto out;
 
@@ -234,7 +232,7 @@ repeat:
 bad:
 	write_unlock_irqrestore(&tasklist_lock, flags);
 	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+	mutex_unlock(&task->cred_exec_mutex);
 out:
 	return retval;
 }
@@ -358,7 +356,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
 		copied += retval;
 		src += retval;
 		dst += retval;
-		len -= retval;			
+		len -= retval;
 	}
 	return copied;
 }
@@ -383,7 +381,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
 		copied += retval;
 		src += retval;
 		dst += retval;
-		len -= retval;			
+		len -= retval;
 	}
 	return copied;
 }
@@ -496,9 +494,9 @@ static int ptrace_resume(struct task_struct *child, long request, long data)
 		if (unlikely(!arch_has_single_step()))
 			return -EIO;
 		user_enable_single_step(child);
-	}
-	else
+	} else {
 		user_disable_single_step(child);
+	}
 
 	child->exit_code = data;
 	wake_up_process(child);
@@ -606,10 +604,11 @@ repeat:
 		ret = security_ptrace_traceme(current->parent);
 
 	/*
-	 * Set the ptrace bit in the process ptrace flags.
-	 * Then link us on our parent's ptraced list.
+	 * Check PF_EXITING to ensure ->real_parent has not passed
+	 * exit_ptrace(). Otherwise we don't report the error but
+	 * pretend ->real_parent untraces us right after return.
 	 */
-	if (!ret) {
+	if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 		current->ptrace |= PT_PTRACED;
 		__ptrace_link(current, current->real_parent);
 	}
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c7b8457d0d2..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type)
 	}
 }
 
-static inline void wait_migrated_callbacks(void);
+static inline void wait_migrated_callbacks(void)
+{
+	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
@@ -179,21 +186,12 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 static void rcu_migrate_callback(struct rcu_head *notused)
 {
 	if (atomic_dec_and_test(&rcu_migrate_type_count))
 		wake_up(&rcu_migrate_wq);
 }
 
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-}
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 					     unsigned long action, void *hcpu)
 {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7f3266922572..d2a372fb0b9b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
 	rdp->qs_pending = 1;
 	rdp->passed_quiesc = 0;
 	rdp->gpnum = rsp->gpnum;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 }
 
 /*
@@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	rsp->gpnum++;
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
 	dyntick_record_completed(rsp, rsp->completed - 1);
 	note_new_gpnum(rsp, rdp);
@@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 {
 	unsigned long flags;
 	long lastcomp;
-	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
@@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		return;	/* Someone else is already on the job. */
 	}
 	if (relaxed &&
-	    (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
 		goto unlock_ret; /* no emergency and done recently. */
 	rsp->n_force_qs++;
 	spin_lock(&rnp->lock);
 	lastcomp = rsp->completed;
 	signaled = rsp->signaled;
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 	if (lastcomp == rsp->gpnum) {
 		rsp->n_force_qs_ngp++;
 		spin_unlock(&rnp->lock);
@@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
 	 */
-	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 
 	/*
@@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	if (unlikely(++rdp->qlen > qhimark)) {
 		rdp->blimit = LONG_MAX;
 		force_quiescent_state(rsp, 0);
-	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-		   (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
 }
@@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
 	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
-	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	     (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
 		return 1;
 
 	/* nothing to do */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4ee954f6a8d5..4b1875ba9404 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   (int)(rdp->n_rcu_pending & 0xffff));
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, " dt=%d/%d dn=%d df=%lu",
 		   rdp->dynticks->dynticks,
@@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   rdp->n_rcu_pending);
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, ",%d,%d,%d,%lu",
 		   rdp->dynticks->dynticks,
@@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
-	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\",");
+	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
 	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/resource.c b/kernel/resource.c
index fd5d7d574bb9..ac5f3a36923f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root,
 	res->end = end;
 	res->flags = IORESOURCE_BUSY;
 
-	for (;;) {
-		conflict = __request_resource(parent, res);
-		if (!conflict)
-			break;
-		if (conflict != parent) {
-			parent = conflict;
-			if (!(conflict->flags & IORESOURCE_BUSY))
-				continue;
-		}
-
-		/* Uhhuh, that didn't work out.. */
-		kfree(res);
-		res = NULL;
-		break;
-	}
-
-	if (!res) {
-		/* failed, split and try again */
-
-		/* conflict covered whole area */
-		if (conflict->start <= start && conflict->end >= end)
-			return;
+	conflict = __request_resource(parent, res);
+	if (!conflict)
+		return;
 
-		if (conflict->start > start)
-			__reserve_region_with_split(root, start, conflict->start-1, name);
-		if (!(conflict->flags & IORESOURCE_BUSY)) {
-			resource_size_t common_start, common_end;
+	/* failed, split and try again */
+	kfree(res);
 
-			common_start = max(conflict->start, start);
-			common_end = min(conflict->end, end);
-			if (common_start < common_end)
-				__reserve_region_with_split(root, common_start, common_end, name);
-		}
-		if (conflict->end < end)
-			__reserve_region_with_split(root, conflict->end+1, end, name);
-	}
+	/* conflict covered whole area */
+	if (conflict->start <= start && conflict->end >= end)
+		return;
 
+	if (conflict->start > start)
+		__reserve_region_with_split(root, start, conflict->start-1, name);
+	if (conflict->end < end)
+		__reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
diff --git a/kernel/sched.c b/kernel/sched.c
index a69278eef425..2f600e30dcf0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1419,10 +1419,22 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		   struct rq_iterator *iterator);
 #endif
 
+/* Time spent by the tasks of the cpu accounting group executing in ... */
+enum cpuacct_stat_index {
+	CPUACCT_STAT_USER,	/* ... user mode */
+	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */
+
+	CPUACCT_STAT_NSTATS,
+};
+
 #ifdef CONFIG_CGROUP_CPUACCT
 static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+static void cpuacct_update_stats(struct task_struct *tsk,
+		enum cpuacct_stat_index idx, cputime_t val);
 #else
 static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+static inline void cpuacct_update_stats(struct task_struct *tsk,
+		enum cpuacct_stat_index idx, cputime_t val) {}
 #endif
 
 static inline void inc_cpu_load(struct rq *rq, unsigned long load)
@@ -4547,9 +4559,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * Return any ns on the sched_clock that have not yet been banked in
+ * Return any ns on the sched_clock that have not yet been accounted in
  * @p in case that task is currently running.
+ *
+ * Called with task_rq_lock() held on @rq.
  */
+static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
+{
+	u64 ns = 0;
+
+	if (task_current(rq, p)) {
+		update_rq_clock(rq);
+		ns = rq->clock - p->se.exec_start;
+		if ((s64)ns < 0)
+			ns = 0;
+	}
+
+	return ns;
+}
+
 unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
@@ -4557,16 +4585,49 @@ unsigned long long task_delta_exec(struct task_struct *p)
 	u64 ns = 0;
 
 	rq = task_rq_lock(p, &flags);
+	ns = do_task_delta_exec(p, rq);
+	task_rq_unlock(rq, &flags);
 
-	if (task_current(rq, p)) {
-		u64 delta_exec;
+	return ns;
+}
 
-		update_rq_clock(rq);
-		delta_exec = rq->clock - p->se.exec_start;
-		if ((s64)delta_exec > 0)
-			ns = delta_exec;
-	}
+/*
+ * Return accounted runtime for the task.
+ * In case the task is currently running, return the runtime plus current's
+ * pending runtime that have not been accounted yet.
+ */
+unsigned long long task_sched_runtime(struct task_struct *p)
+{
+	unsigned long flags;
+	struct rq *rq;
+	u64 ns = 0;
+
+	rq = task_rq_lock(p, &flags);
+	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+	task_rq_unlock(rq, &flags);
+
+	return ns;
+}
+
+/*
+ * Return sum_exec_runtime for the thread group.
+ * In case the task is currently running, return the sum plus current's
+ * pending runtime that have not been accounted yet.
+ *
+ * Note that the thread group might have other running tasks as well,
+ * so the return value not includes other pending runtime that other
+ * running tasks might have.
+ */
+unsigned long long thread_group_sched_runtime(struct task_struct *p)
+{
+	struct task_cputime totals;
+	unsigned long flags;
+	struct rq *rq;
+	u64 ns;
 
+	rq = task_rq_lock(p, &flags);
+	thread_group_cputime(p, &totals);
+	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
 	task_rq_unlock(rq, &flags);
 
 	return ns;
@@ -4595,6 +4656,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 		cpustat->nice = cputime64_add(cpustat->nice, tmp);
 	else
 		cpustat->user = cputime64_add(cpustat->user, tmp);
+
+	cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
 	/* Account for user time used */
 	acct_update_integrals(p);
 }
@@ -4656,6 +4719,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	else
 		cpustat->system = cputime64_add(cpustat->system, tmp);
 
+	cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+
 	/* Account for system time used */
 	acct_update_integrals(p);
 }
@@ -4818,7 +4883,7 @@ void scheduler_tick(void)
 #endif
 }
 
-unsigned long get_parent_ip(unsigned long addr)
+notrace unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
 		addr = CALLER_ADDR2;
@@ -7340,7 +7405,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
+
 		printk(KERN_CONT " %s", str);
+		if (group->__cpu_power != SCHED_LOAD_SCALE) {
+			printk(KERN_CONT " (__cpu_power = %d)",
+				group->__cpu_power);
+		}
 
 		group = group->next;
 	} while (group != sd->groups);
@@ -9963,6 +10033,7 @@ struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
 	u64 *cpuusage;
+	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
 	struct cpuacct *parent;
 };
 
@@ -9987,20 +10058,32 @@ static struct cgroup_subsys_state *cpuacct_create(
 	struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+	int i;
 
 	if (!ca)
-		return ERR_PTR(-ENOMEM);
+		goto out;
 
 	ca->cpuusage = alloc_percpu(u64);
-	if (!ca->cpuusage) {
-		kfree(ca);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (!ca->cpuusage)
+		goto out_free_ca;
+
+	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
+		if (percpu_counter_init(&ca->cpustat[i], 0))
+			goto out_free_counters;
 
 	if (cgrp->parent)
 		ca->parent = cgroup_ca(cgrp->parent);
 
 	return &ca->css;
+
+out_free_counters:
+	while (--i >= 0)
+		percpu_counter_destroy(&ca->cpustat[i]);
+	free_percpu(ca->cpuusage);
+out_free_ca:
+	kfree(ca);
+out:
+	return ERR_PTR(-ENOMEM);
 }
 
 /* destroy an existing cpu accounting group */
@@ -10008,7 +10091,10 @@ static void
 cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct cpuacct *ca = cgroup_ca(cgrp);
+	int i;
 
+	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
+		percpu_counter_destroy(&ca->cpustat[i]);
 	free_percpu(ca->cpuusage);
 	kfree(ca);
 }
@@ -10095,6 +10181,25 @@ static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
 	return 0;
 }
 
+static const char *cpuacct_stat_desc[] = {
+	[CPUACCT_STAT_USER] = "user",
+	[CPUACCT_STAT_SYSTEM] = "system",
+};
+
+static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
+		struct cgroup_map_cb *cb)
+{
+	struct cpuacct *ca = cgroup_ca(cgrp);
+	int i;
+
+	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
+		s64 val = percpu_counter_read(&ca->cpustat[i]);
+		val = cputime64_to_clock_t(val);
+		cb->fill(cb, cpuacct_stat_desc[i], val);
+	}
+	return 0;
+}
+
 static struct cftype files[] = {
 	{
 		.name = "usage",
@@ -10105,7 +10210,10 @@ static struct cftype files[] = {
 		.name = "usage_percpu",
 		.read_seq_string = cpuacct_percpu_seq_read,
 	},
-
+	{
+		.name = "stat",
+		.read_map = cpuacct_stats_show,
+	},
 };
 
 static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
@@ -10127,12 +10235,38 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 		return;
 
 	cpu = task_cpu(tsk);
+
+	rcu_read_lock();
+
 	ca = task_ca(tsk);
 
 	for (; ca; ca = ca->parent) {
 		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
+
+	rcu_read_unlock();
+}
+
+/*
+ * Charge the system/user time to the task's accounting group.
+ */
+static void cpuacct_update_stats(struct task_struct *tsk,
+		enum cpuacct_stat_index idx, cputime_t val)
+{
+	struct cpuacct *ca;
+
+	if (unlikely(!cpuacct_subsys.active))
+		return;
+
+	rcu_read_lock();
+	ca = task_ca(tsk);
+
+	do {
+		percpu_counter_add(&ca->cpustat[idx], val);
+		ca = ca->parent;
+	} while (ca);
+	rcu_read_unlock();
 }
 
 struct cgroup_subsys cpuacct_subsys = {
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 1e00bfacf9b8..cdd3c89574cd 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -55,7 +55,7 @@ static int convert_prio(int prio)
  * cpupri_find - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
- * @lowest_mask: A mask to fill in with selected CPUs
+ * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
  * current invokation. By the time the call returns, the CPUs may have in
@@ -81,7 +81,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+		if (lowest_mask)
+			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
 		return 1;
 	}
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 299d012b4394..f2c66f8f9712 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -948,20 +948,15 @@ static int select_task_rq_rt(struct task_struct *p, int sync) | |||
948 | 948 | ||
949 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | 949 | static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) |
950 | { | 950 | { |
951 | cpumask_var_t mask; | ||
952 | |||
953 | if (rq->curr->rt.nr_cpus_allowed == 1) | 951 | if (rq->curr->rt.nr_cpus_allowed == 1) |
954 | return; | 952 | return; |
955 | 953 | ||
956 | if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) | ||
957 | return; | ||
958 | |||
959 | if (p->rt.nr_cpus_allowed != 1 | 954 | if (p->rt.nr_cpus_allowed != 1 |
960 | && cpupri_find(&rq->rd->cpupri, p, mask)) | 955 | && cpupri_find(&rq->rd->cpupri, p, NULL)) |
961 | goto free; | 956 | return; |
962 | 957 | ||
963 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask)) | 958 | if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) |
964 | goto free; | 959 | return; |
965 | 960 | ||
966 | /* | 961 | /* |
967 | * There appear to be other CPUs that can accept | 962 | * There appear to be other CPUs that can accept |
@@ -970,8 +965,6 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | |||
970 | */ | 965 | */ |
971 | requeue_task_rt(rq, p, 1); | 966 | requeue_task_rt(rq, p, 1); |
972 | resched_task(rq->curr); | 967 | resched_task(rq->curr); |
973 | free: | ||
974 | free_cpumask_var(mask); | ||
975 | } | 968 | } |
976 | 969 | ||
977 | #endif /* CONFIG_SMP */ | 970 | #endif /* CONFIG_SMP */ |
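On the caller side, passing NULL removes an alloc_cpumask_var(GFP_ATOMIC) plus its failure and free paths from check_preempt_equal_prio(), which sits in the wakeup hot path. A hypothetical wrapper showing the probe-only call:

	/* Hypothetical helper: a pure yes/no probe, no scratch mask needed */
	static bool can_run_elsewhere(struct rq *rq, struct task_struct *p)
	{
		return p->rt.nr_cpus_allowed != 1 &&
		       cpupri_find(&rq->rd->cpupri, p, NULL);
	}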
diff --git a/kernel/slow-work.c b/kernel/slow-work.c index cf2bc01186ef..b28d19135f43 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c | |||
@@ -609,14 +609,14 @@ void slow_work_unregister_user(void) | |||
609 | if (slow_work_user_count == 0) { | 609 | if (slow_work_user_count == 0) { |
610 | printk(KERN_NOTICE "Slow work thread pool: Shutting down\n"); | 610 | printk(KERN_NOTICE "Slow work thread pool: Shutting down\n"); |
611 | slow_work_threads_should_exit = true; | 611 | slow_work_threads_should_exit = true; |
612 | del_timer_sync(&slow_work_cull_timer); | ||
613 | del_timer_sync(&slow_work_oom_timer); | ||
612 | wake_up_all(&slow_work_thread_wq); | 614 | wake_up_all(&slow_work_thread_wq); |
613 | wait_for_completion(&slow_work_last_thread_exited); | 615 | wait_for_completion(&slow_work_last_thread_exited); |
614 | printk(KERN_NOTICE "Slow work thread pool:" | 616 | printk(KERN_NOTICE "Slow work thread pool:" |
615 | " Shut down complete\n"); | 617 | " Shut down complete\n"); |
616 | } | 618 | } |
617 | 619 | ||
618 | del_timer_sync(&slow_work_cull_timer); | ||
619 | |||
620 | mutex_unlock(&slow_work_user_lock); | 620 | mutex_unlock(&slow_work_user_lock); |
621 | } | 621 | } |
622 | EXPORT_SYMBOL(slow_work_unregister_user); | 622 | EXPORT_SYMBOL(slow_work_unregister_user); |
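Moving the del_timer_sync() calls ahead of the wakeup ensures the cull/OOM timers cannot fire and re-arm work after the threads have been told to exit; the second hunk drops the now-redundant del_timer_sync() outside the if. A sketch of the shutdown ordering this enforces, with generic names rather than the slow-work symbols:

	#include <linux/timer.h>
	#include <linux/wait.h>
	#include <linux/completion.h>

	static bool threads_should_exit;
	static struct timer_list retry_timer;
	static DECLARE_WAIT_QUEUE_HEAD(thread_wq);
	static DECLARE_COMPLETION(last_thread_exited);

	static void pool_shutdown(void)
	{
		threads_should_exit = true;	/* 1. tell workers to stop */
		del_timer_sync(&retry_timer);	/* 2. quiesce timers that could
						 *    re-arm or wake the pool */
		wake_up_all(&thread_wq);	/* 3. only now wake threads */
		wait_for_completion(&last_thread_exited);
	}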
diff --git a/kernel/softirq.c b/kernel/softirq.c index 2fecefacdc5b..b525dd348511 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -472,9 +472,9 @@ void tasklet_kill(struct tasklet_struct *t) | |||
472 | printk("Attempt to kill tasklet from interrupt\n"); | 472 | printk("Attempt to kill tasklet from interrupt\n"); |
473 | 473 | ||
474 | while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { | 474 | while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { |
475 | do | 475 | do { |
476 | yield(); | 476 | yield(); |
477 | while (test_bit(TASKLET_STATE_SCHED, &t->state)); | 477 | } while (test_bit(TASKLET_STATE_SCHED, &t->state)); |
478 | } | 478 | } |
479 | tasklet_unlock_wait(t); | 479 | tasklet_unlock_wait(t); |
480 | clear_bit(TASKLET_STATE_SCHED, &t->state); | 480 | clear_bit(TASKLET_STATE_SCHED, &t->state); |
diff --git a/kernel/sys.c b/kernel/sys.c index 14c4c5613118..438d99a38c87 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -361,6 +361,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, | |||
361 | void __user *, arg) | 361 | void __user *, arg) |
362 | { | 362 | { |
363 | char buffer[256]; | 363 | char buffer[256]; |
364 | int ret = 0; | ||
364 | 365 | ||
365 | /* We only trust the superuser with rebooting the system. */ | 366 | /* We only trust the superuser with rebooting the system. */ |
366 | if (!capable(CAP_SYS_BOOT)) | 367 | if (!capable(CAP_SYS_BOOT)) |
@@ -398,7 +399,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, | |||
398 | kernel_halt(); | 399 | kernel_halt(); |
399 | unlock_kernel(); | 400 | unlock_kernel(); |
400 | do_exit(0); | 401 | do_exit(0); |
401 | break; | 402 | panic("cannot halt"); |
402 | 403 | ||
403 | case LINUX_REBOOT_CMD_POWER_OFF: | 404 | case LINUX_REBOOT_CMD_POWER_OFF: |
404 | kernel_power_off(); | 405 | kernel_power_off(); |
@@ -418,29 +419,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, | |||
418 | 419 | ||
419 | #ifdef CONFIG_KEXEC | 420 | #ifdef CONFIG_KEXEC |
420 | case LINUX_REBOOT_CMD_KEXEC: | 421 | case LINUX_REBOOT_CMD_KEXEC: |
421 | { | 422 | ret = kernel_kexec(); |
422 | int ret; | 423 | break; |
423 | ret = kernel_kexec(); | ||
424 | unlock_kernel(); | ||
425 | return ret; | ||
426 | } | ||
427 | #endif | 424 | #endif |
428 | 425 | ||
429 | #ifdef CONFIG_HIBERNATION | 426 | #ifdef CONFIG_HIBERNATION |
430 | case LINUX_REBOOT_CMD_SW_SUSPEND: | 427 | case LINUX_REBOOT_CMD_SW_SUSPEND: |
431 | { | 428 | ret = hibernate(); |
432 | int ret = hibernate(); | 429 | break; |
433 | unlock_kernel(); | ||
434 | return ret; | ||
435 | } | ||
436 | #endif | 430 | #endif |
437 | 431 | ||
438 | default: | 432 | default: |
439 | unlock_kernel(); | 433 | ret = -EINVAL; |
440 | return -EINVAL; | 434 | break; |
441 | } | 435 | } |
442 | unlock_kernel(); | 436 | unlock_kernel(); |
443 | return 0; | 437 | return ret; |
444 | } | 438 | } |
445 | 439 | ||
446 | static void deferred_cad(struct work_struct *dummy) | 440 | static void deferred_cad(struct work_struct *dummy) |
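The reboot handler is refactored so every case funnels through one unlock_kernel()/return path via a ret variable, and the unreachable break after do_exit(0) becomes panic("cannot halt") to document that kernel_halt() must not return. A sketch of the single-exit pattern; OP_A/OP_B and handle_a/handle_b are illustrative, not kernel symbols:

	#include <linux/errno.h>
	#include <linux/smp_lock.h>

	enum { OP_A, OP_B };
	extern long handle_a(void);
	extern long handle_b(void);

	static long do_op(int cmd)
	{
		long ret = 0;

		lock_kernel();
		switch (cmd) {
		case OP_A:
			ret = handle_a();
			break;
		case OP_B:
			ret = handle_b();
			break;
		default:
			ret = -EINVAL;
			break;
		}
		unlock_kernel();	/* every branch funnels through here */
		return ret;
	}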
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8ba457838d95..8203d70928d5 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -903,16 +903,6 @@ static struct ctl_table kern_table[] = { | |||
903 | .proc_handler = &proc_dointvec, | 903 | .proc_handler = &proc_dointvec, |
904 | }, | 904 | }, |
905 | #endif | 905 | #endif |
906 | #ifdef CONFIG_UNEVICTABLE_LRU | ||
907 | { | ||
908 | .ctl_name = CTL_UNNUMBERED, | ||
909 | .procname = "scan_unevictable_pages", | ||
910 | .data = &scan_unevictable_pages, | ||
911 | .maxlen = sizeof(scan_unevictable_pages), | ||
912 | .mode = 0644, | ||
913 | .proc_handler = &scan_unevictable_handler, | ||
914 | }, | ||
915 | #endif | ||
916 | #ifdef CONFIG_SLOW_WORK | 906 | #ifdef CONFIG_SLOW_WORK |
917 | { | 907 | { |
918 | .ctl_name = CTL_UNNUMBERED, | 908 | .ctl_name = CTL_UNNUMBERED, |
@@ -1313,6 +1303,16 @@ static struct ctl_table vm_table[] = { | |||
1313 | .extra2 = &one, | 1303 | .extra2 = &one, |
1314 | }, | 1304 | }, |
1315 | #endif | 1305 | #endif |
1306 | #ifdef CONFIG_UNEVICTABLE_LRU | ||
1307 | { | ||
1308 | .ctl_name = CTL_UNNUMBERED, | ||
1309 | .procname = "scan_unevictable_pages", | ||
1310 | .data = &scan_unevictable_pages, | ||
1311 | .maxlen = sizeof(scan_unevictable_pages), | ||
1312 | .mode = 0644, | ||
1313 | .proc_handler = &scan_unevictable_handler, | ||
1314 | }, | ||
1315 | #endif | ||
1316 | /* | 1316 | /* |
1317 | * NOTE: do not add new entries to this table unless you have read | 1317 | * NOTE: do not add new entries to this table unless you have read |
1318 | * Documentation/sysctl/ctl_unnumbered.txt | 1318 | * Documentation/sysctl/ctl_unnumbered.txt |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index c46c931a7fe7..ecfd7b5187e0 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data) | |||
181 | 181 | ||
182 | resumed = test_and_clear_bit(0, &watchdog_resumed); | 182 | resumed = test_and_clear_bit(0, &watchdog_resumed); |
183 | 183 | ||
184 | wdnow = watchdog->read(); | 184 | wdnow = watchdog->read(watchdog); |
185 | wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); | 185 | wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask); |
186 | watchdog_last = wdnow; | 186 | watchdog_last = wdnow; |
187 | 187 | ||
188 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { | 188 | list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) { |
189 | csnow = cs->read(); | 189 | csnow = cs->read(cs); |
190 | 190 | ||
191 | if (unlikely(resumed)) { | 191 | if (unlikely(resumed)) { |
192 | cs->wd_last = csnow; | 192 | cs->wd_last = csnow; |
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
247 | 247 | ||
248 | list_add(&cs->wd_list, &watchdog_list); | 248 | list_add(&cs->wd_list, &watchdog_list); |
249 | if (!started && watchdog) { | 249 | if (!started && watchdog) { |
250 | watchdog_last = watchdog->read(); | 250 | watchdog_last = watchdog->read(watchdog); |
251 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; | 251 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; |
252 | add_timer_on(&watchdog_timer, | 252 | add_timer_on(&watchdog_timer, |
253 | cpumask_first(cpu_online_mask)); | 253 | cpumask_first(cpu_online_mask)); |
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
268 | cse->flags &= ~CLOCK_SOURCE_WATCHDOG; | 268 | cse->flags &= ~CLOCK_SOURCE_WATCHDOG; |
269 | /* Start if list is not empty */ | 269 | /* Start if list is not empty */ |
270 | if (!list_empty(&watchdog_list)) { | 270 | if (!list_empty(&watchdog_list)) { |
271 | watchdog_last = watchdog->read(); | 271 | watchdog_last = watchdog->read(watchdog); |
272 | watchdog_timer.expires = | 272 | watchdog_timer.expires = |
273 | jiffies + WATCHDOG_INTERVAL; | 273 | jiffies + WATCHDOG_INTERVAL; |
274 | add_timer_on(&watchdog_timer, | 274 | add_timer_on(&watchdog_timer, |
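The read callback now receives the clocksource it belongs to, so one callback can serve several instances. A minimal sketch of a clocksource using the new signature, built around a hypothetical memory-mapped free-running counter:

	#include <linux/clocksource.h>
	#include <linux/io.h>

	static void __iomem *my_counter_base;	/* hypothetical MMIO counter */

	static cycle_t my_counter_read(struct clocksource *cs)
	{
		/* the clocksource pointer lets one callback serve
		 * several instances (e.g. per-device counters) */
		return (cycle_t)readl(my_counter_base);
	}

	static struct clocksource my_clocksource = {
		.name	= "my-counter",
		.rating	= 200,
		.read	= my_counter_read,	/* new-style signature */
		.mask	= CLOCKSOURCE_MASK(32),
	};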
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 06f197560f3b..c3f6c30816e3 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
@@ -50,7 +50,7 @@ | |||
50 | */ | 50 | */ |
51 | #define JIFFIES_SHIFT 8 | 51 | #define JIFFIES_SHIFT 8 |
52 | 52 | ||
53 | static cycle_t jiffies_read(void) | 53 | static cycle_t jiffies_read(struct clocksource *cs) |
54 | { | 54 | { |
55 | return (cycle_t) jiffies; | 55 | return (cycle_t) jiffies; |
56 | } | 56 | } |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 900f1b6598d1..687dff49f6e7 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday); | |||
182 | */ | 182 | */ |
183 | static void change_clocksource(void) | 183 | static void change_clocksource(void) |
184 | { | 184 | { |
185 | struct clocksource *new; | 185 | struct clocksource *new, *old; |
186 | 186 | ||
187 | new = clocksource_get_next(); | 187 | new = clocksource_get_next(); |
188 | 188 | ||
@@ -191,11 +191,16 @@ static void change_clocksource(void) | |||
191 | 191 | ||
192 | clocksource_forward_now(); | 192 | clocksource_forward_now(); |
193 | 193 | ||
194 | new->raw_time = clock->raw_time; | 194 | if (clocksource_enable(new)) |
195 | return; | ||
195 | 196 | ||
197 | new->raw_time = clock->raw_time; | ||
198 | old = clock; | ||
196 | clock = new; | 199 | clock = new; |
200 | clocksource_disable(old); | ||
201 | |||
197 | clock->cycle_last = 0; | 202 | clock->cycle_last = 0; |
198 | clock->cycle_last = clocksource_read(new); | 203 | clock->cycle_last = clocksource_read(clock); |
199 | clock->error = 0; | 204 | clock->error = 0; |
200 | clock->xtime_nsec = 0; | 205 | clock->xtime_nsec = 0; |
201 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 206 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); |
@@ -292,6 +297,7 @@ void __init timekeeping_init(void) | |||
292 | ntp_init(); | 297 | ntp_init(); |
293 | 298 | ||
294 | clock = clocksource_get_next(); | 299 | clock = clocksource_get_next(); |
300 | clocksource_enable(clock); | ||
295 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); | 301 | clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); |
296 | clock->cycle_last = clocksource_read(clock); | 302 | clock->cycle_last = clocksource_read(clock); |
297 | 303 | ||
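change_clocksource() now brackets the swap with clocksource_enable()/clocksource_disable(): a candidate that fails to power up is rejected while the old clock stays in service, and the old source is disabled only after the global pointer has moved. The ordering, condensed from the hunk above:

	if (clocksource_enable(new))	/* 1. power up the candidate... */
		return;			/*    ...or keep the old clock */
	new->raw_time = clock->raw_time;
	old = clock;
	clock = new;			/* 2. publish the new source */
	clocksource_disable(old);	/* 3. only then power down the old */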
diff --git a/kernel/timer.c b/kernel/timer.c index 672ca25fbc43..fed53be44fd9 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -532,10 +532,13 @@ static void __init_timer(struct timer_list *timer, | |||
532 | } | 532 | } |
533 | 533 | ||
534 | /** | 534 | /** |
535 | * init_timer - initialize a timer. | 535 | * init_timer_key - initialize a timer |
536 | * @timer: the timer to be initialized | 536 | * @timer: the timer to be initialized |
537 | * @name: name of the timer | ||
538 | * @key: lockdep class key of the fake lock used for tracking timer | ||
539 | * sync lock dependencies | ||
537 | * | 540 | * |
538 | * init_timer() must be done to a timer prior to calling *any* of the | 541 | * init_timer_key() must be done to a timer prior to calling *any* of the |
539 | * other timer functions. | 542 | * other timer functions. |
540 | */ | 543 | */ |
541 | void init_timer_key(struct timer_list *timer, | 544 | void init_timer_key(struct timer_list *timer, |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2246141bda4d..417d1985e299 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -312,7 +312,7 @@ config KMEMTRACE | |||
312 | and profile kernel code. | 312 | and profile kernel code. |
313 | 313 | ||
314 | This requires a userspace application to use. See | 314 | This requires a userspace application to use. See |
315 | Documentation/vm/kmemtrace.txt for more information. | 315 | Documentation/trace/kmemtrace.txt for more information. |
316 | 316 | ||
317 | Saying Y will make the kernel somewhat larger and slower. However, | 317 | Saying Y will make the kernel somewhat larger and slower. However, |
318 | if you disable kmemtrace at run-time or boot-time, the performance | 318 | if you disable kmemtrace at run-time or boot-time, the performance |
@@ -403,7 +403,7 @@ config MMIOTRACE | |||
403 | implementation and works via page faults. Tracing is disabled by | 403 | implementation and works via page faults. Tracing is disabled by |
404 | default and can be enabled at run-time. | 404 | default and can be enabled at run-time. |
405 | 405 | ||
406 | See Documentation/tracers/mmiotrace.txt. | 406 | See Documentation/trace/mmiotrace.txt. |
407 | If you are not helping to develop drivers, say N. | 407 | If you are not helping to develop drivers, say N. |
408 | 408 | ||
409 | config MMIOTRACE_TEST | 409 | config MMIOTRACE_TEST |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index b32ff446c3fb..921ef5d1f0ba 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -1377,12 +1377,12 @@ static int blk_trace_str2mask(const char *str) | |||
1377 | { | 1377 | { |
1378 | int i; | 1378 | int i; |
1379 | int mask = 0; | 1379 | int mask = 0; |
1380 | char *s, *token; | 1380 | char *buf, *s, *token; |
1381 | 1381 | ||
1382 | s = kstrdup(str, GFP_KERNEL); | 1382 | buf = kstrdup(str, GFP_KERNEL); |
1383 | if (s == NULL) | 1383 | if (buf == NULL) |
1384 | return -ENOMEM; | 1384 | return -ENOMEM; |
1385 | s = strstrip(s); | 1385 | s = strstrip(buf); |
1386 | 1386 | ||
1387 | while (1) { | 1387 | while (1) { |
1388 | token = strsep(&s, ","); | 1388 | token = strsep(&s, ","); |
@@ -1403,7 +1403,7 @@ static int blk_trace_str2mask(const char *str) | |||
1403 | break; | 1403 | break; |
1404 | } | 1404 | } |
1405 | } | 1405 | } |
1406 | kfree(s); | 1406 | kfree(buf); |
1407 | 1407 | ||
1408 | return mask; | 1408 | return mask; |
1409 | } | 1409 | } |
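strsep() advances its argument and strstrip() may return a pointer past leading whitespace, so the kfree() must use the pointer kstrdup() returned; the buf/s split restores exactly that. A small sketch of the safe pattern:

	#include <linux/slab.h>
	#include <linux/string.h>

	static int parse_csv(const char *str)
	{
		char *buf, *s, *token;

		buf = kstrdup(str, GFP_KERNEL);	/* keep for kfree() */
		if (!buf)
			return -ENOMEM;

		s = strstrip(buf);		/* s may not equal buf */
		while ((token = strsep(&s, ",")) != NULL) {
			/* ... handle token; strsep() mangles s ... */
		}

		kfree(buf);			/* free the original pointer */
		return 0;
	}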
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9d28476a9851..1ce5dc6372b8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3277,19 +3277,13 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) | |||
3277 | 3277 | ||
3278 | info->tr = &global_trace; | 3278 | info->tr = &global_trace; |
3279 | info->cpu = cpu; | 3279 | info->cpu = cpu; |
3280 | info->spare = ring_buffer_alloc_read_page(info->tr->buffer); | 3280 | info->spare = NULL; |
3281 | /* Force reading ring buffer for first read */ | 3281 | /* Force reading ring buffer for first read */ |
3282 | info->read = (unsigned int)-1; | 3282 | info->read = (unsigned int)-1; |
3283 | if (!info->spare) | ||
3284 | goto out; | ||
3285 | 3283 | ||
3286 | filp->private_data = info; | 3284 | filp->private_data = info; |
3287 | 3285 | ||
3288 | return 0; | 3286 | return nonseekable_open(inode, filp); |
3289 | |||
3290 | out: | ||
3291 | kfree(info); | ||
3292 | return -ENOMEM; | ||
3293 | } | 3287 | } |
3294 | 3288 | ||
3295 | static ssize_t | 3289 | static ssize_t |
@@ -3304,6 +3298,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
3304 | if (!count) | 3298 | if (!count) |
3305 | return 0; | 3299 | return 0; |
3306 | 3300 | ||
3301 | if (!info->spare) | ||
3302 | info->spare = ring_buffer_alloc_read_page(info->tr->buffer); | ||
3303 | if (!info->spare) | ||
3304 | return -ENOMEM; | ||
3305 | |||
3307 | /* Do we have previous read data to read? */ | 3306 | /* Do we have previous read data to read? */ |
3308 | if (info->read < PAGE_SIZE) | 3307 | if (info->read < PAGE_SIZE) |
3309 | goto read; | 3308 | goto read; |
@@ -3342,7 +3341,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) | |||
3342 | { | 3341 | { |
3343 | struct ftrace_buffer_info *info = file->private_data; | 3342 | struct ftrace_buffer_info *info = file->private_data; |
3344 | 3343 | ||
3345 | ring_buffer_free_read_page(info->tr->buffer, info->spare); | 3344 | if (info->spare) |
3345 | ring_buffer_free_read_page(info->tr->buffer, info->spare); | ||
3346 | kfree(info); | 3346 | kfree(info); |
3347 | 3347 | ||
3348 | return 0; | 3348 | return 0; |
@@ -3428,14 +3428,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3428 | int size, i; | 3428 | int size, i; |
3429 | size_t ret; | 3429 | size_t ret; |
3430 | 3430 | ||
3431 | /* | 3431 | if (*ppos & (PAGE_SIZE - 1)) { |
3432 | * We can't seek on a buffer input | 3432 | WARN_ONCE(1, "Ftrace: previous read must page-align\n"); |
3433 | */ | 3433 | return -EINVAL; |
3434 | if (unlikely(*ppos)) | 3434 | } |
3435 | return -ESPIPE; | ||
3436 | 3435 | ||
3436 | if (len & (PAGE_SIZE - 1)) { | ||
3437 | WARN_ONCE(1, "Ftrace: splice_read should page-align\n"); | ||
3438 | if (len < PAGE_SIZE) | ||
3439 | return -EINVAL; | ||
3440 | len &= PAGE_MASK; | ||
3441 | } | ||
3437 | 3442 | ||
3438 | for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { | 3443 | for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) { |
3439 | struct page *page; | 3444 | struct page *page; |
3440 | int r; | 3445 | int r; |
3441 | 3446 | ||
@@ -3474,6 +3479,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
3474 | spd.partial[i].offset = 0; | 3479 | spd.partial[i].offset = 0; |
3475 | spd.partial[i].private = (unsigned long)ref; | 3480 | spd.partial[i].private = (unsigned long)ref; |
3476 | spd.nr_pages++; | 3481 | spd.nr_pages++; |
3482 | *ppos += PAGE_SIZE; | ||
3477 | } | 3483 | } |
3478 | 3484 | ||
3479 | spd.nr_pages = i; | 3485 | spd.nr_pages = i; |
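Three related changes here: the spare page moves from open time to first read (so open can no longer fail with -ENOMEM and becomes nonseekable), release frees the page only if it exists, and splice_read now insists on page-aligned offsets and lengths, advancing *ppos by PAGE_SIZE per page. A sketch of the lazy-allocation half, reusing the real ring-buffer calls but a hypothetical buf_info type:

	#include <linux/ring_buffer.h>
	#include <linux/slab.h>

	struct buf_info {
		struct ring_buffer *buffer;
		void *spare;			/* NULL until first read */
	};

	static ssize_t buf_read_prepare(struct buf_info *info)
	{
		if (!info->spare)	/* allocate on demand, not at open */
			info->spare = ring_buffer_alloc_read_page(info->buffer);
		if (!info->spare)
			return -ENOMEM;
		return 0;
	}

	static void buf_release(struct buf_info *info)
	{
		if (info->spare)	/* the reader may never have read */
			ring_buffer_free_read_page(info->buffer, info->spare);
		kfree(info);
	}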
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index ad8c22efff41..8333715e4066 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -155,6 +155,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, | |||
155 | return TRACE_TYPE_HANDLED; | 155 | return TRACE_TYPE_HANDLED; |
156 | } | 156 | } |
157 | 157 | ||
158 | static void branch_print_header(struct seq_file *s) | ||
159 | { | ||
160 | seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" | ||
161 | " FUNC:FILE:LINE\n"); | ||
162 | seq_puts(s, "# | | | | | " | ||
163 | " |\n"); | ||
164 | } | ||
158 | 165 | ||
159 | static struct trace_event trace_branch_event = { | 166 | static struct trace_event trace_branch_event = { |
160 | .type = TRACE_BRANCH, | 167 | .type = TRACE_BRANCH, |
@@ -169,6 +176,7 @@ static struct tracer branch_trace __read_mostly = | |||
169 | #ifdef CONFIG_FTRACE_SELFTEST | 176 | #ifdef CONFIG_FTRACE_SELFTEST |
170 | .selftest = trace_selftest_startup_branch, | 177 | .selftest = trace_selftest_startup_branch, |
171 | #endif /* CONFIG_FTRACE_SELFTEST */ | 178 | #endif /* CONFIG_FTRACE_SELFTEST */ |
179 | .print_header = branch_print_header, | ||
172 | }; | 180 | }; |
173 | 181 | ||
174 | __init static int init_branch_tracer(void) | 182 | __init static int init_branch_tracer(void) |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 64ec4d278ffb..576f4fa2af0d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -503,6 +503,7 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
503 | 503 | ||
504 | if (copy_from_user(&buf, ubuf, cnt)) | 504 | if (copy_from_user(&buf, ubuf, cnt)) |
505 | return -EFAULT; | 505 | return -EFAULT; |
506 | buf[cnt] = '\0'; | ||
506 | 507 | ||
507 | pred = kzalloc(sizeof(*pred), GFP_KERNEL); | 508 | pred = kzalloc(sizeof(*pred), GFP_KERNEL); |
508 | if (!pred) | 509 | if (!pred) |
@@ -520,9 +521,10 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
520 | return cnt; | 521 | return cnt; |
521 | } | 522 | } |
522 | 523 | ||
523 | if (filter_add_pred(call, pred)) { | 524 | err = filter_add_pred(call, pred); |
525 | if (err < 0) { | ||
524 | filter_free_pred(pred); | 526 | filter_free_pred(pred); |
525 | return -EINVAL; | 527 | return err; |
526 | } | 528 | } |
527 | 529 | ||
528 | *ppos += cnt; | 530 | *ppos += cnt; |
@@ -569,6 +571,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
569 | 571 | ||
570 | if (copy_from_user(&buf, ubuf, cnt)) | 572 | if (copy_from_user(&buf, ubuf, cnt)) |
571 | return -EFAULT; | 573 | return -EFAULT; |
574 | buf[cnt] = '\0'; | ||
572 | 575 | ||
573 | pred = kzalloc(sizeof(*pred), GFP_KERNEL); | 576 | pred = kzalloc(sizeof(*pred), GFP_KERNEL); |
574 | if (!pred) | 577 | if (!pred) |
@@ -586,10 +589,11 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
586 | return cnt; | 589 | return cnt; |
587 | } | 590 | } |
588 | 591 | ||
589 | if (filter_add_subsystem_pred(system, pred)) { | 592 | err = filter_add_subsystem_pred(system, pred); |
593 | if (err < 0) { | ||
590 | filter_free_subsystem_preds(system); | 594 | filter_free_subsystem_preds(system); |
591 | filter_free_pred(pred); | 595 | filter_free_pred(pred); |
592 | return -EINVAL; | 596 | return err; |
593 | } | 597 | } |
594 | 598 | ||
595 | *ppos += cnt; | 599 | *ppos += cnt; |
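copy_from_user() does not terminate the buffer, so the new buf[cnt] = '\0' is required before the string parsers run; the write handlers also start propagating the real errno instead of flattening everything to -EINVAL. The termination idiom, as a sketch with an illustrative buffer size:

	#include <linux/uaccess.h>

	static ssize_t copy_user_string(const char __user *ubuf, size_t cnt,
					char *buf, size_t bufsz)
	{
		if (cnt >= bufsz)
			return -EINVAL;
		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = '\0';  /* copy_from_user() never NUL-terminates */
		return cnt;
	}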
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 026be412f356..e03cbf1e38f3 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -215,7 +215,7 @@ static int __filter_add_pred(struct ftrace_event_call *call, | |||
215 | } | 215 | } |
216 | } | 216 | } |
217 | 217 | ||
218 | return -ENOMEM; | 218 | return -ENOSPC; |
219 | } | 219 | } |
220 | 220 | ||
221 | static int is_string_field(const char *type) | 221 | static int is_string_field(const char *type) |
@@ -319,7 +319,7 @@ int filter_add_subsystem_pred(struct event_subsystem *system, | |||
319 | } | 319 | } |
320 | 320 | ||
321 | if (i == MAX_FILTER_PRED) | 321 | if (i == MAX_FILTER_PRED) |
322 | return -EINVAL; | 322 | return -ENOSPC; |
323 | 323 | ||
324 | events_for_each(call) { | 324 | events_for_each(call) { |
325 | int err; | 325 | int err; |
@@ -410,16 +410,22 @@ int filter_parse(char **pbuf, struct filter_pred *pred) | |||
410 | } | 410 | } |
411 | } | 411 | } |
412 | 412 | ||
413 | if (!val_str) { | ||
414 | pred->field_name = NULL; | ||
415 | return -EINVAL; | ||
416 | } | ||
417 | |||
413 | pred->field_name = kstrdup(pred->field_name, GFP_KERNEL); | 418 | pred->field_name = kstrdup(pred->field_name, GFP_KERNEL); |
414 | if (!pred->field_name) | 419 | if (!pred->field_name) |
415 | return -ENOMEM; | 420 | return -ENOMEM; |
416 | 421 | ||
417 | pred->val = simple_strtoull(val_str, &tmp, 10); | 422 | pred->val = simple_strtoull(val_str, &tmp, 0); |
418 | if (tmp == val_str) { | 423 | if (tmp == val_str) { |
419 | pred->str_val = kstrdup(val_str, GFP_KERNEL); | 424 | pred->str_val = kstrdup(val_str, GFP_KERNEL); |
420 | if (!pred->str_val) | 425 | if (!pred->str_val) |
421 | return -ENOMEM; | 426 | return -ENOMEM; |
422 | } | 427 | } else if (*tmp != '\0') |
428 | return -EINVAL; | ||
423 | 429 | ||
424 | return 0; | 430 | return 0; |
425 | } | 431 | } |
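Base 0 lets simple_strtoull() accept decimal, octal, and 0x-prefixed hex, and the new *tmp check rejects operands with trailing garbage instead of silently treating them as strings. A sketch of the number-or-string decision; classify() is a hypothetical name:

	#include <linux/kernel.h>

	static int classify(const char *tok, unsigned long long *val,
			    bool *is_str)
	{
		char *end;

		*val = simple_strtoull(tok, &end, 0); /* 42, 052, 0x2a */
		if (end == tok) {
			*is_str = true;	  /* nothing numeric consumed */
			return 0;
		}
		if (*end != '\0')
			return -EINVAL;	  /* e.g. "12abc": reject */
		*is_str = false;
		return 0;
	}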
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h index 30743f7d4110..d363c6672c6c 100644 --- a/kernel/trace/trace_events_stage_2.h +++ b/kernel/trace/trace_events_stage_2.h | |||
@@ -105,10 +105,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | |||
105 | return 0; | 105 | return 0; |
106 | 106 | ||
107 | #undef __entry | 107 | #undef __entry |
108 | #define __entry "REC" | 108 | #define __entry REC |
109 | 109 | ||
110 | #undef TP_printk | 110 | #undef TP_printk |
111 | #define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args | 111 | #define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) |
112 | 112 | ||
113 | #undef TP_fast_assign | 113 | #undef TP_fast_assign |
114 | #define TP_fast_assign(args...) args | 114 | #define TP_fast_assign(args...) args |
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index bae791ebcc51..118439709fb7 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c | |||
@@ -186,6 +186,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter) | |||
186 | return TRACE_TYPE_UNHANDLED; | 186 | return TRACE_TYPE_UNHANDLED; |
187 | } | 187 | } |
188 | 188 | ||
189 | static void power_print_header(struct seq_file *s) | ||
190 | { | ||
191 | seq_puts(s, "# TIMESTAMP STATE EVENT\n"); | ||
192 | seq_puts(s, "# | | |\n"); | ||
193 | } | ||
194 | |||
189 | static struct tracer power_tracer __read_mostly = | 195 | static struct tracer power_tracer __read_mostly = |
190 | { | 196 | { |
191 | .name = "power", | 197 | .name = "power", |
@@ -194,6 +200,7 @@ static struct tracer power_tracer __read_mostly = | |||
194 | .stop = stop_power_trace, | 200 | .stop = stop_power_trace, |
195 | .reset = power_trace_reset, | 201 | .reset = power_trace_reset, |
196 | .print_line = power_print_line, | 202 | .print_line = power_print_line, |
203 | .print_header = power_print_header, | ||
197 | }; | 204 | }; |
198 | 205 | ||
199 | static int init_power_trace(void) | 206 | static int init_power_trace(void) |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index a2a3af29c943..5e579645ac86 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -1,5 +1,5 @@ | |||
1 | #include <trace/syscall.h> | ||
1 | #include <linux/kernel.h> | 2 | #include <linux/kernel.h> |
2 | #include <linux/ftrace.h> | ||
3 | #include <asm/syscall.h> | 3 | #include <asm/syscall.h> |
4 | 4 | ||
5 | #include "trace_output.h" | 5 | #include "trace_output.h" |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b6b966ce1451..f71fb2a08950 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -966,20 +966,20 @@ undo: | |||
966 | } | 966 | } |
967 | 967 | ||
968 | #ifdef CONFIG_SMP | 968 | #ifdef CONFIG_SMP |
969 | static struct workqueue_struct *work_on_cpu_wq __read_mostly; | ||
970 | 969 | ||
971 | struct work_for_cpu { | 970 | struct work_for_cpu { |
972 | struct work_struct work; | 971 | struct completion completion; |
973 | long (*fn)(void *); | 972 | long (*fn)(void *); |
974 | void *arg; | 973 | void *arg; |
975 | long ret; | 974 | long ret; |
976 | }; | 975 | }; |
977 | 976 | ||
978 | static void do_work_for_cpu(struct work_struct *w) | 977 | static int do_work_for_cpu(void *_wfc) |
979 | { | 978 | { |
980 | struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work); | 979 | struct work_for_cpu *wfc = _wfc; |
981 | |||
982 | wfc->ret = wfc->fn(wfc->arg); | 980 | wfc->ret = wfc->fn(wfc->arg); |
981 | complete(&wfc->completion); | ||
982 | return 0; | ||
983 | } | 983 | } |
984 | 984 | ||
985 | /** | 985 | /** |
@@ -990,17 +990,23 @@ static void do_work_for_cpu(struct work_struct *w) | |||
990 | * | 990 | * |
991 | * This will return the value @fn returns. | 991 | * This will return the value @fn returns. |
992 | * It is up to the caller to ensure that the cpu doesn't go offline. | 992 | * It is up to the caller to ensure that the cpu doesn't go offline. |
993 | * The caller must not hold any locks which would prevent @fn from completing. | ||
993 | */ | 994 | */ |
994 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | 995 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) |
995 | { | 996 | { |
996 | struct work_for_cpu wfc; | 997 | struct task_struct *sub_thread; |
997 | 998 | struct work_for_cpu wfc = { | |
998 | INIT_WORK(&wfc.work, do_work_for_cpu); | 999 | .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), |
999 | wfc.fn = fn; | 1000 | .fn = fn, |
1000 | wfc.arg = arg; | 1001 | .arg = arg, |
1001 | queue_work_on(cpu, work_on_cpu_wq, &wfc.work); | 1002 | }; |
1002 | flush_work(&wfc.work); | 1003 | |
1003 | 1004 | sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); | |
1005 | if (IS_ERR(sub_thread)) | ||
1006 | return PTR_ERR(sub_thread); | ||
1007 | kthread_bind(sub_thread, cpu); | ||
1008 | wake_up_process(sub_thread); | ||
1009 | wait_for_completion(&wfc.completion); | ||
1004 | return wfc.ret; | 1010 | return wfc.ret; |
1005 | } | 1011 | } |
1006 | EXPORT_SYMBOL_GPL(work_on_cpu); | 1012 | EXPORT_SYMBOL_GPL(work_on_cpu); |
@@ -1016,8 +1022,4 @@ void __init init_workqueues(void) | |||
1016 | hotcpu_notifier(workqueue_cpu_callback, 0); | 1022 | hotcpu_notifier(workqueue_cpu_callback, 0); |
1017 | keventd_wq = create_workqueue("events"); | 1023 | keventd_wq = create_workqueue("events"); |
1018 | BUG_ON(!keventd_wq); | 1024 | BUG_ON(!keventd_wq); |
1019 | #ifdef CONFIG_SMP | ||
1020 | work_on_cpu_wq = create_workqueue("work_on_cpu"); | ||
1021 | BUG_ON(!work_on_cpu_wq); | ||
1022 | #endif | ||
1023 | } | 1025 | } |
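work_on_cpu() now spawns a dedicated kthread bound to the target CPU instead of queueing onto the shared work_on_cpu_wq, which removes that workqueue (and its lock-dependency chain, hence the new comment about caller locks). The mechanism in sketch form, with generic names standing in for the patched symbols:

	#include <linux/kthread.h>
	#include <linux/completion.h>

	struct cpu_call {
		struct completion done;
		long (*fn)(void *);
		void *arg;
		long ret;
	};

	static int cpu_call_thread(void *data)
	{
		struct cpu_call *c = data;

		c->ret = c->fn(c->arg);	/* runs bound to the chosen CPU */
		complete(&c->done);
		return 0;
	}

	static long run_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
	{
		struct task_struct *t;
		struct cpu_call c = {
			.done = COMPLETION_INITIALIZER_ONSTACK(c.done),
			.fn = fn,
			.arg = arg,
		};

		t = kthread_create(cpu_call_thread, &c, "cpu_call/%u", cpu);
		if (IS_ERR(t))
			return PTR_ERR(t);
		kthread_bind(t, cpu);	/* pin before the first wakeup */
		wake_up_process(t);
		wait_for_completion(&c.done);
		return c.ret;
	}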