Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_tree.c                   3
-rw-r--r--  kernel/auditfilter.c                  4
-rw-r--r--  kernel/fork.c                        13
-rw-r--r--  kernel/futex.c                        7
-rw-r--r--  kernel/irq/handle.c                   2
-rw-r--r--  kernel/irq/manage.c                   5
-rw-r--r--  kernel/irq/numa_migrate.c             1
-rw-r--r--  kernel/kprobes.c                     31
-rw-r--r--  kernel/kthread.c                     26
-rw-r--r--  kernel/lockdep.c                     22
-rw-r--r--  kernel/module.c                       3
-rw-r--r--  kernel/mutex.c                        3
-rw-r--r--  kernel/panic.c                       25
-rw-r--r--  kernel/posix-cpu-timers.c            10
-rw-r--r--  kernel/power/disk.c                  45
-rw-r--r--  kernel/power/main.c                  24
-rw-r--r--  kernel/power/swap.c                   2
-rw-r--r--  kernel/power/user.c                   9
-rw-r--r--  kernel/ptrace.c                      27
-rw-r--r--  kernel/rcupdate.c                    18
-rw-r--r--  kernel/rcutree.c                     19
-rw-r--r--  kernel/rcutree_trace.c               14
-rw-r--r--  kernel/resource.c                    46
-rw-r--r--  kernel/sched.c                       12
-rw-r--r--  kernel/slow-work.c                    4
-rw-r--r--  kernel/softirq.c                      4
-rw-r--r--  kernel/sys.c                         24
-rw-r--r--  kernel/sysctl.c                      25
-rw-r--r--  kernel/time/clocksource.c             8
-rw-r--r--  kernel/time/jiffies.c                 2
-rw-r--r--  kernel/time/tick-common.c            12
-rw-r--r--  kernel/time/timekeeping.c            12
-rw-r--r--  kernel/timer.c                        7
-rw-r--r--  kernel/trace/Kconfig                  4
-rw-r--r--  kernel/trace/blktrace.c              10
-rw-r--r--  kernel/trace/trace.c                 37
-rw-r--r--  kernel/trace/trace_branch.c           8
-rw-r--r--  kernel/trace/trace_events.c          12
-rw-r--r--  kernel/trace/trace_events_filter.c   14
-rw-r--r--  kernel/trace/trace_events_stage_2.h   4
-rw-r--r--  kernel/trace/trace_power.c            7
-rw-r--r--  kernel/trace/trace_syscalls.c         2
-rw-r--r--  kernel/workqueue.c                   36
43 files changed, 343 insertions, 260 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab9525568..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
 		dentry = dget(path.dentry);
 		path_put(&path);
 
-		if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-			follow_up(&mnt, &dentry);
-
 		list_add_tail(&list, &tagged->mnt_list);
 
 		mutex_lock(&audit_filter_mutex);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a6fe71fd5d1b..713098ee5a02 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
 
 	if (audit_enabled) {
 		struct audit_buffer *ab;
-		ab = audit_log_start(NULL, GFP_KERNEL,
+		ab = audit_log_start(NULL, GFP_NOFS,
 			AUDIT_CONFIG_CHANGE);
 		audit_log_format(ab, "auid=%u ses=%u",
 			audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 		e = container_of(r, struct audit_entry, rule);
 		if (audit_enabled) {
 			struct audit_buffer *ab;
-			ab = audit_log_start(NULL, GFP_KERNEL,
+			ab = audit_log_start(NULL, GFP_NOFS,
 				AUDIT_CONFIG_CHANGE);
 			audit_log_format(ab, "auid=%u ses=%u",
 				audit_get_loginuid(current),
diff --git a/kernel/fork.c b/kernel/fork.c
index 989c7c202b3d..b9e2edd00726 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -800,6 +800,12 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 	sig->cputime_expires.virt_exp = cputime_zero;
 	sig->cputime_expires.sched_exp = 0;
 
+	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+		sig->cputime_expires.prof_exp =
+			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+		sig->cputimer.running = 1;
+	}
+
 	/* The timer lists. */
 	INIT_LIST_HEAD(&sig->cpu_timers[0]);
 	INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -815,11 +821,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 		atomic_inc(&current->signal->live);
 		return 0;
 	}
-	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
-
-	if (sig)
-		posix_cpu_timers_init_group(sig);
 
+	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
 	if (!sig)
 		return -ENOMEM;
@@ -859,6 +862,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
 
+	posix_cpu_timers_init_group(sig);
+
 	acct_init_pacct(&sig->pacct);
 
 	tty_audit_fork(sig);
diff --git a/kernel/futex.c b/kernel/futex.c
index 6b50a024bca2..eef8cd26b5e5 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -883,7 +883,12 @@ retry_private:
 out_unlock:
 	double_unlock_hb(hb1, hb2);
 
-	/* drop_futex_key_refs() must be called outside the spinlocks. */
+	/*
+	 * drop_futex_key_refs() must be called outside the spinlocks. During
+	 * the requeue we moved futex_q's from the hash bucket at key1 to the
+	 * one at key2 and updated their key pointer. We no longer need to
+	 * hold the references to key1.
+	 */
 	while (--drop_count >= 0)
 		drop_futex_key_refs(&key1);
 
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index d82142be8dd2..26e08754744f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -363,8 +363,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd4cd2f..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT)
 		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 243d6121e50e..44bbdcbaf8d2 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -54,6 +54,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
 {
 	free_kstat_irqs(old_desc, desc);
+	free_desc_masks(old_desc, desc);
 	arch_free_chip_data(old_desc, desc);
 }
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a5e74ddee0e2..c0fa54b276d9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -319,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 	return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_arm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_disarm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -538,7 +554,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
 		if (!kprobes_all_disarmed)
 			/* Arm the breakpoint again. */
-			arch_arm_kprobe(ap);
+			arm_kprobe(ap);
 	}
 	return 0;
 }
@@ -789,11 +805,8 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * enabled and not gone - otherwise, the breakpoint would
 		 * already have been removed. We save on flushing icache.
 		 */
-		if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
-			mutex_lock(&text_mutex);
-			arch_disarm_kprobe(p);
-			mutex_unlock(&text_mutex);
-		}
+		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+			disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -810,7 +823,7 @@ noclean:
 		if (!kprobe_disabled(old_p)) {
 			try_to_disable_aggr_kprobe(old_p);
 			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-				arch_disarm_kprobe(old_p);
+				disarm_kprobe(old_p);
 		}
 	}
 	return 0;
@@ -1364,7 +1377,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
 	try_to_disable_aggr_kprobe(p);
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_disarm_kprobe(p);
+		disarm_kprobe(p);
 out:
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -1393,7 +1406,7 @@ int __kprobes enable_kprobe(struct kprobe *kp)
 	}
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_arm_kprobe(p);
+		arm_kprobe(p);
 
 	p->flags &= ~KPROBE_FLAG_DISABLED;
 	if (p != kp)
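
The kprobes hunks above replace every open-coded lock/patch/unlock sequence with the new arm_kprobe()/disarm_kprobe() helpers, so the text_mutex rule lives in one place instead of at each call site. A minimal user-space sketch of that wrap-the-lock-in-a-helper pattern, with pthreads standing in for text_mutex and all names hypothetical:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t text_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int breakpoint_armed;            /* stands in for the patched text */

    static void arch_arm(void)    { breakpoint_armed = 1; }
    static void arch_disarm(void) { breakpoint_armed = 0; }

    /* Arm with text_mutex held, as arm_kprobe() does above. */
    static void arm(void)
    {
            pthread_mutex_lock(&text_mutex);
            arch_arm();
            pthread_mutex_unlock(&text_mutex);
    }

    /* Disarm with text_mutex held. */
    static void disarm(void)
    {
            pthread_mutex_lock(&text_mutex);
            arch_disarm();
            pthread_mutex_unlock(&text_mutex);
    }

    int main(void)
    {
            arm();
            printf("armed=%d\n", breakpoint_armed);
            disarm();
            printf("armed=%d\n", breakpoint_armed);
            return 0;
    }
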
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 84bbadd4d021..4ebaf8519abf 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -76,6 +76,7 @@ static int kthread(void *_create)
 
 	/* OK, tell user we're spawned, wait for stop or wakeup */
 	__set_current_state(TASK_UNINTERRUPTIBLE);
+	create->result = current;
 	complete(&create->started);
 	schedule();
 
@@ -96,22 +97,10 @@ static void create_kthread(struct kthread_create_info *create)
 
 	/* We want our own signal handler (we take no signals by default). */
 	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
-	if (pid < 0) {
+	if (pid < 0)
 		create->result = ERR_PTR(pid);
-	} else {
-		struct sched_param param = { .sched_priority = 0 };
+	else
 		wait_for_completion(&create->started);
-		read_lock(&tasklist_lock);
-		create->result = find_task_by_pid_ns(pid, &init_pid_ns);
-		read_unlock(&tasklist_lock);
-		/*
-		 * root may have changed our (kthreadd's) priority or CPU mask.
-		 * The kernel thread should not inherit these properties.
-		 */
-		sched_setscheduler(create->result, SCHED_NORMAL, &param);
-		set_user_nice(create->result, KTHREAD_NICE_LEVEL);
-		set_cpus_allowed_ptr(create->result, cpu_all_mask);
-	}
 	complete(&create->done);
 }
 
@@ -154,11 +143,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
+		struct sched_param param = { .sched_priority = 0 };
 		va_list args;
+
 		va_start(args, namefmt);
 		vsnprintf(create.result->comm, sizeof(create.result->comm),
 			  namefmt, args);
 		va_end(args);
+		/*
+		 * root may have changed our (kthreadd's) priority or CPU mask.
+		 * The kernel thread should not inherit these properties.
+		 */
+		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
+		set_user_nice(create.result, KTHREAD_NICE_LEVEL);
+		set_cpus_allowed_ptr(create.result, cpu_all_mask);
 	}
 	return create.result;
 }
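
Taken together, the kthread.c hunks close a lookup race and move policy out of kthreadd: the new thread now publishes its own task pointer into create->result before completing ->started, so create_kthread() no longer has to find the task by PID under tasklist_lock, and the priority/affinity reset runs later in kthread_create(), in the caller's context. A minimal pthread model of the publish-then-signal handshake (hypothetical names, not the kernel API):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    struct create_info {
            pthread_t result;       /* filled in by the new thread itself */
            sem_t started;
    };

    static void *worker(void *arg)
    {
            struct create_info *create = arg;

            create->result = pthread_self();    /* publish before signalling */
            sem_post(&create->started);
            /* ... thread body ... */
            return NULL;
    }

    int main(void)
    {
            struct create_info create;
            pthread_t tid;

            sem_init(&create.started, 0, 0);
            pthread_create(&tid, NULL, worker, &create);
            sem_wait(&create.started);
            /* create.result is now valid; the creator can retune it here */
            printf("handles match: %d\n", pthread_equal(create.result, tid));
            pthread_join(tid, NULL);
            return 0;
    }
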
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b0f011866969..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
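
The lockdep_init_map() reordering initializes every field of the map (class_cache, cpu, name) before any validation bail-out, so a map that fails the !name or !key checks is still in a defined state, and the debug_locks test moves to the end so an early return no longer skips initialization. A tiny sketch of that initialize-before-validate ordering, using hypothetical user-space types:

    #include <stdio.h>
    #include <stddef.h>

    struct map {
            const char *name;
            void *class_cache;
    };

    static void map_init(struct map *m, const char *name)
    {
            m->class_cache = NULL;          /* always initialized first */

            if (!name) {
                    m->name = "NULL";       /* rejected, but still printable */
                    return;
            }
            m->name = name;
    }

    int main(void)
    {
            struct map m;

            map_init(&m, NULL);
            printf("name=%s cache=%p\n", m.name, m.class_cache);
            return 0;
    }
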
diff --git a/kernel/module.c b/kernel/module.c
index 05f014efa32c..e797812a4d95 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2388,6 +2388,9 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_LIVE, mod);
 
+	/* We need to finish all async code before the module init sequence is done */
+	async_synchronize_full();
+
 	mutex_lock(&module_mutex);
 	/* Drop initial reference. */
 	module_put(mod);
diff --git a/kernel/mutex.c b/kernel/mutex.c
index e1fb73510409..6ca5fe96e393 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,7 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	preempt_disable();
 	mutex_acquire(&lock->dep_map, subclass, 0, ip);
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
+    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
 	/*
 	 * Optimistic spinning.
 	 *
diff --git a/kernel/panic.c b/kernel/panic.c
index 3fd8c5bf8b39..874ecf1307ae 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -213,8 +213,16 @@ unsigned long get_taint(void)
 
 void add_taint(unsigned flag)
 {
-	/* can't trust the integrity of the kernel anymore: */
-	debug_locks = 0;
+	/*
+	 * Can't trust the integrity of the kernel anymore.
+	 * We don't call debug_locks_off() directly because the issue
+	 * is not necessarily serious enough to set oops_in_progress to 1.
+	 * Also we want to keep up lockdep for the staging development and
+	 * post-warning cases.
+	 */
+	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
+
 	set_bit(flag, &tainted_mask);
 }
 EXPORT_SYMBOL(add_taint);
@@ -332,7 +340,7 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
 {
 	va_list args;
 	char function[KSYM_SYMBOL_LEN];
@@ -348,7 +356,7 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
 	if (board)
 		printk(KERN_WARNING "Hardware name: %s\n", board);
 
-	if (fmt) {
+	if (*fmt) {
 		va_start(args, fmt);
 		vprintk(fmt, args);
 		va_end(args);
@@ -359,7 +367,14 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
 	print_oops_end_marker();
 	add_taint(TAINT_WARN);
 }
-EXPORT_SYMBOL(warn_slowpath);
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+	static const char *empty = "";
+	warn_slowpath_fmt(file, line, empty);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
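
The rename splits the old warn_slowpath() into warn_slowpath_fmt(), for WARN() callers that carry a message, and warn_slowpath_null() for those that don't; since the format pointer is now never NULL (the null variant passes an empty string), the test becomes `if (*fmt)`. A plain-C model of the pair, assuming nothing beyond what the hunk shows:

    #include <stdarg.h>
    #include <stdio.h>

    static void warn_fmt(const char *file, int line, const char *fmt, ...)
    {
            va_list args;

            fprintf(stderr, "WARNING: at %s:%d\n", file, line);
            if (*fmt) {             /* same test the patched code uses */
                    va_start(args, fmt);
                    vfprintf(stderr, fmt, args);
                    va_end(args);
                    fputc('\n', stderr);
            }
    }

    static void warn_null(const char *file, int line)
    {
            warn_fmt(file, line, "");   /* empty string, never NULL */
    }

    int main(void)
    {
            warn_fmt(__FILE__, __LINE__, "value was %d", 42);
            warn_null(__FILE__, __LINE__);
            return 0;
    }
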
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 6f7b869c011d..bece7c0b67b2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rlim_new)
 
 	cputime = secs_to_cputime(rlim_new);
 	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-	    cputime_lt(current->signal->it_prof_expires, cputime)) {
+	    cputime_gt(current->signal->it_prof_expires, cputime)) {
 		spin_lock_irq(&current->sighand->siglock);
 		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
 		spin_unlock_irq(&current->sighand->siglock);
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * timer call will interfere.
 	 */
 	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-		int firing;
+		int cpu_firing;
+
 		spin_lock(&timer->it_lock);
 		list_del_init(&timer->it.cpu.entry);
-		firing = timer->it.cpu.firing;
+		cpu_firing = timer->it.cpu.firing;
 		timer->it.cpu.firing = 0;
 		/*
 		 * The firing flag is -1 if we collided with a reset
 		 * of the timer, which already reported this
 		 * almost-firing as an overrun. So don't generate an event.
 		 */
-		if (likely(firing >= 0)) {
+		if (likely(cpu_firing >= 0))
 			cpu_timer_fire(timer);
-		}
 		spin_unlock(&timer->it_lock);
 	}
 }
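
The one-character update_rlimit_cpu() fix flips the comparison: the process-wide timer must be re-armed when the currently armed expiry is later than the new RLIMIT_CPU value (cputime_gt), not earlier; otherwise tightening the limit would never take effect. The fixed predicate, modelled in plain integers:

    #include <stdio.h>

    /* Re-arm only when nothing is armed yet, or the new limit fires sooner
     * than the current expiry (cputime_gt(cur, new) ~ cur > new). */
    static int should_rearm(unsigned long cur_expires, unsigned long new_limit)
    {
            return cur_expires == 0 ||      /* cputime_zero: nothing armed */
                   cur_expires > new_limit; /* new limit is earlier */
    }

    int main(void)
    {
            printf("%d\n", should_rearm(0, 100));   /* 1: arm first timer */
            printf("%d\n", should_rearm(200, 100)); /* 1: tighten to sooner */
            printf("%d\n", should_rearm(50, 100));  /* 0: keep earlier timer */
            return 0;
    }
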
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 5f21ab2bbcdf..e71ca9cd81b2 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,7 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
+#include <scsi/scsi_scan.h>
 #include <asm/suspend.h>
 
 #include "power.h"
@@ -655,32 +656,42 @@ static int software_resume(void)
 	 * here to avoid lockdep complaining.
 	 */
 	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+	if (swsusp_resume_device)
+		goto Check_image;
+
+	if (!strlen(resume_file)) {
+		error = -ENOENT;
+		goto Unlock;
+	}
+
+	pr_debug("PM: Checking image partition %s\n", resume_file);
+
+	/* Check if the device is there */
+	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
-		if (!strlen(resume_file)) {
-			mutex_unlock(&pm_mutex);
-			return -ENOENT;
-		}
 		/*
 		 * Some device discovery might still be in progress; we need
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
+
 		swsusp_resume_device = name_to_dev_t(resume_file);
-		pr_debug("PM: Resume from partition %s\n", resume_file);
-	} else {
-		pr_debug("PM: Resume from partition %d:%d\n",
-			MAJOR(swsusp_resume_device),
-			MINOR(swsusp_resume_device));
+		if (!swsusp_resume_device) {
+			error = -ENODEV;
+			goto Unlock;
+		}
 	}
 
-	if (noresume) {
-		/**
-		 * FIXME: If noresume is specified, we need to find the
-		 * partition and reset it back to normal swap space.
-		 */
-		mutex_unlock(&pm_mutex);
-		return 0;
-	}
+ Check_image:
+	pr_debug("PM: Resume from partition %d:%d\n",
+		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
 	pr_debug("PM: Checking hibernation image.\n");
 	error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f172f41858bb..f99ed6a75eac 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state)
 
 	device_pm_lock();
 
+	if (suspend_ops->prepare) {
+		error = suspend_ops->prepare();
+		if (error)
+			goto Done;
+	}
+
 	error = device_power_down(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
-		goto Done;
+		goto Platfrom_finish;
 	}
 
-	if (suspend_ops->prepare) {
-		error = suspend_ops->prepare();
+	if (suspend_ops->prepare_late) {
+		error = suspend_ops->prepare_late();
 		if (error)
 			goto Power_up_devices;
 	}
 
 	if (suspend_test(TEST_PLATFORM))
-		goto Platfrom_finish;
+		goto Platform_wake;
 
 	error = disable_nonboot_cpus();
 	if (error || suspend_test(TEST_CPUS))
@@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state)
  Enable_cpus:
 	enable_nonboot_cpus();
 
- Platfrom_finish:
-	if (suspend_ops->finish)
-		suspend_ops->finish();
+ Platform_wake:
+	if (suspend_ops->wake)
+		suspend_ops->wake();
 
  Power_up_devices:
 	device_power_up(PMSG_RESUME);
 
+ Platfrom_finish:
+	if (suspend_ops->finish)
+		suspend_ops->finish();
+
  Done:
 	device_pm_unlock();
 
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319e489c..8ba052c86d48 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	if (!bio)
-		return -ENOMEM;
 	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
 	bio->bi_bdev = resume_bdev;
 	bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 6c85359364f2..ed97375daae9 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -24,6 +24,7 @@
 #include <linux/cpu.h>
 #include <linux/freezer.h>
 #include <linux/smp_lock.h>
+#include <scsi/scsi_scan.h>
 
 #include <asm/uaccess.h>
 
@@ -92,6 +93,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	filp->private_data = data;
 	memset(&data->handle, 0, sizeof(struct snapshot_handle));
 	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
+		/* Hibernating. The image device should be accessible. */
 		data->swap = swsusp_resume_device ?
 			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
 		data->mode = O_RDONLY;
@@ -99,6 +101,13 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		if (error)
 			pm_notifier_call_chain(PM_POST_HIBERNATION);
 	} else {
+		/*
+		 * Resuming. We may need to wait for the image device to
+		 * appear.
+		 */
+		wait_for_device_probe();
+		scsi_complete_async_scans();
+
 		data->swap = -1;
 		data->mode = O_WRONLY;
 		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index aaad0ec34194..0692ab5a0d67 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -21,9 +21,7 @@
 #include <linux/audit.h>
 #include <linux/pid_namespace.h>
 #include <linux/syscalls.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 
 /*
@@ -48,7 +46,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 	list_add(&child->ptrace_entry, &new_parent->ptraced);
 	child->parent = new_parent;
 }
- 
+
 /*
  * Turn a tracing stop into a normal stop now, since with no tracer there
  * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
@@ -173,7 +171,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 	task_lock(task);
 	err = __ptrace_may_access(task, mode);
 	task_unlock(task);
-	return (!err ? true : false);
+	return !err;
 }
 
 int ptrace_attach(struct task_struct *task)
@@ -190,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
 	/* Protect exec's credential calculations against our interference;
 	 * SUID, SGID and LSM creds get determined differently under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
 	if (retval < 0)
 		goto out;
 
@@ -234,7 +232,7 @@ repeat:
 bad:
 	write_unlock_irqrestore(&tasklist_lock, flags);
 	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+	mutex_unlock(&task->cred_exec_mutex);
 out:
 	return retval;
 }
@@ -358,7 +356,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
 		copied += retval;
 		src += retval;
 		dst += retval;
-		len -= retval;			
+		len -= retval;
 	}
 	return copied;
 }
@@ -383,7 +381,7 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
 		copied += retval;
 		src += retval;
 		dst += retval;
-		len -= retval;			
+		len -= retval;
 	}
 	return copied;
 }
@@ -496,9 +494,9 @@ static int ptrace_resume(struct task_struct *child, long request, long data)
 		if (unlikely(!arch_has_single_step()))
 			return -EIO;
 		user_enable_single_step(child);
-	}
-	else
+	} else {
 		user_disable_single_step(child);
+	}
 
 	child->exit_code = data;
 	wake_up_process(child);
@@ -606,10 +604,11 @@ repeat:
 	ret = security_ptrace_traceme(current->parent);
 
 	/*
-	 * Set the ptrace bit in the process ptrace flags.
-	 * Then link us on our parent's ptraced list.
+	 * Check PF_EXITING to ensure ->real_parent has not passed
+	 * exit_ptrace(). Otherwise we don't report the error but
+	 * pretend ->real_parent untraces us right after return.
 	 */
-	if (!ret) {
+	if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 		current->ptrace |= PT_PTRACED;
 		__ptrace_link(current, current->real_parent);
 	}
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c7b8457d0d2..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type)
 	}
 }
 
-static inline void wait_migrated_callbacks(void);
+static inline void wait_migrated_callbacks(void)
+{
+	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
@@ -179,21 +186,12 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 static void rcu_migrate_callback(struct rcu_head *notused)
 {
 	if (atomic_dec_and_test(&rcu_migrate_type_count))
 		wake_up(&rcu_migrate_wq);
 }
 
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-}
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 					     unsigned long action, void *hcpu)
 {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7f3266922572..d2a372fb0b9b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
 	rdp->qs_pending = 1;
 	rdp->passed_quiesc = 0;
 	rdp->gpnum = rsp->gpnum;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 }
 
 /*
@@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	rsp->gpnum++;
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
 	dyntick_record_completed(rsp, rsp->completed - 1);
 	note_new_gpnum(rsp, rdp);
@@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 {
 	unsigned long flags;
 	long lastcomp;
-	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
@@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		return;	/* Someone else is already on the job. */
 	}
 	if (relaxed &&
-	    (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
 		goto unlock_ret; /* no emergency and done recently. */
 	rsp->n_force_qs++;
 	spin_lock(&rnp->lock);
 	lastcomp = rsp->completed;
 	signaled = rsp->signaled;
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 	if (lastcomp == rsp->gpnum) {
 		rsp->n_force_qs_ngp++;
 		spin_unlock(&rnp->lock);
@@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
 	 */
-	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 
 	/*
@@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	if (unlikely(++rdp->qlen > qhimark)) {
 		rdp->blimit = LONG_MAX;
 		force_quiescent_state(rsp, 0);
-	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-		   (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
 }
@@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
 	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
-	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	     (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
 		return 1;
 
 	/* nothing to do */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4ee954f6a8d5..4b1875ba9404 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   (int)(rdp->n_rcu_pending & 0xffff));
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, " dt=%d/%d dn=%d df=%lu",
 		   rdp->dynticks->dynticks,
@@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   rdp->n_rcu_pending);
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, ",%d,%d,%d,%lu",
 		   rdp->dynticks->dynticks,
@@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
-	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\",");
+	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
 	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/resource.c b/kernel/resource.c
index fd5d7d574bb9..ac5f3a36923f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root,
 	res->end = end;
 	res->flags = IORESOURCE_BUSY;
 
-	for (;;) {
-		conflict = __request_resource(parent, res);
-		if (!conflict)
-			break;
-		if (conflict != parent) {
-			parent = conflict;
-			if (!(conflict->flags & IORESOURCE_BUSY))
-				continue;
-		}
-
-		/* Uhhuh, that didn't work out.. */
-		kfree(res);
-		res = NULL;
-		break;
-	}
-
-	if (!res) {
-		/* failed, split and try again */
-
-		/* conflict covered whole area */
-		if (conflict->start <= start && conflict->end >= end)
-			return;
+	conflict = __request_resource(parent, res);
+	if (!conflict)
+		return;
 
-		if (conflict->start > start)
-			__reserve_region_with_split(root, start, conflict->start-1, name);
-		if (!(conflict->flags & IORESOURCE_BUSY)) {
-			resource_size_t common_start, common_end;
+	/* failed, split and try again */
+	kfree(res);
 
-			common_start = max(conflict->start, start);
-			common_end = min(conflict->end, end);
-			if (common_start < common_end)
-				__reserve_region_with_split(root, common_start, common_end, name);
-		}
-		if (conflict->end < end)
-			__reserve_region_with_split(root, conflict->end+1, end, name);
-	}
+	/* conflict covered whole area */
+	if (conflict->start <= start && conflict->end >= end)
+		return;
+
+	if (conflict->start > start)
+		__reserve_region_with_split(root, start, conflict->start-1, name);
+	if (conflict->end < end)
+		__reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
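
The rewritten __reserve_region_with_split() drops the retry loop: request the whole range once, and on conflict free the resource and recurse into whatever lies on either side of the conflicting region. A runnable user-space model of that split-and-recurse flow, with a single fixed busy range standing in for the kernel's resource tree:

    #include <stdio.h>

    struct range { long start, end; };

    static struct range busy = { 40, 60 };  /* pre-existing reservation */

    /* Returns NULL on success, or the conflicting range. */
    static struct range *try_request(long start, long end)
    {
            return (end < busy.start || start > busy.end) ? NULL : &busy;
    }

    static void reserve_with_split(long start, long end)
    {
            struct range *conflict = try_request(start, end);

            if (!conflict) {
                    printf("reserved [%ld, %ld]\n", start, end);
                    return;
            }
            /* conflict covered whole area */
            if (conflict->start <= start && conflict->end >= end)
                    return;
            if (conflict->start > start)
                    reserve_with_split(start, conflict->start - 1);
            if (conflict->end < end)
                    reserve_with_split(conflict->end + 1, end);
    }

    int main(void)
    {
            reserve_with_split(0, 100);     /* splits around [40, 60] */
            return 0;
    }
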
diff --git a/kernel/sched.c b/kernel/sched.c
index 5aa63f50c696..8908d190a348 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4856,7 +4856,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (user_tick)
 		account_user_time(p, one_jiffy, one_jiffy_scaled);
-	else if (p != rq->idle)
+	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
 		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
 				    one_jiffy_scaled);
 	else
@@ -4970,7 +4970,7 @@ void scheduler_tick(void)
 #endif
 }
 
-unsigned long get_parent_ip(unsigned long addr)
+notrace unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
 		addr = CALLER_ADDR2;
@@ -7488,8 +7488,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-		printk(KERN_CONT " %s (__cpu_power = %d)", str,
-						group->__cpu_power);
+
+		printk(KERN_CONT " %s", str);
+		if (group->__cpu_power != SCHED_LOAD_SCALE) {
+			printk(KERN_CONT " (__cpu_power = %d)",
+				group->__cpu_power);
+		}
 
 		group = group->next;
 	} while (group != sd->groups);
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index cf2bc01186ef..b28d19135f43 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -609,14 +609,14 @@ void slow_work_unregister_user(void)
 	if (slow_work_user_count == 0) {
 		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
 		slow_work_threads_should_exit = true;
+		del_timer_sync(&slow_work_cull_timer);
+		del_timer_sync(&slow_work_oom_timer);
 		wake_up_all(&slow_work_thread_wq);
 		wait_for_completion(&slow_work_last_thread_exited);
 		printk(KERN_NOTICE "Slow work thread pool:"
 		       " Shut down complete\n");
 	}
 
-	del_timer_sync(&slow_work_cull_timer);
-
 	mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2fecefacdc5b..b525dd348511 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -472,9 +472,9 @@ void tasklet_kill(struct tasklet_struct *t)
 		printk("Attempt to kill tasklet from interrupt\n");
 
 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-		do
+		do {
 			yield();
-		while (test_bit(TASKLET_STATE_SCHED, &t->state));
+		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
diff --git a/kernel/sys.c b/kernel/sys.c
index 51dbb55604e8..e7998cf31498 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -360,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		void __user *, arg)
 {
 	char buffer[256];
+	int ret = 0;
 
 	/* We only trust the superuser with rebooting the system. */
 	if (!capable(CAP_SYS_BOOT))
@@ -397,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		kernel_halt();
 		unlock_kernel();
 		do_exit(0);
-		break;
+		panic("cannot halt");
 
 	case LINUX_REBOOT_CMD_POWER_OFF:
 		kernel_power_off();
@@ -417,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 
 #ifdef CONFIG_KEXEC
 	case LINUX_REBOOT_CMD_KEXEC:
-		{
-			int ret;
-			ret = kernel_kexec();
-			unlock_kernel();
-			return ret;
-		}
+		ret = kernel_kexec();
+		break;
 #endif
 
 #ifdef CONFIG_HIBERNATION
 	case LINUX_REBOOT_CMD_SW_SUSPEND:
-		{
-			int ret = hibernate();
-			unlock_kernel();
-			return ret;
-		}
+		ret = hibernate();
+		break;
 #endif
 
 	default:
-		unlock_kernel();
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	}
 	unlock_kernel();
-	return 0;
+	return ret;
 }
 
 static void deferred_cad(struct work_struct *dummy)
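
The reboot hunks funnel every switch arm through a single exit: each case sets ret and breaks, and unlock_kernel() runs exactly once on the way out, where previously the kexec, hibernate and default arms each returned from inside the switch with their own unlock (the halt arm also gains a panic(), since do_exit() should never return). A sketch of the single-exit pattern, with a pthread mutex standing in for the BKL and all names hypothetical:

    #include <pthread.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    enum cmd { CMD_A, CMD_B };

    static int do_a(void) { return 0; }
    static int do_b(void) { return 0; }

    static int handle(enum cmd cmd)
    {
            int ret = 0;

            pthread_mutex_lock(&big_lock);
            switch (cmd) {
            case CMD_A:
                    ret = do_a();
                    break;
            case CMD_B:
                    ret = do_b();
                    break;
            default:
                    ret = -22;      /* -EINVAL */
                    break;
            }
            pthread_mutex_unlock(&big_lock);    /* runs on every path */
            return ret;
    }

    int main(void)
    {
            return handle(CMD_A);
    }
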
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4286b62b34a0..ea78fa101ad6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,9 @@ static unsigned long one_ul = 1;
 static int one_hundred = 100;
 static int one_thousand = 1000;
 
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
@@ -902,16 +905,6 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "scan_unevictable_pages",
-		.data		= &scan_unevictable_pages,
-		.maxlen		= sizeof(scan_unevictable_pages),
-		.mode		= 0644,
-		.proc_handler	= &scan_unevictable_handler,
-	},
-#endif
 #ifdef CONFIG_SLOW_WORK
 	{
 		.ctl_name	= CTL_UNNUMBERED,
@@ -1016,7 +1009,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one_ul,
+		.extra1		= &dirty_bytes_min,
 	},
 	{
 		.procname	= "dirty_writeback_centisecs",
@@ -1302,6 +1295,16 @@ static struct ctl_table vm_table[] = {
 		.extra2		= &one,
 	},
 #endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "scan_unevictable_pages",
+		.data		= &scan_unevictable_pages,
+		.maxlen		= sizeof(scan_unevictable_pages),
+		.mode		= 0644,
+		.proc_handler	= &scan_unevictable_handler,
+	},
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931a7fe7..ecfd7b5187e0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 
 	resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-	wdnow = watchdog->read();
+	wdnow = watchdog->read(watchdog);
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-		csnow = cs->read();
+		csnow = cs->read(cs);
 
 		if (unlikely(resumed)) {
 			cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 
 		list_add(&cs->wd_list, &watchdog_list);
 		if (!started && watchdog) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
 				     cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 				cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
 			/* Start if list is not empty */
 			if (!list_empty(&watchdog_list)) {
-				watchdog_last = watchdog->read();
+				watchdog_last = watchdog->read(watchdog);
 				watchdog_timer.expires =
 					jiffies + WATCHDOG_INTERVAL;
 				add_timer_on(&watchdog_timer,
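
The watchdog call sites change because the clocksource ->read() hook now takes the clocksource itself as an argument (see the jiffies_read() signature below), letting one callback serve several clock instances. A sketch of the self-pointer callback idiom, with hypothetical user-space types rather than the kernel's struct clocksource:

    #include <stdio.h>

    typedef unsigned long long cycle_t;

    struct clocksrc {
            const char *name;
            cycle_t counter;
            cycle_t (*read)(struct clocksrc *cs);
    };

    /* One implementation shared by every instance. */
    static cycle_t generic_read(struct clocksrc *cs)
    {
            return ++cs->counter;   /* each clock advances independently */
    }

    int main(void)
    {
            struct clocksrc a = { "a", 0, generic_read };
            struct clocksrc b = { "b", 100, generic_read };

            printf("%s=%llu %s=%llu\n", a.name, a.read(&a),
                   b.name, b.read(&b));
            return 0;
    }
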
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f197560f3b..c3f6c30816e3 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
  */
 #define JIFFIES_SHIFT	8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
 	return (cycle_t) jiffies;
 }
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 21a5ca849514..83c4417b6a3c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
 	for (;;) {
 		if (!clockevents_program_event(dev, next, ktime_get()))
 			return;
-		tick_periodic(cpu);
+		/*
+		 * Have to be careful here. If we're in oneshot mode,
+		 * before we call tick_periodic() in a loop, we need
+		 * to be sure we're using a real hardware clocksource.
+		 * Otherwise we could get trapped in an infinite loop,
+		 * as tick_periodic() increments jiffies, which then
+		 * will increment time, possibly causing the loop to
+		 * trigger again and again.
+		 */
+		if (timekeeping_valid_for_hres())
+			tick_periodic(cpu);
 		next = ktime_add(next, tick_period);
 	}
 }
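
The guard breaks a feedback loop: when the only usable clocksource is the jiffies clock, tick_periodic() advances jiffies, which advances the very time base the catch-up loop reads, so the loop can never get ahead. A toy simulation of the non-converging loop (illustrative numbers only, capped so it terminates):

    #include <stdio.h>

    static long fake_time;                  /* advanced only by the tick */

    static void tick_periodic_model(void)
    {
            fake_time += 10;                /* tick feeds the time base */
    }

    int main(void)
    {
            long next = fake_time;          /* event already due */
            int iterations = 0;

            /* Catch-up loop: runs while the next event is in the past. */
            while (next <= fake_time && iterations < 5) {
                    tick_periodic_model();  /* moves fake_time forward... */
                    next += 10;             /* ...so next never gets ahead */
                    iterations++;
            }
            printf("gave up after %d iterations\n", iterations);
            return 0;
    }
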
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6598d1..687dff49f6e7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
  */
 static void change_clocksource(void)
 {
-	struct clocksource *new;
+	struct clocksource *new, *old;
 
 	new = clocksource_get_next();
 
@@ -191,11 +191,16 @@ static void change_clocksource(void)
 
 	clocksource_forward_now();
 
-	new->raw_time = clock->raw_time;
+	if (clocksource_enable(new))
+		return;
 
+	new->raw_time = clock->raw_time;
+	old = clock;
 	clock = new;
+	clocksource_disable(old);
+
 	clock->cycle_last = 0;
-	clock->cycle_last = clocksource_read(new);
+	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
292 ntp_init(); 297 ntp_init();
293 298
294 clock = clocksource_get_next(); 299 clock = clocksource_get_next();
300 clocksource_enable(clock);
295 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); 301 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
296 clock->cycle_last = clocksource_read(clock); 302 clock->cycle_last = clocksource_read(clock);
297 303
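
change_clocksource() now follows a strict hand-over order: enable the incoming clocksource first and abandon the switch if that fails, copy carried-over state, repoint the global, and only then disable the outgoing one; timekeeping_init() gains the matching clocksource_enable() for the boot clocksource. A generic sketch of that ordering, with hypothetical provider types:

    #include <stdio.h>

    struct provider {
        const char *name;
        int (*enable)(struct provider *p);
        void (*disable)(struct provider *p);
    };

    static struct provider *active;   /* plays the role of the global 'clock' */

    static void switch_provider(struct provider *new)
    {
        struct provider *old = active;

        /* Power up the replacement first; on failure keep the old one. */
        if (new->enable && new->enable(new))
            return;

        /* ... copy carried-over state (raw_time in the patch) here ... */
        active = new;                  /* readers now hit the new provider */
        if (old && old->disable)
            old->disable(old);         /* only now is the old one idle */
    }

    static int ok_enable(struct provider *p)
    {
        printf("enable %s\n", p->name);
        return 0;
    }

    static void quiet_disable(struct provider *p)
    {
        printf("disable %s\n", p->name);
    }

    int main(void)
    {
        struct provider a = { "a", ok_enable, quiet_disable };
        struct provider b = { "b", ok_enable, quiet_disable };

        active = &a;
        switch_provider(&b);
        printf("active: %s\n", active->name);
        return 0;
    }
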
diff --git a/kernel/timer.c b/kernel/timer.c
index b4555568b4e4..cffffad01c31 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -531,10 +531,13 @@ static void __init_timer(struct timer_list *timer,
531} 531}
532 532
533/** 533/**
534 * init_timer - initialize a timer. 534 * init_timer_key - initialize a timer
535 * @timer: the timer to be initialized 535 * @timer: the timer to be initialized
536 * @name: name of the timer
537 * @key: lockdep class key of the fake lock used for tracking timer
538 * sync lock dependencies
536 * 539 *
 537 * init_timer() must be done to a timer prior to calling *any* of the 540 * init_timer_key() must be done to a timer prior to calling *any* of the
538 * other timer functions. 541 * other timer functions.
539 */ 542 */
540void init_timer_key(struct timer_list *timer, 543void init_timer_key(struct timer_list *timer,
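
The kernel-doc catches up with reality: init_timer() is by now a wrapper macro around init_timer_key() that plants a static lock_class_key at every call site, which is how lockdep tells apart timers initialized in different places. Roughly, under CONFIG_LOCKDEP (a simplified reconstruction, not copied from the tree):

    #define init_timer(timer)                            \
        do {                                             \
            static struct lock_class_key __key;          \
            init_timer_key((timer), #timer, &__key);     \
        } while (0)

Because the static key is distinct per macro expansion, two timers initialized at different call sites land in different lockdep classes even though they share init_timer_key().
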
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2246141bda4d..417d1985e299 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -312,7 +312,7 @@ config KMEMTRACE
312 and profile kernel code. 312 and profile kernel code.
313 313
 314 This requires a userspace application to use. See 314 This requires a userspace application to use. See
315 Documentation/vm/kmemtrace.txt for more information. 315 Documentation/trace/kmemtrace.txt for more information.
316 316
317 Saying Y will make the kernel somewhat larger and slower. However, 317 Saying Y will make the kernel somewhat larger and slower. However,
318 if you disable kmemtrace at run-time or boot-time, the performance 318 if you disable kmemtrace at run-time or boot-time, the performance
@@ -403,7 +403,7 @@ config MMIOTRACE
403 implementation and works via page faults. Tracing is disabled by 403 implementation and works via page faults. Tracing is disabled by
404 default and can be enabled at run-time. 404 default and can be enabled at run-time.
405 405
406 See Documentation/tracers/mmiotrace.txt. 406 See Documentation/trace/mmiotrace.txt.
407 If you are not helping to develop drivers, say N. 407 If you are not helping to develop drivers, say N.
408 408
409config MMIOTRACE_TEST 409config MMIOTRACE_TEST
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b32ff446c3fb..921ef5d1f0ba 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1377,12 +1377,12 @@ static int blk_trace_str2mask(const char *str)
1377{ 1377{
1378 int i; 1378 int i;
1379 int mask = 0; 1379 int mask = 0;
1380 char *s, *token; 1380 char *buf, *s, *token;
1381 1381
1382 s = kstrdup(str, GFP_KERNEL); 1382 buf = kstrdup(str, GFP_KERNEL);
1383 if (s == NULL) 1383 if (buf == NULL)
1384 return -ENOMEM; 1384 return -ENOMEM;
1385 s = strstrip(s); 1385 s = strstrip(buf);
1386 1386
1387 while (1) { 1387 while (1) {
1388 token = strsep(&s, ","); 1388 token = strsep(&s, ",");
@@ -1403,7 +1403,7 @@ static int blk_trace_str2mask(const char *str)
1403 break; 1403 break;
1404 } 1404 }
1405 } 1405 }
1406 kfree(s); 1406 kfree(buf);
1407 1407
1408 return mask; 1408 return mask;
1409} 1409}
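
The blktrace fix is the classic strstrip()/strsep() pitfall: both hand back or advance a pointer into the middle of the allocation, so kfree() must be given the original kstrdup() result, not the parsing cursor. A standalone demonstration of the safe shape, with a toy skip_spaces() standing in for the kernel's strstrip():

    #define _DEFAULT_SOURCE
    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for strstrip(): returns a pointer into the same
     * buffer, past the leading whitespace. */
    static char *skip_spaces(char *s)
    {
        while (isspace((unsigned char)*s))
            s++;
        return s;
    }

    int main(void)
    {
        char *buf, *s, *token;

        buf = strdup("  read,write,sync");   /* keep the original pointer */
        if (!buf)
            return 1;
        s = skip_spaces(buf);                /* s may no longer equal buf... */

        while ((token = strsep(&s, ",")))    /* ...and strsep() moves s */
            printf("token: '%s'\n", token);

        free(buf);   /* freeing s here would corrupt the heap (or free NULL) */
        return 0;
    }
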
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9d28476a9851..a884c09006c4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3277,19 +3277,13 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
3277 3277
3278 info->tr = &global_trace; 3278 info->tr = &global_trace;
3279 info->cpu = cpu; 3279 info->cpu = cpu;
3280 info->spare = ring_buffer_alloc_read_page(info->tr->buffer); 3280 info->spare = NULL;
3281 /* Force reading ring buffer for first read */ 3281 /* Force reading ring buffer for first read */
3282 info->read = (unsigned int)-1; 3282 info->read = (unsigned int)-1;
3283 if (!info->spare)
3284 goto out;
3285 3283
3286 filp->private_data = info; 3284 filp->private_data = info;
3287 3285
3288 return 0; 3286 return nonseekable_open(inode, filp);
3289
3290 out:
3291 kfree(info);
3292 return -ENOMEM;
3293} 3287}
3294 3288
3295static ssize_t 3289static ssize_t
@@ -3304,6 +3298,11 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
3304 if (!count) 3298 if (!count)
3305 return 0; 3299 return 0;
3306 3300
3301 if (!info->spare)
3302 info->spare = ring_buffer_alloc_read_page(info->tr->buffer);
3303 if (!info->spare)
3304 return -ENOMEM;
3305
3307 /* Do we have previous read data to read? */ 3306 /* Do we have previous read data to read? */
3308 if (info->read < PAGE_SIZE) 3307 if (info->read < PAGE_SIZE)
3309 goto read; 3308 goto read;
@@ -3342,7 +3341,8 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
3342{ 3341{
3343 struct ftrace_buffer_info *info = file->private_data; 3342 struct ftrace_buffer_info *info = file->private_data;
3344 3343
3345 ring_buffer_free_read_page(info->tr->buffer, info->spare); 3344 if (info->spare)
3345 ring_buffer_free_read_page(info->tr->buffer, info->spare);
3346 kfree(info); 3346 kfree(info);
3347 3347
3348 return 0; 3348 return 0;
@@ -3428,14 +3428,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3428 int size, i; 3428 int size, i;
3429 size_t ret; 3429 size_t ret;
3430 3430
3431 /* 3431 if (*ppos & (PAGE_SIZE - 1)) {
3432 * We can't seek on a buffer input 3432 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
3433 */ 3433 return -EINVAL;
3434 if (unlikely(*ppos)) 3434 }
3435 return -ESPIPE;
3436 3435
3436 if (len & (PAGE_SIZE - 1)) {
3437 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
3438 if (len < PAGE_SIZE)
3439 return -EINVAL;
3440 len &= PAGE_MASK;
3441 }
3437 3442
3438 for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { 3443 for (i = 0; i < PIPE_BUFFERS && len; i++, len -= PAGE_SIZE) {
3439 struct page *page; 3444 struct page *page;
3440 int r; 3445 int r;
3441 3446
@@ -3443,6 +3448,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3443 if (!ref) 3448 if (!ref)
3444 break; 3449 break;
3445 3450
3451 ref->ref = 1;
3446 ref->buffer = info->tr->buffer; 3452 ref->buffer = info->tr->buffer;
3447 ref->page = ring_buffer_alloc_read_page(ref->buffer); 3453 ref->page = ring_buffer_alloc_read_page(ref->buffer);
3448 if (!ref->page) { 3454 if (!ref->page) {
@@ -3474,6 +3480,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
3474 spd.partial[i].offset = 0; 3480 spd.partial[i].offset = 0;
3475 spd.partial[i].private = (unsigned long)ref; 3481 spd.partial[i].private = (unsigned long)ref;
3476 spd.nr_pages++; 3482 spd.nr_pages++;
3483 *ppos += PAGE_SIZE;
3477 } 3484 }
3478 3485
3479 spd.nr_pages = i; 3486 spd.nr_pages = i;
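
Three related trace.c fixes: the spare ring-buffer page is now allocated on first read() rather than at open() (so open cannot fail with -ENOMEM for a page a splice-only user never touches), the fd is marked nonseekable, and splice now insists on page-aligned offsets and lengths while initializing ref->ref. The lazy-allocation shape, as a hypothetical standalone sketch:

    #include <stdlib.h>

    struct buffer_info {
        void *spare;   /* scratch page, allocated lazily */
    };

    /* Defer the allocation from open() to the first read(), so opening
     * alone neither fails on allocation nor pins an unused page. */
    static long info_read(struct buffer_info *info)
    {
        if (!info->spare)
            info->spare = malloc(4096);
        if (!info->spare)
            return -1;          /* the kernel returns -ENOMEM here */
        /* ... fill info->spare from the ring buffer and copy out ... */
        return 0;
    }

    static void info_release(struct buffer_info *info)
    {
        if (info->spare)        /* mirrors the new NULL check on release */
            free(info->spare);
        free(info);
    }

    int main(void)
    {
        struct buffer_info *info = calloc(1, sizeof(*info));

        if (!info)
            return 1;
        info_read(info);        /* first read triggers the allocation */
        info_release(info);
        return 0;
    }
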
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index ad8c22efff41..8333715e4066 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -155,6 +155,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
155 return TRACE_TYPE_HANDLED; 155 return TRACE_TYPE_HANDLED;
156} 156}
157 157
158static void branch_print_header(struct seq_file *s)
159{
160 seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT"
161 " FUNC:FILE:LINE\n");
162 seq_puts(s, "# | | | | | "
163 " |\n");
164}
158 165
159static struct trace_event trace_branch_event = { 166static struct trace_event trace_branch_event = {
160 .type = TRACE_BRANCH, 167 .type = TRACE_BRANCH,
@@ -169,6 +176,7 @@ static struct tracer branch_trace __read_mostly =
169#ifdef CONFIG_FTRACE_SELFTEST 176#ifdef CONFIG_FTRACE_SELFTEST
170 .selftest = trace_selftest_startup_branch, 177 .selftest = trace_selftest_startup_branch,
171#endif /* CONFIG_FTRACE_SELFTEST */ 178#endif /* CONFIG_FTRACE_SELFTEST */
179 .print_header = branch_print_header,
172}; 180};
173 181
174__init static int init_branch_tracer(void) 182__init static int init_branch_tracer(void)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 64ec4d278ffb..576f4fa2af0d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -503,6 +503,7 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
503 503
504 if (copy_from_user(&buf, ubuf, cnt)) 504 if (copy_from_user(&buf, ubuf, cnt))
505 return -EFAULT; 505 return -EFAULT;
506 buf[cnt] = '\0';
506 507
507 pred = kzalloc(sizeof(*pred), GFP_KERNEL); 508 pred = kzalloc(sizeof(*pred), GFP_KERNEL);
508 if (!pred) 509 if (!pred)
@@ -520,9 +521,10 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
520 return cnt; 521 return cnt;
521 } 522 }
522 523
523 if (filter_add_pred(call, pred)) { 524 err = filter_add_pred(call, pred);
525 if (err < 0) {
524 filter_free_pred(pred); 526 filter_free_pred(pred);
525 return -EINVAL; 527 return err;
526 } 528 }
527 529
528 *ppos += cnt; 530 *ppos += cnt;
@@ -569,6 +571,7 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
569 571
570 if (copy_from_user(&buf, ubuf, cnt)) 572 if (copy_from_user(&buf, ubuf, cnt))
571 return -EFAULT; 573 return -EFAULT;
574 buf[cnt] = '\0';
572 575
573 pred = kzalloc(sizeof(*pred), GFP_KERNEL); 576 pred = kzalloc(sizeof(*pred), GFP_KERNEL);
574 if (!pred) 577 if (!pred)
@@ -586,10 +589,11 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
586 return cnt; 589 return cnt;
587 } 590 }
588 591
589 if (filter_add_subsystem_pred(system, pred)) { 592 err = filter_add_subsystem_pred(system, pred);
593 if (err < 0) {
590 filter_free_subsystem_preds(system); 594 filter_free_subsystem_preds(system);
591 filter_free_pred(pred); 595 filter_free_pred(pred);
592 return -EINVAL; 596 return err;
593 } 597 }
594 598
595 *ppos += cnt; 599 *ppos += cnt;
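
Two fixes per handler here: the user buffer is NUL-terminated before being parsed (copy_from_user() moves raw bytes, never a terminator), and the real error from filter_add_pred()/filter_add_subsystem_pred() is propagated instead of being flattened to -EINVAL. The termination idiom, as a kernel-style sketch (example_write, parse_string, and the buffer size are illustrative, not taken from the patch):

    static ssize_t example_write(struct file *filp, const char __user *ubuf,
                                 size_t cnt, loff_t *ppos)
    {
        char buf[64];
        int err;

        if (cnt >= sizeof(buf))          /* leave room for the terminator */
            return -EINVAL;
        if (copy_from_user(buf, ubuf, cnt))
            return -EFAULT;
        buf[cnt] = '\0';                 /* user data carries no NUL */

        err = parse_string(buf);         /* hypothetical parser */
        if (err < 0)
            return err;                  /* propagate, don't flatten to -EINVAL */

        *ppos += cnt;
        return cnt;
    }
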
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 026be412f356..e03cbf1e38f3 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -215,7 +215,7 @@ static int __filter_add_pred(struct ftrace_event_call *call,
215 } 215 }
216 } 216 }
217 217
218 return -ENOMEM; 218 return -ENOSPC;
219} 219}
220 220
221static int is_string_field(const char *type) 221static int is_string_field(const char *type)
@@ -319,7 +319,7 @@ int filter_add_subsystem_pred(struct event_subsystem *system,
319 } 319 }
320 320
321 if (i == MAX_FILTER_PRED) 321 if (i == MAX_FILTER_PRED)
322 return -EINVAL; 322 return -ENOSPC;
323 323
324 events_for_each(call) { 324 events_for_each(call) {
325 int err; 325 int err;
@@ -410,16 +410,22 @@ int filter_parse(char **pbuf, struct filter_pred *pred)
410 } 410 }
411 } 411 }
412 412
413 if (!val_str) {
414 pred->field_name = NULL;
415 return -EINVAL;
416 }
417
413 pred->field_name = kstrdup(pred->field_name, GFP_KERNEL); 418 pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
414 if (!pred->field_name) 419 if (!pred->field_name)
415 return -ENOMEM; 420 return -ENOMEM;
416 421
417 pred->val = simple_strtoull(val_str, &tmp, 10); 422 pred->val = simple_strtoull(val_str, &tmp, 0);
418 if (tmp == val_str) { 423 if (tmp == val_str) {
419 pred->str_val = kstrdup(val_str, GFP_KERNEL); 424 pred->str_val = kstrdup(val_str, GFP_KERNEL);
420 if (!pred->str_val) 425 if (!pred->str_val)
421 return -ENOMEM; 426 return -ENOMEM;
422 } 427 } else if (*tmp != '\0')
428 return -EINVAL;
423 429
424 return 0; 430 return 0;
425} 431}
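
filter_parse() becomes stricter and more honest: a missing value is rejected up front, numbers parse with base 0 (so 0x... and 0... work), trailing junk after a number yields -EINVAL instead of being silently dropped, and "table full" now reports -ENOSPC rather than masquerading as -ENOMEM or -EINVAL. The same accept/reject decision tree in standalone C:

    #include <stdio.h>
    #include <stdlib.h>

    /* Classify a filter value the way the patched filter_parse() does. */
    static void classify(const char *val)
    {
        char *end;
        unsigned long long num = strtoull(val, &end, 0);  /* base 0 */

        if (end == val)
            printf("'%s' -> string value\n", val);        /* no digits at all */
        else if (*end != '\0')
            printf("'%s' -> -EINVAL (trailing junk)\n", val);
        else
            printf("'%s' -> number %llu\n", val, num);
    }

    int main(void)
    {
        classify("0x1f");    /* 31: base 0 now accepts hex */
        classify("sched");   /* falls back to a string predicate */
        classify("12abc");   /* previously silently truncated to 12 */
        return 0;
    }
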
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h
index 30743f7d4110..d363c6672c6c 100644
--- a/kernel/trace/trace_events_stage_2.h
+++ b/kernel/trace/trace_events_stage_2.h
@@ -105,10 +105,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
105 return 0; 105 return 0;
106 106
107#undef __entry 107#undef __entry
108#define __entry "REC" 108#define __entry REC
109 109
110#undef TP_printk 110#undef TP_printk
111#define TP_printk(fmt, args...) "%s, %s\n", #fmt, #args 111#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
112 112
113#undef TP_fast_assign 113#undef TP_fast_assign
114#define TP_fast_assign(args...) args 114#define TP_fast_assign(args...) args
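
Both changes are about when macro expansion happens during stringification: a bare #args freezes the tokens as written, while the two-level __stringify() (as in linux/stringify.h) expands macros first, so __entry inside the format arguments becomes REC before the string is formed. A standalone demonstration:

    #include <stdio.h>

    /* Same two-level trick as linux/stringify.h. */
    #define __stringify_1(x...)   #x
    #define __stringify(x...)     __stringify_1(x)

    #define __entry REC

    int main(void)
    {
        /* One level: tokens frozen before expansion. */
        printf("%s\n", __stringify_1(__entry->pid));  /* "__entry->pid" */
        /* Two levels: __entry expands to REC first. */
        printf("%s\n", __stringify(__entry->pid));    /* "REC->pid" */
        return 0;
    }
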
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index bae791ebcc51..118439709fb7 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -186,6 +186,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter)
186 return TRACE_TYPE_UNHANDLED; 186 return TRACE_TYPE_UNHANDLED;
187} 187}
188 188
189static void power_print_header(struct seq_file *s)
190{
191 seq_puts(s, "# TIMESTAMP STATE EVENT\n");
192 seq_puts(s, "# | | |\n");
193}
194
189static struct tracer power_tracer __read_mostly = 195static struct tracer power_tracer __read_mostly =
190{ 196{
191 .name = "power", 197 .name = "power",
@@ -194,6 +200,7 @@ static struct tracer power_tracer __read_mostly =
194 .stop = stop_power_trace, 200 .stop = stop_power_trace,
195 .reset = power_trace_reset, 201 .reset = power_trace_reset,
196 .print_line = power_print_line, 202 .print_line = power_print_line,
203 .print_header = power_print_header,
197}; 204};
198 205
199static int init_power_trace(void) 206static int init_power_trace(void)
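
Like the branch tracer above, the power tracer now supplies its own column legend through the .print_header callback; the tracer ops structure is an ordinary function-pointer table the core consults, falling back to a default header when the hook is NULL. A minimal standalone analog of that dispatch (the types here are illustrative, not the kernel's struct tracer):

    #include <stdio.h>

    struct tracer {
        const char *name;
        void (*print_header)(FILE *s);   /* optional column legend */
    };

    static void power_print_header(FILE *s)
    {
        fputs("# TIMESTAMP      STATE  EVENT\n", s);
        fputs("#    |             |      |\n", s);
    }

    static void show_header(const struct tracer *t)
    {
        if (t->print_header)             /* core falls back when unset */
            t->print_header(stdout);
    }

    int main(void)
    {
        struct tracer power = { "power", power_print_header };
        show_header(&power);
        return 0;
    }
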
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index a2a3af29c943..5e579645ac86 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,5 +1,5 @@
1#include <trace/syscall.h>
1#include <linux/kernel.h> 2#include <linux/kernel.h>
2#include <linux/ftrace.h>
3#include <asm/syscall.h> 3#include <asm/syscall.h>
4 4
5#include "trace_output.h" 5#include "trace_output.h"
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b6b966ce1451..f71fb2a08950 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -966,20 +966,20 @@ undo:
966} 966}
967 967
968#ifdef CONFIG_SMP 968#ifdef CONFIG_SMP
969static struct workqueue_struct *work_on_cpu_wq __read_mostly;
970 969
971struct work_for_cpu { 970struct work_for_cpu {
972 struct work_struct work; 971 struct completion completion;
973 long (*fn)(void *); 972 long (*fn)(void *);
974 void *arg; 973 void *arg;
975 long ret; 974 long ret;
976}; 975};
977 976
978static void do_work_for_cpu(struct work_struct *w) 977static int do_work_for_cpu(void *_wfc)
979{ 978{
980 struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work); 979 struct work_for_cpu *wfc = _wfc;
981
982 wfc->ret = wfc->fn(wfc->arg); 980 wfc->ret = wfc->fn(wfc->arg);
981 complete(&wfc->completion);
982 return 0;
983} 983}
984 984
985/** 985/**
@@ -990,17 +990,23 @@ static void do_work_for_cpu(struct work_struct *w)
990 * 990 *
991 * This will return the value @fn returns. 991 * This will return the value @fn returns.
992 * It is up to the caller to ensure that the cpu doesn't go offline. 992 * It is up to the caller to ensure that the cpu doesn't go offline.
993 * The caller must not hold any locks which would prevent @fn from completing.
993 */ 994 */
994long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 995long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
995{ 996{
996 struct work_for_cpu wfc; 997 struct task_struct *sub_thread;
997 998 struct work_for_cpu wfc = {
998 INIT_WORK(&wfc.work, do_work_for_cpu); 999 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
999 wfc.fn = fn; 1000 .fn = fn,
1000 wfc.arg = arg; 1001 .arg = arg,
1001 queue_work_on(cpu, work_on_cpu_wq, &wfc.work); 1002 };
1002 flush_work(&wfc.work); 1003
1003 1004 sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
1005 if (IS_ERR(sub_thread))
1006 return PTR_ERR(sub_thread);
1007 kthread_bind(sub_thread, cpu);
1008 wake_up_process(sub_thread);
1009 wait_for_completion(&wfc.completion);
1004 return wfc.ret; 1010 return wfc.ret;
1005} 1011}
1006EXPORT_SYMBOL_GPL(work_on_cpu); 1012EXPORT_SYMBOL_GPL(work_on_cpu);
@@ -1016,8 +1022,4 @@ void __init init_workqueues(void)
1016 hotcpu_notifier(workqueue_cpu_callback, 0); 1022 hotcpu_notifier(workqueue_cpu_callback, 0);
1017 keventd_wq = create_workqueue("events"); 1023 keventd_wq = create_workqueue("events");
1018 BUG_ON(!keventd_wq); 1024 BUG_ON(!keventd_wq);
1019#ifdef CONFIG_SMP
1020 work_on_cpu_wq = create_workqueue("work_on_cpu");
1021 BUG_ON(!work_on_cpu_wq);
1022#endif
1023} 1025}
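
work_on_cpu() no longer routes through a dedicated single-threaded workqueue, which could deadlock whenever @fn (or a lock it takes) itself depended on that workqueue; each call now spawns a fresh kthread, binds it to the target CPU before it runs, and waits on an on-stack completion. A userspace analog using pthreads (GNU-specific affinity calls stand in for kthread_bind(), and pthread_join() plays the completion's role):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    struct work_for_cpu {
        long (*fn)(void *);
        void *arg;
        long ret;
    };

    static void *do_work_for_cpu(void *_wfc)
    {
        struct work_for_cpu *wfc = _wfc;
        wfc->ret = wfc->fn(wfc->arg);
        return NULL;
    }

    static long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
    {
        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
        pthread_attr_t attr;
        pthread_t thread;
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(cpu, &set);
        pthread_attr_init(&attr);
        /* Bind before the thread ever runs, as kthread_bind() does
         * between kthread_create() and wake_up_process(). */
        pthread_attr_setaffinity_np(&attr, sizeof(set), &set);
        if (pthread_create(&thread, &attr, do_work_for_cpu, &wfc)) {
            pthread_attr_destroy(&attr);
            return -1;
        }
        pthread_attr_destroy(&attr);
        pthread_join(thread, NULL);   /* ~wait_for_completion() */
        return wfc.ret;
    }

    static long triple(void *arg)
    {
        printf("running on cpu %d\n", sched_getcpu());
        return 3 * *(long *)arg;
    }

    int main(void)
    {
        long x = 14;
        printf("ret = %ld\n", work_on_cpu(0, triple, &x));
        return 0;
    }
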