Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_tree.c          |  3
-rw-r--r--  kernel/irq/handle.c          |  2
-rw-r--r--  kernel/irq/manage.c          |  5
-rw-r--r--  kernel/lockdep.c             | 22
-rw-r--r--  kernel/panic.c               |  2
-rw-r--r--  kernel/posix-cpu-timers.c    |  8
-rw-r--r--  kernel/power/disk.c          | 51
-rw-r--r--  kernel/power/main.c          | 24
-rw-r--r--  kernel/power/swap.c          |  2
-rw-r--r--  kernel/ptrace.c              | 11
-rw-r--r--  kernel/rcupdate.c            | 18
-rw-r--r--  kernel/rcutree.c             | 19
-rw-r--r--  kernel/rcutree_trace.c       | 14
-rw-r--r--  kernel/resource.c            | 46
-rw-r--r--  kernel/sched.c               | 12
-rw-r--r--  kernel/slow-work.c           |  4
-rw-r--r--  kernel/softirq.c             |  4
-rw-r--r--  kernel/sys.c                 | 24
-rw-r--r--  kernel/sysctl.c              | 25
-rw-r--r--  kernel/time/clocksource.c    |  8
-rw-r--r--  kernel/time/jiffies.c        |  2
-rw-r--r--  kernel/time/tick-common.c    | 12
-rw-r--r--  kernel/time/timekeeping.c    | 12
-rw-r--r--  kernel/trace/trace_branch.c  |  8
-rw-r--r--  kernel/trace/trace_power.c   |  7
25 files changed, 175 insertions(+), 170 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab9525568..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
 		dentry = dget(path.dentry);
 		path_put(&path);
 
-		if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-			follow_up(&mnt, &dentry);
-
 		list_add_tail(&list, &tagged->mnt_list);
 
 		mutex_lock(&audit_filter_mutex);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e68bb5aebe02..5dd2572993cf 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -360,8 +360,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd4cd2f..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT)
 		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 47b201ecc6df..8bbeef996c76 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2492,13 +2492,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2507,12 +2514,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
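
The lockdep hunk above is a reordering: every field of the map now gets a sane default before any validation check can return early, so a half-initialized lockdep_map can no longer escape. A minimal userspace sketch of that init-before-validate pattern (plain C, not kernel code; the struct and names are illustrative):

	#include <stdio.h>

	struct map {
		const char *name;
		void *cache;
	};

	/* Defaults are assigned before any validation, so every early
	 * return still leaves a fully initialized (if inert) object. */
	static void map_init(struct map *m, const char *name)
	{
		m->cache = NULL;		/* defaults first */

		if (!name) {
			m->name = "NULL";	/* degrade instead of half-init */
			return;
		}
		m->name = name;
	}

	int main(void)
	{
		struct map m;

		map_init(&m, NULL);
		printf("%s\n", m.name);		/* "NULL", never garbage */
		return 0;
	}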
diff --git a/kernel/panic.c b/kernel/panic.c
index 934fb377f4b3..3dcaa1661357 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -221,7 +221,7 @@ void add_taint(unsigned flag)
 	 *  post-warning case.
 	 */
 	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
 
 	set_bit(flag, &tainted_mask);
 }
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c9dcf98b4463..bece7c0b67b2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * timer call will interfere.
 	 */
 	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-		int firing;
+		int cpu_firing;
+
 		spin_lock(&timer->it_lock);
 		list_del_init(&timer->it.cpu.entry);
-		firing = timer->it.cpu.firing;
+		cpu_firing = timer->it.cpu.firing;
 		timer->it.cpu.firing = 0;
 		/*
 		 * The firing flag is -1 if we collided with a reset
 		 * of the timer, which already reported this
 		 * almost-firing as an overrun. So don't generate an event.
 		 */
-		if (likely(firing >= 0)) {
+		if (likely(cpu_firing >= 0))
 			cpu_timer_fire(timer);
-		}
 		spin_unlock(&timer->it_lock);
 	}
 }
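
The rename from firing to cpu_firing presumably avoids shadowing the firing list head declared earlier in run_posix_cpu_timers(); treat that motivation as inferred from the hunk rather than stated in it. A small standalone illustration of the shadowing hazard:

	#include <stdio.h>

	int main(void)
	{
		int firing = 42;		/* outer object, like the list head */

		{
			int firing = 0;		/* shadows the outer name */

			printf("inner: %d\n", firing);
		}
		printf("outer: %d\n", firing);	/* still 42, but easy to misread */
		return 0;
	}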
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0854770b63b9..e71ca9cd81b2 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -646,13 +646,6 @@ static int software_resume(void)
 		return 0;
 
 	/*
-	 * We can't depend on SCSI devices being available after loading one of
-	 * their modules if scsi_complete_async_scans() is not called and the
-	 * resume device usually is a SCSI one.
-	 */
-	scsi_complete_async_scans();
-
-	/*
 	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
 	 * is configured into the kernel. Since the regular hibernate
 	 * trigger path is via sysfs which takes a buffer mutex before
@@ -663,32 +656,42 @@ static int software_resume(void)
 	 * here to avoid lockdep complaining.
 	 */
 	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+	if (swsusp_resume_device)
+		goto Check_image;
+
+	if (!strlen(resume_file)) {
+		error = -ENOENT;
+		goto Unlock;
+	}
+
+	pr_debug("PM: Checking image partition %s\n", resume_file);
+
+	/* Check if the device is there */
+	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
-		if (!strlen(resume_file)) {
-			mutex_unlock(&pm_mutex);
-			return -ENOENT;
-		}
 		/*
 		 * Some device discovery might still be in progress; we need
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
+
 		swsusp_resume_device = name_to_dev_t(resume_file);
-		pr_debug("PM: Resume from partition %s\n", resume_file);
-	} else {
-		pr_debug("PM: Resume from partition %d:%d\n",
-			MAJOR(swsusp_resume_device),
-			MINOR(swsusp_resume_device));
+		if (!swsusp_resume_device) {
+			error = -ENODEV;
+			goto Unlock;
+		}
 	}
 
-	if (noresume) {
-		/**
-		 * FIXME: If noresume is specified, we need to find the
-		 * partition and reset it back to normal swap space.
-		 */
-		mutex_unlock(&pm_mutex);
-		return 0;
-	}
+ Check_image:
+	pr_debug("PM: Resume from partition %d:%d\n",
+		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
 	pr_debug("PM: Checking hibernation image.\n");
 	error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f172f41858bb..f99ed6a75eac 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state)
 
 	device_pm_lock();
 
+	if (suspend_ops->prepare) {
+		error = suspend_ops->prepare();
+		if (error)
+			goto Done;
+	}
+
 	error = device_power_down(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
-		goto Done;
+		goto Platfrom_finish;
 	}
 
-	if (suspend_ops->prepare) {
-		error = suspend_ops->prepare();
+	if (suspend_ops->prepare_late) {
+		error = suspend_ops->prepare_late();
 		if (error)
 			goto Power_up_devices;
 	}
 
 	if (suspend_test(TEST_PLATFORM))
-		goto Platfrom_finish;
+		goto Platform_wake;
 
 	error = disable_nonboot_cpus();
 	if (error || suspend_test(TEST_CPUS))
@@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state)
  Enable_cpus:
 	enable_nonboot_cpus();
 
- Platfrom_finish:
-	if (suspend_ops->finish)
-		suspend_ops->finish();
+ Platform_wake:
+	if (suspend_ops->wake)
+		suspend_ops->wake();
 
  Power_up_devices:
 	device_power_up(PMSG_RESUME);
 
+ Platfrom_finish:
+	if (suspend_ops->finish)
+		suspend_ops->finish();
+
  Done:
 	device_pm_unlock();
 
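
The suspend_enter() hunks establish a strict pairing between entry callbacks and their unwind labels: prepare() is undone by finish(), prepare_late() by wake(), and each failure jumps to the label just past the steps that already succeeded. A compilable sketch of that goto-unwind shape, with stub callbacks standing in for the real suspend_ops (names and bodies are illustrative):

	#include <stdio.h>

	/* Stubs standing in for the suspend_ops callbacks. */
	static int prepare(void)      { puts("prepare");      return 0; }
	static int power_down(void)   { puts("power_down");   return 0; }
	static int prepare_late(void) { puts("prepare_late"); return 0; }
	static void wake(void)        { puts("wake"); }
	static void power_up(void)    { puts("power_up"); }
	static void finish(void)      { puts("finish"); }

	static int enter(void)
	{
		int error;

		error = prepare();
		if (error)
			goto Done;

		error = power_down();
		if (error)
			goto Platform_finish;

		error = prepare_late();
		if (error)
			goto Power_up_devices;

		/* ...the sleep state would be entered here... */

		wake();				/* pairs with prepare_late() */
	 Power_up_devices:
		power_up();			/* pairs with power_down() */
	 Platform_finish:
		finish();			/* pairs with prepare() */
	 Done:
		return error;
	}

	int main(void)
	{
		return enter();
	}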
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319e489c..8ba052c86d48 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	if (!bio)
-		return -ENOMEM;
 	bio->bi_sector = page_off * (PAGE_SIZE >> 9);
 	bio->bi_bdev = resume_bdev;
 	bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 64191fa09b7e..0692ab5a0d67 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
 	/* Protect exec's credential calculations against our interference;
 	 * SUID, SGID and LSM creds get determined differently under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
 	if (retval < 0)
 		goto out;
 
@@ -232,7 +232,7 @@ repeat:
 bad:
 	write_unlock_irqrestore(&tasklist_lock, flags);
 	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+	mutex_unlock(&task->cred_exec_mutex);
 out:
 	return retval;
 }
@@ -604,10 +604,11 @@ repeat:
 	ret = security_ptrace_traceme(current->parent);
 
 	/*
-	 * Set the ptrace bit in the process ptrace flags.
-	 * Then link us on our parent's ptraced list.
+	 * Check PF_EXITING to ensure ->real_parent has not passed
+	 * exit_ptrace(). Otherwise we don't report the error but
+	 * pretend ->real_parent untraces us right after return.
 	 */
-	if (!ret) {
+	if (!ret && !(current->real_parent->flags & PF_EXITING)) {
 		current->ptrace |= PT_PTRACED;
 		__ptrace_link(current, current->real_parent);
 	}
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c7b8457d0d2..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type)
 	}
 }
 
-static inline void wait_migrated_callbacks(void);
+static inline void wait_migrated_callbacks(void)
+{
+	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
@@ -179,21 +186,12 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 static void rcu_migrate_callback(struct rcu_head *notused)
 {
 	if (atomic_dec_and_test(&rcu_migrate_type_count))
 		wake_up(&rcu_migrate_wq);
 }
 
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-}
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7f3266922572..d2a372fb0b9b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
 	rdp->qs_pending = 1;
 	rdp->passed_quiesc = 0;
 	rdp->gpnum = rsp->gpnum;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 }
 
 /*
@@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	rsp->gpnum++;
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
 	dyntick_record_completed(rsp, rsp->completed - 1);
 	note_new_gpnum(rsp, rdp);
@@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 {
 	unsigned long flags;
 	long lastcomp;
-	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
@@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 		return;	/* Someone else is already on the job. */
 	}
 	if (relaxed &&
-	    (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
 		goto unlock_ret; /* no emergency and done recently. */
 	rsp->n_force_qs++;
 	spin_lock(&rnp->lock);
 	lastcomp = rsp->completed;
 	signaled = rsp->signaled;
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-	rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-				      RCU_JIFFIES_TILL_FORCE_QS;
 	if (lastcomp == rsp->gpnum) {
 		rsp->n_force_qs_ngp++;
 		spin_unlock(&rnp->lock);
@@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
 	 */
-	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	    (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 
 	/*
@@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	if (unlikely(++rdp->qlen > qhimark)) {
 		rdp->blimit = LONG_MAX;
 		force_quiescent_state(rsp, 0);
-	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-		   (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
 }
@@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
 	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
-	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-	     (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
 		return 1;
 
 	/* nothing to do */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4ee954f6a8d5..4b1875ba9404 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+	seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? '!' : ' ',
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   (int)(rdp->n_rcu_pending & 0xffff));
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, " dt=%d/%d dn=%d df=%lu",
 		   rdp->dynticks->dynticks,
@@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 {
 	if (!rdp->beenonline)
 		return;
-	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+	seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
 		   rdp->cpu,
 		   cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
 		   rdp->completed, rdp->gpnum,
 		   rdp->passed_quiesc, rdp->passed_quiesc_completed,
-		   rdp->qs_pending,
-		   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-		   rdp->n_rcu_pending);
+		   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
 	seq_printf(m, ",%d,%d,%d,%lu",
 		   rdp->dynticks->dynticks,
@@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
-	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\",");
+	seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
 	seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/resource.c b/kernel/resource.c
index fd5d7d574bb9..ac5f3a36923f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root,
 	res->end = end;
 	res->flags = IORESOURCE_BUSY;
 
-	for (;;) {
-		conflict = __request_resource(parent, res);
-		if (!conflict)
-			break;
-		if (conflict != parent) {
-			parent = conflict;
-			if (!(conflict->flags & IORESOURCE_BUSY))
-				continue;
-		}
-
-		/* Uhhuh, that didn't work out.. */
-		kfree(res);
-		res = NULL;
-		break;
-	}
-
-	if (!res) {
-		/* failed, split and try again */
-
-		/* conflict covered whole area */
-		if (conflict->start <= start && conflict->end >= end)
-			return;
+	conflict = __request_resource(parent, res);
+	if (!conflict)
+		return;
 
-		if (conflict->start > start)
-			__reserve_region_with_split(root, start, conflict->start-1, name);
-		if (!(conflict->flags & IORESOURCE_BUSY)) {
-			resource_size_t common_start, common_end;
+	/* failed, split and try again */
+	kfree(res);
 
-			common_start = max(conflict->start, start);
-			common_end = min(conflict->end, end);
-			if (common_start < common_end)
-				__reserve_region_with_split(root, common_start, common_end, name);
-		}
-		if (conflict->end < end)
-			__reserve_region_with_split(root, conflict->end+1, end, name);
-	}
+	/* conflict covered whole area */
+	if (conflict->start <= start && conflict->end >= end)
+		return;
 
+	if (conflict->start > start)
+		__reserve_region_with_split(root, start, conflict->start-1, name);
+	if (conflict->end < end)
+		__reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
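
The resource.c rewrite drops the iterative retry loop and simply recurses on whatever part of the requested range sticks out on either side of the conflict. A toy model of that interval-splitting recursion (not the kernel API; one hard-coded busy range stands in for the resource tree):

	#include <stdio.h>

	struct range { long start, end; };

	static struct range busy = { 40, 60 };	/* pretend this is taken */

	/* Try to claim [start,end]; on conflict, recurse on the
	 * remainders left and right of the conflicting range. */
	static void reserve(long start, long end)
	{
		if (end < busy.start || start > busy.end) {	/* no conflict */
			printf("reserved [%ld, %ld]\n", start, end);
			return;
		}
		if (busy.start <= start && busy.end >= end)
			return;				/* fully covered, give up */
		if (busy.start > start)
			reserve(start, busy.start - 1);	/* left remainder */
		if (busy.end < end)
			reserve(busy.end + 1, end);	/* right remainder */
	}

	int main(void)
	{
		reserve(0, 100);	/* reserves [0,39] and [61,100] */
		return 0;
	}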
diff --git a/kernel/sched.c b/kernel/sched.c
index 9cdedbd181ce..14a19b17674e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4728,7 +4728,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (user_tick)
 		account_user_time(p, one_jiffy, one_jiffy_scaled);
-	else if (p != rq->idle)
+	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
 		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
 				    one_jiffy_scaled);
 	else
@@ -4842,7 +4842,7 @@ void scheduler_tick(void)
 #endif
 }
 
-unsigned long get_parent_ip(unsigned long addr)
+notrace unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
 		addr = CALLER_ADDR2;
@@ -7363,8 +7363,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-		printk(KERN_CONT " %s (__cpu_power = %d)", str,
-						group->__cpu_power);
+
+		printk(KERN_CONT " %s", str);
+		if (group->__cpu_power != SCHED_LOAD_SCALE) {
+			printk(KERN_CONT " (__cpu_power = %d)",
+				group->__cpu_power);
+		}
 
 		group = group->next;
 	} while (group != sd->groups);
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index cf2bc01186ef..b28d19135f43 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -609,14 +609,14 @@ void slow_work_unregister_user(void)
 	if (slow_work_user_count == 0) {
 		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
 		slow_work_threads_should_exit = true;
+		del_timer_sync(&slow_work_cull_timer);
+		del_timer_sync(&slow_work_oom_timer);
 		wake_up_all(&slow_work_thread_wq);
 		wait_for_completion(&slow_work_last_thread_exited);
 		printk(KERN_NOTICE "Slow work thread pool:"
 		       " Shut down complete\n");
 	}
 
-	del_timer_sync(&slow_work_cull_timer);
-
 	mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d4ba347a872d..dc4d0cfdcb2d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -471,9 +471,9 @@ void tasklet_kill(struct tasklet_struct *t)
 		printk("Attempt to kill tasklet from interrupt\n");
 
 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-		do
+		do {
 			yield();
-		while (test_bit(TASKLET_STATE_SCHED, &t->state));
+		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
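
The tasklet_kill() change is purely stylistic: the unbraced do/while was legal C, but braces make the inner loop's extent obvious inside the enclosing while. Both forms side by side, as a standalone program:

	#include <stdio.h>

	int main(void)
	{
		int n = 4;

		do
			n--;
		while (n > 2);		/* legal, but easy to misread */

		do {
			n--;
		} while (n > 0);	/* the preferred, braced form */

		printf("%d\n", n);	/* prints 0 */
		return 0;
	}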
diff --git a/kernel/sys.c b/kernel/sys.c
index 51dbb55604e8..e7998cf31498 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -360,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		void __user *, arg)
 {
 	char buffer[256];
+	int ret = 0;
 
 	/* We only trust the superuser with rebooting the system. */
 	if (!capable(CAP_SYS_BOOT))
@@ -397,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 		kernel_halt();
 		unlock_kernel();
 		do_exit(0);
-		break;
+		panic("cannot halt");
 
 	case LINUX_REBOOT_CMD_POWER_OFF:
 		kernel_power_off();
@@ -417,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 
 #ifdef CONFIG_KEXEC
 	case LINUX_REBOOT_CMD_KEXEC:
-	{
-		int ret;
-		ret = kernel_kexec();
-		unlock_kernel();
-		return ret;
-	}
+		ret = kernel_kexec();
+		break;
 #endif
 
 #ifdef CONFIG_HIBERNATION
 	case LINUX_REBOOT_CMD_SW_SUSPEND:
-	{
-		int ret = hibernate();
-		unlock_kernel();
-		return ret;
-	}
+		ret = hibernate();
+		break;
 #endif
 
 	default:
-		unlock_kernel();
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	}
 	unlock_kernel();
-	return 0;
+	return ret;
 }
 
 static void deferred_cad(struct work_struct *dummy)
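
The sys_reboot() hunks convert several per-case return paths into a single exit: each case now sets ret and breaks, so unlock_kernel() appears exactly once. A minimal sketch of that single-exit shape (stub lock functions and names are illustrative):

	#include <stdio.h>
	#include <errno.h>

	static int locked;

	static void lock_kernel_stub(void)   { locked = 1; }
	static void unlock_kernel_stub(void) { locked = 0; }

	static int do_cmd(int cmd)
	{
		int ret = 0;

		lock_kernel_stub();
		switch (cmd) {
		case 0:				/* the kexec/hibernate-style cases */
			ret = 0;
			break;
		default:
			ret = -EINVAL;
			break;
		}
		unlock_kernel_stub();		/* exactly one unlock per path */
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", do_cmd(0), do_cmd(99));
		return 0;
	}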
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4286b62b34a0..ea78fa101ad6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,9 @@ static unsigned long one_ul = 1;
 static int one_hundred = 100;
 static int one_thousand = 1000;
 
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
@@ -902,16 +905,6 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "scan_unevictable_pages",
-		.data		= &scan_unevictable_pages,
-		.maxlen		= sizeof(scan_unevictable_pages),
-		.mode		= 0644,
-		.proc_handler	= &scan_unevictable_handler,
-	},
-#endif
 #ifdef CONFIG_SLOW_WORK
 	{
 		.ctl_name	= CTL_UNNUMBERED,
@@ -1016,7 +1009,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one_ul,
+		.extra1		= &dirty_bytes_min,
 	},
 	{
 		.procname	= "dirty_writeback_centisecs",
@@ -1302,6 +1295,16 @@ static struct ctl_table vm_table[] = {
 		.extra2		= &one,
 	},
 #endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "scan_unevictable_pages",
+		.data		= &scan_unevictable_pages,
+		.maxlen		= sizeof(scan_unevictable_pages),
+		.mode		= 0644,
+		.proc_handler	= &scan_unevictable_handler,
+	},
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931a7fe7..ecfd7b5187e0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 
 	resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-	wdnow = watchdog->read();
+	wdnow = watchdog->read(watchdog);
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-		csnow = cs->read();
+		csnow = cs->read(cs);
 
 		if (unlikely(resumed)) {
 			cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 
 		list_add(&cs->wd_list, &watchdog_list);
 		if (!started && watchdog) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
 				     cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 			cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
 		/* Start if list is not empty */
 		if (!list_empty(&watchdog_list)) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires =
 				jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f197560f3b..c3f6c30816e3 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
  */
 #define JIFFIES_SHIFT	8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
 	return (cycle_t) jiffies;
 }
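
The clocksource.c and jiffies.c hunks track the same API change: the read() callback now receives the clocksource being read, so one routine can serve several instances from per-instance state instead of globals. A self-contained sketch of the pattern (the struct and field names here are illustrative, not the kernel's struct clocksource):

	#include <stdio.h>

	typedef unsigned long long cycle_t;

	struct clocksource {
		const char *name;
		cycle_t counter;
		cycle_t (*read)(struct clocksource *cs);
	};

	/* One read routine shared by all instances: the argument tells
	 * it which counter to sample, so no global state is needed. */
	static cycle_t generic_read(struct clocksource *cs)
	{
		return cs->counter;
	}

	int main(void)
	{
		struct clocksource a = { "a", 123, generic_read };
		struct clocksource b = { "b", 456, generic_read };

		printf("%llu %llu\n", a.read(&a), b.read(&b));
		return 0;
	}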
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 21a5ca849514..83c4417b6a3c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
 	for (;;) {
 		if (!clockevents_program_event(dev, next, ktime_get()))
 			return;
-		tick_periodic(cpu);
+		/*
+		 * Have to be careful here. If we're in oneshot mode,
+		 * before we call tick_periodic() in a loop, we need
+		 * to be sure we're using a real hardware clocksource.
+		 * Otherwise we could get trapped in an infinite
+		 * loop, as the tick_periodic() increments jiffies,
+		 * when then will increment time, posibly causing
+		 * the loop to trigger again and again.
+		 */
+		if (timekeeping_valid_for_hres())
+			tick_periodic(cpu);
 		next = ktime_add(next, tick_period);
 	}
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6598d1..687dff49f6e7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
  */
 static void change_clocksource(void)
 {
-	struct clocksource *new;
+	struct clocksource *new, *old;
 
 	new = clocksource_get_next();
 
@@ -191,11 +191,16 @@ static void change_clocksource(void)
 
 	clocksource_forward_now();
 
-	new->raw_time = clock->raw_time;
+	if (clocksource_enable(new))
+		return;
 
+	new->raw_time = clock->raw_time;
+	old = clock;
 	clock = new;
+	clocksource_disable(old);
+
 	clock->cycle_last = 0;
-	clock->cycle_last = clocksource_read(new);
+	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
 	ntp_init();
 
 	clock = clocksource_get_next();
+	clocksource_enable(clock);
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
 	clock->cycle_last = clocksource_read(clock);
 
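
change_clocksource() now follows a careful handover order: enable the replacement first (abandoning the swap with nothing changed if that fails), switch the global pointer, and only then disable the old source once nothing can reach it. A compilable sketch of that ordering with illustrative names:

	#include <stdio.h>

	struct source { const char *name; int enabled; };

	static struct source *current_src;

	static int enable(struct source *s)   { s->enabled = 1; return 0; }
	static void disable(struct source *s) { s->enabled = 0; }

	static void change_source(struct source *new)
	{
		struct source *old;

		if (enable(new))	/* fail here and nothing changed */
			return;

		old = current_src;
		current_src = new;	/* switch only once new is usable */
		disable(old);		/* old is unreachable, safe to stop */
	}

	int main(void)
	{
		struct source a = { "a", 1 }, b = { "b", 0 };

		current_src = &a;
		change_source(&b);
		printf("%s enabled=%d, a enabled=%d\n",
		       current_src->name, current_src->enabled, a.enabled);
		return 0;
	}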
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 8e64e604f5a7..7a7a9fd249a9 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -157,6 +157,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
 	return TRACE_TYPE_HANDLED;
 }
 
+static void branch_print_header(struct seq_file *s)
+{
+	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
+		"  FUNC:FILE:LINE\n");
+	seq_puts(s, "#              | |       |          |         |   "
+		"    |\n");
+}
 
 static struct trace_event trace_branch_event = {
 	.type		= TRACE_BRANCH,
@@ -171,6 +178,7 @@ static struct tracer branch_trace __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_branch,
 #endif /* CONFIG_FTRACE_SELFTEST */
+	.print_header	= branch_print_header,
 };
 
 __init static int init_branch_tracer(void)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 810a5b7cf1c5..8a30d9874cd4 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -190,6 +190,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter)
 	return TRACE_TYPE_UNHANDLED;
 }
 
+static void power_print_header(struct seq_file *s)
+{
+	seq_puts(s, "#   TIMESTAMP      STATE  EVENT\n");
+	seq_puts(s, "#       |            |      |\n");
+}
+
 static struct tracer power_tracer __read_mostly =
 {
 	.name		= "power",
@@ -198,6 +204,7 @@ static struct tracer power_tracer __read_mostly =
 	.stop		= stop_power_trace,
 	.reset		= power_trace_reset,
 	.print_line	= power_print_line,
+	.print_header	= power_print_header,
 };
 
 static int init_power_trace(void)