author    Thomas Gleixner <tglx@linutronix.de>    2011-05-02 15:37:03 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2011-05-02 15:37:08 -0400
commit    3687a2c0d81b23d30db4384ca804a701fc686e16 (patch)
tree      8f0582f6939ef9f0f7079bbbcc4025e46ea19f79 /kernel
parent    b4d246b12410b53506c311e5e0b6abb71ead65c6 (diff)
parent    c7bcecbe98fe29e87ac7d01c70c5998806e0989f (diff)

Merge branch 'linus' into timers/core

Reason: Pick up the hrtimer_clock_to_base_table fix from mainline

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/hrtimer.c     | 10
-rw-r--r--   kernel/trace/Kconfig |  2
-rw-r--r--   kernel/watchdog.c    |  5
-rw-r--r--   kernel/workqueue.c   |  8

4 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 9017478c5d4c..87fdb3f8db14 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -81,7 +81,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 	}
 };
 
-static int hrtimer_clock_to_base_table[MAX_CLOCKS];
+static int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+	[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
+	[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
+	[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+};
 
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 {
@@ -1722,10 +1726,6 @@ static struct notifier_block __cpuinitdata hrtimers_nb = {
 
 void __init hrtimers_init(void)
 {
-	hrtimer_clock_to_base_table[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME;
-	hrtimer_clock_to_base_table[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC;
-	hrtimer_clock_to_base_table[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME;
-
 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 			   (void *)(long)smp_processor_id());
 	register_cpu_notifier(&hrtimers_nb);
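Note on the hunk above: the point of the mainline fix being picked up is that a designated initializer makes the clockid-to-base mapping valid as soon as the image is loaded, instead of only after hrtimers_init() has run. A minimal user-space sketch of that pattern follows; the enum values, clock_to_base and clockid_to_base names are made up for illustration and are not the kernel's definitions.

#include <stdio.h>

/* stand-ins for the kernel's clock ids and hrtimer base indices */
enum { CLK_REALTIME, CLK_MONOTONIC, CLK_BOOTTIME, MAX_CLK };
enum { BASE_MONOTONIC, BASE_REALTIME, BASE_BOOTTIME };

/* filled at compile time, mirroring the new hrtimer_clock_to_base_table */
static const int clock_to_base[MAX_CLK] = {
	[CLK_REALTIME]  = BASE_REALTIME,
	[CLK_MONOTONIC] = BASE_MONOTONIC,
	[CLK_BOOTTIME]  = BASE_BOOTTIME,
};

static int clockid_to_base(int clock_id)
{
	return clock_to_base[clock_id];
}

int main(void)
{
	/* works even though no runtime initialization has happened yet */
	printf("CLK_REALTIME -> base %d\n", clockid_to_base(CLK_REALTIME));
	return 0;
}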
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 61d7d59f4a1a..2ad39e556cb4 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -141,7 +141,7 @@ if FTRACE
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	select FRAME_POINTER if !ARM_UNWIND && !S390
+	select FRAME_POINTER if !ARM_UNWIND && !S390 && !MICROBLAZE
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 140dce750450..14733d4d156b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -430,9 +430,12 @@ static int watchdog_enable(int cpu)
 	p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
 	if (IS_ERR(p)) {
 		printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
-		if (!err)
+		if (!err) {
 			/* if hardlockup hasn't already set this */
 			err = PTR_ERR(p);
+			/* and disable the perf event */
+			watchdog_nmi_disable(cpu);
+		}
 		goto out;
 	}
 	kthread_bind(p, cpu);
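Note on the hunk above: this is a plain error-path fix; when the softlockup kthread cannot be created, the hardlockup perf event that was already enabled must be torn down again, and the first error seen is the one reported. A hedged user-space sketch of that shape follows; the helper names are hypothetical stand-ins, not the kernel's watchdog or kthread API.

#include <errno.h>
#include <stdio.h>

static int enable_nmi_watchdog(int cpu)      { (void)cpu; return 0; }
static void disable_nmi_watchdog(int cpu)    { printf("nmi watchdog for %d disabled again\n", cpu); }
static int create_softlockup_thread(int cpu) { (void)cpu; return -ENOMEM; }  /* pretend it fails */

static int watchdog_enable_model(int cpu)
{
	int err = enable_nmi_watchdog(cpu);
	int ret = create_softlockup_thread(cpu);

	if (ret < 0) {
		fprintf(stderr, "softlockup watchdog for %d failed\n", cpu);
		if (!err) {
			/* keep this error only if the nmi step did not already set one */
			err = ret;
			/* and undo the earlier enable, mirroring watchdog_nmi_disable() */
			disable_nmi_watchdog(cpu);
		}
		goto out;
	}
out:
	return err;
}

int main(void)
{
	return watchdog_enable_model(0) ? 1 : 0;
}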
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8859a41806dd..e3378e8d3a5c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1291,8 +1291,14 @@ __acquires(&gcwq->lock)
 			return true;
 		spin_unlock_irq(&gcwq->lock);
 
-		/* CPU has come up in between, retry migration */
+		/*
+		 * We've raced with CPU hot[un]plug. Give it a breather
+		 * and retry migration. cond_resched() is required here;
+		 * otherwise, we might deadlock against cpu_stop trying to
+		 * bring down the CPU on non-preemptive kernel.
+		 */
 		cpu_relax();
+		cond_resched();
 	}
 }
 
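Note on the hunk above: the new comment spells out why cpu_relax() alone is not enough; on a non-preemptive kernel the retry loop can spin while the task it is implicitly waiting on (cpu_stop) never gets to run, so the loop must also give up the CPU. A rough user-space analogue follows, using sched_yield() where the kernel uses cond_resched(); try_bind() is a made-up stand-in for the racy bind attempt.

#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static int attempts;

/* stand-in for an operation that keeps failing until another task makes progress */
static bool try_bind(void)
{
	return ++attempts >= 3;   /* pretend the third attempt succeeds */
}

int main(void)
{
	while (!try_bind()) {
		/* let whatever we raced with run before retrying */
		sched_yield();
	}
	printf("bound after %d attempts\n", attempts);
	return 0;
}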