| author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-26 16:25:47 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-26 16:25:47 -0400 |
| commit | a048d3aff8b828b6c0fa7ddd90a531248ab4e0f9 | |
| tree | 8edfb5bd152aef0d48c4a165247906db347c3a56 | |
| parent | fb3b80614438f8b73b3879a19350439d6aa402da | |
| parent | 1fe371044b21b226b96a9dd959e971b50b28c78e | |
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
ftrace: fix modular build
ftrace: disable tracing on acpi idle calls
ftrace: remove latency-tracer leftover
ftrace: only trace preempt off with preempt tracer
ftrace: fix 4d3702b6 (post-v2.6.26): WARNING: at kernel/lockdep.c:2731 check_flags (ftrace)
 drivers/acpi/processor_idle.c     |  6
 kernel/trace/trace.c              |  3
 kernel/trace/trace_irqsoff.c      |  8
 kernel/trace/trace_sched_wakeup.c | 27
 4 files changed, 28 insertions(+), 16 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d592dbb1d12a..b7f2963693a7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -272,6 +272,8 @@ static atomic_t c3_cpu_count;
 /* Common C-state entry for C2, C3, .. */
 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 {
+	/* Don't trace irqs off for idle */
+	stop_critical_timings();
 	if (cstate->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cstate);
@@ -284,6 +286,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	start_critical_timings();
 }
 #endif /* !CONFIG_CPU_IDLE */
 
@@ -1418,6 +1421,8 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
  */
 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 {
+	/* Don't trace irqs off for idle */
+	stop_critical_timings();
 	if (cx->entry_method == ACPI_CSTATE_FFH) {
 		/* Call into architectural FFH based C-state */
 		acpi_processor_ffh_cstate_enter(cx);
@@ -1432,6 +1437,7 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 		   gets asserted in time to freeze execution properly. */
 		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	}
+	start_critical_timings();
 }
 
 /**
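These two hunks are the "disable tracing on acpi idle calls" fix: the ACPI C-state paths are entered with interrupts disabled, so without the bracketing calls the irqsoff tracer would report the entire time the CPU sleeps in C2/C3 as one enormous interrupts-off latency. A minimal sketch of the same pattern for a hypothetical idle routine follows; the name `my_idle` is invented, `safe_halt()` is the x86 idle primitive, and the assumption is that `<linux/irqflags.h>` declares `stop_critical_timings()`/`start_critical_timings()` (where current trees keep them; their home has moved between kernel versions):

```c
#include <linux/irqflags.h>	/* assumed: stop/start_critical_timings() */

/* Hypothetical idle routine, entered with irqs disabled just like
 * the ACPI C-state paths above. */
static void my_idle(void)
{
	/* Exclude the sleep itself from irqs-off latency measurement. */
	stop_critical_timings();
	safe_halt();		/* x86 sti;hlt - resumes on the next interrupt */
	/* Awake again: go back to timing real irqs-off critical sections. */
	start_critical_timings();
}
```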
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 868e121c8e38..fc20e09a6cb1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1203,9 +1203,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 
 	iter->pos = *pos;
 
-	if (last_ent && !ent)
-		seq_puts(m, "\n\nvim:ft=help\n");
-
 	return ent;
 }
 
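This is the "remove latency-tracer leftover": the dropped lines appended a vim modeline so the old latency trace output could be viewed with vim's help-file folding, and they did it from `s_next()`, where the seq_file protocol does not expect output. As a reminder of the convention the fix restores, here is a minimal sketch with invented `demo_*` names: `->next()` only advances the cursor, and all writing happens in `->show()`:

```c
#include <linux/seq_file.h>

struct demo_rec { const char *name; };		/* invented record type */
static struct demo_rec *demo_get(loff_t pos);	/* invented lookup, NULL at EOF */

/* ->next(): advance and return the next record, or NULL at EOF.
 * No seq_puts()/seq_printf() here - emitting text from ->next() is
 * exactly what the removed modeline hack was doing. */
static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return demo_get(*pos);
}

/* ->show(): the one callback that writes to the seq_file. */
static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", ((struct demo_rec *)v)->name);
	return 0;
}
```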
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 421d6fe3650e..ece6cfb649fa 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -253,12 +253,14 @@ void start_critical_timings(void)
 	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
+EXPORT_SYMBOL_GPL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
 	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
+EXPORT_SYMBOL_GPL(stop_critical_timings);
 
 #ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_PROVE_LOCKING
@@ -337,12 +339,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #ifdef CONFIG_PREEMPT_TRACER
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	stop_critical_timing(a0, a1);
+	if (preempt_trace())
+		stop_critical_timing(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	start_critical_timing(a0, a1);
+	if (preempt_trace())
+		start_critical_timing(a0, a1);
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
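Two of the listed fixes land here. The `EXPORT_SYMBOL_GPL()` lines are the "fix modular build" change: the ACPI processor driver that now calls `stop_critical_timings()`/`start_critical_timings()` can be built as a module, and a module can only resolve symbols the core kernel exports. The `preempt_trace()` guards are the "only trace preempt off with preempt tracer" change: `trace_preempt_on()`/`trace_preempt_off()` fire on every preempt_enable/disable, and without the check they recorded critical timings even when only the irqsoff tracer was selected. A sketch of what the exports enable, as a hypothetical module (the module name and the `<linux/irqflags.h>` header placement are assumptions):

```c
#include <linux/module.h>
#include <linux/irqflags.h>	/* assumed: stop/start_critical_timings() */

static int __init timings_demo_init(void)
{
	/* These calls resolve at module load time only because of the
	 * EXPORT_SYMBOL_GPL() entries added above; without them the
	 * module would fail to load with "Unknown symbol". */
	stop_critical_timings();
	start_critical_timings();
	return 0;
}

static void __exit timings_demo_exit(void)
{
}

module_init(timings_demo_init);
module_exit(timings_demo_exit);
MODULE_LICENSE("GPL");		/* required: these are _GPL exports */
```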
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 3c8d61df4474..e303ccb62cdf 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,7 +26,8 @@ static struct task_struct *wakeup_task;
 static int wakeup_cpu;
 static unsigned wakeup_prio = -1;
 
-static DEFINE_SPINLOCK(wakeup_lock);
+static raw_spinlock_t wakeup_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 static void __wakeup_reset(struct trace_array *tr);
 
@@ -56,7 +57,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(disabled != 1))
 		goto out;
 
-	spin_lock_irqsave(&wakeup_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&wakeup_lock);
 
 	if (unlikely(!wakeup_task))
 		goto unlock;
@@ -71,7 +73,8 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 	trace_function(tr, data, ip, parent_ip, flags);
 
  unlock:
-	spin_unlock_irqrestore(&wakeup_lock, flags);
+	__raw_spin_unlock(&wakeup_lock);
+	local_irq_restore(flags);
 
  out:
 	atomic_dec(&data->disabled);
@@ -145,7 +148,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 	if (likely(disabled != 1))
 		goto out;
 
-	spin_lock_irqsave(&wakeup_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&wakeup_lock);
 
 	/* We could race with grabbing wakeup_lock */
 	if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -174,7 +178,8 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
 
 out_unlock:
 	__wakeup_reset(tr);
-	spin_unlock_irqrestore(&wakeup_lock, flags);
+	__raw_spin_unlock(&wakeup_lock);
+	local_irq_restore(flags);
 out:
 	atomic_dec(&tr->data[cpu]->disabled);
 }
@@ -209,8 +214,6 @@ static void __wakeup_reset(struct trace_array *tr)
 	struct trace_array_cpu *data;
 	int cpu;
 
-	assert_spin_locked(&wakeup_lock);
-
 	for_each_possible_cpu(cpu) {
 		data = tr->data[cpu];
 		tracing_reset(data);
@@ -229,9 +232,11 @@ static void wakeup_reset(struct trace_array *tr)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&wakeup_lock, flags);
+	local_irq_save(flags);
+	__raw_spin_lock(&wakeup_lock);
 	__wakeup_reset(tr);
-	spin_unlock_irqrestore(&wakeup_lock, flags);
+	__raw_spin_unlock(&wakeup_lock);
+	local_irq_restore(flags);
 }
 
 static void
@@ -252,7 +257,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 		goto out;
 
 	/* interrupts should be off from try_to_wake_up */
-	spin_lock(&wakeup_lock);
+	__raw_spin_lock(&wakeup_lock);
 
 	/* check for races. */
 	if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -274,7 +279,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
 		       CALLER_ADDR1, CALLER_ADDR2, flags);
 
 out_locked:
-	spin_unlock(&wakeup_lock);
+	__raw_spin_unlock(&wakeup_lock);
 out:
 	atomic_dec(&tr->data[cpu]->disabled);
 }
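This conversion matches the lockdep `check_flags()` warning fix in the list above. `wakeup_lock` is taken from inside tracer callbacks on the scheduler's wakeup and context-switch paths; a `DEFINE_SPINLOCK()` lock goes through lockdep and the very irq-flags tracing hooks being exercised, so acquiring it there recursed back into the tracer and tripped the warning. The arch-level raw lock with hand-rolled irq handling keeps lockdep and tracing out of the loop, which is also why `assert_spin_locked()` had to go: it does not apply to raw locks. A sketch of the pattern (note that this 2.6.26-era `raw_spinlock_t`/`__raw_spin_lock()` API was later renamed `arch_spinlock_t`/`arch_spin_lock()`; `demo_*` names are invented):

```c
/* Lock protecting state shared with tracer callbacks; raw, so that
 * taking it never calls back into lockdep or the tracer itself. */
static raw_spinlock_t demo_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void demo_touch_tracer_state(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* irqs off by hand: raw locks do no
					 * irqsave bookkeeping of their own */
	__raw_spin_lock(&demo_lock);	/* no lockdep, no tracing hooks */
	/* ... modify the shared tracer state here ... */
	__raw_spin_unlock(&demo_lock);
	local_irq_restore(flags);
}
```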
