author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 15:54:49 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 15:54:49 -0400
commit | 5d70f79b5ef6ea2de4f72a37b2d96e2601e40a22 | (patch)
tree | a0d6de0930ba83ecf4629c2e2e261f5eaa2d8f33 | /kernel/watchdog.c
parent | 888a6f77e0418b049f83d37547c209b904d30af4 | (diff)
parent | 750ed158bf6c782d2813da1bca2c824365a0b777 | (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (163 commits)
tracing: Fix compile issue for trace_sched_wakeup.c
[S390] hardirq: remove pointless header file includes
[IA64] Move local_softirq_pending() definition
perf, powerpc: Fix power_pmu_event_init to not use event->ctx
ftrace: Remove recursion between recordmcount and scripts/mod/empty
jump_label: Add COND_STMT(), reducer wrappery
perf: Optimize sw events
perf: Use jump_labels to optimize the scheduler hooks
jump_label: Add atomic_t interface
jump_label: Use more consistent naming
perf, hw_breakpoint: Fix crash in hw_breakpoint creation
perf: Find task before event alloc
perf: Fix task refcount bugs
perf: Fix group moving
irq_work: Add generic hardirq context callbacks
perf_events: Fix transaction recovery in group_sched_in()
perf_events: Fix bogus AMD64 generic TLB events
perf_events: Fix bogus context time tracking
tracing: Remove parent recording in latency tracer graph options
tracing: Use one prologue for the preempt irqs off tracer function tracers
...
Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r-- | kernel/watchdog.c | 41
1 file changed, 15 insertions, 26 deletions
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 2feb2870d3a1..bafba687a6d8 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -43,7 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __read_mostly did_panic;
 static int __initdata no_watchdog;
 
 
@@ -187,18 +186,6 @@ static int is_softlockup(unsigned long touch_ts)
 	return 0;
 }
 
-static int
-watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
-{
-	did_panic = 1;
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block panic_block = {
-	.notifier_call = watchdog_panic,
-};
-
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static struct perf_event_attr wd_hw_attr = {
 	.type		= PERF_TYPE_HARDWARE,
@@ -371,14 +358,14 @@ static int watchdog_nmi_enable(int cpu)
 	/* Try to register using hardware perf events */
 	wd_attr = &wd_hw_attr;
 	wd_attr->sample_period = hw_nmi_get_sample_period();
-	event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
+	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
 	if (!IS_ERR(event)) {
 		printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
 		goto out_save;
 	}
 
 	printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
-	return -1;
+	return PTR_ERR(event);
 
 	/* success path */
 out_save:
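Both the `IS_ERR(event)` test and the new `return PTR_ERR(event)` rely on the kernel's convention of encoding small negative errno values in the otherwise-invalid top page of pointer space. A minimal userspace sketch of that encoding, modeled on the `include/linux/err.h` helpers (simplified: the real versions carry `__must_check` annotations and branch hints):

```c
#include <stdio.h>

#define MAX_ERRNO 4095	/* errno values live in the last page of address space */

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int obj = 0;
	void *ok = &obj;		/* a real object pointer */
	void *bad = ERR_PTR(-12);	/* -ENOMEM encoded as a pointer */

	printf("ok:  IS_ERR=%d\n", IS_ERR(ok));			/* 0 */
	printf("bad: IS_ERR=%d err=%ld\n", IS_ERR(bad), PTR_ERR(bad));	/* 1, -12 */
	return 0;
}
```

That round trip is what lets the hunk above drop the flat `-1` and hand the caller the precise reason perf_event_create_kernel_counter() failed.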
@@ -422,17 +409,19 @@ static int watchdog_prepare_cpu(int cpu)
 static int watchdog_enable(int cpu)
 {
 	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
+	int err;
 
 	/* enable the perf event */
-	if (watchdog_nmi_enable(cpu) != 0)
-		return -1;
+	err = watchdog_nmi_enable(cpu);
+	if (err)
+		return err;
 
 	/* create the watchdog thread */
 	if (!p) {
 		p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
 		if (IS_ERR(p)) {
 			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
-			return -1;
+			return PTR_ERR(p);
 		}
 		kthread_bind(p, cpu);
 		per_cpu(watchdog_touch_ts, cpu) = 0;
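The context around this fix also shows the standard way to start a thread pinned to one CPU: `kthread_create()` returns the task in a stopped state, so it can be bound with `kthread_bind()` before its first wakeup. A stripped-down, illustrative module sketch of the same pattern (the `demo_*` names are hypothetical, not watchdog code):

```c
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_fn(void *data)
{
	/* Loop until kthread_stop() is called on us. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int __init demo_init(void)
{
	/* Created stopped, so it can be bound before it ever runs. */
	demo_task = kthread_create(demo_fn, NULL, "demo/%d", 0);
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);

	kthread_bind(demo_task, 0);	/* pin to CPU 0 */
	wake_up_process(demo_task);	/* now let it run */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");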
@@ -484,6 +473,9 @@ static void watchdog_disable_all_cpus(void)
 {
 	int cpu;
 
+	if (no_watchdog)
+		return;
+
 	for_each_online_cpu(cpu)
 		watchdog_disable(cpu);
 
@@ -526,17 +518,16 @@ static int __cpuinit
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
+	int err = 0;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (watchdog_prepare_cpu(hotcpu))
-			return NOTIFY_BAD;
+		err = watchdog_prepare_cpu(hotcpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		if (watchdog_enable(hotcpu))
-			return NOTIFY_BAD;
+		err = watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -549,7 +540,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
 	}
-	return NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __cpuinitdata cpu_nfb = {
@@ -565,13 +556,11 @@ static int __init spawn_watchdog_task(void)
 	return 0;
 
 	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-	WARN_ON(err == NOTIFY_BAD);
+	WARN_ON(notifier_to_errno(err));
 
 	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
 
-	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
-
 	return 0;
 }
 early_initcall(spawn_watchdog_task);
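The switch from `NOTIFY_BAD`/`NOTIFY_OK` returns to the `notifier_from_errno()`/`notifier_to_errno()` pair works because the pair packs a negative errno into the notifier return value and unpacks it on the other side. A userspace sketch following the `include/linux/notifier.h` definitions of this era:

```c
#include <assert.h>
#include <stdio.h>

#define NOTIFY_DONE      0x0000
#define NOTIFY_OK        0x0001
#define NOTIFY_STOP_MASK 0x8000

/* Pack a negative errno into a notifier return value. */
static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;
}

/* Unpack it again; plain NOTIFY_OK/NOTIFY_DONE map back to 0. */
static int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}

int main(void)
{
	assert(notifier_to_errno(notifier_from_errno(0)) == 0);
	assert(notifier_to_errno(notifier_from_errno(-12)) == -12); /* -ENOMEM */
	printf("round-trip ok\n");
	return 0;
}
```

Hence `WARN_ON(notifier_to_errno(err))` in spawn_watchdog_task() now fires on any nonzero errno coming back from the callback, rather than only on an exact `NOTIFY_BAD` match.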