Diffstat (limited to 'kernel/watchdog.c')
 kernel/watchdog.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 55 insertions(+), 13 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1241d8c91d5e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
 	.unpark			= watchdog_enable,
 };
 
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+	int ret;
+
+	/*
+	 * No need to cancel and restart hrtimer if it is currently executing
+	 * because it will reprogram itself with the new period now.
+	 * We should never see it unqueued here because we are running per-cpu
+	 * with interrupts disabled.
+	 */
+	ret = hrtimer_try_to_cancel(hrtimer);
+	if (ret == 1)
+		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+				HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+	struct call_single_data data = {.func = restart_watchdog_hrtimer};
+	/*
+	 * Make sure that perf event counter will adopt to a new
+	 * sampling period. Updating the sampling period directly would
+	 * be much nicer but we do not have an API for that now so
+	 * let's use a big hammer.
+	 * Hrtimer will adopt the new period on the next tick but this
+	 * might be late already so we have to restart the timer as well.
+	 */
+	watchdog_nmi_disable(cpu);
+	__smp_call_function_single(cpu, &data, 1);
+	watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+	int cpu;
+
+	get_online_cpus();
+	preempt_disable();
+	for_each_online_cpu(cpu)
+		update_timers(cpu);
+	preempt_enable();
+	put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
 {
 	int err = 0;
 
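Note that update_timers() fires an on-stack call_single_data with
__smp_call_function_single(cpu, &data, 1); the trailing 1 makes the call
synchronous, so the on-stack csd is safe. A minimal sketch (not part of the
patch; touch_remote_hrtimer is a hypothetical name) of the same cross-CPU
call using the higher-level smp_call_function_single() helper, which manages
the csd internally:

	/* Hypothetical helper, shown only to illustrate the IPI pattern. */
	static void touch_remote_hrtimer(int cpu)
	{
		/*
		 * Runs restart_watchdog_hrtimer() on 'cpu' with interrupts
		 * disabled and waits for completion (last argument = 1).
		 */
		smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	}

hrtimer_try_to_cancel() returns 1 only when it dequeued a pending timer, 0
when the timer was not queued, and -1 when its callback is currently running;
the restart is therefore needed only in the ret == 1 case, exactly as the
comment in restart_watchdog_hrtimer() explains.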
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
 			pr_err("Failed to create watchdog threads, disabled\n");
 		else
 			watchdog_running = 1;
+	} else if (sample_period_changed) {
+		update_timers_all_cpus();
 	}
 
 	return err;
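With this hunk applied, watchdog_enable_all_cpus() distinguishes first-time
enablement from a pure sampling-period update. Reconstructed from the
surrounding context (lines elided by the diff are approximated), the function
now reads roughly:

	static int watchdog_enable_all_cpus(bool sample_period_changed)
	{
		int err = 0;

		if (!watchdog_running) {
			err = smpboot_register_percpu_thread(&watchdog_threads);
			if (err)
				pr_err("Failed to create watchdog threads, disabled\n");
			else
				watchdog_running = 1;
		} else if (sample_period_changed) {
			/* Threads already running: only refresh the period. */
			update_timers_all_cpus();
		}

		return err;
	}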
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	int err, old_thresh, old_enabled;
+	static DEFINE_MUTEX(watchdog_proc_mutex);
 
+	mutex_lock(&watchdog_proc_mutex);
 	old_thresh = ACCESS_ONCE(watchdog_thresh);
 	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
 	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (err || !write)
-		return err;
+		goto out;
 
 	set_sample_period();
 	/*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
 	if (watchdog_user_enabled && watchdog_thresh)
-		err = watchdog_enable_all_cpus();
+		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
 	else
 		watchdog_disable_all_cpus();
 
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 		watchdog_thresh = old_thresh;
 		watchdog_user_enabled = old_enabled;
 	}
-
+out:
+	mutex_unlock(&watchdog_proc_mutex);
 	return err;
 }
 #endif /* CONFIG_SYSCTL */
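The function-local static DEFINE_MUTEX() plus a single goto-out exit path is
a common way to serialize a sysctl handler without a file-scope lock. A
stripped-down sketch of the pattern (example_dointvec, example_value and
example_mutex are illustrative names, not from the patch):

	static int example_value;	/* hypothetical sysctl-backed knob */

	static int example_dointvec(struct ctl_table *table, int write,
				    void __user *buffer, size_t *lenp, loff_t *ppos)
	{
		static DEFINE_MUTEX(example_mutex);	/* serializes this handler */
		int err, old;

		mutex_lock(&example_mutex);
		old = ACCESS_ONCE(example_value);	/* snapshot for rollback */
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err || !write)
			goto out;			/* single unlock point */
		/* ... act on the new example_value, restore 'old' on failure ... */
	out:
		mutex_unlock(&example_mutex);
		return err;
	}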
@@ -553,14 +603,6 @@ void __init lockup_detector_init(void)
 {
 	set_sample_period();
 
-#ifdef CONFIG_NO_HZ_FULL
-	if (watchdog_user_enabled) {
-		watchdog_user_enabled = 0;
-		pr_warning("Disabled lockup detectors by default for full dynticks\n");
-		pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
-	}
-#endif
-
 	if (watchdog_user_enabled)
-		watchdog_enable_all_cpus();
+		watchdog_enable_all_cpus(false);
 }
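After this change, writing a new watchdog_thresh takes effect on all online
CPUs immediately via update_timers_all_cpus(), rather than only after the
watchdog is disabled and re-enabled. A userspace illustration (not part of
the patch) that exercises the updated proc_dowatchdog() path:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/watchdog_thresh", "w");

		if (!f)
			return 1;
		/*
		 * 20s threshold; the hrtimer sampling period is derived from it
		 * (watchdog_thresh * 2 / 5 seconds at the time of this patch).
		 */
		fprintf(f, "20\n");
		return fclose(f) != 0;
	}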
