author     Jacob Pan <jacob.jun.pan@linux.intel.com>        2016-11-28 16:44:52 -0500
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2016-11-29 17:34:11 -0500
commit     feb6cd6a0f9f7d214351624d79e408cb2af91631 (patch)
tree       f3b93ba4f06bb5b5288407c29c1ff4db0812636c
parent     cb91fef1b71954e3edc79fb4171b43f6aa2028c7 (diff)
thermal/intel_powerclamp: stop sched tick in forced idle
With the introduction of play_idle(), the idle injection kthread can
go through the normal idle task processing to get correct accounting
and to turn off the scheduler tick when possible.
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
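
For orientation, a minimal sketch of what the per-CPU injection path boils down to after this change (illustrative only, not the driver's exact code; the wrapper name inject_idle_span() is hypothetical, and the headers are the ones where play_idle() and jiffies_to_msecs() are normally declared):

/*
 * Sketch, assuming the driver's context where struct
 * powerclamp_worker_data is defined. play_idle() runs the calling
 * kthread through the normal idle path for the given duration (in
 * milliseconds), so tick stop and idle accounting are handled by the
 * scheduler rather than by a private wakeup timer and mwait loop.
 */
#include <linux/cpu.h>		/* play_idle() */
#include <linux/jiffies.h>	/* jiffies_to_msecs() */

static void inject_idle_span(struct powerclamp_worker_data *w_data)
{
	/* duration is kept in jiffies by the driver, converted to ms here */
	play_idle(jiffies_to_msecs(w_data->duration_jiffies));
}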
-rw-r--r--   drivers/thermal/intel_powerclamp.c   35
1 file changed, 1 insertion(+), 34 deletions(-)
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index c99af71c0ae7..6d93f1d65dba 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -92,7 +92,6 @@ struct powerclamp_worker_data {
 	struct kthread_worker *worker;
 	struct kthread_work balancing_work;
 	struct kthread_delayed_work idle_injection_work;
-	struct timer_list wakeup_timer;
 	unsigned int cpu;
 	unsigned int count;
 	unsigned int guard;
@@ -277,11 +276,6 @@ static u64 pkg_state_counter(void)
 	return count;
 }
 
-static void noop_timer(unsigned long foo)
-{
-	/* empty... just the fact that we get the interrupt wakes us up */
-}
-
 static unsigned int get_compensation(int ratio)
 {
 	unsigned int comp = 0;
@@ -431,7 +425,6 @@ static void clamp_balancing_func(struct kthread_work *work)
 static void clamp_idle_injection_func(struct kthread_work *work)
 {
 	struct powerclamp_worker_data *w_data;
-	unsigned long target_jiffies;
 
 	w_data = container_of(work, struct powerclamp_worker_data,
 			      idle_injection_work.work);
@@ -452,31 +445,7 @@ static void clamp_idle_injection_func(struct kthread_work *work)
 	if (should_skip)
 		goto balance;
 
-	target_jiffies = jiffies + w_data->duration_jiffies;
-	mod_timer(&w_data->wakeup_timer, target_jiffies);
-	if (unlikely(local_softirq_pending()))
-		goto balance;
-	/*
-	 * stop tick sched during idle time, interrupts are still
-	 * allowed. thus jiffies are updated properly.
-	 */
-	preempt_disable();
-	/* mwait until target jiffies is reached */
-	while (time_before(jiffies, target_jiffies)) {
-		unsigned long ecx = 1;
-		unsigned long eax = target_mwait;
-
-		/*
-		 * REVISIT: may call enter_idle() to notify drivers who
-		 * can save power during cpu idle. same for exit_idle()
-		 */
-		local_touch_nmi();
-		stop_critical_timings();
-		mwait_idle_with_hints(eax, ecx);
-		start_critical_timings();
-		atomic_inc(&idle_wakeup_counter);
-	}
-	preempt_enable();
+	play_idle(jiffies_to_msecs(w_data->duration_jiffies));
 
 balance:
 	if (clamping && w_data->clamping && cpu_online(w_data->cpu))
@@ -538,7 +507,6 @@ static void start_power_clamp_worker(unsigned long cpu)
 	w_data->cpu = cpu;
 	w_data->clamping = true;
 	set_bit(cpu, cpu_clamping_mask);
-	setup_timer(&w_data->wakeup_timer, noop_timer, 0);
 	sched_setscheduler(worker->task, SCHED_FIFO, &sparam);
 	kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
 	kthread_init_delayed_work(&w_data->idle_injection_work,
@@ -570,7 +538,6 @@ static void stop_power_clamp_worker(unsigned long cpu)
 	 * a big deal. The balancing work is fast and destroy kthread
 	 * will wait for it.
 	 */
-	del_timer_sync(&w_data->wakeup_timer);
 	clear_bit(w_data->cpu, cpu_clamping_mask);
 	kthread_destroy_worker(w_data->worker);
 