author     Joel Fernandes (Google) <joel@joelfernandes.org>    2018-05-22 18:55:53 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>      2018-05-23 04:37:56 -0400
commit     152db033d77589df9ff1b93c1b311d4cd2e93bd0 (patch)
tree       e8a93de140011a50752e7311973abe74f137913a /kernel/sched
parent     036399782bf51dafb932b680b260936b2b5f8dd6 (diff)
schedutil: Allow cpufreq requests to be made even when kthread kicked
Currently there is a chance of a schedutil cpufreq update request being dropped if there is a pending update request. This pending request can be delayed if there is a scheduling delay of the irq_work and the wakeup of the schedutil governor kthread.

A particularly bad scenario: a schedutil request has just been made, for example to reduce the CPU frequency, and a newer request to increase the CPU frequency (even an urgent sched-deadline frequency-increase request) can then be dropped, even though the rate limits suggest that it is OK to process a request. This happens because of the way the work_in_progress flag is used.

This patch improves the situation by allowing new requests to be made even while the old one is still being processed. Note that in this approach, if an irq_work has already been issued, we just update next_freq and do not queue another request, so no extra work is done to make this happen.

Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
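
For illustration only, the following is a minimal userspace sketch of the coalescing scheme described above. It is not kernel code: a pthread mutex and condition variable stand in for update_lock and the irq_work/kthread kick, request_freq and worker are made-up names, and only the next_freq and work_in_progress fields mirror struct sugov_policy (build with: cc -pthread sketch.c).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical userspace model of the scheme described above; not kernel
 * code. A pthread mutex/condvar stands in for update_lock and the
 * irq_work/kthread kick; only next_freq/work_in_progress mirror
 * struct sugov_policy.
 */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static unsigned int next_freq;
static bool work_in_progress;

/* Fast path: always record the newest request, but only kick the worker
 * when no work is already in flight (mirrors the "else if" hunk below). */
static void request_freq(unsigned int freq)
{
        pthread_mutex_lock(&update_lock);
        next_freq = freq;                 /* a newer request always wins */
        if (!work_in_progress) {
                work_in_progress = true;  /* stands in for irq_work_queue() */
                pthread_cond_signal(&kick);
        }
        pthread_mutex_unlock(&update_lock);
}

/* Slow path: snapshot next_freq and clear the flag atomically, then act
 * on the snapshot outside the lock (mirrors the sugov_work hunk below). */
static void *worker(void *unused)
{
        (void)unused;
        for (;;) {
                unsigned int freq;

                pthread_mutex_lock(&update_lock);
                while (!work_in_progress)
                        pthread_cond_wait(&kick, &update_lock);
                freq = next_freq;
                work_in_progress = false;
                pthread_mutex_unlock(&update_lock);

                printf("switching to %u kHz\n", freq); /* ~__cpufreq_driver_target() */
        }
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        request_freq(800000);
        request_freq(1200000);  /* coalesced if the worker has not yet run */
        sleep(1);
        return 0;
}

The property the patch relies on is visible here: a request arriving while the worker is busy only overwrites next_freq, and the worker picks up whatever the latest value is when it re-checks under the lock.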
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 34 ++++++++++++++++++++++++++++++--------
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index caf435c14a52..178946e36393 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -92,9 +92,6 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
             !cpufreq_this_cpu_can_update(sg_policy->policy))
                 return false;
 
-        if (sg_policy->work_in_progress)
-                return false;
-
         if (unlikely(sg_policy->need_freq_update))
                 return true;
 
@@ -121,7 +118,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 
                 policy->cur = next_freq;
                 trace_cpu_frequency(next_freq, smp_processor_id());
-        } else {
+        } else if (!sg_policy->work_in_progress) {
                 sg_policy->work_in_progress = true;
                 irq_work_queue(&sg_policy->irq_work);
         }
@@ -366,6 +363,13 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
         ignore_dl_rate_limit(sg_cpu, sg_policy);
 
+        /*
+         * For slow-switch systems, single policy requests can't run at the
+         * moment if update is in progress, unless we acquire update_lock.
+         */
+        if (sg_policy->work_in_progress)
+                return;
+
         if (!sugov_should_update_freq(sg_policy, time))
                 return;
 
@@ -440,13 +444,27 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 static void sugov_work(struct kthread_work *work)
 {
         struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+        unsigned int freq;
+        unsigned long flags;
+
+        /*
+         * Hold sg_policy->update_lock shortly to handle the case where:
+         * incase sg_policy->next_freq is read here, and then updated by
+         * sugov_update_shared just before work_in_progress is set to false
+         * here, we may miss queueing the new update.
+         *
+         * Note: If a work was queued after the update_lock is released,
+         * sugov_work will just be called again by kthread_work code; and the
+         * request will be proceed before the sugov thread sleeps.
+         */
+        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+        freq = sg_policy->next_freq;
+        sg_policy->work_in_progress = false;
+        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 
         mutex_lock(&sg_policy->work_lock);
-        __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
-                                CPUFREQ_RELATION_L);
+        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
         mutex_unlock(&sg_policy->work_lock);
-
-        sg_policy->work_in_progress = false;
 }
 
 static void sugov_irq_work(struct irq_work *irq_work)
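
The comment added in the last hunk is the crux of the change: next_freq must be sampled and work_in_progress cleared as one atomic step, otherwise a request landing between the two sees work_in_progress still set, skips queueing new work, and its value is never applied. Below is a hypothetical sketch of the two orderings, again plain userspace C with a pthread mutex standing in for update_lock rather than kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the two struct sugov_policy fields involved in the race. */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int next_freq = 800000;
static bool work_in_progress = true;

/* Racy ordering: a concurrent request that writes next_freq between (1)
 * and (2) observes work_in_progress == true, skips queueing new work,
 * and its frequency is silently dropped. */
static unsigned int snapshot_racy(void)
{
        unsigned int freq = next_freq; /* (1) sample the request       */
        work_in_progress = false;      /* (2) allow new work, too late */
        return freq;
}

/* Ordering used by the patch: sample and clear under update_lock, so any
 * request arriving afterwards sees work_in_progress == false and queues
 * fresh work for the value it just wrote. */
static unsigned int snapshot_locked(void)
{
        unsigned int freq;

        pthread_mutex_lock(&update_lock);
        freq = next_freq;
        work_in_progress = false;
        pthread_mutex_unlock(&update_lock);
        return freq;
}

int main(void)
{
        printf("racy snapshot:   %u\n", snapshot_racy());
        work_in_progress = true;       /* reset before the locked variant */
        printf("locked snapshot: %u\n", snapshot_locked());
        return 0;
}

The "Note:" in that same comment covers the opposite window: if a request is queued right after the lock is dropped, kthread_work simply runs sugov_work again before the thread sleeps, so that update is not lost either.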