author	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2018-05-23 05:47:45 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2018-05-24 04:21:18 -0400
commit	a61dec7447456858dfc88fe056017a91ab903ed0 (patch)
tree	cc296fea94a97c09349f4dbf057a61dd2b004f92
parent	152db033d77589df9ff1b93c1b311d4cd2e93bd0 (diff)
cpufreq: schedutil: Avoid missing updates for one-CPU policies
Commit 152db033d775 (schedutil: Allow cpufreq requests to be made even when
kthread kicked) made changes to prevent utilization updates from being
discarded while a previous request is being processed, but it left a small
window in which that can still happen in the one-CPU policy case. Namely,
updates coming in after work_in_progress is set in sugov_update_commit() and
before it is cleared in sugov_work() are still dropped by the
work_in_progress check in sugov_update_single().

To close that window, rearrange the code so as to acquire the update lock
around the deferred update branch in sugov_update_single() and drop the
work_in_progress check from it.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
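The window is easiest to see as an interleaving. Below is a minimal userspace
sketch of the pre-patch behavior, not schedutil code itself: the globals, the
simplified update_single_old()/work() helpers and the frequency values are
made up for illustration, and the real race involves a kthread rather than
straight-line calls.

/*
 * Hypothetical model of the old one-CPU-policy path: an update that
 * arrives between work_in_progress being set and being cleared is
 * silently discarded.
 */
#include <stdbool.h>
#include <stdio.h>

static bool work_in_progress;
static unsigned int next_freq;
static unsigned int applied_freq;

/* Old sugov_update_single() logic: bail out while the kthread is busy. */
static void update_single_old(unsigned int freq)
{
	if (work_in_progress) {
		printf("request for %u kHz dropped\n", freq);
		return;
	}
	next_freq = freq;
	work_in_progress = true;	/* kthread kicked */
}

/* Kthread side, as in sugov_work(): apply the request, clear the flag. */
static void work(void)
{
	applied_freq = next_freq;
	work_in_progress = false;
}

int main(void)
{
	update_single_old(1000000);	/* recorded, flag set */
	update_single_old(2000000);	/* lands in the window: discarded */
	work();				/* applies 1000000; 2000000 is lost */
	printf("applied %u kHz, last request was 2000000 kHz\n", applied_freq);
	return 0;
}

With the patch, the slow-switch branch records the request under update_lock
instead, so a request landing in that window still updates next_freq and is
picked up when sugov_work() re-reads it under the same lock.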
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	70
1 file changed, 47 insertions(+), 23 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 178946e36393..fd76497efeb1 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -100,25 +100,41 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	return delta_ns >= sg_policy->freq_update_delay_ns;
 }
 
-static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
-				unsigned int next_freq)
+static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
+				   unsigned int next_freq)
 {
-	struct cpufreq_policy *policy = sg_policy->policy;
-
 	if (sg_policy->next_freq == next_freq)
-		return;
+		return false;
 
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
 
-	if (policy->fast_switch_enabled) {
-		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
-		if (!next_freq)
-			return;
+	return true;
+}
+
+static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
+			      unsigned int next_freq)
+{
+	struct cpufreq_policy *policy = sg_policy->policy;
+
+	if (!sugov_update_next_freq(sg_policy, time, next_freq))
+		return;
+
+	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
+	if (!next_freq)
+		return;
 
-		policy->cur = next_freq;
-		trace_cpu_frequency(next_freq, smp_processor_id());
-	} else if (!sg_policy->work_in_progress) {
+	policy->cur = next_freq;
+	trace_cpu_frequency(next_freq, smp_processor_id());
+}
+
+static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
+				  unsigned int next_freq)
+{
+	if (!sugov_update_next_freq(sg_policy, time, next_freq))
+		return;
+
+	if (!sg_policy->work_in_progress) {
 		sg_policy->work_in_progress = true;
 		irq_work_queue(&sg_policy->irq_work);
 	}
@@ -363,13 +379,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	ignore_dl_rate_limit(sg_cpu, sg_policy);
 
-	/*
-	 * For slow-switch systems, single policy requests can't run at the
-	 * moment if update is in progress, unless we acquire update_lock.
-	 */
-	if (sg_policy->work_in_progress)
-		return;
-
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
@@ -391,7 +400,18 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 		sg_policy->cached_raw_freq = 0;
 	}
 
-	sugov_update_commit(sg_policy, time, next_f);
+	/*
+	 * This code runs under rq->lock for the target CPU, so it won't run
+	 * concurrently on two different CPUs for the same target and it is not
+	 * necessary to acquire the lock in the fast switch case.
+	 */
+	if (sg_policy->policy->fast_switch_enabled) {
+		sugov_fast_switch(sg_policy, time, next_f);
+	} else {
+		raw_spin_lock(&sg_policy->update_lock);
+		sugov_deferred_update(sg_policy, time, next_f);
+		raw_spin_unlock(&sg_policy->update_lock);
+	}
 }
 
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
@@ -435,7 +455,11 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 
 	if (sugov_should_update_freq(sg_policy, time)) {
 		next_f = sugov_next_freq_shared(sg_cpu, time);
-		sugov_update_commit(sg_policy, time, next_f);
+
+		if (sg_policy->policy->fast_switch_enabled)
+			sugov_fast_switch(sg_policy, time, next_f);
+		else
+			sugov_deferred_update(sg_policy, time, next_f);
 	}
 
 	raw_spin_unlock(&sg_policy->update_lock);
@@ -450,11 +474,11 @@ static void sugov_work(struct kthread_work *work)
 	/*
 	 * Hold sg_policy->update_lock shortly to handle the case where:
 	 * incase sg_policy->next_freq is read here, and then updated by
-	 * sugov_update_shared just before work_in_progress is set to false
+	 * sugov_deferred_update() just before work_in_progress is set to false
 	 * here, we may miss queueing the new update.
 	 *
 	 * Note: If a work was queued after the update_lock is released,
-	 * sugov_work will just be called again by kthread_work code; and the
+	 * sugov_work() will just be called again by kthread_work code; and the
 	 * request will be proceed before the sugov thread sleeps.
 	 */
 	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);