about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorSrinivas Pandruvada <srinivas.pandruvada@linux.intel.com>2018-06-05 17:42:40 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2018-06-06 02:37:52 -0400
commit52ccc4314293272397b117f3cc6f0f368c81431c (patch)
treebdea5ca8701a87611ee85c366cc09c46ae26f2df
parente0efd5be63e821066b5e6325cf237eb41367552f (diff)
cpufreq: intel_pstate: HWP boost performance on IO wakeup
This change uses the SCHED_CPUFREQ_IOWAIT flag to boost HWP performance. Since the SCHED_CPUFREQ_IOWAIT flag is set frequently, we don't start boosting steps unless we see two consecutive flags in two ticks. This avoids boosting due to IO from regular system activities. To avoid synchronization issues, the actual processing of the flag is done on the local CPU callback.

Reported-by: Mel Gorman <mgorman@techsingularity.net>
Tested-by: Giovanni Gherdovich <ggherdovich@suse.cz>
Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--drivers/cpufreq/intel_pstate.c39
1 files changed, 39 insertions, 0 deletions
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3949e3861f55..5b2b6b6d1ff4 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -223,6 +223,8 @@ struct global_params {
  *			operation
  * @hwp_req_cached:	Cached value of the last HWP Request MSR
  * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
+ * @last_io_update:	Last time when IO wake flag was set
+ * @sched_flags:	Store scheduler flags for possible cross CPU update
  * @hwp_boost_min:	Last HWP boosted min performance
  *
  * This structure stores per CPU instance data for all CPUs.
@@ -258,6 +260,8 @@ struct cpudata {
 	s16 epp_saved;
 	u64 hwp_req_cached;
 	u64 hwp_cap_cached;
+	u64 last_io_update;
+	unsigned int sched_flags;
 	u32 hwp_boost_min;
 };
 
@@ -1460,9 +1464,44 @@ static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
 	cpu->last_update = cpu->sample.time;
 }
 
/*
 * intel_pstate_update_util_hwp_local - process accumulated scheduler flags
 * on the CPU that owns @cpu.  On an IO-wait hint within two ticks of the
 * previous one, boost HWP min performance via intel_pstate_hwp_boost_up();
 * on any non-IO-wait callback, step the boost back down.  Consumes (clears)
 * cpu->sched_flags and records the callback time in cpu->sample.time.
 */
1467static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
1468 u64 time)
1469{
1470 cpu->sample.time = time;
1471
1472 if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
1473 bool do_io = false;
1474
1475 cpu->sched_flags = 0;
1476 /*
1477 * Set iowait_boost flag and update time. Since the IO WAIT
1478 * flag is set frequently, we can't conclude from a single
1479 * occurrence that IO-bound activity is scheduled on this CPU.
1480 * Only if we receive at least two flags in two consecutive
1481 * ticks do we treat this CPU as a boost candidate.
1482 */
1483 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
1484 do_io = true;
1485
1486 cpu->last_io_update = time;
1487
1488 if (do_io)
1489 intel_pstate_hwp_boost_up(cpu);
1490
1491 } else {
1492 intel_pstate_hwp_boost_down(cpu);
1493 }
1494}
1495
/*
 * intel_pstate_update_util_hwp - scheduler utilization-update callback.
 * Flags may be delivered by a remote CPU; they are only accumulated into
 * cpu->sched_flags here, and processed (by
 * intel_pstate_update_util_hwp_local()) when the callback runs on the
 * target CPU itself, which sidesteps cross-CPU synchronization.
 */
1463static inline void intel_pstate_update_util_hwp(struct update_util_data *data, 1496static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
1464 u64 time, unsigned int flags) 1497 u64 time, unsigned int flags)
1465{ 1498{
1499 struct cpudata *cpu = container_of(data, struct cpudata, update_util);
1500
1501 cpu->sched_flags |= flags;
1502
1503 if (smp_processor_id() == cpu->cpu)
1504 intel_pstate_update_util_hwp_local(cpu, time);
1466} 1505}
 
 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)