author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2016-02-14 20:13:42 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2016-03-09 08:41:03 -0500
commit		e4db2813d2e558b6b6bee464308678a57732b390 (patch)
tree		2cd9422569ce684971136492b12323d40c3e0a26
parent		f62b93740c30d0a3f50258d45415f00b763dd70a (diff)
cpufreq: governor: Avoid atomic operations in hot paths
Rework the handling of work items by dbs_update_util_handler() and
dbs_work_handler() so that the former (which is executed in scheduler
paths) only uses atomic operations when absolutely necessary, that is,
when the policy is shared and dbs_update_util_handler() has already
decided that this is the time to queue up a work item.

In particular, this avoids the atomic ops entirely on platforms where
policy objects are never shared.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
-rw-r--r--	drivers/cpufreq/cpufreq_governor.c	51
-rw-r--r--	drivers/cpufreq/cpufreq_governor.h	3
2 files changed, 38 insertions(+), 16 deletions(-)
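As a reading aid, the pattern the patch introduces can be sketched in
user space with C11 atomics (the names below are invented for
illustration; this is a sketch of the idea, not the kernel code): the
hot path starts with a cheap flag load, relies on paired fences for
ordering, and only performs an atomic read-modify-write to pick a
single winner when the state really is shared between CPUs.

	#include <stdatomic.h>
	#include <stdbool.h>

	struct pdbs {
		bool shared;                  /* set once before use, then read-only */
		atomic_bool work_in_progress; /* hot-path flag: relaxed ops + fences */
		atomic_int work_count;        /* only contended when shared */
	};

	/* Hot path: decide whether this caller gets to queue up the work. */
	static bool try_claim(struct pdbs *p)
	{
		/* Cheap load first: no read-modify-write in the common case. */
		if (atomic_load_explicit(&p->work_in_progress, memory_order_relaxed))
			return false;

		/* Pairs with the release fence in complete(); cf. smp_rmb(). */
		atomic_thread_fence(memory_order_acquire);

		if (p->shared) {
			/* Like atomic_add_unless(&count, 1, 1): one CPU wins. */
			int expected = 0;

			if (!atomic_compare_exchange_strong(&p->work_count,
							    &expected, 1))
				return false;
		}
		atomic_store_explicit(&p->work_in_progress, true,
				      memory_order_relaxed);
		return true;
	}

	/* Slow path: the work handler re-arms the fast path when done. */
	static void complete(struct pdbs *p)
	{
		atomic_set_note: atomic_store(&p->work_count, 0);
		/* Publish prior updates before re-opening the fast path; cf. smp_wmb(). */
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&p->work_in_progress, false,
				      memory_order_relaxed);
	}

When the policy is never shared, the fast path costs a relaxed load, a
fence, and a relaxed store; the compare-and-swap only runs when two
CPUs can actually race for the same policy object.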
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index c78af11a51f0..e5a08a13ca84 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -304,6 +304,7 @@ static void gov_cancel_work(struct cpufreq_policy *policy)
 	irq_work_sync(&policy_dbs->irq_work);
 	cancel_work_sync(&policy_dbs->work);
 	atomic_set(&policy_dbs->work_count, 0);
+	policy_dbs->work_in_progress = false;
 }
 
 static void dbs_work_handler(struct work_struct *work)
@@ -326,13 +327,15 @@ static void dbs_work_handler(struct work_struct *work)
 	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
 	mutex_unlock(&policy_dbs->timer_mutex);
 
+	/* Allow the utilization update handler to queue up more work. */
+	atomic_set(&policy_dbs->work_count, 0);
 	/*
-	 * If the atomic operation below is reordered with respect to the
-	 * sample delay modification, the utilization update handler may end
-	 * up using a stale sample delay value.
+	 * If the update below is reordered with respect to the sample delay
+	 * modification, the utilization update handler may end up using a stale
+	 * sample delay value.
 	 */
-	smp_mb__before_atomic();
-	atomic_dec(&policy_dbs->work_count);
+	smp_wmb();
+	policy_dbs->work_in_progress = false;
 }
 
 static void dbs_irq_work(struct irq_work *irq_work)
@@ -348,6 +351,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 {
 	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
 	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
+	u64 delta_ns;
 
 	/*
 	 * The work may not be allowed to be queued up right now.
@@ -355,17 +359,30 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 	 * - Work has already been queued up or is in progress.
 	 * - It is too early (too little time from the previous sample).
 	 */
-	if (atomic_inc_return(&policy_dbs->work_count) == 1) {
-		u64 delta_ns;
-
-		delta_ns = time - policy_dbs->last_sample_time;
-		if ((s64)delta_ns >= policy_dbs->sample_delay_ns) {
-			policy_dbs->last_sample_time = time;
-			irq_work_queue(&policy_dbs->irq_work);
-			return;
-		}
-	}
-	atomic_dec(&policy_dbs->work_count);
+	if (policy_dbs->work_in_progress)
+		return;
+
+	/*
+	 * If the reads below are reordered before the check above, the value
+	 * of sample_delay_ns used in the computation may be stale.
+	 */
+	smp_rmb();
+	delta_ns = time - policy_dbs->last_sample_time;
+	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
+		return;
+
+	/*
+	 * If the policy is not shared, the irq_work may be queued up right away
+	 * at this point. Otherwise, we need to ensure that only one of the
+	 * CPUs sharing the policy will do that.
+	 */
+	if (policy_dbs->is_shared &&
+	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
+		return;
+
+	policy_dbs->last_sample_time = time;
+	policy_dbs->work_in_progress = true;
+	irq_work_queue(&policy_dbs->irq_work);
 }
 
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
@@ -542,6 +559,8 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
 	if (!policy->cur)
 		return -EINVAL;
 
+	policy_dbs->is_shared = policy_is_shared(policy);
+
 	sampling_rate = dbs_data->sampling_rate;
 	ignore_nice = dbs_data->ignore_nice_load;
 
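The shared-policy claim above relies on atomic_add_unless(&work_count, 1, 1)
succeeding for exactly one of the racing CPUs: it adds 1 only if the
counter is not already 1 and returns whether the addition happened. A
rough user-space model of that kernel helper, assuming C11 atomics (the
function name here is invented for illustration):

	#include <stdatomic.h>
	#include <stdbool.h>

	/*
	 * Model of the kernel's atomic_add_unless(v, a, u): atomically add
	 * @a to @v unless @v == @u; return true if the addition took place.
	 */
	static bool model_atomic_add_unless(atomic_int *v, int a, int u)
	{
		int cur = atomic_load(v);

		while (cur != u) {
			/* On failure, cur is refreshed with the current value. */
			if (atomic_compare_exchange_weak(v, &cur, cur + a))
				return true;
		}
		return false;
	}

With work_count at 0, the first CPU's compare-and-swap from 0 to 1
succeeds, and every later caller sees the counter at 1 and bails out,
which is exactly the "only one of the CPUs sharing the policy" guarantee
the comment in dbs_update_util_handler() asks for.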
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 8138eff5e25b..521daac38ba5 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -130,6 +130,9 @@ struct policy_dbs_info {
 	/* dbs_data may be shared between multiple policy objects */
 	struct dbs_data *dbs_data;
 	struct list_head list;
+	/* Status indicators */
+	bool is_shared;		/* This object is used by multiple CPUs */
+	bool work_in_progress;	/* Work is being queued up or in progress */
 };
 
 static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
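For reference, the ordering contract between the two new flags and the
barriers added above amounts to a classic message-passing litmus test
(a sketch of the reasoning, not part of the patch):

	CPU0: dbs_work_handler()         CPU1: dbs_update_util_handler()
	------------------------         -------------------------------
	sample_delay_ns = new_delay;     if (work_in_progress) return;
	smp_wmb();                       smp_rmb();
	work_in_progress = false;        d = time - last_sample_time;
	                                 if ((s64)d < sample_delay_ns) return;

If CPU1 observes work_in_progress == false, the smp_wmb()/smp_rmb()
pairing guarantees that it also observes the sample_delay_ns value
written before the smp_wmb(), so the stale-delay outcome the comments
warn about cannot occur.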