author	Tom O'Rourke <Tom.O'Rourke@intel.com>	2013-09-16 17:56:43 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-10-10 08:23:39 -0400
commit	5c9669cee534cbb834d51aae115267f5e561b622 (patch)
tree	12b901434fc7dc686b5fb645384bb9f754b26a0a /drivers/gpu/drm/i915/i915_sysfs.c
parent	09e14bf3ba4b72be4d57d99e3620beae4fb1ecd8 (diff)
drm/i915: Finish enabling rps before use by sysfs or debugfs
Enabling rps (turbo setup) was put in a work queue because it may take quite a while. This change flushes the work queue to initialize rps values before use by sysfs or debugfs.

Specifically, rps.delayed_resume_work is flushed before using rps.hw_max, rps.max_delay, rps.min_delay, or rps.cur_delay.

This change fixes a problem in sysfs where show functions using uninitialized values report incorrect values, and store functions using uninitialized values in range checks incorrectly reject valid input values. This change also addresses similar use-before-initialization problems in debugfs.

Signed-off-by: Tom O'Rourke <Tom.O'Rourke@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
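The pattern behind the patch is simple: the deferred rps setup may still be pending when userspace pokes sysfs, so each show/store path calls flush_delayed_work() on rps.delayed_resume_work before taking rps.hw_lock and reading the rps fields. The stand-alone C sketch below models that ordering with pthreads instead of kernel workqueues; the names rps_state, delayed_resume, flush_delayed_resume, and read_max_freq, and the example values, are hypothetical and only illustrate the use-before-initialization race the flush closes.

/*
 * Minimal user-space model of the ordering problem fixed by this patch
 * (hypothetical names; this is not kernel code). A structure is filled
 * in by deferred work; a sysfs-style reader must wait for that work to
 * finish before trusting the values, otherwise it reads zeros.
 *
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct rps_state {
	pthread_mutex_t hw_lock;
	pthread_t resume_thread;   /* stands in for delayed_resume_work */
	int max_delay;             /* 0 until the deferred init has run */
	int min_delay;
};

static void *delayed_resume(void *arg)   /* models the deferred rps setup */
{
	struct rps_state *rps = arg;

	sleep(1);                          /* "may take quite a while" */
	pthread_mutex_lock(&rps->hw_lock);
	rps->max_delay = 1200;             /* made-up example values */
	rps->min_delay = 300;
	pthread_mutex_unlock(&rps->hw_lock);
	return NULL;
}

static void flush_delayed_resume(struct rps_state *rps)
{
	/* Counterpart of flush_delayed_work(): block until init is done. */
	pthread_join(rps->resume_thread, NULL);
}

static int read_max_freq(struct rps_state *rps)   /* models a sysfs show */
{
	int val;

	flush_delayed_resume(rps);          /* the fix: flush before reading */
	pthread_mutex_lock(&rps->hw_lock);
	val = rps->max_delay;
	pthread_mutex_unlock(&rps->hw_lock);
	return val;
}

int main(void)
{
	struct rps_state rps = { .hw_lock = PTHREAD_MUTEX_INITIALIZER };

	pthread_create(&rps.resume_thread, NULL, delayed_resume, &rps);
	/* Without the flush this could print 0; with it, 1200. */
	printf("max freq: %d\n", read_max_freq(&rps));
	return 0;
}

Without the flush, the reader can observe max_delay still at its zero-initialized value, which is exactly the symptom described above for the sysfs show and store functions.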
Diffstat (limited to 'drivers/gpu/drm/i915/i915_sysfs.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_sysfs.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 8003886361b8..9ff1e4d96909 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -251,6 +251,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -283,6 +285,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -307,6 +311,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -355,6 +361,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -379,6 +387,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {