path: root/drivers/gpu/drm/i915/i915_sysfs.c
author     Ben Widawsky <benjamin.widawsky@intel.com>   2014-03-19 21:31:11 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2014-03-20 09:45:41 -0400
commit     b39fb2977392c0a996700bb9b8416a7b2ecce8fd (patch)
tree       72884c1e0e749b6c68ee8f0b3c4bd91205d3006c /drivers/gpu/drm/i915/i915_sysfs.c
parent     1f05c944e007809d7040a1cc87b16934fedd23f6 (diff)
drm/i915: Rename and comment all the RPS *stuff*
The names of the struct members for RPS are stupid. Every time I need to do anything in this code I have to spend a significant amount of time to remember what it all means. By renaming the variables (and adding the comments) I hope to clear up the situation. Indeed, doing this makes some upcoming patches more readable.

I've avoided ILK because it's possible that the naming used for Ironlake matches what is in the docs. I believe the ILK power docs were never published, and I am too lazy to dig them up.

v2: Leave rp0 and rp1 in the names. It is useful to have these limits available at times. min_freq and max_freq (which may be equal to rp0 or rp1 depending on the platform) represent the actual HW min and max.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
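For quick reference, the renames as they appear in this file boil down to the mapping below. This is a minimal illustrative sketch, not the driver's actual definition: the real fields live in dev_priv->rps, whose definition (in i915_drv.h) gains the new names and comments elsewhere in this patch; the struct name and types below are only assumed for the example. The values are stored in hardware frequency units, which is why the sysfs code multiplies by GT_FREQUENCY_MULTIPLIER (or converts via vlv_gpu_freq() on Valleyview) to report MHz.

#include <stdint.h>

/*
 * Illustrative sketch only -- the field names are taken from this patch,
 * but the struct name and layout here are hypothetical, not the driver's.
 */
struct rps_freq_sketch {
	uint8_t cur_freq;            /* was cur_delay:  frequency last requested from the HW */
	uint8_t min_freq_softlimit;  /* was min_delay:  user/sysfs minimum (gt_min_freq_mhz) */
	uint8_t max_freq_softlimit;  /* was max_delay:  user/sysfs maximum (gt_max_freq_mhz) */
	uint8_t max_freq;            /* was hw_max:     absolute hardware maximum */
	uint8_t efficient_freq;      /* was rpe_delay:  RPe, the most power-efficient frequency */
};

The *_softlimit suffix is what separates the user-tunable bounds from the hardware capabilities, which is exactly the distinction the old *_delay names obscured.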
Diffstat (limited to 'drivers/gpu/drm/i915/i915_sysfs.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_sysfs.c   32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e9ffefb720de..e3fa8cd419da 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -269,7 +269,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 		ret = vlv_gpu_freq(dev_priv, (freq >> 8) & 0xff);
 	} else {
-		ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.cur_freq * GT_FREQUENCY_MULTIPLIER;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
@@ -284,7 +284,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			vlv_gpu_freq(dev_priv, dev_priv->rps.rpe_delay));
+			vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -298,9 +298,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
 	else
-		ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -334,13 +334,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		val /= GT_FREQUENCY_MULTIPLIER;
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		non_oc_max = (rp_state_cap & 0xff);
 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
 	}
 
 	if (val < hw_min || val > hw_max ||
-	    val < dev_priv->rps.min_delay) {
+	    val < dev_priv->rps.min_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
@@ -349,9 +349,9 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  val * GT_FREQUENCY_MULTIPLIER);
 
-	dev_priv->rps.max_delay = val;
+	dev_priv->rps.max_freq_softlimit = val;
 
-	if (dev_priv->rps.cur_delay > val) {
+	if (dev_priv->rps.cur_freq > val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
@@ -360,7 +360,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 		/* We still need gen6_set_rps to process the new max_delay and
 		 * update the interrupt limits even though frequency request is
 		 * unchanged. */
-		gen6_set_rps(dev, dev_priv->rps.cur_delay);
+		gen6_set_rps(dev, dev_priv->rps.cur_freq);
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -379,9 +379,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
-		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_delay);
+		ret = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
 	else
-		ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
+		ret = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
@@ -414,18 +414,18 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		val /= GT_FREQUENCY_MULTIPLIER;
 
 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-		hw_max = dev_priv->rps.hw_max;
+		hw_max = dev_priv->rps.max_freq;
 		hw_min = ((rp_state_cap & 0xff0000) >> 16);
 	}
 
-	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
+	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
 		mutex_unlock(&dev_priv->rps.hw_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_delay = val;
+	dev_priv->rps.min_freq_softlimit = val;
 
-	if (dev_priv->rps.cur_delay < val) {
+	if (dev_priv->rps.cur_freq < val) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev, val);
 		else
@@ -434,7 +434,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 		/* We still need gen6_set_rps to process the new min_delay and
 		 * update the interrupt limits even though frequency request is
 		 * unchanged. */
-		gen6_set_rps(dev, dev_priv->rps.cur_delay);
+		gen6_set_rps(dev, dev_priv->rps.cur_freq);
 	}
 
 	mutex_unlock(&dev_priv->rps.hw_lock);