author	Sagar Arun Kamble <sagar.a.kamble@intel.com>	2017-10-10 17:30:06 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2017-10-11 03:56:59 -0400
commit	562d9bae08a10335368bf54ea5cc7e4f6185bccc (patch)
tree	bed8838c1e250e1104b3d224f9e81ae484fa4b16
parent	9f817501bd7facfe2bffacd637f4332e5991e57a (diff)
drm/i915: Name structure in dev_priv that contains RPS/RC6 state as "gt_pm"
Prepared substructure rps for RPS related state. autoenable_work is
used for RC6 too, hence it is defined outside the rps structure. As we
do this, many functions are refactored to use a local struct intel_rps
*rps to access RPS related members. Hence renamed intel_rps_client
pointer variables to rps_client in various functions.

v2: Rebase.
v3: s/pm/gt_pm (Chris)
Refactored access to rps structure by declaring struct intel_rps * in
many functions.

Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com> #1
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1507360055-19948-9-git-send-email-sagar.a.kamble@intel.com
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171010213010.7415-8-chris@chris-wilson.co.uk
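The rename is mechanical but wide: every dev_priv->rps.<field> access
becomes dev_priv->gt_pm.rps.<field>, and the touched functions hoist a
local struct intel_rps *rps pointer once rather than repeating the full
path at each use. A toy, compilable sketch of that access pattern (the
struct layouts here are reduced stand-ins, not the driver's real
definitions):

#include <stdio.h>

/* Reduced stand-ins for the driver types; only the shape matters here. */
struct intel_rps { int cur_freq, min_freq, max_freq; };
struct intel_gen6_power_mgmt { struct intel_rps rps; };
struct drm_i915_private { struct intel_gen6_power_mgmt gt_pm; };

int main(void)
{
	struct drm_i915_private i915 = { .gt_pm = { .rps = { 350, 100, 1100 } } };

	/* Post-patch idiom: hoist the pointer once per function... */
	struct intel_rps *rps = &i915.gt_pm.rps;

	/* ...then every former dev_priv->rps.X access becomes rps->X. */
	printf("cur %d MHz (min %d, max %d)\n",
	       rps->cur_freq, rps->min_freq, rps->max_freq);
	return 0;
}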
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	99
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	14
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	21
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_request.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_guc_submission.c	10
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	87
-rw-r--r--	drivers/gpu/drm/i915/i915_sysfs.c	54
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	2
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	315
10 files changed, 330 insertions, 276 deletions
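The heart of the patch is the i915_drv.h hunk below: the old catch-all
struct intel_gen6_power_mgmt is renamed to struct intel_rps, and a new
wrapper reusing the old name pairs the embedded rps with
autoenable_work, which stays outside rps because RC6 uses it too;
dev_priv->rps becomes dev_priv->gt_pm. A condensed, self-compiling
sketch of the resulting layout (most fields elided; u32 and
delayed_work are stand-ins for the kernel types):

#include <stdint.h>

typedef uint32_t u32;			/* stand-in for the kernel typedef */
typedef uint8_t u8;			/* stand-in for the kernel typedef */
struct delayed_work { int token; };	/* stand-in for the kernel type */

/* Was "struct intel_gen6_power_mgmt"; now holds only RPS state. */
struct intel_rps {
	u32 pm_iir;			/* protected by dev_priv->irq_lock */
	u32 pm_intrmsk_mbz;
	u8 cur_freq, idle_freq, boost_freq;
	u8 min_freq, max_freq;		/* hardware limits */
	u8 min_freq_softlimit, max_freq_softlimit;
	/* ... thresholds, power state, waiters, ei ... */
};

/* New wrapper keeping the old name: dev_priv->rps becomes dev_priv->gt_pm. */
struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct delayed_work autoenable_work;	/* used for RC6 too */
};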
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e733097fa647..0bb6e01121fc 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1080,6 +1080,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
 static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int ret = 0;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1116,20 +1117,20 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+			   intel_gpu_freq(dev_priv, rps->cur_freq));
 
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+			   intel_gpu_freq(dev_priv, rps->max_freq));
 
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+			   intel_gpu_freq(dev_priv, rps->min_freq));
 
 		seq_printf(m, "idle GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+			   intel_gpu_freq(dev_priv, rps->idle_freq));
 
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 		mutex_unlock(&dev_priv->pcu_lock);
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
@@ -1210,7 +1211,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
-			   dev_priv->rps.pm_intrmsk_mbz);
+			   rps->pm_intrmsk_mbz);
 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
 		seq_printf(m, "Render p-state ratio: %d\n",
 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
@@ -1230,8 +1231,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
-		seq_printf(m, "Up threshold: %d%%\n",
-			   dev_priv->rps.up_threshold);
+		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
 
 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
@@ -1239,8 +1239,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
-		seq_printf(m, "Down threshold: %d%%\n",
-			   dev_priv->rps.down_threshold);
+		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
 
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
 			    rp_state_cap >> 16) & 0xff;
@@ -1262,22 +1261,22 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
 			   intel_gpu_freq(dev_priv, max_freq));
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+			   intel_gpu_freq(dev_priv, rps->max_freq));
 
 		seq_printf(m, "Current freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+			   intel_gpu_freq(dev_priv, rps->cur_freq));
 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
 		seq_printf(m, "Idle freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+			   intel_gpu_freq(dev_priv, rps->idle_freq));
 		seq_printf(m, "Min freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+			   intel_gpu_freq(dev_priv, rps->min_freq));
 		seq_printf(m, "Boost freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+			   intel_gpu_freq(dev_priv, rps->boost_freq));
 		seq_printf(m, "Max freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+			   intel_gpu_freq(dev_priv, rps->max_freq));
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
+			   intel_gpu_freq(dev_priv, rps->efficient_freq));
 	} else {
 		seq_puts(m, "no P-state info available\n");
 	}
@@ -1831,6 +1830,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int ret = 0;
 	int gpu_freq, ia_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
@@ -1848,13 +1848,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
 	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
 		/* Convert GT frequency to 50 HZ units */
-		min_gpu_freq =
-			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
-		max_gpu_freq =
-			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
+		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
+		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
 	} else {
-		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
-		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
+		min_gpu_freq = rps->min_freq_softlimit;
+		max_gpu_freq = rps->max_freq_softlimit;
 	}
 
 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
@@ -2307,25 +2305,26 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	struct drm_file *file;
 
-	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Boosts outstanding? %d\n",
-		   atomic_read(&dev_priv->rps.num_waiters));
+		   atomic_read(&rps->num_waiters));
 	seq_printf(m, "Frequency requested %d\n",
-		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+		   intel_gpu_freq(dev_priv, rps->cur_freq));
 	seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+		   intel_gpu_freq(dev_priv, rps->min_freq),
+		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
+		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
+		   intel_gpu_freq(dev_priv, rps->max_freq));
 	seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
-		   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
-		   intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq));
+		   intel_gpu_freq(dev_priv, rps->idle_freq),
+		   intel_gpu_freq(dev_priv, rps->efficient_freq),
+		   intel_gpu_freq(dev_priv, rps->boost_freq));
 
 	mutex_lock(&dev->filelist_mutex);
 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
@@ -2337,15 +2336,15 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		seq_printf(m, "%s [%d]: %d boosts\n",
 			   task ? task->comm : "<unknown>",
 			   task ? task->pid : -1,
-			   atomic_read(&file_priv->rps.boosts));
+			   atomic_read(&file_priv->rps_client.boosts));
 		rcu_read_unlock();
 	}
 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
-		   atomic_read(&dev_priv->rps.boosts));
+		   atomic_read(&rps->boosts));
 	mutex_unlock(&dev->filelist_mutex);
 
 	if (INTEL_GEN(dev_priv) >= 6 &&
-	    dev_priv->rps.enabled &&
+	    rps->enabled &&
 	    dev_priv->gt.active_requests) {
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
@@ -2358,13 +2357,13 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
-			   rps_power_to_str(dev_priv->rps.power));
+			   rps_power_to_str(rps->power));
 		seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
-			   dev_priv->rps.up_threshold);
+			   rps->up_threshold);
 		seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
-			   dev_priv->rps.down_threshold);
+			   rps->down_threshold);
 	} else {
 		seq_puts(m, "\nRPS Autotuning inactive\n");
 	}
@@ -4304,7 +4303,7 @@ i915_max_freq_get(void *data, u64 *val)
 	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
+	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
 	return 0;
 }
 
@@ -4312,6 +4311,7 @@ static int
 i915_max_freq_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 hw_max, hw_min;
 	int ret;
 
@@ -4329,15 +4329,15 @@ i915_max_freq_set(void *data, u64 val)
 	 */
 	val = intel_freq_opcode(dev_priv, val);
 
-	hw_max = dev_priv->rps.max_freq;
-	hw_min = dev_priv->rps.min_freq;
+	hw_max = rps->max_freq;
+	hw_min = rps->min_freq;
 
-	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
+	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.max_freq_softlimit = val;
+	rps->max_freq_softlimit = val;
 
 	if (intel_set_rps(dev_priv, val))
 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
@@ -4359,7 +4359,7 @@ i915_min_freq_get(void *data, u64 *val)
 	if (INTEL_GEN(dev_priv) < 6)
 		return -ENODEV;
 
-	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
+	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
 	return 0;
 }
 
@@ -4367,6 +4367,7 @@ static int
 i915_min_freq_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 hw_max, hw_min;
 	int ret;
 
@@ -4384,16 +4385,16 @@ i915_min_freq_set(void *data, u64 val)
 	 */
 	val = intel_freq_opcode(dev_priv, val);
 
-	hw_max = dev_priv->rps.max_freq;
-	hw_min = dev_priv->rps.min_freq;
+	hw_max = rps->max_freq;
+	hw_min = rps->min_freq;
 
 	if (val < hw_min ||
-	    val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
+	    val > hw_max || val > rps->max_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_freq_softlimit = val;
+	rps->min_freq_softlimit = val;
 
 	if (intel_set_rps(dev_priv, val))
 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9ebbb08dcf2d..9b8a19149154 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2502,7 +2502,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rps.enabled && intel_enable_rc6())))
 		return -ENODEV;
 
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fca7b939495f..521348ee7242 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -609,7 +609,7 @@ struct drm_i915_file_private {
 
 	struct intel_rps_client {
 		atomic_t boosts;
-	} rps;
+	} rps_client;
 
 	unsigned int bsd_engine;
 
@@ -1317,7 +1317,7 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_gen6_power_mgmt {
+struct intel_rps {
 	/*
 	 * work, interrupts_enabled and pm_iir are protected by
 	 * dev_priv->irq_lock
@@ -1358,7 +1358,6 @@ struct intel_gen6_power_mgmt {
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
 	bool enabled;
-	struct delayed_work autoenable_work;
 	atomic_t num_waiters;
 	atomic_t boosts;
 
@@ -1366,6 +1365,11 @@ struct intel_gen6_power_mgmt {
 	struct intel_rps_ei ei;
 };
 
+struct intel_gen6_power_mgmt {
+	struct intel_rps rps;
+	struct delayed_work autoenable_work;
+};
+
 /* defined intel_pm.c */
 extern spinlock_t mchdev_lock;
 
@@ -2421,8 +2425,8 @@ struct drm_i915_private {
 	 */
 	struct mutex pcu_lock;
 
-	/* gen6+ rps state */
-	struct intel_gen6_power_mgmt rps;
+	/* gen6+ GT PM state */
+	struct intel_gen6_power_mgmt gt_pm;
 
 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e829e8c900e8..f76890b74d00 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -358,7 +358,7 @@ static long
 i915_gem_object_wait_fence(struct dma_fence *fence,
 			   unsigned int flags,
 			   long timeout,
-			   struct intel_rps_client *rps)
+			   struct intel_rps_client *rps_client)
 {
 	struct drm_i915_gem_request *rq;
 
@@ -391,11 +391,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 	 * forcing the clocks too high for the whole system, we only allow
 	 * each client to waitboost once in a busy period.
 	 */
-	if (rps) {
+	if (rps_client) {
 		if (INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq, rps);
+			gen6_rps_boost(rq, rps_client);
 		else
-			rps = NULL;
+			rps_client = NULL;
 	}
 
 	timeout = i915_wait_request(rq, flags, timeout);
@@ -411,7 +411,7 @@ static long
 i915_gem_object_wait_reservation(struct reservation_object *resv,
 				 unsigned int flags,
 				 long timeout,
-				 struct intel_rps_client *rps)
+				 struct intel_rps_client *rps_client)
 {
 	unsigned int seq = __read_seqcount_begin(&resv->seq);
 	struct dma_fence *excl;
@@ -430,7 +430,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		for (i = 0; i < count; i++) {
 			timeout = i915_gem_object_wait_fence(shared[i],
 							     flags, timeout,
-							     rps);
+							     rps_client);
 			if (timeout < 0)
 				break;
 
@@ -447,7 +447,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	}
 
 	if (excl && timeout >= 0) {
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+						     rps_client);
 		prune_fences = timeout >= 0;
 	}
 
@@ -543,7 +544,7 @@ int
 i915_gem_object_wait(struct drm_i915_gem_object *obj,
 		     unsigned int flags,
 		     long timeout,
-		     struct intel_rps_client *rps)
+		     struct intel_rps_client *rps_client)
 {
 	might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
@@ -555,7 +556,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
 	timeout = i915_gem_object_wait_reservation(obj->resv,
 						   flags, timeout,
-						   rps);
+						   rps_client);
 	return timeout < 0 ? timeout : 0;
 }
 
@@ -563,7 +564,7 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
 {
 	struct drm_i915_file_private *fpriv = file->driver_priv;
 
-	return &fpriv->rps;
+	return &fpriv->rps_client;
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index b100b38f1dd2..d5f4023e5d63 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -416,7 +416,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 
 	spin_lock_irq(&request->lock);
 	if (request->waitboost)
-		atomic_dec(&request->i915->rps.num_waiters);
+		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
 	dma_fence_signal_locked(&request->fence);
 	spin_unlock_irq(&request->lock);
 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index f15de4dcefde..a2e8114b739d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -1028,6 +1028,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 
 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int irqs;
@@ -1064,12 +1065,13 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 	 * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
 	 * result in the register bit being left SET!
 	 */
-	dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
-	dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 }
 
 static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int irqs;
@@ -1088,8 +1090,8 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
 	I915_WRITE(GUC_WD_VECS_IER, 0);
 
-	dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-	dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
 }
 
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1844d3fe8f1f..b1296a55c1e4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -404,19 +404,21 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
-	dev_priv->rps.pm_iir = 0;
+	dev_priv->gt_pm.rps.pm_iir = 0;
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+	if (READ_ONCE(rps->interrupts_enabled))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON_ONCE(dev_priv->rps.pm_iir);
+	WARN_ON_ONCE(rps->pm_iir);
 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-	dev_priv->rps.interrupts_enabled = true;
+	rps->interrupts_enabled = true;
 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -424,11 +426,13 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+	if (!READ_ONCE(rps->interrupts_enabled))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->rps.interrupts_enabled = false;
+	rps->interrupts_enabled = false;
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
@@ -442,7 +446,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	 * we will reset the GPU to minimum frequencies, so the current
 	 * state of the worker can be discarded.
 	 */
-	cancel_work_sync(&dev_priv->rps.work);
+	cancel_work_sync(&rps->work);
 	gen6_reset_rps_interrupts(dev_priv);
 }
 
@@ -1119,12 +1123,13 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
 
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
-	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	const struct intel_rps_ei *prev = &rps->ei;
 	struct intel_rps_ei now;
 	u32 events = 0;
 
@@ -1151,28 +1156,29 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 		c0 = max(render, media);
 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
 
-		if (c0 > time * dev_priv->rps.up_threshold)
+		if (c0 > time * rps->up_threshold)
 			events = GEN6_PM_RP_UP_THRESHOLD;
-		else if (c0 < time * dev_priv->rps.down_threshold)
+		else if (c0 < time * rps->down_threshold)
 			events = GEN6_PM_RP_DOWN_THRESHOLD;
 	}
 
-	dev_priv->rps.ei = now;
+	rps->ei = now;
 	return events;
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, rps.work);
+		container_of(work, struct drm_i915_private, gt_pm.rps.work);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	bool client_boost = false;
 	int new_delay, adj, min, max;
 	u32 pm_iir = 0;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->rps.interrupts_enabled) {
-		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
-		client_boost = atomic_read(&dev_priv->rps.num_waiters);
+	if (rps->interrupts_enabled) {
+		pm_iir = fetch_and_zero(&rps->pm_iir);
+		client_boost = atomic_read(&rps->num_waiters);
 	}
 	spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -1185,14 +1191,14 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
 
-	adj = dev_priv->rps.last_adj;
-	new_delay = dev_priv->rps.cur_freq;
-	min = dev_priv->rps.min_freq_softlimit;
-	max = dev_priv->rps.max_freq_softlimit;
+	adj = rps->last_adj;
+	new_delay = rps->cur_freq;
+	min = rps->min_freq_softlimit;
+	max = rps->max_freq_softlimit;
 	if (client_boost)
-		max = dev_priv->rps.max_freq;
-	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
-		new_delay = dev_priv->rps.boost_freq;
+		max = rps->max_freq;
+	if (client_boost && new_delay < rps->boost_freq) {
+		new_delay = rps->boost_freq;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
 		if (adj > 0)
@@ -1200,15 +1206,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		else /* CHV needs even encode values */
 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
 
-		if (new_delay >= dev_priv->rps.max_freq_softlimit)
+		if (new_delay >= rps->max_freq_softlimit)
 			adj = 0;
 	} else if (client_boost) {
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
-			new_delay = dev_priv->rps.efficient_freq;
-		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
-			new_delay = dev_priv->rps.min_freq_softlimit;
+		if (rps->cur_freq > rps->efficient_freq)
+			new_delay = rps->efficient_freq;
+		else if (rps->cur_freq > rps->min_freq_softlimit)
+			new_delay = rps->min_freq_softlimit;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
 		if (adj < 0)
@@ -1216,13 +1222,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		else /* CHV needs even encode values */
 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
 
-		if (new_delay <= dev_priv->rps.min_freq_softlimit)
+		if (new_delay <= rps->min_freq_softlimit)
 			adj = 0;
 	} else { /* unknown event */
 		adj = 0;
 	}
 
-	dev_priv->rps.last_adj = adj;
+	rps->last_adj = adj;
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
@@ -1232,7 +1238,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 	if (intel_set_rps(dev_priv, new_delay)) {
 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-		dev_priv->rps.last_adj = 0;
+		rps->last_adj = 0;
 	}
 
 	mutex_unlock(&dev_priv->pcu_lock);
@@ -1240,7 +1246,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 out:
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
 	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->rps.interrupts_enabled)
+	if (rps->interrupts_enabled)
 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -1721,12 +1727,14 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  * the work queue. */
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
 	if (pm_iir & dev_priv->pm_rps_events) {
 		spin_lock(&dev_priv->irq_lock);
 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
-		if (dev_priv->rps.interrupts_enabled) {
-			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-			schedule_work(&dev_priv->rps.work);
+		if (rps->interrupts_enabled) {
+			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+			schedule_work(&rps->work);
 		}
 		spin_unlock(&dev_priv->irq_lock);
 	}
@@ -4007,11 +4015,12 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int i;
 
 	intel_hpd_init_work(dev_priv);
 
-	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+	INIT_WORK(&rps->work, gen6_pm_rps_work);
 
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 	for (i = 0; i < MAX_L3_SLICES; ++i)
@@ -4027,7 +4036,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-	dev_priv->rps.pm_intrmsk_mbz = 0;
+	rps->pm_intrmsk_mbz = 0;
 
 	/*
 	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
@@ -4036,10 +4045,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	 * TODO: verify if this can be reproduced on VLV,CHV.
 	 */
 	if (INTEL_GEN(dev_priv) <= 7)
-		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
 	if (INTEL_GEN(dev_priv) >= 8)
-		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
 	if (IS_GEN2(dev_priv)) {
 		/* Gen2 doesn't have a hardware frame counter */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 79fbab49d1d0..0a57f9867f7f 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -275,7 +275,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.cur_freq));
+				       dev_priv->gt_pm.rps.cur_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -284,7 +284,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.boost_freq));
+				       dev_priv->gt_pm.rps.boost_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -292,6 +292,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 				       const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 	ssize_t ret;
 
@@ -301,11 +302,11 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 
 	/* Validate against (static) hardware limits */
 	val = intel_freq_opcode(dev_priv, val);
-	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+	if (val < rps->min_freq || val > rps->max_freq)
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->pcu_lock);
-	dev_priv->rps.boost_freq = val;
+	rps->boost_freq = val;
 	mutex_unlock(&dev_priv->pcu_lock);
 
 	return count;
@@ -318,7 +319,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.efficient_freq));
+				       dev_priv->gt_pm.rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -327,7 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.max_freq_softlimit));
+				       dev_priv->gt_pm.rps.max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -335,6 +336,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				    const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 	ssize_t ret;
 
@@ -348,23 +350,23 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	val = intel_freq_opcode(dev_priv, val);
 
-	if (val < dev_priv->rps.min_freq ||
-	    val > dev_priv->rps.max_freq ||
-	    val < dev_priv->rps.min_freq_softlimit) {
+	if (val < rps->min_freq ||
+	    val > rps->max_freq ||
+	    val < rps->min_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
-	if (val > dev_priv->rps.rp0_freq)
+	if (val > rps->rp0_freq)
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  intel_gpu_freq(dev_priv, val));
 
-	dev_priv->rps.max_freq_softlimit = val;
+	rps->max_freq_softlimit = val;
 
-	val = clamp_t(int, dev_priv->rps.cur_freq,
-		      dev_priv->rps.min_freq_softlimit,
-		      dev_priv->rps.max_freq_softlimit);
+	val = clamp_t(int, rps->cur_freq,
+		      rps->min_freq_softlimit,
+		      rps->max_freq_softlimit);
 
 	/* We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
@@ -384,7 +386,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.min_freq_softlimit));
+				       dev_priv->gt_pm.rps.min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -392,6 +394,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				    const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 	ssize_t ret;
 
@@ -405,19 +408,19 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 	val = intel_freq_opcode(dev_priv, val);
 
-	if (val < dev_priv->rps.min_freq ||
-	    val > dev_priv->rps.max_freq ||
-	    val > dev_priv->rps.max_freq_softlimit) {
+	if (val < rps->min_freq ||
+	    val > rps->max_freq ||
+	    val > rps->max_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_freq_softlimit = val;
+	rps->min_freq_softlimit = val;
 
-	val = clamp_t(int, dev_priv->rps.cur_freq,
-		      dev_priv->rps.min_freq_softlimit,
-		      dev_priv->rps.max_freq_softlimit);
+	val = clamp_t(int, rps->cur_freq,
+		      rps->min_freq_softlimit,
+		      rps->max_freq_softlimit);
 
 	/* We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
@@ -448,14 +451,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
-		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+		val = intel_gpu_freq(dev_priv, rps->rp0_freq);
 	else if (attr == &dev_attr_gt_RP1_freq_mhz)
-		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+		val = intel_gpu_freq(dev_priv, rps->rp1_freq);
 	else if (attr == &dev_attr_gt_RPn_freq_mhz)
-		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+		val = intel_gpu_freq(dev_priv, rps->min_freq);
 	else
 		BUG();
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3fd428b99c37..53acfc475e35 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1243,7 +1243,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
 static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
 					    u32 mask)
 {
-	return mask & ~i915->rps.pm_intrmsk_mbz;
+	return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
 }
 
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 512f2b0513e0..9097489e1993 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5988,6 +5988,7 @@ static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
  */
 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 limits;
 
 	/* Only set the down limit when we've reached the lowest level to avoid
@@ -5997,13 +5998,13 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
 	if (INTEL_GEN(dev_priv) >= 9) {
-		limits = (dev_priv->rps.max_freq_softlimit) << 23;
-		if (val <= dev_priv->rps.min_freq_softlimit)
-			limits |= (dev_priv->rps.min_freq_softlimit) << 14;
+		limits = (rps->max_freq_softlimit) << 23;
+		if (val <= rps->min_freq_softlimit)
+			limits |= (rps->min_freq_softlimit) << 14;
 	} else {
-		limits = dev_priv->rps.max_freq_softlimit << 24;
-		if (val <= dev_priv->rps.min_freq_softlimit)
-			limits |= dev_priv->rps.min_freq_softlimit << 16;
+		limits = rps->max_freq_softlimit << 24;
+		if (val <= rps->min_freq_softlimit)
+			limits |= rps->min_freq_softlimit << 16;
 	}
 
 	return limits;
@@ -6011,39 +6012,40 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
 
 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int new_power;
 	u32 threshold_up = 0, threshold_down = 0; /* in % */
 	u32 ei_up = 0, ei_down = 0;
 
-	new_power = dev_priv->rps.power;
-	switch (dev_priv->rps.power) {
+	new_power = rps->power;
+	switch (rps->power) {
 	case LOW_POWER:
-		if (val > dev_priv->rps.efficient_freq + 1 &&
-		    val > dev_priv->rps.cur_freq)
+		if (val > rps->efficient_freq + 1 &&
+		    val > rps->cur_freq)
 			new_power = BETWEEN;
 		break;
 
 	case BETWEEN:
-		if (val <= dev_priv->rps.efficient_freq &&
-		    val < dev_priv->rps.cur_freq)
+		if (val <= rps->efficient_freq &&
+		    val < rps->cur_freq)
 			new_power = LOW_POWER;
-		else if (val >= dev_priv->rps.rp0_freq &&
-			 val > dev_priv->rps.cur_freq)
+		else if (val >= rps->rp0_freq &&
+			 val > rps->cur_freq)
 			new_power = HIGH_POWER;
 		break;
 
 	case HIGH_POWER:
-		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
-		    val < dev_priv->rps.cur_freq)
+		if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 &&
+		    val < rps->cur_freq)
 			new_power = BETWEEN;
 		break;
 	}
 	/* Max/min bins are special */
-	if (val <= dev_priv->rps.min_freq_softlimit)
+	if (val <= rps->min_freq_softlimit)
 		new_power = LOW_POWER;
-	if (val >= dev_priv->rps.max_freq_softlimit)
+	if (val >= rps->max_freq_softlimit)
 		new_power = HIGH_POWER;
-	if (new_power == dev_priv->rps.power)
+	if (new_power == rps->power)
 		return;
 
 	/* Note the units here are not exactly 1us, but 1280ns. */
@@ -6106,20 +6108,21 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 			   GEN6_RP_DOWN_IDLE_AVG);
 
 skip_hw_write:
-	dev_priv->rps.power = new_power;
-	dev_priv->rps.up_threshold = threshold_up;
-	dev_priv->rps.down_threshold = threshold_down;
-	dev_priv->rps.last_adj = 0;
+	rps->power = new_power;
+	rps->up_threshold = threshold_up;
+	rps->down_threshold = threshold_down;
+	rps->last_adj = 0;
 }
 
 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 mask = 0;
 
 	/* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
-	if (val > dev_priv->rps.min_freq_softlimit)
+	if (val > rps->min_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
-	if (val < dev_priv->rps.max_freq_softlimit)
+	if (val < rps->max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
 	mask &= dev_priv->pm_rps_events;
@@ -6132,10 +6135,12 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
 	/* min/max delay may still have been modified so be sure to
 	 * write the limits value.
 	 */
-	if (val != dev_priv->rps.cur_freq) {
+	if (val != rps->cur_freq) {
 		gen6_set_rps_thresholds(dev_priv, val);
 
 		if (INTEL_GEN(dev_priv) >= 9)
@@ -6157,7 +6162,7 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-	dev_priv->rps.cur_freq = val;
+	rps->cur_freq = val;
 	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 
 	return 0;
@@ -6173,7 +6178,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
6173 6178
6174 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 6179 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
6175 6180
6176 if (val != dev_priv->rps.cur_freq) { 6181 if (val != dev_priv->gt_pm.rps.cur_freq) {
6177 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 6182 err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
6178 if (err) 6183 if (err)
6179 return err; 6184 return err;
@@ -6181,7 +6186,7 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
6181 gen6_set_rps_thresholds(dev_priv, val); 6186 gen6_set_rps_thresholds(dev_priv, val);
6182 } 6187 }
6183 6188
6184 dev_priv->rps.cur_freq = val; 6189 dev_priv->gt_pm.rps.cur_freq = val;
6185 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 6190 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
6186 6191
6187 return 0; 6192 return 0;
@@ -6196,10 +6201,11 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
6196*/ 6201*/
6197static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 6202static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
6198{ 6203{
6199 u32 val = dev_priv->rps.idle_freq; 6204 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6205 u32 val = rps->idle_freq;
6200 int err; 6206 int err;
6201 6207
6202 if (dev_priv->rps.cur_freq <= val) 6208 if (rps->cur_freq <= val)
6203 return; 6209 return;
6204 6210
6205 /* The punit delays the write of the frequency and voltage until it 6211 /* The punit delays the write of the frequency and voltage until it
@@ -6224,27 +6230,29 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
6224 6230
6225void gen6_rps_busy(struct drm_i915_private *dev_priv) 6231void gen6_rps_busy(struct drm_i915_private *dev_priv)
6226{ 6232{
6233 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6234
6227 mutex_lock(&dev_priv->pcu_lock); 6235 mutex_lock(&dev_priv->pcu_lock);
6228 if (dev_priv->rps.enabled) { 6236 if (rps->enabled) {
6229 u8 freq; 6237 u8 freq;
6230 6238
6231 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) 6239 if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
6232 gen6_rps_reset_ei(dev_priv); 6240 gen6_rps_reset_ei(dev_priv);
6233 I915_WRITE(GEN6_PMINTRMSK, 6241 I915_WRITE(GEN6_PMINTRMSK,
6234 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 6242 gen6_rps_pm_mask(dev_priv, rps->cur_freq));
6235 6243
6236 gen6_enable_rps_interrupts(dev_priv); 6244 gen6_enable_rps_interrupts(dev_priv);
6237 6245
6238 /* Use the user's desired frequency as a guide, but for better 6246 /* Use the user's desired frequency as a guide, but for better
6239 * performance, jump directly to RPe as our starting frequency. 6247 * performance, jump directly to RPe as our starting frequency.
6240 */ 6248 */
6241 freq = max(dev_priv->rps.cur_freq, 6249 freq = max(rps->cur_freq,
6242 dev_priv->rps.efficient_freq); 6250 rps->efficient_freq);
6243 6251
6244 if (intel_set_rps(dev_priv, 6252 if (intel_set_rps(dev_priv,
6245 clamp(freq, 6253 clamp(freq,
6246 dev_priv->rps.min_freq_softlimit, 6254 rps->min_freq_softlimit,
6247 dev_priv->rps.max_freq_softlimit))) 6255 rps->max_freq_softlimit)))
6248 DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); 6256 DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
6249 } 6257 }
6250 mutex_unlock(&dev_priv->pcu_lock); 6258 mutex_unlock(&dev_priv->pcu_lock);
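The unpark path above composes three constraints before touching the hardware: take the higher of the current request and the efficient frequency (RPe) as the starting point, then clamp into the user's soft limits before calling intel_set_rps(). Condensed into one expression (a sketch, not driver code):

	static u8 busy_start_freq(const struct intel_rps *rps)
	{
		/* prefer RPe over a lower user request, then honour soft limits */
		return clamp_t(u8, max_t(u8, rps->cur_freq, rps->efficient_freq),
			       rps->min_freq_softlimit,
			       rps->max_freq_softlimit);
	}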
@@ -6252,6 +6260,8 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
6252 6260
6253void gen6_rps_idle(struct drm_i915_private *dev_priv) 6261void gen6_rps_idle(struct drm_i915_private *dev_priv)
6254{ 6262{
6263 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6264
6255 /* Flush our bottom-half so that it does not race with us 6265 /* Flush our bottom-half so that it does not race with us
6256 * setting the idle frequency and so that it is bounded by 6266 * setting the idle frequency and so that it is bounded by
6257 * our rpm wakeref. And then disable the interrupts to stop any 6267 * our rpm wakeref. And then disable the interrupts to stop any
@@ -6260,12 +6270,12 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
6260 gen6_disable_rps_interrupts(dev_priv); 6270 gen6_disable_rps_interrupts(dev_priv);
6261 6271
6262 mutex_lock(&dev_priv->pcu_lock); 6272 mutex_lock(&dev_priv->pcu_lock);
6263 if (dev_priv->rps.enabled) { 6273 if (rps->enabled) {
6264 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 6274 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6265 vlv_set_rps_idle(dev_priv); 6275 vlv_set_rps_idle(dev_priv);
6266 else 6276 else
6267 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq); 6277 gen6_set_rps(dev_priv, rps->idle_freq);
6268 dev_priv->rps.last_adj = 0; 6278 rps->last_adj = 0;
6269 I915_WRITE(GEN6_PMINTRMSK, 6279 I915_WRITE(GEN6_PMINTRMSK,
6270 gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 6280 gen6_sanitize_rps_pm_mask(dev_priv, ~0));
6271 } 6281 }
@@ -6273,22 +6283,22 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
6273} 6283}
6274 6284
6275void gen6_rps_boost(struct drm_i915_gem_request *rq, 6285void gen6_rps_boost(struct drm_i915_gem_request *rq,
6276 struct intel_rps_client *rps) 6286 struct intel_rps_client *rps_client)
6277{ 6287{
6278 struct drm_i915_private *i915 = rq->i915; 6288 struct intel_rps *rps = &rq->i915->gt_pm.rps;
6279 unsigned long flags; 6289 unsigned long flags;
6280 bool boost; 6290 bool boost;
6281 6291
6282 /* This is intentionally racy! We peek at the state here, then 6292 /* This is intentionally racy! We peek at the state here, then
6283 * validate inside the RPS worker. 6293 * validate inside the RPS worker.
6284 */ 6294 */
6285 if (!i915->rps.enabled) 6295 if (!rps->enabled)
6286 return; 6296 return;
6287 6297
6288 boost = false; 6298 boost = false;
6289 spin_lock_irqsave(&rq->lock, flags); 6299 spin_lock_irqsave(&rq->lock, flags);
6290 if (!rq->waitboost && !i915_gem_request_completed(rq)) { 6300 if (!rq->waitboost && !i915_gem_request_completed(rq)) {
6291 atomic_inc(&i915->rps.num_waiters); 6301 atomic_inc(&rps->num_waiters);
6292 rq->waitboost = true; 6302 rq->waitboost = true;
6293 boost = true; 6303 boost = true;
6294 } 6304 }
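The boost path is a peek-then-validate pattern, as its comment says: rps->enabled is read racily without a lock for a cheap early-out, and the real decision happens under rq->lock, where rq->waitboost guarantees each request bumps num_waiters at most once. The shape of the pattern in isolation (all names here hypothetical):

	struct boost_state {
		bool enabled;			/* peeked racily, revalidated later */
		atomic_t num_waiters;
	};

	static bool try_boost(struct boost_state *bs, spinlock_t *lock,
			      bool *boosted_flag, bool completed)
	{
		unsigned long flags;
		bool boost = false;

		if (!READ_ONCE(bs->enabled))	/* racy peek; missing a boost is fine */
			return false;

		spin_lock_irqsave(lock, flags);
		if (!*boosted_flag && !completed) {	/* validated under the lock */
			atomic_inc(&bs->num_waiters);
			*boosted_flag = true;
			boost = true;
		}
		spin_unlock_irqrestore(lock, flags);

		return boost;
	}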
@@ -6296,22 +6306,23 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
6296 if (!boost) 6306 if (!boost)
6297 return; 6307 return;
6298 6308
6299 if (READ_ONCE(i915->rps.cur_freq) < i915->rps.boost_freq) 6309 if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
6300 schedule_work(&i915->rps.work); 6310 schedule_work(&rps->work);
6301 6311
6302 atomic_inc(rps ? &rps->boosts : &i915->rps.boosts); 6312 atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
6303} 6313}
6304 6314
6305int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) 6315int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
6306{ 6316{
6317 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6307 int err; 6318 int err;
6308 6319
6309 lockdep_assert_held(&dev_priv->pcu_lock); 6320 lockdep_assert_held(&dev_priv->pcu_lock);
6310 GEM_BUG_ON(val > dev_priv->rps.max_freq); 6321 GEM_BUG_ON(val > rps->max_freq);
6311 GEM_BUG_ON(val < dev_priv->rps.min_freq); 6322 GEM_BUG_ON(val < rps->min_freq);
6312 6323
6313 if (!dev_priv->rps.enabled) { 6324 if (!rps->enabled) {
6314 dev_priv->rps.cur_freq = val; 6325 rps->cur_freq = val;
6315 return 0; 6326 return 0;
6316 } 6327 }
6317 6328
@@ -6493,24 +6504,26 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
6493 6504
6494static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) 6505static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
6495{ 6506{
6507 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6508
6496 /* All of these values are in units of 50MHz */ 6509 /* All of these values are in units of 50MHz */
6497 6510
6498 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 6511 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
6499 if (IS_GEN9_LP(dev_priv)) { 6512 if (IS_GEN9_LP(dev_priv)) {
6500 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 6513 u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
6501 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 6514 rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
6502 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 6515 rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
6503 dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff; 6516 rps->min_freq = (rp_state_cap >> 0) & 0xff;
6504 } else { 6517 } else {
6505 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 6518 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6506 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 6519 rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
6507 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 6520 rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
6508 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 6521 rps->min_freq = (rp_state_cap >> 16) & 0xff;
6509 } 6522 }
6510 /* hw_max = RP0 until we check for overclocking */ 6523 /* hw_max = RP0 until we check for overclocking */
6511 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 6524 rps->max_freq = rps->rp0_freq;
6512 6525
6513 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 6526 rps->efficient_freq = rps->rp1_freq;
6514 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || 6527 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
6515 IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { 6528 IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6516 u32 ddcc_status = 0; 6529 u32 ddcc_status = 0;
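One subtlety in the decode above: the RP_STATE_CAP layout is byte-reversed between the two variants. On GEN9 LP the register puts RP0 in bits 23:16 and RPn in bits 7:0, while the GEN6 layout has RP0 in bits 7:0 and RPn in bits 23:16; RP1 occupies bits 15:8 in both. A helper capturing just that (decode_rp_caps is hypothetical):

	static void decode_rp_caps(u32 cap, bool gen9_lp,
				   u8 *rp0, u8 *rp1, u8 *rpn)
	{
		*rp1 = (cap >> 8) & 0xff;	/* same slot in both layouts */
		if (gen9_lp) {
			*rp0 = (cap >> 16) & 0xff;
			*rpn = (cap >> 0) & 0xff;
		} else {
			*rp0 = (cap >> 0) & 0xff;
			*rpn = (cap >> 16) & 0xff;
		}
	}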
@@ -6518,33 +6531,34 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
6518 if (sandybridge_pcode_read(dev_priv, 6531 if (sandybridge_pcode_read(dev_priv,
6519 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 6532 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
6520 &ddcc_status) == 0) 6533 &ddcc_status) == 0)
6521 dev_priv->rps.efficient_freq = 6534 rps->efficient_freq =
6522 clamp_t(u8, 6535 clamp_t(u8,
6523 ((ddcc_status >> 8) & 0xff), 6536 ((ddcc_status >> 8) & 0xff),
6524 dev_priv->rps.min_freq, 6537 rps->min_freq,
6525 dev_priv->rps.max_freq); 6538 rps->max_freq);
6526 } 6539 }
6527 6540
6528 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { 6541 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6529 /* Store the frequency values in 16.66 MHz units, which is 6542 /* Store the frequency values in 16.66 MHz units, which is
6530 * the natural hardware unit for SKL 6543 * the natural hardware unit for SKL
6531 */ 6544 */
6532 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 6545 rps->rp0_freq *= GEN9_FREQ_SCALER;
6533 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; 6546 rps->rp1_freq *= GEN9_FREQ_SCALER;
6534 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; 6547 rps->min_freq *= GEN9_FREQ_SCALER;
6535 dev_priv->rps.max_freq *= GEN9_FREQ_SCALER; 6548 rps->max_freq *= GEN9_FREQ_SCALER;
6536 dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER; 6549 rps->efficient_freq *= GEN9_FREQ_SCALER;
6537 } 6550 }
6538} 6551}
6539 6552
6540static void reset_rps(struct drm_i915_private *dev_priv, 6553static void reset_rps(struct drm_i915_private *dev_priv,
6541 int (*set)(struct drm_i915_private *, u8)) 6554 int (*set)(struct drm_i915_private *, u8))
6542{ 6555{
6543 u8 freq = dev_priv->rps.cur_freq; 6556 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6557 u8 freq = rps->cur_freq;
6544 6558
6545 /* force a reset */ 6559 /* force a reset */
6546 dev_priv->rps.power = -1; 6560 rps->power = -1;
6547 dev_priv->rps.cur_freq = -1; 6561 rps->cur_freq = -1;
6548 6562
6549 if (set(dev_priv, freq)) 6563 if (set(dev_priv, freq))
6550 DRM_ERROR("Failed to reset RPS to initial values\n"); 6564 DRM_ERROR("Failed to reset RPS to initial values\n");
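The GEN9_FREQ_SCALER multiplications above move every cached frequency from the 50 MHz units used elsewhere into the hardware's native 16.66 MHz units; 16.66 MHz is 50 MHz / 3, so the scaler is 3 (stated here as an assumption, but it is the only value consistent with the comment). Converting a cached value back to MHz on these parts then reads:

	/* Sketch: SKL-class cached units -> MHz, assuming a scaler of 3 */
	static int gen9_units_to_mhz(int val)
	{
		return DIV_ROUND_CLOSEST(val * 50, 3);	/* one unit = 16.66 MHz */
	}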
@@ -6557,7 +6571,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
6557 6571
6558 /* Program defaults and thresholds for RPS*/ 6572 /* Program defaults and thresholds for RPS*/
6559 I915_WRITE(GEN6_RC_VIDEO_FREQ, 6573 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6560 GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); 6574 GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
6561 6575
6562 /* 1 second timeout*/ 6576 /* 1 second timeout*/
6563 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 6577 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
@@ -6670,20 +6684,22 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
6670 6684
6671static void gen8_enable_rps(struct drm_i915_private *dev_priv) 6685static void gen8_enable_rps(struct drm_i915_private *dev_priv)
6672{ 6686{
6687 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6688
6673 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 6689 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
6674 6690
6675 /* 1 Program defaults and thresholds for RPS*/ 6691 /* 1 Program defaults and thresholds for RPS*/
6676 I915_WRITE(GEN6_RPNSWREQ, 6692 I915_WRITE(GEN6_RPNSWREQ,
6677 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 6693 HSW_FREQUENCY(rps->rp1_freq));
6678 I915_WRITE(GEN6_RC_VIDEO_FREQ, 6694 I915_WRITE(GEN6_RC_VIDEO_FREQ,
6679 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 6695 HSW_FREQUENCY(rps->rp1_freq));
6680 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ 6696 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
6681 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ 6697 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
6682 6698
6683 /* Docs recommend 900MHz, and 300 MHz respectively */ 6699 /* Docs recommend 900MHz, and 300 MHz respectively */
6684 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 6700 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
6685 dev_priv->rps.max_freq_softlimit << 24 | 6701 rps->max_freq_softlimit << 24 |
6686 dev_priv->rps.min_freq_softlimit << 16); 6702 rps->min_freq_softlimit << 16);
6687 6703
6688 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ 6704 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
6689 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ 6705 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
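All of the literals above fall out of the 1280 ns register unit noted earlier: 100000000 / 128 = 781250 units, and 781250 * 1.28 us is exactly 1 s, which is why the 1000000 from the docs (1.28 s in these units) is flagged as not equivalent. Likewise 7600000 / 128 = 59375 units = 76 ms, and 31300000 / 128 = 244531 units, roughly 313 ms. As a worked conversion (the macro is illustrative, not the driver's):

	#define RP_UNIT_NS		1280
	#define US_TO_RP_UNITS(us)	((us) * 1000 / RP_UNIT_NS)

	/* US_TO_RP_UNITS(1000000) == 781250 -> the 1 s down timeout      */
	/* US_TO_RP_UNITS(76000)   == 59375  -> the 76 ms up threshold    */
	/* US_TO_RP_UNITS(313000)  == 244531 -> the 313 ms down threshold */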
@@ -6810,6 +6826,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
6810 6826
6811static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) 6827static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
6812{ 6828{
6829 struct intel_rps *rps = &dev_priv->gt_pm.rps;
6813 int min_freq = 15; 6830 int min_freq = 15;
6814 unsigned int gpu_freq; 6831 unsigned int gpu_freq;
6815 unsigned int max_ia_freq, min_ring_freq; 6832 unsigned int max_ia_freq, min_ring_freq;
@@ -6840,11 +6857,11 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
6840 6857
6841 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { 6858 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
6842 /* Convert GT frequency to 50 MHz units */ 6859
6843 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 6860 min_gpu_freq = rps->min_freq / GEN9_FREQ_SCALER;
6844 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 6861 max_gpu_freq = rps->max_freq / GEN9_FREQ_SCALER;
6845 } else { 6862 } else {
6846 min_gpu_freq = dev_priv->rps.min_freq; 6863 min_gpu_freq = rps->min_freq;
6847 max_gpu_freq = dev_priv->rps.max_freq; 6864 max_gpu_freq = rps->max_freq;
6848 } 6865 }
6849 6866
6850 /* 6867 /*
@@ -7095,17 +7112,18 @@ static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
7095 7112
7096static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) 7113static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
7097{ 7114{
7098 dev_priv->rps.gpll_ref_freq = 7115 dev_priv->gt_pm.rps.gpll_ref_freq =
7099 vlv_get_cck_clock(dev_priv, "GPLL ref", 7116 vlv_get_cck_clock(dev_priv, "GPLL ref",
7100 CCK_GPLL_CLOCK_CONTROL, 7117 CCK_GPLL_CLOCK_CONTROL,
7101 dev_priv->czclk_freq); 7118 dev_priv->czclk_freq);
7102 7119
7103 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", 7120 DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
7104 dev_priv->rps.gpll_ref_freq); 7121 dev_priv->gt_pm.rps.gpll_ref_freq);
7105} 7122}
7106 7123
7107static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) 7124static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
7108{ 7125{
7126 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7109 u32 val; 7127 u32 val;
7110 7128
7111 valleyview_setup_pctx(dev_priv); 7129 valleyview_setup_pctx(dev_priv);
@@ -7127,30 +7145,31 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
7127 } 7145 }
7128 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 7146 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7129 7147
7130 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 7148 rps->max_freq = valleyview_rps_max_freq(dev_priv);
7131 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 7149 rps->rp0_freq = rps->max_freq;
7132 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 7150 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7133 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 7151 intel_gpu_freq(dev_priv, rps->max_freq),
7134 dev_priv->rps.max_freq); 7152 rps->max_freq);
7135 7153
7136 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 7154 rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv);
7137 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 7155 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7138 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 7156 intel_gpu_freq(dev_priv, rps->efficient_freq),
7139 dev_priv->rps.efficient_freq); 7157 rps->efficient_freq);
7140 7158
7141 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); 7159 rps->rp1_freq = valleyview_rps_guar_freq(dev_priv);
7142 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 7160 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
7143 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 7161 intel_gpu_freq(dev_priv, rps->rp1_freq),
7144 dev_priv->rps.rp1_freq); 7162 rps->rp1_freq);
7145 7163
7146 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 7164 rps->min_freq = valleyview_rps_min_freq(dev_priv);
7147 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 7165 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7148 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 7166 intel_gpu_freq(dev_priv, rps->min_freq),
7149 dev_priv->rps.min_freq); 7167 rps->min_freq);
7150} 7168}
7151 7169
7152static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) 7170static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
7153{ 7171{
7172 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7154 u32 val; 7173 u32 val;
7155 7174
7156 cherryview_setup_pctx(dev_priv); 7175 cherryview_setup_pctx(dev_priv);
@@ -7171,31 +7190,29 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
7171 } 7190 }
7172 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 7191 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
7173 7192
7174 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 7193 rps->max_freq = cherryview_rps_max_freq(dev_priv);
7175 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 7194 rps->rp0_freq = rps->max_freq;
7176 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 7195 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
7177 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 7196 intel_gpu_freq(dev_priv, rps->max_freq),
7178 dev_priv->rps.max_freq); 7197 rps->max_freq);
7179 7198
7180 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); 7199 rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv);
7181 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 7200 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
7182 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 7201 intel_gpu_freq(dev_priv, rps->efficient_freq),
7183 dev_priv->rps.efficient_freq); 7202 rps->efficient_freq);
7184 7203
7185 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); 7204 rps->rp1_freq = cherryview_rps_guar_freq(dev_priv);
7186 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", 7205 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
7187 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 7206 intel_gpu_freq(dev_priv, rps->rp1_freq),
7188 dev_priv->rps.rp1_freq); 7207 rps->rp1_freq);
7189 7208
7190 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv); 7209 rps->min_freq = cherryview_rps_min_freq(dev_priv);
7191 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 7210 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
7192 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 7211 intel_gpu_freq(dev_priv, rps->min_freq),
7193 dev_priv->rps.min_freq); 7212 rps->min_freq);
7194 7213
7195 WARN_ONCE((dev_priv->rps.max_freq | 7214 WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
7196 dev_priv->rps.efficient_freq | 7215 rps->min_freq) & 1,
7197 dev_priv->rps.rp1_freq |
7198 dev_priv->rps.min_freq) & 1,
7199 "Odd GPU freq values\n"); 7216 "Odd GPU freq values\n");
7200} 7217}
7201 7218
@@ -7584,7 +7601,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
7584 7601
7585 lockdep_assert_held(&mchdev_lock); 7602 lockdep_assert_held(&mchdev_lock);
7586 7603
7587 pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq)); 7604 pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq));
7588 pxvid = (pxvid >> 24) & 0x7f; 7605 pxvid = (pxvid >> 24) & 0x7f;
7589 ext_v = pvid_to_extvid(dev_priv, pxvid); 7606 ext_v = pvid_to_extvid(dev_priv, pxvid);
7590 7607
@@ -7871,6 +7888,8 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
7871 7888
7872void intel_init_gt_powersave(struct drm_i915_private *dev_priv) 7889void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
7873{ 7890{
7891 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7892
7874 /* 7893 /*
7875 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a 7894 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
7876 * requirement. 7895 * requirement.
@@ -7892,16 +7911,16 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
7892 gen6_init_rps_frequencies(dev_priv); 7911 gen6_init_rps_frequencies(dev_priv);
7893 7912
7894 /* Derive initial user preferences/limits from the hardware limits */ 7913 /* Derive initial user preferences/limits from the hardware limits */
7895 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 7914 rps->idle_freq = rps->min_freq;
7896 dev_priv->rps.cur_freq = dev_priv->rps.idle_freq; 7915 rps->cur_freq = rps->idle_freq;
7897 7916
7898 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 7917 rps->max_freq_softlimit = rps->max_freq;
7899 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 7918 rps->min_freq_softlimit = rps->min_freq;
7900 7919
7901 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 7920 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
7902 dev_priv->rps.min_freq_softlimit = 7921 rps->min_freq_softlimit =
7903 max_t(int, 7922 max_t(int,
7904 dev_priv->rps.efficient_freq, 7923 rps->efficient_freq,
7905 intel_freq_opcode(dev_priv, 450)); 7924 intel_freq_opcode(dev_priv, 450));
7906 7925
7907 /* After setting max-softlimit, find the overclock max freq */ 7926 /* After setting max-softlimit, find the overclock max freq */
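The HSW/BDW special case above deserves a worked number: those parts use 50 MHz frequency units, so intel_freq_opcode(dev_priv, 450) resolves to 450 / 50 = 9, and the soft floor becomes max(RPe, 9) rather than the absolute hardware minimum. In sketch form (assuming the 50 MHz unit):

	static int hsw_min_softlimit(const struct intel_rps *rps)
	{
		/* 450 MHz in 50 MHz units is opcode 9; never dip below RPe */
		return max_t(int, rps->efficient_freq, 450 / 50);
	}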
@@ -7912,14 +7931,14 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
7912 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params); 7931 sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &params);
7913 if (params & BIT(31)) { /* OC supported */ 7932 if (params & BIT(31)) { /* OC supported */
7914 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", 7933 DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n",
7915 (dev_priv->rps.max_freq & 0xff) * 50, 7934 (rps->max_freq & 0xff) * 50,
7916 (params & 0xff) * 50); 7935 (params & 0xff) * 50);
7917 dev_priv->rps.max_freq = params & 0xff; 7936 rps->max_freq = params & 0xff;
7918 } 7937 }
7919 } 7938 }
7920 7939
7921 /* Finally allow us to boost to max by default */ 7940 /* Finally allow us to boost to max by default */
7922 dev_priv->rps.boost_freq = dev_priv->rps.max_freq; 7941 rps->boost_freq = rps->max_freq;
7923 7942
7924 mutex_unlock(&dev_priv->pcu_lock); 7943 mutex_unlock(&dev_priv->pcu_lock);
7925 mutex_unlock(&dev_priv->drm.struct_mutex); 7944 mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -7949,7 +7968,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
7949 if (INTEL_GEN(dev_priv) < 6) 7968 if (INTEL_GEN(dev_priv) < 6)
7950 return; 7969 return;
7951 7970
7952 if (cancel_delayed_work_sync(&dev_priv->rps.autoenable_work)) 7971 if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
7953 intel_runtime_pm_put(dev_priv); 7972 intel_runtime_pm_put(dev_priv);
7954 7973
7955 /* gen6_rps_idle() will be called later to disable interrupts */ 7974 /* gen6_rps_idle() will be called later to disable interrupts */
@@ -7957,7 +7976,7 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
7957 7976
7958void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) 7977void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
7959{ 7978{
7960 dev_priv->rps.enabled = true; /* force disabling */ 7979 dev_priv->gt_pm.rps.enabled = true; /* force disabling */
7961 intel_disable_gt_powersave(dev_priv); 7980 intel_disable_gt_powersave(dev_priv);
7962 7981
7963 gen6_reset_rps_interrupts(dev_priv); 7982 gen6_reset_rps_interrupts(dev_priv);
@@ -7965,7 +7984,9 @@ void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
7965 7984
7966void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) 7985void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
7967{ 7986{
7968 if (!READ_ONCE(dev_priv->rps.enabled)) 7987 struct intel_rps *rps = &dev_priv->gt_pm.rps;
7988
7989 if (!READ_ONCE(rps->enabled))
7969 return; 7990 return;
7970 7991
7971 mutex_lock(&dev_priv->pcu_lock); 7992 mutex_lock(&dev_priv->pcu_lock);
@@ -7986,16 +8007,18 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
7986 ironlake_disable_drps(dev_priv); 8007 ironlake_disable_drps(dev_priv);
7987 } 8008 }
7988 8009
7989 dev_priv->rps.enabled = false; 8010 rps->enabled = false;
7990 mutex_unlock(&dev_priv->pcu_lock); 8011 mutex_unlock(&dev_priv->pcu_lock);
7991} 8012}
7992 8013
7993void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) 8014void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
7994{ 8015{
8016 struct intel_rps *rps = &dev_priv->gt_pm.rps;
8017
7995 /* We shouldn't be disabling as we submit, so this should be less 8018 /* We shouldn't be disabling as we submit, so this should be less
7996 * racy than it appears! 8019 * racy than it appears!
7997 */ 8020 */
7998 if (READ_ONCE(dev_priv->rps.enabled)) 8021 if (READ_ONCE(rps->enabled))
7999 return; 8022 return;
8000 8023
8001 /* Powersaving is controlled by the host when inside a VM */ 8024 /* Powersaving is controlled by the host when inside a VM */
@@ -8028,24 +8051,26 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
8028 intel_init_emon(dev_priv); 8051 intel_init_emon(dev_priv);
8029 } 8052 }
8030 8053
8031 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 8054 WARN_ON(rps->max_freq < rps->min_freq);
8032 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); 8055 WARN_ON(rps->idle_freq > rps->max_freq);
8033 8056
8034 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); 8057 WARN_ON(rps->efficient_freq < rps->min_freq);
8035 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); 8058 WARN_ON(rps->efficient_freq > rps->max_freq);
8036 8059
8037 dev_priv->rps.enabled = true; 8060 rps->enabled = true;
8038 mutex_unlock(&dev_priv->pcu_lock); 8061 mutex_unlock(&dev_priv->pcu_lock);
8039} 8062}
8040 8063
8041static void __intel_autoenable_gt_powersave(struct work_struct *work) 8064static void __intel_autoenable_gt_powersave(struct work_struct *work)
8042{ 8065{
8043 struct drm_i915_private *dev_priv = 8066 struct drm_i915_private *dev_priv =
8044 container_of(work, typeof(*dev_priv), rps.autoenable_work.work); 8067 container_of(work,
8068 typeof(*dev_priv),
8069 gt_pm.autoenable_work.work);
8045 struct intel_engine_cs *rcs; 8070 struct intel_engine_cs *rcs;
8046 struct drm_i915_gem_request *req; 8071 struct drm_i915_gem_request *req;
8047 8072
8048 if (READ_ONCE(dev_priv->rps.enabled)) 8073 if (READ_ONCE(dev_priv->gt_pm.rps.enabled))
8049 goto out; 8074 goto out;
8050 8075
8051 rcs = dev_priv->engine[RCS]; 8076 rcs = dev_priv->engine[RCS];
@@ -8075,7 +8100,7 @@ out:
8075 8100
8076void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv) 8101void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
8077{ 8102{
8078 if (READ_ONCE(dev_priv->rps.enabled)) 8103 if (READ_ONCE(dev_priv->gt_pm.rps.enabled))
8079 return; 8104 return;
8080 8105
8081 if (IS_IRONLAKE_M(dev_priv)) { 8106 if (IS_IRONLAKE_M(dev_priv)) {
@@ -8095,7 +8120,7 @@ void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
8095 * runtime resume it's necessary). 8120 * runtime resume it's necessary).
8096 */ 8121 */
8097 if (queue_delayed_work(dev_priv->wq, 8122 if (queue_delayed_work(dev_priv->wq,
8098 &dev_priv->rps.autoenable_work, 8123 &dev_priv->gt_pm.autoenable_work,
8099 round_jiffies_up_relative(HZ))) 8124 round_jiffies_up_relative(HZ)))
8100 intel_runtime_pm_get_noresume(dev_priv); 8125 intel_runtime_pm_get_noresume(dev_priv);
8101 } 8126 }
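The wakeref accounting around the relocated autoenable_work is symmetric by construction: intel_autoenable_gt_powersave() takes a no-resume runtime-pm reference only when queue_delayed_work() actually queued the work, and intel_suspend_gt_powersave() drops one only when cancel_delayed_work_sync() reports the work was still pending, so the count balances on every path (the worker itself is expected to release its reference once it has run). In outline:

	/* Sketch of the pairing around gt_pm.autoenable_work */
	static void arm_autoenable(struct drm_i915_private *dev_priv)
	{
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->gt_pm.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);	/* ref taken */
	}

	static void disarm_autoenable(struct drm_i915_private *dev_priv)
	{
		/* true only if the work was still queued */
		if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
			intel_runtime_pm_put(dev_priv);			/* ref dropped */
	}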
@@ -9289,31 +9314,39 @@ out:
9289 9314
9290static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 9315static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
9291{ 9316{
9317 struct intel_rps *rps = &dev_priv->gt_pm.rps;
9318
9292 /* 9319 /*
9293 * N = val - 0xb7 9320 * N = val - 0xb7
9294 * Slow = Fast = GPLL ref * N 9321 * Slow = Fast = GPLL ref * N
9295 */ 9322 */
9296 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000); 9323 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000);
9297} 9324}
9298 9325
9299static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 9326static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
9300{ 9327{
9301 return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7; 9328 struct intel_rps *rps = &dev_priv->gt_pm.rps;
9329
9330 return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7;
9302} 9331}
9303 9332
9304static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 9333static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
9305{ 9334{
9335 struct intel_rps *rps = &dev_priv->gt_pm.rps;
9336
9306 /* 9337 /*
9307 * N = val / 2 9338 * N = val / 2
9308 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 9339 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
9309 */ 9340 */
9310 return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000); 9341 return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000);
9311} 9342}
9312 9343
9313static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 9344static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
9314{ 9345{
9346 struct intel_rps *rps = &dev_priv->gt_pm.rps;
9347
9315 /* CHV needs even values */ 9348 /* CHV needs even values */
9316 return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2; 9349 return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2;
9317} 9350}
9318 9351
9319int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) 9352int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
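The four converters above are mutually inverse, built on gpll_ref_freq, which vlv_init_gpll_ref_freq() stores in kHz (see its debug print). Baytrail encodes N = val - 0xb7 so that freq = GPLL ref * N / 1000; Cherryview divides by four because the opcode counts CU2x half-steps, and its opcode path rounds before doubling so the result is always even, which the WARN_ONCE on odd frequency values earlier relies on. A round-trip sanity check, as a sketch:

	/* Illustrative only: a CHV MHz value should survive the round trip */
	static bool chv_roundtrip_ok(const struct intel_rps *rps, int mhz)
	{
		int op   = DIV_ROUND_CLOSEST(2 * 1000 * mhz, rps->gpll_ref_freq) * 2;
		int back = DIV_ROUND_CLOSEST(rps->gpll_ref_freq * op, 2 * 2 * 1000);

		return (op & 1) == 0 && abs(back - mhz) <= 1;
	}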
@@ -9346,9 +9379,9 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
9346{ 9379{
9347 mutex_init(&dev_priv->pcu_lock); 9380 mutex_init(&dev_priv->pcu_lock);
9348 9381
9349 INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, 9382 INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
9350 __intel_autoenable_gt_powersave); 9383 __intel_autoenable_gt_powersave);
9351 atomic_set(&dev_priv->rps.num_waiters, 0); 9384 atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
9352 9385
9353 dev_priv->runtime_pm.suspended = false; 9386 dev_priv->runtime_pm.suspended = false;
9354 atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); 9387 atomic_set(&dev_priv->runtime_pm.wakeref_count, 0);