author     Sagar Arun Kamble <sagar.a.kamble@intel.com>  2017-10-10 17:30:06 -0400
committer  Chris Wilson <chris@chris-wilson.co.uk>       2017-10-11 03:56:59 -0400
commit     562d9bae08a10335368bf54ea5cc7e4f6185bccc (patch)
tree       bed8838c1e250e1104b3d224f9e81ae484fa4b16 /drivers/gpu/drm/i915/i915_irq.c
parent     9f817501bd7facfe2bffacd637f4332e5991e57a (diff)
drm/i915: Name structure in dev_priv that contains RPS/RC6 state as "gt_pm"
Prepared a substructure rps for RPS-related state. autoenable_work is
used for RC6 too, hence it is defined outside the rps structure. As
part of this, many functions are refactored to use a local
struct intel_rps *rps pointer to access RPS-related members, and
intel_rps_client pointer variables are renamed to rps_client in the
affected functions.
v2: Rebase.
v3: s/pm/gt_pm (Chris)
Refactored access to the rps structure by declaring a local
struct intel_rps * in many functions.
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com> #1
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1507360055-19948-9-git-send-email-sagar.a.kamble@intel.com
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171010213010.7415-8-chris@chris-wilson.co.uk
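In short, the patch replaces repeated dev_priv->rps.* dereferences with accesses through the new dev_priv->gt_pm.rps substructure, usually via a local pointer. Below is a minimal sketch of that pattern; example_before/example_after are hypothetical callers written for illustration, not hunks from this patch, though the field names (cur_freq, max_freq_softlimit, work) are taken from the diff that follows.

/* Before: RPS state hung directly off dev_priv. */
static void example_before(struct drm_i915_private *dev_priv)
{
        if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
                schedule_work(&dev_priv->rps.work);
}

/* After: rps lives inside the gt_pm substructure; a local
 * struct intel_rps * pointer keeps repeated accesses short. */
static void example_after(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (rps->cur_freq < rps->max_freq_softlimit)
                schedule_work(&rps->work);
}

One consequence visible in gen6_pm_rps_work() below: since the work item now sits at gt_pm.rps.work, the container_of() lookup that recovers dev_priv from the work pointer must name the new path.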
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 87
1 file changed, 48 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1844d3fe8f1f..b1296a55c1e4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -404,19 +404,21 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
         spin_lock_irq(&dev_priv->irq_lock);
         gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
-        dev_priv->rps.pm_iir = 0;
+        dev_priv->gt_pm.rps.pm_iir = 0;
         spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-        if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+        if (READ_ONCE(rps->interrupts_enabled))
                 return;
 
         spin_lock_irq(&dev_priv->irq_lock);
-        WARN_ON_ONCE(dev_priv->rps.pm_iir);
+        WARN_ON_ONCE(rps->pm_iir);
         WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-        dev_priv->rps.interrupts_enabled = true;
+        rps->interrupts_enabled = true;
         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
         spin_unlock_irq(&dev_priv->irq_lock);
@@ -424,11 +426,13 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-        if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+        if (!READ_ONCE(rps->interrupts_enabled))
                 return;
 
         spin_lock_irq(&dev_priv->irq_lock);
-        dev_priv->rps.interrupts_enabled = false;
+        rps->interrupts_enabled = false;
 
         I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
 
@@ -442,7 +446,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
          * we will reset the GPU to minimum frequencies, so the current
          * state of the worker can be discarded.
          */
-        cancel_work_sync(&dev_priv->rps.work);
+        cancel_work_sync(&rps->work);
         gen6_reset_rps_interrupts(dev_priv);
 }
 
@@ -1119,12 +1123,13 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
 
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-        memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+        memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
-        const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+        const struct intel_rps_ei *prev = &rps->ei;
         struct intel_rps_ei now;
         u32 events = 0;
 
@@ -1151,28 +1156,29 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
                 c0 = max(render, media);
                 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
 
-                if (c0 > time * dev_priv->rps.up_threshold)
+                if (c0 > time * rps->up_threshold)
                         events = GEN6_PM_RP_UP_THRESHOLD;
-                else if (c0 < time * dev_priv->rps.down_threshold)
+                else if (c0 < time * rps->down_threshold)
                         events = GEN6_PM_RP_DOWN_THRESHOLD;
         }
 
-        dev_priv->rps.ei = now;
+        rps->ei = now;
         return events;
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
         struct drm_i915_private *dev_priv =
-                container_of(work, struct drm_i915_private, rps.work);
+                container_of(work, struct drm_i915_private, gt_pm.rps.work);
+        struct intel_rps *rps = &dev_priv->gt_pm.rps;
         bool client_boost = false;
         int new_delay, adj, min, max;
         u32 pm_iir = 0;
 
         spin_lock_irq(&dev_priv->irq_lock);
-        if (dev_priv->rps.interrupts_enabled) {
-                pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
-                client_boost = atomic_read(&dev_priv->rps.num_waiters);
+        if (rps->interrupts_enabled) {
+                pm_iir = fetch_and_zero(&rps->pm_iir);
+                client_boost = atomic_read(&rps->num_waiters);
         }
         spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -1185,14 +1191,14 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
         pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
 
-        adj = dev_priv->rps.last_adj;
-        new_delay = dev_priv->rps.cur_freq;
-        min = dev_priv->rps.min_freq_softlimit;
-        max = dev_priv->rps.max_freq_softlimit;
+        adj = rps->last_adj;
+        new_delay = rps->cur_freq;
+        min = rps->min_freq_softlimit;
+        max = rps->max_freq_softlimit;
         if (client_boost)
-                max = dev_priv->rps.max_freq;
-        if (client_boost && new_delay < dev_priv->rps.boost_freq) {
-                new_delay = dev_priv->rps.boost_freq;
+                max = rps->max_freq;
+        if (client_boost && new_delay < rps->boost_freq) {
+                new_delay = rps->boost_freq;
                 adj = 0;
         } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
                 if (adj > 0)
@@ -1200,15 +1206,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
                 else /* CHV needs even encode values */
                         adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
 
-                if (new_delay >= dev_priv->rps.max_freq_softlimit)
+                if (new_delay >= rps->max_freq_softlimit)
                         adj = 0;
         } else if (client_boost) {
                 adj = 0;
         } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-                if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
-                        new_delay = dev_priv->rps.efficient_freq;
-                else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
-                        new_delay = dev_priv->rps.min_freq_softlimit;
+                if (rps->cur_freq > rps->efficient_freq)
+                        new_delay = rps->efficient_freq;
+                else if (rps->cur_freq > rps->min_freq_softlimit)
+                        new_delay = rps->min_freq_softlimit;
                 adj = 0;
         } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
                 if (adj < 0)
@@ -1216,13 +1222,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
                 else /* CHV needs even encode values */
                         adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
 
-                if (new_delay <= dev_priv->rps.min_freq_softlimit)
+                if (new_delay <= rps->min_freq_softlimit)
                         adj = 0;
         } else { /* unknown event */
                 adj = 0;
         }
 
-        dev_priv->rps.last_adj = adj;
+        rps->last_adj = adj;
 
         /* sysfs frequency interfaces may have snuck in while servicing the
          * interrupt
@@ -1232,7 +1238,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
         if (intel_set_rps(dev_priv, new_delay)) {
                 DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-                dev_priv->rps.last_adj = 0;
+                rps->last_adj = 0;
         }
 
         mutex_unlock(&dev_priv->pcu_lock);
@@ -1240,7 +1246,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 out:
         /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
         spin_lock_irq(&dev_priv->irq_lock);
-        if (dev_priv->rps.interrupts_enabled)
+        if (rps->interrupts_enabled)
                 gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
         spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -1721,12 +1727,14 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  * the work queue. */
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+        struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
         if (pm_iir & dev_priv->pm_rps_events) {
                 spin_lock(&dev_priv->irq_lock);
                 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
-                if (dev_priv->rps.interrupts_enabled) {
-                        dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-                        schedule_work(&dev_priv->rps.work);
+                if (rps->interrupts_enabled) {
+                        rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+                        schedule_work(&rps->work);
                 }
                 spin_unlock(&dev_priv->irq_lock);
         }
@@ -4007,11 +4015,12 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
         struct drm_device *dev = &dev_priv->drm;
+        struct intel_rps *rps = &dev_priv->gt_pm.rps;
         int i;
 
         intel_hpd_init_work(dev_priv);
 
-        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+        INIT_WORK(&rps->work, gen6_pm_rps_work);
 
         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
         for (i = 0; i < MAX_L3_SLICES; ++i)
@@ -4027,7 +4036,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
         else
                 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-        dev_priv->rps.pm_intrmsk_mbz = 0;
+        rps->pm_intrmsk_mbz = 0;
 
         /*
          * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
@@ -4036,10 +4045,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
          * TODO: verify if this can be reproduced on VLV,CHV.
          */
         if (INTEL_GEN(dev_priv) <= 7)
-                dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+                rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
         if (INTEL_GEN(dev_priv) >= 8)
-                dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
         if (IS_GEN2(dev_priv)) {
                 /* Gen2 doesn't have a hardware frame counter */