-rw-r--r--  drivers/gpu/drm/i915/i915_reset.c  | 90
1 file changed, 39 insertions, 51 deletions

diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 342d9ee42601..2f840858572c 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -12,6 +12,8 @@
 
 #include "intel_guc.h"
 
+#define RESET_MAX_RETRIES 3
+
 static void engine_skip_context(struct i915_request *rq)
 {
         struct intel_engine_cs *engine = rq->engine;
@@ -144,14 +146,14 @@ static int i915_do_reset(struct drm_i915_private *i915,
 
         /* Assert reset for at least 20 usec, and wait for acknowledgement. */
         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
-        usleep_range(50, 200);
-        err = wait_for(i915_in_reset(pdev), 500);
+        udelay(50);
+        err = wait_for_atomic(i915_in_reset(pdev), 50);
 
         /* Clear the reset request. */
         pci_write_config_byte(pdev, I915_GDRST, 0);
-        usleep_range(50, 200);
+        udelay(50);
         if (!err)
-                err = wait_for(!i915_in_reset(pdev), 500);
+                err = wait_for_atomic(!i915_in_reset(pdev), 50);
 
         return err;
 }
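For context on the two-part change above: i915's wait_for() sleeps between polls, so it is only legal where sleeping is allowed, while wait_for_atomic() busy-spins and remains safe once intel_gpu_reset() starts calling these backends with preemption disabled (see the final hunks). That is also why the timeouts shrink from 500ms to 50ms: a timed-out atomic wait now burns CPU for the whole window. A minimal open-coded sketch of what the atomic variant must look like, assuming only standard kernel timekeeping helpers; poll_gdrst_atomic is a hypothetical name for illustration, not part of the patch, and the real wait_for_atomic() macro is more careful than this:

/* Illustrative only -- a rough equivalent of a non-sleeping poll. */
static int poll_gdrst_atomic(struct pci_dev *pdev, unsigned int timeout_ms)
{
        unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

        do {
                if (i915_in_reset(pdev))        /* the condition under test */
                        return 0;
                cpu_relax();                    /* spin; never sleep here */
        } while (time_before(jiffies, end));

        return -ETIMEDOUT;
}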
@@ -171,7 +173,7 @@ static int g33_do_reset(struct drm_i915_private *i915,
         struct pci_dev *pdev = i915->drm.pdev;
 
         pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
-        return wait_for(g4x_reset_complete(pdev), 500);
+        return wait_for_atomic(g4x_reset_complete(pdev), 50);
 }
 
 static int g4x_do_reset(struct drm_i915_private *dev_priv,
@@ -182,13 +184,13 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
         int ret;
 
         /* WaVcpClkGateDisableForMediaReset:ctg,elk */
-        I915_WRITE(VDECCLK_GATE_D,
-                   I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
-        POSTING_READ(VDECCLK_GATE_D);
+        I915_WRITE_FW(VDECCLK_GATE_D,
+                      I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+        POSTING_READ_FW(VDECCLK_GATE_D);
 
         pci_write_config_byte(pdev, I915_GDRST,
                               GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-        ret = wait_for(g4x_reset_complete(pdev), 500);
+        ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
         if (ret) {
                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
                 goto out;
@@ -196,7 +198,7 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
 
         pci_write_config_byte(pdev, I915_GDRST,
                               GRDOM_RENDER | GRDOM_RESET_ENABLE);
-        ret = wait_for(g4x_reset_complete(pdev), 500);
+        ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
         if (ret) {
                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
                 goto out;
@@ -205,9 +207,9 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
 out:
         pci_write_config_byte(pdev, I915_GDRST, 0);
 
-        I915_WRITE(VDECCLK_GATE_D,
-                   I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
-        POSTING_READ(VDECCLK_GATE_D);
+        I915_WRITE_FW(VDECCLK_GATE_D,
+                      I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+        POSTING_READ_FW(VDECCLK_GATE_D);
 
         return ret;
 }
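The accessor change in this hunk is the mechanical half of the patch: I915_WRITE_FW()/POSTING_READ_FW() are the raw MMIO variants that skip the per-access forcewake handling done by I915_WRITE()/POSTING_READ(). That is only correct because, as the intel_gpu_reset() hunks below show, the caller already holds FORCEWAKE_ALL around the whole reset. A sketch of that contract, using only calls and registers that appear in this patch:

/* Caller-side pattern that makes the _FW accessors legal here: */
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);     /* wake the hw once, up front */

/* ...reset backend runs; raw accesses need no further bookkeeping: */
I915_WRITE_FW(VDECCLK_GATE_D,
              I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ_FW(VDECCLK_GATE_D);                     /* flush the posted write */

intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);     /* drop the wakeref */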
@@ -218,27 +220,29 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 {
         int ret;
 
-        I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-        ret = intel_wait_for_register(dev_priv,
-                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
-                                      500);
+        I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+        ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+                                           ILK_GRDOM_RESET_ENABLE, 0,
+                                           5000, 0,
+                                           NULL);
         if (ret) {
                 DRM_DEBUG_DRIVER("Wait for render reset failed\n");
                 goto out;
         }
 
-        I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-        ret = intel_wait_for_register(dev_priv,
-                                      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
-                                      500);
+        I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+        ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+                                           ILK_GRDOM_RESET_ENABLE, 0,
+                                           5000, 0,
+                                           NULL);
         if (ret) {
                 DRM_DEBUG_DRIVER("Wait for media reset failed\n");
                 goto out;
         }
 
 out:
-        I915_WRITE(ILK_GDSR, 0);
-        POSTING_READ(ILK_GDSR);
+        I915_WRITE_FW(ILK_GDSR, 0);
+        POSTING_READ_FW(ILK_GDSR);
         return ret;
 }
 
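The replacement of intel_wait_for_register() above packs the interesting behaviour into the extra arguments, so an annotated form of the same call may help; this is the existing __intel_wait_for_register_fw() signature from intel_uncore.c, with nothing new introduced:

ret = __intel_wait_for_register_fw(dev_priv,
                                   ILK_GDSR,                /* register to poll */
                                   ILK_GRDOM_RESET_ENABLE,  /* mask of interest */
                                   0,                       /* wait until the bit clears */
                                   5000,                    /* "fast" busy-wait timeout, in usec */
                                   0,                       /* "slow" sleeping timeout, in msec: 0 = never sleep */
                                   NULL);                   /* final register value not needed */

With the slow timeout at zero the wait never sleeps, so the old 500ms sleeping wait becomes a 5000us pure busy-wait, consistent with making the reset path atomic.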
@@ -527,32 +531,21 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
 
 int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
 {
-        reset_func reset = intel_get_gpu_reset(i915);
+        const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
+        reset_func reset;
+        int ret = -ETIMEDOUT;
         int retry;
-        int ret;
 
-        /*
-         * We want to perform per-engine reset from atomic context (e.g.
-         * softirq), which imposes the constraint that we cannot sleep.
-         * However, experience suggests that spending a bit of time waiting
-         * for a reset helps in various cases, so for a full-device reset
-         * we apply the opposite rule and wait if we want to. As we should
-         * always follow up a failed per-engine reset with a full device reset,
-         * being a little faster, stricter and more error prone for the
-         * atomic case seems an acceptable compromise.
-         *
-         * Unfortunately this leads to a bimodal routine, when the goal was
-         * to have a single reset function that worked for resetting any
-         * number of engines simultaneously.
-         */
-        might_sleep_if(engine_mask == ALL_ENGINES);
+        reset = intel_get_gpu_reset(i915);
+        if (!reset)
+                return -ENODEV;
 
         /*
          * If the power well sleeps during the reset, the reset
          * request may be dropped and never completes (causing -EIO).
          */
         intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
-        for (retry = 0; retry < 3; retry++) {
+        for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
                 /*
                  * We stop engines, otherwise we might get failed reset and a
                  * dead gpu (on elk). Also as modern gpu as kbl can suffer
@@ -569,15 +562,10 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
                  */
                 i915_stop_engines(i915, engine_mask);
 
-                ret = -ENODEV;
-                if (reset) {
-                        GEM_TRACE("engine_mask=%x\n", engine_mask);
-                        ret = reset(i915, engine_mask, retry);
-                }
-                if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
-                        break;
-
-                cond_resched();
+                GEM_TRACE("engine_mask=%x\n", engine_mask);
+                preempt_disable();
+                ret = reset(i915, engine_mask, retry);
+                preempt_enable();
         }
         intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
 
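Taken together, the intel_gpu_reset() hunks replace the old bimodal design (might_sleep_if() for full resets, stricter rules for per-engine ones) with one unconditionally atomic path. The retry policy that used to be spread through the loop body now lives entirely in the loop header; distilled below for reference, using only names from the patch:

const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
int ret = -ETIMEDOUT;   /* primes the loop condition for the first pass */
int retry;

for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
        preempt_disable();      /* backends may busy-wait but never sleep */
        ret = reset(i915, engine_mask, retry);
        preempt_enable();
}
/*
 * Exit states: 0 on success; any non-timeout error ends the retries
 * at once (replacing the old explicit break); -ETIMEDOUT survives
 * only if every attempt timed out.
 */

The cond_resched() is dropped because the whole function must now be callable from atomic context, where yielding is not allowed.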
@@ -1014,7 +1002,7 @@ void i915_reset(struct drm_i915_private *i915,
                 goto error;
         }
 
-        for (i = 0; i < 3; i++) {
+        for (i = 0; i < RESET_MAX_RETRIES; i++) {
                 ret = intel_gpu_reset(i915, ALL_ENGINES);
                 if (ret == 0)
                         break;