aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2018-12-18 05:27:12 -0500
committerChris Wilson <chris@chris-wilson.co.uk>2018-12-18 09:24:46 -0500
commit060f23225d8203b8cd9e412d984e5237e63c83dc (patch)
tree3186a3282b6726da331c3a77f9c0e09b4206eab3 /drivers/gpu/drm/i915/intel_ringbuffer.c
parentb265a2a6255f581258ccfdccbd2efca51a142fe2 (diff)
drm/i915: Apply missed interrupt after reset w/a to all ringbuffer gen
Having completed a test run of gem_eio across all machines in CI we also observe the phenomenon (of lost interrupts after resetting the GPU) on gen3 machines as well as the previously sighted gen6/gen7. Let's apply the same HWSTAM workaround that was effective for gen6+ for all, as although we haven't seen the same failure on gen4/5 it seems prudent to keep the code the same. As a consequence we can remove the extra setting of HWSTAM and apply the register from a single site. v2: Delazy and move the HWSTAM into its own function v3: Mask off all HWSP writes on driver unload and engine cleanup. v4: And what about the physical hwsp? v5: No, engine->init_hw() is not called from driver_init_hw(), don't be daft. Really scrub HWSTAM as early as we can in driver_init_mmio() v6: Rename set_hwsp as it was setting the mask not the hwsp register. v7: Ville pointed out that although vcs(bsd) was introduced for g4x/ilk, per-engine HWSTAM was not introduced until gen6! References: https://bugs.freedesktop.org/show_bug.cgi?id=108735 Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Ville Syrjälä <ville.syrjala@linux.intel.com> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20181218102712.11058-1-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c101
1 file changed, 62 insertions, 39 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fdeca2b877c9..65fd92eb071d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -379,11 +379,25 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
379 return 0; 379 return 0;
380} 380}
381 381
382static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 382static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
383{
384 /*
385 * Keep the render interrupt unmasked as this papers over
386 * lost interrupts following a reset.
387 */
388 if (engine->class == RENDER_CLASS) {
389 if (INTEL_GEN(engine->i915) >= 6)
390 mask &= ~BIT(0);
391 else
392 mask &= ~I915_USER_INTERRUPT;
393 }
394
395 intel_engine_set_hwsp_writemask(engine, mask);
396}
397
398static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
383{ 399{
384 struct drm_i915_private *dev_priv = engine->i915; 400 struct drm_i915_private *dev_priv = engine->i915;
385 struct page *page = virt_to_page(engine->status_page.page_addr);
386 phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
387 u32 addr; 401 u32 addr;
388 402
389 addr = lower_32_bits(phys); 403 addr = lower_32_bits(phys);
@@ -393,12 +407,22 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
393 I915_WRITE(HWS_PGA, addr); 407 I915_WRITE(HWS_PGA, addr);
394} 408}
395 409
396static void intel_ring_setup_status_page(struct intel_engine_cs *engine) 410static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
411{
412 struct page *page = virt_to_page(engine->status_page.page_addr);
413 phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
414
415 set_hws_pga(engine, phys);
416 set_hwstam(engine, ~0u);
417}
418
419static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
397{ 420{
398 struct drm_i915_private *dev_priv = engine->i915; 421 struct drm_i915_private *dev_priv = engine->i915;
399 i915_reg_t mmio; 422 i915_reg_t hwsp;
400 423
401 /* The ring status page addresses are no longer next to the rest of 424 /*
425 * The ring status page addresses are no longer next to the rest of
402 * the ring registers as of gen7. 426 * the ring registers as of gen7.
403 */ 427 */
404 if (IS_GEN(dev_priv, 7)) { 428 if (IS_GEN(dev_priv, 7)) {
@@ -410,56 +434,55 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
410 default: 434 default:
411 GEM_BUG_ON(engine->id); 435 GEM_BUG_ON(engine->id);
412 case RCS: 436 case RCS:
413 mmio = RENDER_HWS_PGA_GEN7; 437 hwsp = RENDER_HWS_PGA_GEN7;
414 break; 438 break;
415 case BCS: 439 case BCS:
416 mmio = BLT_HWS_PGA_GEN7; 440 hwsp = BLT_HWS_PGA_GEN7;
417 break; 441 break;
418 case VCS: 442 case VCS:
419 mmio = BSD_HWS_PGA_GEN7; 443 hwsp = BSD_HWS_PGA_GEN7;
420 break; 444 break;
421 case VECS: 445 case VECS:
422 mmio = VEBOX_HWS_PGA_GEN7; 446 hwsp = VEBOX_HWS_PGA_GEN7;
423 break; 447 break;
424 } 448 }
425 } else if (IS_GEN(dev_priv, 6)) { 449 } else if (IS_GEN(dev_priv, 6)) {
426 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 450 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
427 } else { 451 } else {
428 mmio = RING_HWS_PGA(engine->mmio_base); 452 hwsp = RING_HWS_PGA(engine->mmio_base);
429 } 453 }
430 454
431 if (INTEL_GEN(dev_priv) >= 6) { 455 I915_WRITE(hwsp, offset);
432 u32 mask = ~0u; 456 POSTING_READ(hwsp);
457}
433 458
434 /* 459static void flush_cs_tlb(struct intel_engine_cs *engine)
435 * Keep the render interrupt unmasked as this papers over 460{
436 * lost interrupts following a reset. 461 struct drm_i915_private *dev_priv = engine->i915;
437 */ 462 i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
438 if (engine->id == RCS)
439 mask &= ~BIT(0);
440 463
441 I915_WRITE(RING_HWSTAM(engine->mmio_base), mask); 464 if (!IS_GEN_RANGE(dev_priv, 6, 7))
442 } 465 return;
443 466
444 I915_WRITE(mmio, engine->status_page.ggtt_offset); 467 /* ring should be idle before issuing a sync flush*/
445 POSTING_READ(mmio); 468 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
446 469
447 /* Flush the TLB for this page */ 470 I915_WRITE(instpm,
448 if (IS_GEN_RANGE(dev_priv, 6, 7)) { 471 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
449 i915_reg_t reg = RING_INSTPM(engine->mmio_base); 472 INSTPM_SYNC_FLUSH));
473 if (intel_wait_for_register(dev_priv,
474 instpm, INSTPM_SYNC_FLUSH, 0,
475 1000))
476 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
477 engine->name);
478}
450 479
451 /* ring should be idle before issuing a sync flush*/ 480static void ring_setup_status_page(struct intel_engine_cs *engine)
452 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); 481{
482 set_hwsp(engine, engine->status_page.ggtt_offset);
483 set_hwstam(engine, ~0u);
453 484
454 I915_WRITE(reg, 485 flush_cs_tlb(engine);
455 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
456 INSTPM_SYNC_FLUSH));
457 if (intel_wait_for_register(dev_priv,
458 reg, INSTPM_SYNC_FLUSH, 0,
459 1000))
460 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
461 engine->name);
462 }
463} 486}
464 487
465static bool stop_ring(struct intel_engine_cs *engine) 488static bool stop_ring(struct intel_engine_cs *engine)
@@ -529,7 +552,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
529 if (HWS_NEEDS_PHYSICAL(dev_priv)) 552 if (HWS_NEEDS_PHYSICAL(dev_priv))
530 ring_setup_phys_status_page(engine); 553 ring_setup_phys_status_page(engine);
531 else 554 else
532 intel_ring_setup_status_page(engine); 555 ring_setup_status_page(engine);
533 556
534 intel_engine_reset_breadcrumbs(engine); 557 intel_engine_reset_breadcrumbs(engine);
535 558