Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
 drivers/gpu/drm/i915/i915_irq.c (-rw-r--r--) | 410
 1 file changed, 260 insertions(+), 150 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 633c18785c1e..f9bc3aaa90d0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -243,6 +243,41 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+static u32
+gen11_gt_engine_identity(struct drm_i915_private * const i915,
+			 const unsigned int bank, const unsigned int bit);
+
+bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+			 const unsigned int bank,
+			 const unsigned int bit)
+{
+	void __iomem * const regs = i915->regs;
+	u32 dw;
+
+	lockdep_assert_held(&i915->irq_lock);
+
+	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+	if (dw & BIT(bit)) {
+		/*
+		 * According to the BSpec, DW_IIR bits cannot be cleared without
+		 * first servicing the Selector & Shared IIR registers.
+		 */
+		gen11_gt_engine_identity(i915, bank, bit);
+
+		/*
+		 * We locked GT INT DW by reading it. If we want to (try
+		 * to) recover from this successfully, we need to clear
+		 * our bit, otherwise we are locking the register for
+		 * everybody.
+		 */
+		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
+
+		return true;
+	}
+
+	return false;
+}
+
 /**
  * ilk_update_display_irq - update DEIMR
  * @dev_priv: driver private
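
Reviewer note: gen11_reset_one_iir() clears at most one pending event per call (it locks GT_INTR_DW(bank) by reading it, services the selector/shared IIR via gen11_gt_engine_identity(), then writes the bit back to unlock), so a caller that wants the IIR fully quiesced has to loop until the helper returns false. A minimal sketch of that usage under the required lock; the wrapper name gen11_drain_one_iir() is hypothetical, while the real caller added by this patch is gen11_reset_rps_interrupts() further down:

	static void gen11_drain_one_iir(struct drm_i915_private *i915,
					unsigned int bank, unsigned int bit)
	{
		/* gen11_reset_one_iir() asserts that irq_lock is held. */
		spin_lock_irq(&i915->irq_lock);

		while (gen11_reset_one_iir(i915, bank, bit))
			; /* keep draining until the DW bit reads back clear */

		spin_unlock_irq(&i915->irq_lock);
	}
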
@@ -308,17 +343,29 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 
 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
 {
+	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
+
 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 }
 
 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
 {
-	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
+	if (INTEL_GEN(dev_priv) >= 11)
+		return GEN11_GPM_WGBOXPERF_INTR_MASK;
+	else if (INTEL_GEN(dev_priv) >= 8)
+		return GEN8_GT_IMR(2);
+	else
+		return GEN6_PMIMR;
 }
 
 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
 {
-	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
+	if (INTEL_GEN(dev_priv) >= 11)
+		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+	else if (INTEL_GEN(dev_priv) >= 8)
+		return GEN8_GT_IER(2);
+	else
+		return GEN6_PMIER;
 }
 
 /**
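
The asymmetry above is deliberate: gen11 still has PM mask/enable registers (the GEN11_GPM_WGBOXPERF pair), but no single PM IIR, so code that used to peek at gen6_pm_iir() must go through the banked GT_INTR_DW path instead. A hedged sketch of the resulting caller-side pattern, mirroring the gen6_enable_rps_interrupts() change below (the helper name assert_pm_iir_is_idle() is hypothetical):

	static void assert_pm_iir_is_idle(struct drm_i915_private *dev_priv)
	{
		lockdep_assert_held(&dev_priv->irq_lock);

		if (INTEL_GEN(dev_priv) >= 11)
			WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
		else
			WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
				     dev_priv->pm_rps_events);
	}
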
@@ -400,6 +447,18 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
 	/* though a barrier is missing here, but don't really need a one */
 }
 
+void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+		;
+
+	dev_priv->gt_pm.rps.pm_iir = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
+
 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
@@ -415,12 +474,14 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 	if (READ_ONCE(rps->interrupts_enabled))
 		return;
 
-	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
-		return;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 	WARN_ON_ONCE(rps->pm_iir);
-	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+
+	if (INTEL_GEN(dev_priv) >= 11)
+		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
+	else
+		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
+
 	rps->interrupts_enabled = true;
 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
@@ -434,9 +495,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	if (!READ_ONCE(rps->interrupts_enabled))
 		return;
 
-	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
-		return;
-
 	spin_lock_irq(&dev_priv->irq_lock);
 	rps->interrupts_enabled = false;
 
@@ -453,7 +511,10 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	 * state of the worker can be discarded.
 	 */
 	cancel_work_sync(&rps->work);
-	gen6_reset_rps_interrupts(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 11)
+		gen11_reset_rps_interrupts(dev_priv);
+	else
+		gen6_reset_rps_interrupts(dev_priv);
 }
 
 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
@@ -1399,19 +1460,18 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 }
 
 static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	bool tasklet = false;
 
-	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
-		if (READ_ONCE(engine->execlists.active)) {
-			__set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-			tasklet = true;
-		}
+	if (iir & GT_CONTEXT_SWITCH_INTERRUPT) {
+		if (READ_ONCE(engine->execlists.active))
+			tasklet = !test_and_set_bit(ENGINE_IRQ_EXECLIST,
+						    &engine->irq_posted);
 	}
 
-	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
+	if (iir & GT_RENDER_USER_INTERRUPT) {
 		notify_ring(engine);
 		tasklet |= USES_GUC_SUBMISSION(engine->i915);
 	}
@@ -1466,21 +1526,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
 {
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
 		gen8_cs_irq_handler(i915->engine[RCS],
-				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
+				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
 		gen8_cs_irq_handler(i915->engine[BCS],
-				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
+				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
 	}
 
 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
 		gen8_cs_irq_handler(i915->engine[VCS],
-				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
+				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
 		gen8_cs_irq_handler(i915->engine[VCS2],
-				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
+				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
 	}
 
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
 		gen8_cs_irq_handler(i915->engine[VECS],
-				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
 	}
 
 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
@@ -1627,7 +1687,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 	int head, tail;
 
 	spin_lock(&pipe_crc->lock);
-	if (pipe_crc->source) {
+	if (pipe_crc->source && !crtc->base.crc.opened) {
 		if (!pipe_crc->entries) {
 			spin_unlock(&pipe_crc->lock);
 			DRM_DEBUG_KMS("spurious interrupt\n");
@@ -1667,7 +1727,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
 	 * don't trust that one either.
 	 */
-	if (pipe_crc->skipped == 0 ||
+	if (pipe_crc->skipped <= 0 ||
 	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
 		pipe_crc->skipped++;
 		spin_unlock(&pipe_crc->lock);
@@ -1766,37 +1826,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
 {
-	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
-		/* Sample the log buffer flush related bits & clear them out now
-		 * itself from the message identity register to minimize the
-		 * probability of losing a flush interrupt, when there are back
-		 * to back flush interrupts.
-		 * There can be a new flush interrupt, for different log buffer
-		 * type (like for ISR), whilst Host is handling one (for DPC).
-		 * Since same bit is used in message register for ISR & DPC, it
-		 * could happen that GuC sets the bit for 2nd interrupt but Host
-		 * clears out the bit on handling the 1st interrupt.
-		 */
-		u32 msg, flush;
-
-		msg = I915_READ(SOFT_SCRATCH(15));
-		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
-			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
-		if (flush) {
-			/* Clear the message bits that are handled */
-			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
-
-			/* Handle flush interrupt in bottom half */
-			queue_work(dev_priv->guc.log.runtime.flush_wq,
-				   &dev_priv->guc.log.runtime.flush_work);
-
-			dev_priv->guc.log.flush_interrupt_count++;
-		} else {
-			/* Not clearing of unhandled event bits won't result in
-			 * re-triggering of the interrupt.
-			 */
-		}
-	}
+	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
+		intel_guc_to_host_event_handler(&dev_priv->guc);
 }
 
 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
@@ -2433,6 +2464,13 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 	if (de_iir & DE_ERR_INT_IVB)
 		ivb_err_int_handler(dev_priv);
 
+	if (de_iir & DE_EDP_PSR_INT_HSW) {
+		u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+		intel_psr_irq_handler(dev_priv, psr_iir);
+		I915_WRITE(EDP_PSR_IIR, psr_iir);
+	}
+
 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
 		dp_aux_irq_handler(dev_priv);
 
@@ -2562,11 +2600,25 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 	if (master_ctl & GEN8_DE_MISC_IRQ) {
 		iir = I915_READ(GEN8_DE_MISC_IIR);
 		if (iir) {
+			bool found = false;
+
 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
 			ret = IRQ_HANDLED;
-			if (iir & GEN8_DE_MISC_GSE)
+
+			if (iir & GEN8_DE_MISC_GSE) {
 				intel_opregion_asle_intr(dev_priv);
-			else
+				found = true;
+			}
+
+			if (iir & GEN8_DE_EDP_PSR) {
+				u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+				intel_psr_irq_handler(dev_priv, psr_iir);
+				I915_WRITE(EDP_PSR_IIR, psr_iir);
+				found = true;
+			}
+
+			if (!found)
 				DRM_ERROR("Unexpected DE Misc interrupt\n");
 		}
 		else
@@ -2762,58 +2814,16 @@ static void __fini_wedge(struct wedge_me *w)
 	(W)->i915; \
 	__fini_wedge((W)))
 
-static __always_inline void
-gen11_cs_irq_handler(struct intel_engine_cs * const engine, const u32 iir)
-{
-	gen8_cs_irq_handler(engine, iir, 0);
-}
-
-static void
-gen11_gt_engine_irq_handler(struct drm_i915_private * const i915,
-			    const unsigned int bank,
-			    const unsigned int engine_n,
-			    const u16 iir)
-{
-	struct intel_engine_cs ** const engine = i915->engine;
-
-	switch (bank) {
-	case 0:
-		switch (engine_n) {
-
-		case GEN11_RCS0:
-			return gen11_cs_irq_handler(engine[RCS], iir);
-
-		case GEN11_BCS:
-			return gen11_cs_irq_handler(engine[BCS], iir);
-		}
-	case 1:
-		switch (engine_n) {
-
-		case GEN11_VCS(0):
-			return gen11_cs_irq_handler(engine[_VCS(0)], iir);
-		case GEN11_VCS(1):
-			return gen11_cs_irq_handler(engine[_VCS(1)], iir);
-		case GEN11_VCS(2):
-			return gen11_cs_irq_handler(engine[_VCS(2)], iir);
-		case GEN11_VCS(3):
-			return gen11_cs_irq_handler(engine[_VCS(3)], iir);
-
-		case GEN11_VECS(0):
-			return gen11_cs_irq_handler(engine[_VECS(0)], iir);
-		case GEN11_VECS(1):
-			return gen11_cs_irq_handler(engine[_VECS(1)], iir);
-		}
-	}
-}
-
 static u32
-gen11_gt_engine_intr(struct drm_i915_private * const i915,
-		     const unsigned int bank, const unsigned int bit)
+gen11_gt_engine_identity(struct drm_i915_private * const i915,
+			 const unsigned int bank, const unsigned int bit)
 {
 	void __iomem * const regs = i915->regs;
 	u32 timeout_ts;
 	u32 ident;
 
+	lockdep_assert_held(&i915->irq_lock);
+
 	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
 
 	/*
@@ -2835,42 +2845,101 @@ gen11_gt_engine_intr(struct drm_i915_private * const i915,
 	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
 		      GEN11_INTR_DATA_VALID);
 
-	return ident & GEN11_INTR_ENGINE_MASK;
+	return ident;
 }
 
 static void
-gen11_gt_irq_handler(struct drm_i915_private * const i915,
-		     const u32 master_ctl)
+gen11_other_irq_handler(struct drm_i915_private * const i915,
+			const u8 instance, const u16 iir)
+{
+	if (instance == OTHER_GTPM_INSTANCE)
+		return gen6_rps_irq_handler(i915, iir);
+
+	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
+		  instance, iir);
+}
+
+static void
+gen11_engine_irq_handler(struct drm_i915_private * const i915,
+			 const u8 class, const u8 instance, const u16 iir)
+{
+	struct intel_engine_cs *engine;
+
+	if (instance <= MAX_ENGINE_INSTANCE)
+		engine = i915->engine_class[class][instance];
+	else
+		engine = NULL;
+
+	if (likely(engine))
+		return gen8_cs_irq_handler(engine, iir);
+
+	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
+		  class, instance);
+}
+
+static void
+gen11_gt_identity_handler(struct drm_i915_private * const i915,
+			  const u32 identity)
+{
+	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
+	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
+	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
+
+	if (unlikely(!intr))
+		return;
+
+	if (class <= COPY_ENGINE_CLASS)
+		return gen11_engine_irq_handler(i915, class, instance, intr);
+
+	if (class == OTHER_CLASS)
+		return gen11_other_irq_handler(i915, instance, intr);
+
+	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
+		  class, instance, intr);
+}
+
+static void
+gen11_gt_bank_handler(struct drm_i915_private * const i915,
+		      const unsigned int bank)
 {
 	void __iomem * const regs = i915->regs;
-	unsigned int bank;
+	unsigned long intr_dw;
+	unsigned int bit;
 
-	for (bank = 0; bank < 2; bank++) {
-		unsigned long intr_dw;
-		unsigned int bit;
+	lockdep_assert_held(&i915->irq_lock);
 
-		if (!(master_ctl & GEN11_GT_DW_IRQ(bank)))
-			continue;
+	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
 
-		intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
+	if (unlikely(!intr_dw)) {
+		DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
+		return;
+	}
 
-		if (unlikely(!intr_dw)) {
-			DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
-			continue;
-		}
+	for_each_set_bit(bit, &intr_dw, 32) {
+		const u32 ident = gen11_gt_engine_identity(i915,
+							   bank, bit);
 
-		for_each_set_bit(bit, &intr_dw, 32) {
-			const u16 iir = gen11_gt_engine_intr(i915, bank, bit);
+		gen11_gt_identity_handler(i915, ident);
+	}
 
-			if (unlikely(!iir))
-				continue;
+	/* Clear must be after shared has been served for engine */
+	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+}
 
-			gen11_gt_engine_irq_handler(i915, bank, bit, iir);
-		}
+static void
+gen11_gt_irq_handler(struct drm_i915_private * const i915,
+		     const u32 master_ctl)
+{
+	unsigned int bank;
 
-		/* Clear must be after shared has been served for engine */
-		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
+	spin_lock(&i915->irq_lock);
+
+	for (bank = 0; bank < 2; bank++) {
+		if (master_ctl & GEN11_GT_DW_IRQ(bank))
+			gen11_gt_bank_handler(i915, bank);
 	}
+
+	spin_unlock(&i915->irq_lock);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
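
Taken together, the rework above splits the old monolithic loop into three stages: bank selection under the irq_lock (gen11_gt_irq_handler()), per-bank bit iteration (gen11_gt_bank_handler()), and identity decode plus dispatch (gen11_gt_engine_identity() and gen11_gt_identity_handler()). A sketch of servicing a single bit using only the functions added in this hunk; the wrapper name gen11_service_one_bit() is hypothetical:

	static void gen11_service_one_bit(struct drm_i915_private *i915,
					  unsigned int bank, unsigned int bit)
	{
		u32 ident;

		lockdep_assert_held(&i915->irq_lock);

		/* Selects the bit, polls for GEN11_INTR_DATA_VALID, acks it. */
		ident = gen11_gt_engine_identity(i915, bank, bit);

		/* Routes by class/instance to an engine or the GTPM handler. */
		gen11_gt_identity_handler(i915, ident);
	}

Note that GT_INTR_DW(bank) itself is still cleared by the caller afterwards, as gen11_gt_bank_handler() does once the whole dword has been walked.
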
@@ -2912,15 +2981,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-/**
- * i915_reset_device - do process context error handling work
- * @dev_priv: i915 device private
- *
- * Fire an error uevent so userspace can see that a hang or error
- * was detected.
- */
-static void i915_reset_device(struct drm_i915_private *dev_priv)
+static void i915_reset_device(struct drm_i915_private *dev_priv,
+			      u32 engine_mask,
+			      const char *reason)
 {
+	struct i915_gpu_error *error = &dev_priv->gpu_error;
 	struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
@@ -2936,29 +3001,35 @@ static void i915_reset_device(struct drm_i915_private *dev_priv)
 	i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
 		intel_prepare_reset(dev_priv);
 
+		error->reason = reason;
+		error->stalled_mask = engine_mask;
+
 		/* Signal that locked waiters should reset the GPU */
-		set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
-		wake_up_all(&dev_priv->gpu_error.wait_queue);
+		smp_mb__before_atomic();
+		set_bit(I915_RESET_HANDOFF, &error->flags);
+		wake_up_all(&error->wait_queue);
 
 		/* Wait for anyone holding the lock to wakeup, without
 		 * blocking indefinitely on struct_mutex.
 		 */
 		do {
 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-				i915_reset(dev_priv, 0);
+				i915_reset(dev_priv, engine_mask, reason);
 				mutex_unlock(&dev_priv->drm.struct_mutex);
 			}
-		} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
+		} while (wait_on_bit_timeout(&error->flags,
 					     I915_RESET_HANDOFF,
 					     TASK_UNINTERRUPTIBLE,
 					     1));
 
+		error->stalled_mask = 0;
+		error->reason = NULL;
+
 		intel_finish_reset(dev_priv);
 	}
 
-	if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
-		kobject_uevent_env(kobj,
-				   KOBJ_CHANGE, reset_done_event);
+	if (!test_bit(I915_WEDGED, &error->flags))
+		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
 }
 
 static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
@@ -2990,6 +3061,7 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
  * i915_handle_error - handle a gpu error
  * @dev_priv: i915 device private
  * @engine_mask: mask representing engines that are hung
+ * @flags: control flags
  * @fmt: Error message format string
  *
  * Do some basic checking of register state at error time and
@@ -3000,16 +3072,23 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
  */
 void i915_handle_error(struct drm_i915_private *dev_priv,
 		       u32 engine_mask,
+		       unsigned long flags,
 		       const char *fmt, ...)
 {
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
-	va_list args;
 	char error_msg[80];
+	char *msg = NULL;
 
-	va_start(args, fmt);
-	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
-	va_end(args);
+	if (fmt) {
+		va_list args;
+
+		va_start(args, fmt);
+		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+		va_end(args);
+
+		msg = error_msg;
+	}
 
 	/*
 	 * In most cases it's guaranteed that we get here with an RPM
@@ -3020,8 +3099,12 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 	 */
 	intel_runtime_pm_get(dev_priv);
 
-	i915_capture_error_state(dev_priv, engine_mask, error_msg);
-	i915_clear_error_registers(dev_priv);
+	engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
+
+	if (flags & I915_ERROR_CAPTURE) {
+		i915_capture_error_state(dev_priv, engine_mask, msg);
+		i915_clear_error_registers(dev_priv);
+	}
 
 	/*
 	 * Try engine reset when available. We fall back to full reset if
@@ -3034,7 +3117,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 					     &dev_priv->gpu_error.flags))
 				continue;
 
-			if (i915_reset_engine(engine, 0) == 0)
+			if (i915_reset_engine(engine, msg) == 0)
 				engine_mask &= ~intel_engine_flag(engine);
 
 			clear_bit(I915_RESET_ENGINE + engine->id,
@@ -3064,7 +3147,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
 				    TASK_UNINTERRUPTIBLE);
 	}
 
-	i915_reset_device(dev_priv);
+	i915_reset_device(dev_priv, engine_mask, msg);
 
 	for_each_engine(engine, dev_priv, tmp) {
 		clear_bit(I915_RESET_ENGINE + engine->id,
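
With the reworked signature, callers of i915_handle_error() decide whether an error-state capture happens (via the new flags word) and may pass a NULL format string when no message is wanted. A hypothetical call site, illustrative only:

	/* Capture state and name the hung engine; fmt may instead be NULL. */
	i915_handle_error(dev_priv, intel_engine_flag(engine),
			  I915_ERROR_CAPTURE,
			  "%s hung", engine->name);
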
@@ -3286,6 +3369,11 @@ static void ironlake_irq_reset(struct drm_device *dev)
 	if (IS_GEN7(dev_priv))
 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
 
+	if (IS_HASWELL(dev_priv)) {
+		I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+		I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+	}
+
 	gen5_gt_irq_reset(dev_priv);
 
 	ibx_irq_reset(dev_priv);
@@ -3324,6 +3412,9 @@ static void gen8_irq_reset(struct drm_device *dev)
 
 	gen8_gt_irq_reset(dev_priv);
 
+	I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+	I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
 	for_each_pipe(dev_priv, pipe)
 		if (intel_display_power_is_enabled(dev_priv,
 						   POWER_DOMAIN_PIPE(pipe)))
@@ -3349,6 +3440,9 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
+
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
 }
 
 static void gen11_irq_reset(struct drm_device *dev)
@@ -3697,6 +3791,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 			      DE_DP_A_HOTPLUG);
 	}
 
+	if (IS_HASWELL(dev_priv)) {
+		gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+		intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+		display_mask |= DE_EDP_PSR_INT_HSW;
+	}
+
 	dev_priv->irq_mask = ~display_mask;
 
 	ibx_irq_pre_postinstall(dev);
@@ -3807,7 +3907,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	uint32_t de_pipe_enables;
 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
 	u32 de_port_enables;
-	u32 de_misc_masked = GEN8_DE_MISC_GSE;
+	u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
 	enum pipe pipe;
 
 	if (INTEL_GEN(dev_priv) >= 9) {
@@ -3832,6 +3932,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 	else if (IS_BROADWELL(dev_priv))
 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
 
+	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
 	for_each_pipe(dev_priv, pipe) {
 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
 
@@ -3887,7 +3990,14 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
 	I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
 
-	dev_priv->pm_imr = 0xffffffff; /* TODO */
+	/*
+	 * RPS interrupts will get enabled/disabled on demand when RPS itself
+	 * is enabled/disabled.
+	 */
+	dev_priv->pm_ier = 0x0;
+	dev_priv->pm_imr = ~dev_priv->pm_ier;
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+	I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
 }
 
 static int gen11_irq_postinstall(struct drm_device *dev)
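
The removed TODO is resolved by keeping everything masked at postinstall time and unmasking on demand: gen6_enable_rps_interrupts() (earlier in this patch) calls gen6_enable_pm_irq() under the irq_lock, and on gen11 that path is expected to land in the GEN11_GPM_WGBOXPERF registers initialised here, assuming gen6_enable_pm_irq() keeps writing through the gen6_pm_imr()/gen6_pm_ier() accessors (its body is not part of this diff). A minimal sketch of that enable path:

	static void example_enable_rps_events(struct drm_i915_private *dev_priv)
	{
		/* Mirrors gen6_enable_rps_interrupts(): unmask and enable the
		 * RPS events; on gen11 this resolves to the WGBOXPERF regs.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
		spin_unlock_irq(&dev_priv->irq_lock);
	}
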
