diff options
Diffstat (limited to 'drivers/gpu/drm/i915/intel_hotplug.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_hotplug.c | 134 |
1 files changed, 113 insertions, 21 deletions
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index bee673005d48..f48957ea100d 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, | |||
144 | 144 | ||
145 | static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) | 145 | static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) |
146 | { | 146 | { |
147 | struct drm_device *dev = dev_priv->dev; | 147 | struct drm_device *dev = &dev_priv->drm; |
148 | struct drm_mode_config *mode_config = &dev->mode_config; | 148 | struct drm_mode_config *mode_config = &dev->mode_config; |
149 | struct intel_connector *intel_connector; | 149 | struct intel_connector *intel_connector; |
150 | struct intel_encoder *intel_encoder; | 150 | struct intel_encoder *intel_encoder; |
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) | |||
191 | struct drm_i915_private *dev_priv = | 191 | struct drm_i915_private *dev_priv = |
192 | container_of(work, typeof(*dev_priv), | 192 | container_of(work, typeof(*dev_priv), |
193 | hotplug.reenable_work.work); | 193 | hotplug.reenable_work.work); |
194 | struct drm_device *dev = dev_priv->dev; | 194 | struct drm_device *dev = &dev_priv->drm; |
195 | struct drm_mode_config *mode_config = &dev->mode_config; | 195 | struct drm_mode_config *mode_config = &dev->mode_config; |
196 | int i; | 196 | int i; |
197 | 197 | ||
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) | |||
220 | } | 220 | } |
221 | } | 221 | } |
222 | if (dev_priv->display.hpd_irq_setup) | 222 | if (dev_priv->display.hpd_irq_setup) |
223 | dev_priv->display.hpd_irq_setup(dev); | 223 | dev_priv->display.hpd_irq_setup(dev_priv); |
224 | spin_unlock_irq(&dev_priv->irq_lock); | 224 | spin_unlock_irq(&dev_priv->irq_lock); |
225 | 225 | ||
226 | intel_runtime_pm_put(dev_priv); | 226 | intel_runtime_pm_put(dev_priv); |
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
302 | { | 302 | { |
303 | struct drm_i915_private *dev_priv = | 303 | struct drm_i915_private *dev_priv = |
304 | container_of(work, struct drm_i915_private, hotplug.hotplug_work); | 304 | container_of(work, struct drm_i915_private, hotplug.hotplug_work); |
305 | struct drm_device *dev = dev_priv->dev; | 305 | struct drm_device *dev = &dev_priv->drm; |
306 | struct drm_mode_config *mode_config = &dev->mode_config; | 306 | struct drm_mode_config *mode_config = &dev->mode_config; |
307 | struct intel_connector *intel_connector; | 307 | struct intel_connector *intel_connector; |
308 | struct intel_encoder *intel_encoder; | 308 | struct intel_encoder *intel_encoder; |
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
346 | 346 | ||
347 | /** | 347 | /** |
348 | * intel_hpd_irq_handler - main hotplug irq handler | 348 | * intel_hpd_irq_handler - main hotplug irq handler |
349 | * @dev: drm device | 349 | * @dev_priv: drm_i915_private |
350 | * @pin_mask: a mask of hpd pins that have triggered the irq | 350 | * @pin_mask: a mask of hpd pins that have triggered the irq |
351 | * @long_mask: a mask of hpd pins that may be long hpd pulses | 351 | * @long_mask: a mask of hpd pins that may be long hpd pulses |
352 | * | 352 | * |
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
360 | * Here, we do hotplug irq storm detection and mitigation, and pass further | 360 | * Here, we do hotplug irq storm detection and mitigation, and pass further |
361 | * processing to appropriate bottom halves. | 361 | * processing to appropriate bottom halves. |
362 | */ | 362 | */ |
363 | void intel_hpd_irq_handler(struct drm_device *dev, | 363 | void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, |
364 | u32 pin_mask, u32 long_mask) | 364 | u32 pin_mask, u32 long_mask) |
365 | { | 365 | { |
366 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
367 | int i; | 366 | int i; |
368 | enum port port; | 367 | enum port port; |
369 | bool storm_detected = false; | 368 | bool storm_detected = false; |
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, | |||
407 | * hotplug bits itself. So only WARN about unexpected | 406 | * hotplug bits itself. So only WARN about unexpected |
408 | * interrupts on saner platforms. | 407 | * interrupts on saner platforms. |
409 | */ | 408 | */ |
410 | WARN_ONCE(!HAS_GMCH_DISPLAY(dev), | 409 | WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv), |
411 | "Received HPD interrupt on pin %d although disabled\n", i); | 410 | "Received HPD interrupt on pin %d although disabled\n", i); |
412 | continue; | 411 | continue; |
413 | } | 412 | } |
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev, | |||
427 | } | 426 | } |
428 | 427 | ||
429 | if (storm_detected) | 428 | if (storm_detected) |
430 | dev_priv->display.hpd_irq_setup(dev); | 429 | dev_priv->display.hpd_irq_setup(dev_priv); |
431 | spin_unlock(&dev_priv->irq_lock); | 430 | spin_unlock(&dev_priv->irq_lock); |
432 | 431 | ||
433 | /* | 432 | /* |
@@ -453,20 +452,47 @@ void intel_hpd_irq_handler(struct drm_device *dev, | |||
453 | * | 452 | * |
454 | * This is a separate step from interrupt enabling to simplify the locking rules | 453 | * This is a separate step from interrupt enabling to simplify the locking rules |
455 | * in the driver load and resume code. | 454 | * in the driver load and resume code. |
455 | * | ||
456 | * Also see: intel_hpd_poll_init(), which enables connector polling | ||
456 | */ | 457 | */ |
457 | void intel_hpd_init(struct drm_i915_private *dev_priv) | 458 | void intel_hpd_init(struct drm_i915_private *dev_priv) |
458 | { | 459 | { |
459 | struct drm_device *dev = dev_priv->dev; | ||
460 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
461 | struct drm_connector *connector; | ||
462 | int i; | 460 | int i; |
463 | 461 | ||
464 | for_each_hpd_pin(i) { | 462 | for_each_hpd_pin(i) { |
465 | dev_priv->hotplug.stats[i].count = 0; | 463 | dev_priv->hotplug.stats[i].count = 0; |
466 | dev_priv->hotplug.stats[i].state = HPD_ENABLED; | 464 | dev_priv->hotplug.stats[i].state = HPD_ENABLED; |
467 | } | 465 | } |
466 | |||
467 | WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); | ||
468 | schedule_work(&dev_priv->hotplug.poll_init_work); | ||
469 | |||
470 | /* | ||
471 | * Interrupt setup is already guaranteed to be single-threaded, this is | ||
472 | * just to make the assert_spin_locked checks happy. | ||
473 | */ | ||
474 | spin_lock_irq(&dev_priv->irq_lock); | ||
475 | if (dev_priv->display.hpd_irq_setup) | ||
476 | dev_priv->display.hpd_irq_setup(dev_priv); | ||
477 | spin_unlock_irq(&dev_priv->irq_lock); | ||
478 | } | ||
479 | |||
480 | void i915_hpd_poll_init_work(struct work_struct *work) { | ||
481 | struct drm_i915_private *dev_priv = | ||
482 | container_of(work, struct drm_i915_private, | ||
483 | hotplug.poll_init_work); | ||
484 | struct drm_device *dev = &dev_priv->drm; | ||
485 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
486 | struct drm_connector *connector; | ||
487 | bool enabled; | ||
488 | |||
489 | mutex_lock(&dev->mode_config.mutex); | ||
490 | |||
491 | enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); | ||
492 | |||
468 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 493 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
469 | struct intel_connector *intel_connector = to_intel_connector(connector); | 494 | struct intel_connector *intel_connector = |
495 | to_intel_connector(connector); | ||
470 | connector->polled = intel_connector->polled; | 496 | connector->polled = intel_connector->polled; |
471 | 497 | ||
472 | /* MST has a dynamic intel_connector->encoder and it's reprobing | 498 | /* MST has a dynamic intel_connector->encoder and it's reprobing |
@@ -475,24 +501,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) | |||
475 | continue; | 501 | continue; |
476 | 502 | ||
477 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && | 503 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && |
478 | intel_connector->encoder->hpd_pin > HPD_NONE) | 504 | intel_connector->encoder->hpd_pin > HPD_NONE) { |
479 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 505 | connector->polled = enabled ? |
506 | DRM_CONNECTOR_POLL_CONNECT | | ||
507 | DRM_CONNECTOR_POLL_DISCONNECT : | ||
508 | DRM_CONNECTOR_POLL_HPD; | ||
509 | } | ||
480 | } | 510 | } |
481 | 511 | ||
512 | if (enabled) | ||
513 | drm_kms_helper_poll_enable_locked(dev); | ||
514 | |||
515 | mutex_unlock(&dev->mode_config.mutex); | ||
516 | |||
482 | /* | 517 | /* |
483 | * Interrupt setup is already guaranteed to be single-threaded, this is | 518 | * We might have missed any hotplugs that happened while we were |
484 | * just to make the assert_spin_locked checks happy. | 519 | * in the middle of disabling polling |
485 | */ | 520 | */ |
486 | spin_lock_irq(&dev_priv->irq_lock); | 521 | if (!enabled) |
487 | if (dev_priv->display.hpd_irq_setup) | 522 | drm_helper_hpd_irq_event(dev); |
488 | dev_priv->display.hpd_irq_setup(dev); | 523 | } |
489 | spin_unlock_irq(&dev_priv->irq_lock); | 524 | |
525 | /** | ||
526 | * intel_hpd_poll_init - enables/disables polling for connectors with hpd | ||
527 | * @dev_priv: i915 device instance | ||
528 | * @enabled: Whether to enable or disable polling | ||
529 | * | ||
530 | * This function enables polling for all connectors, regardless of whether or | ||
531 | * not they support hotplug detection. Under certain conditions HPD may not be | ||
532 | * functional. On most Intel GPUs, this happens when we enter runtime suspend. | ||
533 | * On Valleyview and Cherryview systems, this also happens when we shut off all | ||
534 | * of the powerwells. | ||
535 | * | ||
536 | * Since this function can get called in contexts where we're already holding | ||
537 | * dev->mode_config.mutex, we do the actual hotplug enabling in a separate | ||
538 | * worker. | ||
539 | * | ||
540 | * Also see: intel_hpd_init(), which restores hpd handling. | ||
541 | */ | ||
542 | void intel_hpd_poll_init(struct drm_i915_private *dev_priv) | ||
543 | { | ||
544 | WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); | ||
545 | |||
546 | /* | ||
547 | * We might already be holding dev->mode_config.mutex, so do this in a | ||
548 | * seperate worker | ||
549 | * As well, there's no issue if we race here since we always reschedule | ||
550 | * this worker anyway | ||
551 | */ | ||
552 | schedule_work(&dev_priv->hotplug.poll_init_work); | ||
490 | } | 553 | } |
491 | 554 | ||
492 | void intel_hpd_init_work(struct drm_i915_private *dev_priv) | 555 | void intel_hpd_init_work(struct drm_i915_private *dev_priv) |
493 | { | 556 | { |
494 | INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); | 557 | INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); |
495 | INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); | 558 | INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); |
559 | INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); | ||
496 | INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, | 560 | INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, |
497 | intel_hpd_irq_storm_reenable_work); | 561 | intel_hpd_irq_storm_reenable_work); |
498 | } | 562 | } |
@@ -509,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) | |||
509 | 573 | ||
510 | cancel_work_sync(&dev_priv->hotplug.dig_port_work); | 574 | cancel_work_sync(&dev_priv->hotplug.dig_port_work); |
511 | cancel_work_sync(&dev_priv->hotplug.hotplug_work); | 575 | cancel_work_sync(&dev_priv->hotplug.hotplug_work); |
576 | cancel_work_sync(&dev_priv->hotplug.poll_init_work); | ||
512 | cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); | 577 | cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); |
513 | } | 578 | } |
579 | |||
580 | bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) | ||
581 | { | ||
582 | bool ret = false; | ||
583 | |||
584 | if (pin == HPD_NONE) | ||
585 | return false; | ||
586 | |||
587 | spin_lock_irq(&dev_priv->irq_lock); | ||
588 | if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) { | ||
589 | dev_priv->hotplug.stats[pin].state = HPD_DISABLED; | ||
590 | ret = true; | ||
591 | } | ||
592 | spin_unlock_irq(&dev_priv->irq_lock); | ||
593 | |||
594 | return ret; | ||
595 | } | ||
596 | |||
597 | void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) | ||
598 | { | ||
599 | if (pin == HPD_NONE) | ||
600 | return; | ||
601 | |||
602 | spin_lock_irq(&dev_priv->irq_lock); | ||
603 | dev_priv->hotplug.stats[pin].state = HPD_ENABLED; | ||
604 | spin_unlock_irq(&dev_priv->irq_lock); | ||
605 | } | ||