26 files changed, 275 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c5b8c81b9440..0a8eceb75902 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -379,15 +379,15 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 	INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
 	INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
 	INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+	INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
+	INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
 	INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
-	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+	INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
+	INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
 	INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
-	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+	INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
+	INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
-	INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
 	INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
 	INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
@@ -495,6 +495,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 		intel_modeset_disable(dev);
 
 		drm_irq_uninstall(dev);
+		dev_priv->enable_hotplug_processing = false;
 	}
 
 	i915_save_state(dev);
@@ -568,10 +569,20 @@ static int __i915_drm_thaw(struct drm_device *dev)
 		error = i915_gem_init_hw(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		/* We need working interrupts for modeset enabling ... */
+		drm_irq_install(dev);
+
 		intel_modeset_init_hw(dev);
 		intel_modeset_setup_hw_state(dev, false);
-		drm_irq_install(dev);
+
+		/*
+		 * ... but also need to make sure that hotplug processing
+		 * doesn't cause havoc. Like in the driver load code we don't
+		 * bother with the tiny race here where we might lose hotplug
+		 * notifications.
+		 */
 		intel_hpd_init(dev);
+		dev_priv->enable_hotplug_processing = true;
 	}
 
 	intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2cd97d1cc920..3c7bb0410b51 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -701,7 +701,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u32 de_iir, gt_iir, de_ier, pm_iir;
+	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 	irqreturn_t ret = IRQ_NONE;
 	int i;
 
@@ -711,6 +711,15 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 
+	/* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+	 * able to process them after we restore SDEIER (as soon as we restore
+	 * it, we'll get an interrupt if SDEIIR still has something to process
+	 * due to its back queue). */
+	sde_ier = I915_READ(SDEIER);
+	I915_WRITE(SDEIER, 0);
+	POSTING_READ(SDEIER);
+
 	gt_iir = I915_READ(GTIIR);
 	if (gt_iir) {
 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
@@ -759,6 +768,8 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
 
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
+	I915_WRITE(SDEIER, sde_ier);
+	POSTING_READ(SDEIER);
 
 	return ret;
 }
@@ -778,7 +789,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier, pm_iir;
+	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -787,6 +798,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 	POSTING_READ(DEIER);
 
+	/* Disable south interrupts. We'll only write to SDEIIR once, so further
+	 * interrupts will be stored on its back queue, and then we'll be
+	 * able to process them after we restore SDEIER (as soon as we restore
+	 * it, we'll get an interrupt if SDEIIR still has something to process
+	 * due to its back queue). */
+	sde_ier = I915_READ(SDEIER);
+	I915_WRITE(SDEIER, 0);
+	POSTING_READ(SDEIER);
+
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
 	pm_iir = I915_READ(GEN6_PMIIR);
@@ -849,6 +869,8 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 done:
 	I915_WRITE(DEIER, de_ier);
 	POSTING_READ(DEIER);
+	I915_WRITE(SDEIER, sde_ier);
+	POSTING_READ(SDEIER);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 527b664d3434..848992f67d56 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1613,9 +1613,9 @@
 #define   ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 #define   ADPA_USE_VGA_HVPOLARITY (1<<15)
 #define   ADPA_SETS_HVPOLARITY 0
-#define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_VSYNC_CNTL_DISABLE (1<<10)
 #define   ADPA_VSYNC_CNTL_ENABLE 0
-#define   ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_HSYNC_CNTL_DISABLE (1<<11)
 #define   ADPA_HSYNC_CNTL_ENABLE 0
 #define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
 #define   ADPA_VSYNC_ACTIVE_LOW 0
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 969d08c72d10..32a3693905ec 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -88,7 +88,7 @@ static void intel_disable_crt(struct intel_encoder *encoder)
 	u32 temp;
 
 	temp = I915_READ(crt->adpa_reg);
-	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+	temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
 	temp &= ~ADPA_DAC_ENABLE;
 	I915_WRITE(crt->adpa_reg, temp);
 }
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d64af5aa4a1c..8d0bac3c35d7 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1391,8 +1391,8 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
 	enum port port = intel_dig_port->port;
-	bool wait;
 	uint32_t val;
+	bool wait = false;
 
 	if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
 		val = I915_READ(DDI_BUF_CTL(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a05ac2c91ba2..287b42c9d1a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3604,6 +3604,30 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 	 */
 }
 
+/**
+ * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
+ * cursor plane briefly if not already running after enabling the display
+ * plane.
+ * This workaround avoids occasional blank screens when self refresh is
+ * enabled.
+ */
+static void
+g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	u32 cntl = I915_READ(CURCNTR(pipe));
+
+	if ((cntl & CURSOR_MODE) == 0) {
+		u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
+
+		I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
+		I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
+		intel_wait_for_vblank(dev_priv->dev, pipe);
+		I915_WRITE(CURCNTR(pipe), cntl);
+		I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+		I915_WRITE(FW_BLC_SELF, fw_bcl_self);
+	}
+}
+
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3629,6 +3653,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
 	intel_enable_pipe(dev_priv, pipe, false);
 	intel_enable_plane(dev_priv, plane, pipe);
+	if (IS_G4X(dev))
+		g4x_fixup_plane(dev_priv, pipe);
 
 	intel_crtc_load_lut(crtc);
 	intel_update_fbc(dev);
@@ -7256,8 +7282,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj;
+	struct drm_framebuffer *old_fb = crtc->fb;
+	struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags;
@@ -7282,8 +7308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->event = event;
 	work->crtc = crtc;
-	intel_fb = to_intel_framebuffer(crtc->fb);
-	work->old_fb_obj = intel_fb->obj;
+	work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
 
 	ret = drm_vblank_get(dev, intel_crtc->pipe);
@@ -7303,9 +7328,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_crtc->unpin_work = work;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	intel_fb = to_intel_framebuffer(fb);
-	obj = intel_fb->obj;
-
 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
 		flush_workqueue(dev_priv->wq);
 
@@ -7340,6 +7362,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
 	atomic_dec(&intel_crtc->unpin_work_count);
+	crtc->fb = old_fb;
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f61cb7998c72..6f728e5ee793 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -353,7 +353,8 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 
 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
 	if (has_aux_irq)
-		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
+		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
+					  msecs_to_jiffies(10));
 	else
 		done = wait_for_atomic(C, 10) == 0;
 	if (!done)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 61fee7fcdc2c..a1794c6df1bf 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2574,7 +2574,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
 	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
-	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
 	/* Check if we are enabling RC6 */
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 5ea5033eae0a..4d932c46725d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -112,7 +112,6 @@ struct mga_framebuffer {
 struct mga_fbdev {
 	struct drm_fb_helper helper;
 	struct mga_framebuffer mfb;
-	struct list_head fbdev_list;
 	void *sysram;
 	int size;
 	struct ttm_bo_kmap_obj mapping;
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 5a88ec51b513..d3dcf54e6233 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -92,6 +92,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
 	int ret;
 	int data, clock;
 
+	WREG_DAC(MGA1064_GEN_IO_CTL2, 1);
 	WREG_DAC(MGA1064_GEN_IO_DATA, 0xff);
 	WREG_DAC(MGA1064_GEN_IO_CTL, 0);
 
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d3d99a28ddef..a274b9906ef8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1406,6 +1406,14 @@ static int mga_vga_get_modes(struct drm_connector *connector)
 static int mga_vga_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
+	struct drm_device *dev = connector->dev;
+	struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+	struct mga_fbdev *mfbdev = mdev->mfbdev;
+	struct drm_fb_helper *fb_helper = &mfbdev->helper;
+	struct drm_fb_helper_connector *fb_helper_conn = NULL;
+	int bpp = 32;
+	int i = 0;
+
 	/* FIXME: Add bandwidth and g200se limitations */
 
 	if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
@@ -1415,6 +1423,25 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 		return MODE_BAD;
 	}
 
+	/* Validate the mode input by the user */
+	for (i = 0; i < fb_helper->connector_count; i++) {
+		if (fb_helper->connector_info[i]->connector == connector) {
+			/* Found the helper for this connector */
+			fb_helper_conn = fb_helper->connector_info[i];
+			if (fb_helper_conn->cmdline_mode.specified) {
+				if (fb_helper_conn->cmdline_mode.bpp_specified) {
+					bpp = fb_helper_conn->cmdline_mode.bpp;
+				}
+			}
+		}
+	}
+
+	if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) {
+		if (fb_helper_conn)
+			fb_helper_conn->cmdline_mode.specified = false;
+		return MODE_BAD;
+	}
+
 	return MODE_OK;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 61cec0f6ff1c..4857f913efdd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -350,7 +350,7 @@ nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
 		nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
 	}
 
-	nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+	nv_wr32(priv, GPC_BCAST(0x3fd4), magicgpc918);
 	nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 2cc1e6a5eb6a..9c41b58d57e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -869,7 +869,7 @@ init_idx_addr_latched(struct nvbios_init *init)
 		init->offset += 2;
 
 		init_wr32(init, dreg, idata);
-		init_mask(init, creg, ~mask, data | idata);
+		init_mask(init, creg, ~mask, data | iaddr);
 	}
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index a114a0ed7e98..2e98e8a3f1aa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -142,6 +142,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
 	/* drop port's i2c subdev refcount, i2c handles this itself */
 	if (ret == 0) {
 		list_add_tail(&port->head, &i2c->ports);
+		atomic_dec(&parent->refcount);
 		atomic_dec(&engine->refcount);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index d28430cd2ba6..6e7a55f93a85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -47,6 +47,18 @@ nouveau_agp_enabled(struct nouveau_drm *drm)
 	if (drm->agp.stat == UNKNOWN) {
 		if (!nouveau_agpmode)
 			return false;
+#ifdef __powerpc__
+		/* Disable AGP by default on all PowerPC machines for
+		 * now -- At least some UniNorth-2 AGP bridges are
+		 * known to be broken: DMA from the host to the card
+		 * works just fine, but writeback from the card to the
+		 * host goes straight to memory untranslated bypassing
+		 * the GATT somehow, making them quite painful to deal
+		 * with...
+		 */
+		if (nouveau_agpmode == -1)
+			return false;
+#endif
 		return true;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a6237c9cbbc3..87a5a56ed358 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -55,9 +55,9 @@
 
 /* offsets in shared sync bo of various structures */
 #define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
-#define EVO_MAST_NTFY     EVO_SYNC(  0, 0x00)
-#define EVO_FLIP_SEM0(c)  EVO_SYNC((c), 0x00)
-#define EVO_FLIP_SEM1(c)  EVO_SYNC((c), 0x10)
+#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
+#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
+#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
 
 #define EVO_CORE_HANDLE (0xd1500000)
 #define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
@@ -341,10 +341,8 @@ struct nv50_curs {
 
 struct nv50_sync {
 	struct nv50_dmac base;
-	struct {
-		u32 offset;
-		u16 value;
-	} sem;
+	u32 addr;
+	u32 data;
 };
 
 struct nv50_ovly {
@@ -471,13 +469,33 @@ nv50_display_crtc_sema(struct drm_device *dev, int crtc)
 	return nv50_disp(dev)->sync;
 }
 
+struct nv50_display_flip {
+	struct nv50_disp *disp;
+	struct nv50_sync *chan;
+};
+
+static bool
+nv50_display_flip_wait(void *data)
+{
+	struct nv50_display_flip *flip = data;
+	if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
+					      flip->chan->data)
+		return true;
+	usleep_range(1, 2);
+	return false;
+}
+
 void
 nv50_display_flip_stop(struct drm_crtc *crtc)
 {
-	struct nv50_sync *sync = nv50_sync(crtc);
+	struct nouveau_device *device = nouveau_dev(crtc->dev);
+	struct nv50_display_flip flip = {
+		.disp = nv50_disp(crtc->dev),
+		.chan = nv50_sync(crtc),
+	};
 	u32 *push;
 
-	push = evo_wait(sync, 8);
+	push = evo_wait(flip.chan, 8);
 	if (push) {
 		evo_mthd(push, 0x0084, 1);
 		evo_data(push, 0x00000000);
@@ -487,8 +505,10 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
 		evo_data(push, 0x00000000);
 		evo_mthd(push, 0x0080, 1);
 		evo_data(push, 0x00000000);
-		evo_kick(push, sync);
+		evo_kick(push, flip.chan);
 	}
+
+	nv_wait_cb(device, nv50_display_flip_wait, &flip);
 }
 
 int
@@ -496,11 +516,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			       struct nouveau_channel *chan, u32 swap_interval)
 {
 	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-	struct nv50_disp *disp = nv50_disp(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nv50_sync *sync = nv50_sync(crtc);
+	int head = nv_crtc->index, ret;
 	u32 *push;
-	int ret;
 
 	swap_interval <<= 4;
 	if (swap_interval == 0)
@@ -510,58 +529,64 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	if (unlikely(push == NULL))
 		return -EBUSY;
 
-	/* synchronise with the rendering channel, if necessary */
-	if (likely(chan)) {
+	if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+		ret = RING_SPACE(chan, 8);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+		OUT_RING  (chan, NvEvoSema0 + head);
+		OUT_RING  (chan, sync->addr ^ 0x10);
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+		OUT_RING  (chan, sync->data + 1);
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+		OUT_RING  (chan, sync->addr);
+		OUT_RING  (chan, sync->data);
+	} else
+	if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+		u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+		ret = RING_SPACE(chan, 12);
+		if (ret)
+			return ret;
+
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+		OUT_RING  (chan, chan->vram);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, sync->data + 1);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr));
+		OUT_RING  (chan, lower_32_bits(addr));
+		OUT_RING  (chan, sync->data);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
+	} else
+	if (chan) {
+		u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
 		ret = RING_SPACE(chan, 10);
 		if (ret)
 			return ret;
 
-		if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
-			BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-			OUT_RING  (chan, NvEvoSema0 + nv_crtc->index);
-			OUT_RING  (chan, sync->sem.offset);
-			BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-			OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
-			BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
-			OUT_RING  (chan, sync->sem.offset ^ 0x10);
-			OUT_RING  (chan, 0x74b1e000);
-			BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-			OUT_RING  (chan, NvSema);
-		} else
-		if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
-			u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
-			offset += sync->sem.offset;
-
-			BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-			OUT_RING  (chan, upper_32_bits(offset));
-			OUT_RING  (chan, lower_32_bits(offset));
-			OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
-			OUT_RING  (chan, 0x00000002);
-			BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-			OUT_RING  (chan, upper_32_bits(offset));
-			OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
-			OUT_RING  (chan, 0x74b1e000);
-			OUT_RING  (chan, 0x00000001);
-		} else {
-			u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
-			offset += sync->sem.offset;
-
-			BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-			OUT_RING  (chan, upper_32_bits(offset));
-			OUT_RING  (chan, lower_32_bits(offset));
-			OUT_RING  (chan, 0xf00d0000 | sync->sem.value);
-			OUT_RING  (chan, 0x00001002);
-			BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-			OUT_RING  (chan, upper_32_bits(offset));
-			OUT_RING  (chan, lower_32_bits(offset ^ 0x10));
-			OUT_RING  (chan, 0x74b1e000);
-			OUT_RING  (chan, 0x00001001);
-		}
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, lower_32_bits(addr ^ 0x10));
+		OUT_RING  (chan, sync->data + 1);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG |
+				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING  (chan, upper_32_bits(addr));
+		OUT_RING  (chan, lower_32_bits(addr));
+		OUT_RING  (chan, sync->data);
+		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL |
+				 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+	}
 
+	if (chan) {
+		sync->addr ^= 0x10;
+		sync->data++;
 		FIRE_RING (chan);
 	} else {
-		nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
-				0xf00d0000 | sync->sem.value);
 		evo_sync(crtc->dev);
 	}
 
@@ -575,9 +600,9 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		evo_data(push, 0x40000000);
 	}
 	evo_mthd(push, 0x0088, 4);
-	evo_data(push, sync->sem.offset);
-	evo_data(push, 0xf00d0000 | sync->sem.value);
-	evo_data(push, 0x74b1e000);
+	evo_data(push, sync->addr);
+	evo_data(push, sync->data++);
+	evo_data(push, sync->data);
 	evo_data(push, NvEvoSync);
 	evo_mthd(push, 0x00a0, 2);
 	evo_data(push, 0x00000000);
@@ -605,9 +630,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	evo_mthd(push, 0x0080, 1);
 	evo_data(push, 0x00000000);
 	evo_kick(push, sync);
-
-	sync->sem.offset ^= 0x10;
-	sync->sem.value++;
 	return 0;
 }
 
@@ -1379,7 +1401,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
 	if (ret)
 		goto out;
 
-	head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+	head->sync.addr = EVO_FLIP_SEM0(index);
+	head->sync.data = 0x00000000;
 
 	/* allocate overlay resources */
 	ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
@@ -2112,15 +2135,23 @@ nv50_display_fini(struct drm_device *dev)
 int
 nv50_display_init(struct drm_device *dev)
 {
-	u32 *push = evo_wait(nv50_mast(dev), 32);
-	if (push) {
-		evo_mthd(push, 0x0088, 1);
-		evo_data(push, NvEvoSync);
-		evo_kick(push, nv50_mast(dev));
-		return 0;
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct drm_crtc *crtc;
+	u32 *push;
+
+	push = evo_wait(nv50_mast(dev), 32);
+	if (!push)
+		return -EBUSY;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct nv50_sync *sync = nv50_sync(crtc);
+		nouveau_bo_wr32(disp->sync, sync->addr / 4, sync->data);
 	}
 
-	return -EBUSY;
+	evo_mthd(push, 0x0088, 1);
+	evo_data(push, NvEvoSync);
+	evo_kick(push, nv50_mast(dev));
+	return 0;
 }
 
 void
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3c38ea46531c..305a657bf215 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2438,6 +2438,12 @@ static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
 	if (tmp & L2_BUSY)
 		reset_mask |= RADEON_RESET_VMC;
 
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
 	return reset_mask;
 }
 
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 99fb13286fd0..eb8ac315f92f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -834,7 +834,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
 			 __func__, __LINE__, toffset, surf.base_align);
 		return -EINVAL;
 	}
-	if (moffset & (surf.base_align - 1)) {
+	if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
 		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
 			 __func__, __LINE__, moffset, surf.base_align);
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7cead763be9e..d4c633e12863 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1381,6 +1381,12 @@ static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
 	if (tmp & L2_BUSY)
 		reset_mask |= RADEON_RESET_VMC;
 
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
 	return reset_mask;
 }
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6d4b5611daf4..0740db3fcd22 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1394,6 +1394,12 @@ static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
 	if (r600_is_display_hung(rdev))
 		reset_mask |= RADEON_RESET_DISPLAY;
 
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
 	return reset_mask;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e403bdda58f..78edadc9e86b 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -970,6 +970,15 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
 		found = 1;
 	}
 
+	/* quirks */
+	/* Radeon 9100 (R200) */
+	if ((dev->pdev->device == 0x514D) &&
+	    (dev->pdev->subsystem_vendor == 0x174B) &&
+	    (dev->pdev->subsystem_device == 0x7149)) {
+		/* vbios value is bad, use the default */
+		found = 0;
+	}
+
 	if (!found) /* fallback to defaults */
 		radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
 
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 167758488ed6..66a7f0fd9620 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -70,9 +70,10 @@
  * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
  * 2.28.0 - r600-eg: Add MEM_WRITE packet support
  * 2.29.0 - R500 FP16 color clear registers
+ * 2.30.0 - fix for FMASK texturing
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	29
+#define KMS_DRIVER_MINOR	30
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 90374dd77960..48f80cd42d8f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -400,6 +400,9 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
 {
 	unsigned long irqflags;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	rdev->irq.afmt[block] = true;
 	radeon_irq_set(rdev);
@@ -419,6 +422,9 @@ void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
 {
 	unsigned long irqflags;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	rdev->irq.afmt[block] = false;
 	radeon_irq_set(rdev);
@@ -438,6 +444,9 @@ void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
 	unsigned long irqflags;
 	int i;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
 		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
@@ -458,6 +467,9 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
 	unsigned long irqflags;
 	int i;
 
+	if (!rdev->ddev->irq_enabled)
+		return;
+
 	spin_lock_irqsave(&rdev->irq.lock, irqflags);
 	for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
 		rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 80979ed951eb..9128120da044 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2284,6 +2284,12 @@ static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
 	if (tmp & L2_BUSY)
 		reset_mask |= RADEON_RESET_VMC;
 
+	/* Skip MC reset as it's most likely not hung, just busy */
+	if (reset_mask & RADEON_RESET_MC) {
+		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+		reset_mask &= ~RADEON_RESET_MC;
+	}
+
 	return reset_mask;
 }
 
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index c92955df0658..be1daf7344d3 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,7 +4,6 @@ config DRM_TEGRA
 	select DRM_KMS_HELPER
 	select DRM_GEM_CMA_HELPER
 	select DRM_KMS_CMA_HELPER
-	select DRM_HDMI
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8839b3a24660..e3e0d651c6ca 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -443,12 +443,12 @@ struct drm_crtc {
  * @dpms: set power state (see drm_crtc_funcs above)
  * @save: save connector state
  * @restore: restore connector state
- * @reset: reset connector after state has been invalidate (e.g. resume)
+ * @reset: reset connector after state has been invalidated (e.g. resume)
  * @detect: is this connector active?
  * @fill_modes: fill mode list for this connector
- * @set_property: property for this connector may need update
+ * @set_property: property for this connector may need an update
  * @destroy: make object go away
- * @force: notify the driver the connector is forced on
+ * @force: notify the driver that the connector is forced on
 *
 * Each CRTC may have one or more connectors attached to it. The functions
 * below allow the core DRM code to control connectors, enumerate available modes,
