diff options
25 files changed, 792 insertions, 639 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 5f467845ef72..95c6bcb6bf22 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -3584,6 +3584,7 @@ M: Daniel Vetter <daniel.vetter@intel.com> | |||
3584 | M: Jani Nikula <jani.nikula@linux.intel.com> | 3584 | M: Jani Nikula <jani.nikula@linux.intel.com> |
3585 | L: intel-gfx@lists.freedesktop.org | 3585 | L: intel-gfx@lists.freedesktop.org |
3586 | L: dri-devel@lists.freedesktop.org | 3586 | L: dri-devel@lists.freedesktop.org |
3587 | W: https://01.org/linuxgraphics/ | ||
3587 | Q: http://patchwork.freedesktop.org/project/intel-gfx/ | 3588 | Q: http://patchwork.freedesktop.org/project/intel-gfx/ |
3588 | T: git git://anongit.freedesktop.org/drm-intel | 3589 | T: git git://anongit.freedesktop.org/drm-intel |
3589 | S: Supported | 3590 | S: Supported |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3f2a7a7c7cd4..a3b22bdacd44 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1850,7 +1850,7 @@ static int i915_opregion(struct seq_file *m, void *unused) | |||
1850 | goto out; | 1850 | goto out; |
1851 | 1851 | ||
1852 | if (opregion->header) { | 1852 | if (opregion->header) { |
1853 | memcpy_fromio(data, opregion->header, OPREGION_SIZE); | 1853 | memcpy(data, opregion->header, OPREGION_SIZE); |
1854 | seq_write(m, data, OPREGION_SIZE); | 1854 | seq_write(m, data, OPREGION_SIZE); |
1855 | } | 1855 | } |
1856 | 1856 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5adba06a85d1..8afda459a26e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -450,14 +450,14 @@ struct opregion_swsci; | |||
450 | struct opregion_asle; | 450 | struct opregion_asle; |
451 | 451 | ||
452 | struct intel_opregion { | 452 | struct intel_opregion { |
453 | struct opregion_header __iomem *header; | 453 | struct opregion_header *header; |
454 | struct opregion_acpi __iomem *acpi; | 454 | struct opregion_acpi *acpi; |
455 | struct opregion_swsci __iomem *swsci; | 455 | struct opregion_swsci *swsci; |
456 | u32 swsci_gbda_sub_functions; | 456 | u32 swsci_gbda_sub_functions; |
457 | u32 swsci_sbcb_sub_functions; | 457 | u32 swsci_sbcb_sub_functions; |
458 | struct opregion_asle __iomem *asle; | 458 | struct opregion_asle *asle; |
459 | void __iomem *vbt; | 459 | void *vbt; |
460 | u32 __iomem *lid_state; | 460 | u32 *lid_state; |
461 | struct work_struct asle_work; | 461 | struct work_struct asle_work; |
462 | }; | 462 | }; |
463 | #define OPREGION_SIZE (8*1024) | 463 | #define OPREGION_SIZE (8*1024) |
@@ -628,6 +628,10 @@ struct drm_i915_display_funcs { | |||
628 | struct dpll *match_clock, | 628 | struct dpll *match_clock, |
629 | struct dpll *best_clock); | 629 | struct dpll *best_clock); |
630 | void (*update_wm)(struct drm_crtc *crtc); | 630 | void (*update_wm)(struct drm_crtc *crtc); |
631 | void (*update_sprite_wm)(struct drm_plane *plane, | ||
632 | struct drm_crtc *crtc, | ||
633 | uint32_t sprite_width, uint32_t sprite_height, | ||
634 | int pixel_size, bool enable, bool scaled); | ||
631 | int (*modeset_calc_cdclk)(struct drm_atomic_state *state); | 635 | int (*modeset_calc_cdclk)(struct drm_atomic_state *state); |
632 | void (*modeset_commit_cdclk)(struct drm_atomic_state *state); | 636 | void (*modeset_commit_cdclk)(struct drm_atomic_state *state); |
633 | /* Returns the active state of the crtc, and if the crtc is active, | 637 | /* Returns the active state of the crtc, and if the crtc is active, |
@@ -1031,7 +1035,7 @@ struct i915_suspend_saved_registers { | |||
1031 | u32 saveMI_ARB_STATE; | 1035 | u32 saveMI_ARB_STATE; |
1032 | u32 saveSWF0[16]; | 1036 | u32 saveSWF0[16]; |
1033 | u32 saveSWF1[16]; | 1037 | u32 saveSWF1[16]; |
1034 | u32 saveSWF2[3]; | 1038 | u32 saveSWF3[3]; |
1035 | uint64_t saveFENCE[I915_MAX_NUM_FENCES]; | 1039 | uint64_t saveFENCE[I915_MAX_NUM_FENCES]; |
1036 | u32 savePCH_PORT_HOTPLUG; | 1040 | u32 savePCH_PORT_HOTPLUG; |
1037 | u16 saveGCDGMBUS; | 1041 | u16 saveGCDGMBUS; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1e67484fd5dc..e57061ac0219 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -3657,53 +3657,106 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
3657 | return 0; | 3657 | return 0; |
3658 | } | 3658 | } |
3659 | 3659 | ||
3660 | /** | ||
3661 | * Changes the cache-level of an object across all VMA. | ||
3662 | * | ||
3663 | * After this function returns, the object will be in the new cache-level | ||
3664 | * across all GTT and the contents of the backing storage will be coherent, | ||
3665 | * with respect to the new cache-level. In order to keep the backing storage | ||
3666 | * coherent for all users, we only allow a single cache level to be set | ||
3667 | * globally on the object and prevent it from being changed whilst the | ||
3668 | * hardware is reading from the object. That is if the object is currently | ||
3669 | * on the scanout it will be set to uncached (or equivalent display | ||
3670 | * cache coherency) and all non-MOCS GPU access will also be uncached so | ||
3671 | * that all direct access to the scanout remains coherent. | ||
3672 | */ | ||
3660 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | 3673 | int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
3661 | enum i915_cache_level cache_level) | 3674 | enum i915_cache_level cache_level) |
3662 | { | 3675 | { |
3663 | struct drm_device *dev = obj->base.dev; | 3676 | struct drm_device *dev = obj->base.dev; |
3664 | struct i915_vma *vma, *next; | 3677 | struct i915_vma *vma, *next; |
3678 | bool bound = false; | ||
3665 | int ret = 0; | 3679 | int ret = 0; |
3666 | 3680 | ||
3667 | if (obj->cache_level == cache_level) | 3681 | if (obj->cache_level == cache_level) |
3668 | goto out; | 3682 | goto out; |
3669 | 3683 | ||
3670 | if (i915_gem_obj_is_pinned(obj)) { | 3684 | /* Inspect the list of currently bound VMA and unbind any that would |
3671 | DRM_DEBUG("can not change the cache level of pinned objects\n"); | 3685 | * be invalid given the new cache-level. This is principally to |
3672 | return -EBUSY; | 3686 | * catch the issue of the CS prefetch crossing page boundaries and |
3673 | } | 3687 | * reading an invalid PTE on older architectures. |
3674 | 3688 | */ | |
3675 | list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { | 3689 | list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { |
3690 | if (!drm_mm_node_allocated(&vma->node)) | ||
3691 | continue; | ||
3692 | |||
3693 | if (vma->pin_count) { | ||
3694 | DRM_DEBUG("can not change the cache level of pinned objects\n"); | ||
3695 | return -EBUSY; | ||
3696 | } | ||
3697 | |||
3676 | if (!i915_gem_valid_gtt_space(vma, cache_level)) { | 3698 | if (!i915_gem_valid_gtt_space(vma, cache_level)) { |
3677 | ret = i915_vma_unbind(vma); | 3699 | ret = i915_vma_unbind(vma); |
3678 | if (ret) | 3700 | if (ret) |
3679 | return ret; | 3701 | return ret; |
3680 | } | 3702 | } else |
3703 | bound = true; | ||
3681 | } | 3704 | } |
3682 | 3705 | ||
3683 | if (i915_gem_obj_bound_any(obj)) { | 3706 | /* We can reuse the existing drm_mm nodes but need to change the |
3707 | * cache-level on the PTE. We could simply unbind them all and | ||
3708 | * rebind with the correct cache-level on next use. However since | ||
3709 | * we already have a valid slot, dma mapping, pages etc, we may as | ||
3710 | * rewrite the PTE in the belief that doing so tramples upon less | ||
3711 | * state and so involves less work. | ||
3712 | */ | ||
3713 | if (bound) { | ||
3714 | /* Before we change the PTE, the GPU must not be accessing it. | ||
3715 | * If we wait upon the object, we know that all the bound | ||
3716 | * VMA are no longer active. | ||
3717 | */ | ||
3684 | ret = i915_gem_object_wait_rendering(obj, false); | 3718 | ret = i915_gem_object_wait_rendering(obj, false); |
3685 | if (ret) | 3719 | if (ret) |
3686 | return ret; | 3720 | return ret; |
3687 | 3721 | ||
3688 | i915_gem_object_finish_gtt(obj); | 3722 | if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) { |
3689 | 3723 | /* Access to snoopable pages through the GTT is | |
3690 | /* Before SandyBridge, you could not use tiling or fence | 3724 | * incoherent and on some machines causes a hard |
3691 | * registers with snooped memory, so relinquish any fences | 3725 | * lockup. Relinquish the CPU mmaping to force |
3692 | * currently pointing to our region in the aperture. | 3726 | * userspace to refault in the pages and we can |
3693 | */ | 3727 | * then double check if the GTT mapping is still |
3694 | if (INTEL_INFO(dev)->gen < 6) { | 3728 | * valid for that pointer access. |
3729 | */ | ||
3730 | i915_gem_release_mmap(obj); | ||
3731 | |||
3732 | /* As we no longer need a fence for GTT access, | ||
3733 | * we can relinquish it now (and so prevent having | ||
3734 | * to steal a fence from someone else on the next | ||
3735 | * fence request). Note GPU activity would have | ||
3736 | * dropped the fence as all snoopable access is | ||
3737 | * supposed to be linear. | ||
3738 | */ | ||
3695 | ret = i915_gem_object_put_fence(obj); | 3739 | ret = i915_gem_object_put_fence(obj); |
3696 | if (ret) | 3740 | if (ret) |
3697 | return ret; | 3741 | return ret; |
3742 | } else { | ||
3743 | /* We either have incoherent backing store and | ||
3744 | * so no GTT access or the architecture is fully | ||
3745 | * coherent. In such cases, existing GTT mmaps | ||
3746 | * ignore the cache bit in the PTE and we can | ||
3747 | * rewrite it without confusing the GPU or having | ||
3748 | * to force userspace to fault back in its mmaps. | ||
3749 | */ | ||
3698 | } | 3750 | } |
3699 | 3751 | ||
3700 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 3752 | list_for_each_entry(vma, &obj->vma_list, vma_link) { |
3701 | if (drm_mm_node_allocated(&vma->node)) { | 3753 | if (!drm_mm_node_allocated(&vma->node)) |
3702 | ret = i915_vma_bind(vma, cache_level, | 3754 | continue; |
3703 | PIN_UPDATE); | 3755 | |
3704 | if (ret) | 3756 | ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); |
3705 | return ret; | 3757 | if (ret) |
3706 | } | 3758 | return ret; |
3759 | } | ||
3707 | } | 3760 | } |
3708 | 3761 | ||
3709 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 3762 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
@@ -3711,6 +3764,10 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3711 | obj->cache_level = cache_level; | 3764 | obj->cache_level = cache_level; |
3712 | 3765 | ||
3713 | out: | 3766 | out: |
3767 | /* Flush the dirty CPU caches to the backing storage so that the | ||
3768 | * object is now coherent at its new cache level (with respect | ||
3769 | * to the access domain). | ||
3770 | */ | ||
3714 | if (obj->cache_dirty && | 3771 | if (obj->cache_dirty && |
3715 | obj->base.write_domain != I915_GEM_DOMAIN_CPU && | 3772 | obj->base.write_domain != I915_GEM_DOMAIN_CPU && |
3716 | cpu_write_needs_clflush(obj)) { | 3773 | cpu_write_needs_clflush(obj)) { |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 620d57e2526b..43f35d12b677 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -2502,6 +2502,36 @@ static int ggtt_bind_vma(struct i915_vma *vma, | |||
2502 | enum i915_cache_level cache_level, | 2502 | enum i915_cache_level cache_level, |
2503 | u32 flags) | 2503 | u32 flags) |
2504 | { | 2504 | { |
2505 | struct drm_i915_gem_object *obj = vma->obj; | ||
2506 | u32 pte_flags = 0; | ||
2507 | int ret; | ||
2508 | |||
2509 | ret = i915_get_ggtt_vma_pages(vma); | ||
2510 | if (ret) | ||
2511 | return ret; | ||
2512 | |||
2513 | /* Currently applicable only to VLV */ | ||
2514 | if (obj->gt_ro) | ||
2515 | pte_flags |= PTE_READ_ONLY; | ||
2516 | |||
2517 | vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, | ||
2518 | vma->node.start, | ||
2519 | cache_level, pte_flags); | ||
2520 | |||
2521 | /* | ||
2522 | * Without aliasing PPGTT there's no difference between | ||
2523 | * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally | ||
2524 | * upgrade to both bound if we bind either to avoid double-binding. | ||
2525 | */ | ||
2526 | vma->bound |= GLOBAL_BIND | LOCAL_BIND; | ||
2527 | |||
2528 | return 0; | ||
2529 | } | ||
2530 | |||
2531 | static int aliasing_gtt_bind_vma(struct i915_vma *vma, | ||
2532 | enum i915_cache_level cache_level, | ||
2533 | u32 flags) | ||
2534 | { | ||
2505 | struct drm_device *dev = vma->vm->dev; | 2535 | struct drm_device *dev = vma->vm->dev; |
2506 | struct drm_i915_private *dev_priv = dev->dev_private; | 2536 | struct drm_i915_private *dev_priv = dev->dev_private; |
2507 | struct drm_i915_gem_object *obj = vma->obj; | 2537 | struct drm_i915_gem_object *obj = vma->obj; |
@@ -2519,23 +2549,13 @@ static int ggtt_bind_vma(struct i915_vma *vma, | |||
2519 | pte_flags |= PTE_READ_ONLY; | 2549 | pte_flags |= PTE_READ_ONLY; |
2520 | 2550 | ||
2521 | 2551 | ||
2522 | if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { | 2552 | if (flags & GLOBAL_BIND) { |
2523 | vma->vm->insert_entries(vma->vm, pages, | 2553 | vma->vm->insert_entries(vma->vm, pages, |
2524 | vma->node.start, | 2554 | vma->node.start, |
2525 | cache_level, pte_flags); | 2555 | cache_level, pte_flags); |
2526 | |||
2527 | /* Note the inconsistency here is due to absence of the | ||
2528 | * aliasing ppgtt on gen4 and earlier. Though we always | ||
2529 | * request PIN_USER for execbuffer (translated to LOCAL_BIND), | ||
2530 | * without the appgtt, we cannot honour that request and so | ||
2531 | * must substitute it with a global binding. Since we do this | ||
2532 | * behind the upper layers back, we need to explicitly set | ||
2533 | * the bound flag ourselves. | ||
2534 | */ | ||
2535 | vma->bound |= GLOBAL_BIND; | ||
2536 | } | 2556 | } |
2537 | 2557 | ||
2538 | if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { | 2558 | if (flags & LOCAL_BIND) { |
2539 | struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; | 2559 | struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; |
2540 | appgtt->base.insert_entries(&appgtt->base, pages, | 2560 | appgtt->base.insert_entries(&appgtt->base, pages, |
2541 | vma->node.start, | 2561 | vma->node.start, |
@@ -2699,6 +2719,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, | |||
2699 | true); | 2719 | true); |
2700 | 2720 | ||
2701 | dev_priv->mm.aliasing_ppgtt = ppgtt; | 2721 | dev_priv->mm.aliasing_ppgtt = ppgtt; |
2722 | WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma); | ||
2723 | dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma; | ||
2702 | } | 2724 | } |
2703 | 2725 | ||
2704 | return 0; | 2726 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4fb8a2f56281..d68328fa175b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -139,27 +139,30 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { | |||
139 | /* | 139 | /* |
140 | * We should clear IMR at preinstall/uninstall, and just check at postinstall. | 140 | * We should clear IMR at preinstall/uninstall, and just check at postinstall. |
141 | */ | 141 | */ |
142 | #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \ | 142 | static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) |
143 | u32 val = I915_READ(reg); \ | 143 | { |
144 | if (val) { \ | 144 | u32 val = I915_READ(reg); |
145 | WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \ | 145 | |
146 | (reg), val); \ | 146 | if (val == 0) |
147 | I915_WRITE((reg), 0xffffffff); \ | 147 | return; |
148 | POSTING_READ(reg); \ | 148 | |
149 | I915_WRITE((reg), 0xffffffff); \ | 149 | WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", |
150 | POSTING_READ(reg); \ | 150 | reg, val); |
151 | } \ | 151 | I915_WRITE(reg, 0xffffffff); |
152 | } while (0) | 152 | POSTING_READ(reg); |
153 | I915_WRITE(reg, 0xffffffff); | ||
154 | POSTING_READ(reg); | ||
155 | } | ||
153 | 156 | ||
154 | #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ | 157 | #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ |
155 | GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \ | 158 | gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ |
156 | I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ | 159 | I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ |
157 | I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ | 160 | I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ |
158 | POSTING_READ(GEN8_##type##_IMR(which)); \ | 161 | POSTING_READ(GEN8_##type##_IMR(which)); \ |
159 | } while (0) | 162 | } while (0) |
160 | 163 | ||
161 | #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ | 164 | #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ |
162 | GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \ | 165 | gen5_assert_iir_is_zero(dev_priv, type##IIR); \ |
163 | I915_WRITE(type##IER, (ier_val)); \ | 166 | I915_WRITE(type##IER, (ier_val)); \ |
164 | I915_WRITE(type##IMR, (imr_val)); \ | 167 | I915_WRITE(type##IMR, (imr_val)); \ |
165 | POSTING_READ(type##IMR); \ | 168 | POSTING_READ(type##IMR); \ |
@@ -707,12 +710,11 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) | |||
707 | return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; | 710 | return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; |
708 | } | 711 | } |
709 | 712 | ||
710 | static u32 gm45_get_vblank_counter(struct drm_device *dev, unsigned int pipe) | 713 | static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) |
711 | { | 714 | { |
712 | struct drm_i915_private *dev_priv = dev->dev_private; | 715 | struct drm_i915_private *dev_priv = dev->dev_private; |
713 | int reg = PIPE_FRMCOUNT_GM45(pipe); | ||
714 | 716 | ||
715 | return I915_READ(reg); | 717 | return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); |
716 | } | 718 | } |
717 | 719 | ||
718 | /* raw reads, only for fast reads of display block, no need for forcewake etc. */ | 720 | /* raw reads, only for fast reads of display block, no need for forcewake etc. */ |
@@ -3365,7 +3367,7 @@ static void ibx_irq_postinstall(struct drm_device *dev) | |||
3365 | else | 3367 | else |
3366 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; | 3368 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; |
3367 | 3369 | ||
3368 | GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); | 3370 | gen5_assert_iir_is_zero(dev_priv, SDEIIR); |
3369 | I915_WRITE(SDEIMR, ~mask); | 3371 | I915_WRITE(SDEIMR, ~mask); |
3370 | } | 3372 | } |
3371 | 3373 | ||
@@ -4397,7 +4399,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
4397 | dev->driver->get_vblank_counter = i8xx_get_vblank_counter; | 4399 | dev->driver->get_vblank_counter = i8xx_get_vblank_counter; |
4398 | } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { | 4400 | } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { |
4399 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | 4401 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
4400 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 4402 | dev->driver->get_vblank_counter = g4x_get_vblank_counter; |
4401 | } else { | 4403 | } else { |
4402 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 4404 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
4403 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 4405 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6be853d2233c..bc7b8faba84d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -429,7 +429,7 @@ | |||
429 | #define ASYNC_FLIP (1<<22) | 429 | #define ASYNC_FLIP (1<<22) |
430 | #define DISPLAY_PLANE_A (0<<20) | 430 | #define DISPLAY_PLANE_A (0<<20) |
431 | #define DISPLAY_PLANE_B (1<<20) | 431 | #define DISPLAY_PLANE_B (1<<20) |
432 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) | 432 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2)) |
433 | #define PIPE_CONTROL_FLUSH_L3 (1<<27) | 433 | #define PIPE_CONTROL_FLUSH_L3 (1<<27) |
434 | #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ | 434 | #define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ |
435 | #define PIPE_CONTROL_MMIO_WRITE (1<<23) | 435 | #define PIPE_CONTROL_MMIO_WRITE (1<<23) |
@@ -1255,7 +1255,7 @@ enum skl_disp_power_wells { | |||
1255 | #define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27) | 1255 | #define PORT_PLL_DCO_AMP_OVR_EN_H (1<<27) |
1256 | #define PORT_PLL_DCO_AMP_DEFAULT 15 | 1256 | #define PORT_PLL_DCO_AMP_DEFAULT 15 |
1257 | #define PORT_PLL_DCO_AMP_MASK 0x3c00 | 1257 | #define PORT_PLL_DCO_AMP_MASK 0x3c00 |
1258 | #define PORT_PLL_DCO_AMP(x) (x<<10) | 1258 | #define PORT_PLL_DCO_AMP(x) ((x)<<10) |
1259 | #define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \ | 1259 | #define _PORT_PLL_BASE(port) _PORT3(port, _PORT_PLL_0_A, \ |
1260 | _PORT_PLL_0_B, \ | 1260 | _PORT_PLL_0_B, \ |
1261 | _PORT_PLL_0_C) | 1261 | _PORT_PLL_0_C) |
@@ -1552,8 +1552,8 @@ enum skl_disp_power_wells { | |||
1552 | #define RENDER_HWS_PGA_GEN7 (0x04080) | 1552 | #define RENDER_HWS_PGA_GEN7 (0x04080) |
1553 | #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) | 1553 | #define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id) |
1554 | #define RING_FAULT_GTTSEL_MASK (1<<11) | 1554 | #define RING_FAULT_GTTSEL_MASK (1<<11) |
1555 | #define RING_FAULT_SRCID(x) ((x >> 3) & 0xff) | 1555 | #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) |
1556 | #define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) | 1556 | #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) |
1557 | #define RING_FAULT_VALID (1<<0) | 1557 | #define RING_FAULT_VALID (1<<0) |
1558 | #define DONE_REG 0x40b0 | 1558 | #define DONE_REG 0x40b0 |
1559 | #define GEN8_PRIVATE_PAT_LO 0x40e0 | 1559 | #define GEN8_PRIVATE_PAT_LO 0x40e0 |
@@ -1641,9 +1641,9 @@ enum skl_disp_power_wells { | |||
1641 | #define ERR_INT_PIPE_CRC_DONE_B (1<<5) | 1641 | #define ERR_INT_PIPE_CRC_DONE_B (1<<5) |
1642 | #define ERR_INT_FIFO_UNDERRUN_B (1<<3) | 1642 | #define ERR_INT_FIFO_UNDERRUN_B (1<<3) |
1643 | #define ERR_INT_PIPE_CRC_DONE_A (1<<2) | 1643 | #define ERR_INT_PIPE_CRC_DONE_A (1<<2) |
1644 | #define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3)) | 1644 | #define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + (pipe)*3)) |
1645 | #define ERR_INT_FIFO_UNDERRUN_A (1<<0) | 1645 | #define ERR_INT_FIFO_UNDERRUN_A (1<<0) |
1646 | #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) | 1646 | #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) |
1647 | 1647 | ||
1648 | #define GEN8_FAULT_TLB_DATA0 0x04b10 | 1648 | #define GEN8_FAULT_TLB_DATA0 0x04b10 |
1649 | #define GEN8_FAULT_TLB_DATA1 0x04b14 | 1649 | #define GEN8_FAULT_TLB_DATA1 0x04b14 |
@@ -1704,8 +1704,8 @@ enum skl_disp_power_wells { | |||
1704 | #define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) | 1704 | #define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) |
1705 | #define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) | 1705 | #define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) |
1706 | #define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) | 1706 | #define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) |
1707 | #define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2)) | 1707 | #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) |
1708 | #define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2)) | 1708 | #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) |
1709 | 1709 | ||
1710 | #define GFX_MODE 0x02520 | 1710 | #define GFX_MODE 0x02520 |
1711 | #define GFX_MODE_GEN7 0x0229c | 1711 | #define GFX_MODE_GEN7 0x0229c |
@@ -2144,7 +2144,7 @@ enum skl_disp_power_wells { | |||
2144 | # define GPIO_DATA_VAL_IN (1 << 12) | 2144 | # define GPIO_DATA_VAL_IN (1 << 12) |
2145 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) | 2145 | # define GPIO_DATA_PULLUP_DISABLE (1 << 13) |
2146 | 2146 | ||
2147 | #define GMBUS0 0x5100 /* clock/port select */ | 2147 | #define GMBUS0 (dev_priv->gpio_mmio_base + 0x5100) /* clock/port select */ |
2148 | #define GMBUS_RATE_100KHZ (0<<8) | 2148 | #define GMBUS_RATE_100KHZ (0<<8) |
2149 | #define GMBUS_RATE_50KHZ (1<<8) | 2149 | #define GMBUS_RATE_50KHZ (1<<8) |
2150 | #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ | 2150 | #define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */ |
@@ -2163,7 +2163,7 @@ enum skl_disp_power_wells { | |||
2163 | #define GMBUS_PIN_2_BXT 2 | 2163 | #define GMBUS_PIN_2_BXT 2 |
2164 | #define GMBUS_PIN_3_BXT 3 | 2164 | #define GMBUS_PIN_3_BXT 3 |
2165 | #define GMBUS_NUM_PINS 7 /* including 0 */ | 2165 | #define GMBUS_NUM_PINS 7 /* including 0 */ |
2166 | #define GMBUS1 0x5104 /* command/status */ | 2166 | #define GMBUS1 (dev_priv->gpio_mmio_base + 0x5104) /* command/status */ |
2167 | #define GMBUS_SW_CLR_INT (1<<31) | 2167 | #define GMBUS_SW_CLR_INT (1<<31) |
2168 | #define GMBUS_SW_RDY (1<<30) | 2168 | #define GMBUS_SW_RDY (1<<30) |
2169 | #define GMBUS_ENT (1<<29) /* enable timeout */ | 2169 | #define GMBUS_ENT (1<<29) /* enable timeout */ |
@@ -2177,7 +2177,7 @@ enum skl_disp_power_wells { | |||
2177 | #define GMBUS_SLAVE_ADDR_SHIFT 1 | 2177 | #define GMBUS_SLAVE_ADDR_SHIFT 1 |
2178 | #define GMBUS_SLAVE_READ (1<<0) | 2178 | #define GMBUS_SLAVE_READ (1<<0) |
2179 | #define GMBUS_SLAVE_WRITE (0<<0) | 2179 | #define GMBUS_SLAVE_WRITE (0<<0) |
2180 | #define GMBUS2 0x5108 /* status */ | 2180 | #define GMBUS2 (dev_priv->gpio_mmio_base + 0x5108) /* status */ |
2181 | #define GMBUS_INUSE (1<<15) | 2181 | #define GMBUS_INUSE (1<<15) |
2182 | #define GMBUS_HW_WAIT_PHASE (1<<14) | 2182 | #define GMBUS_HW_WAIT_PHASE (1<<14) |
2183 | #define GMBUS_STALL_TIMEOUT (1<<13) | 2183 | #define GMBUS_STALL_TIMEOUT (1<<13) |
@@ -2185,14 +2185,14 @@ enum skl_disp_power_wells { | |||
2185 | #define GMBUS_HW_RDY (1<<11) | 2185 | #define GMBUS_HW_RDY (1<<11) |
2186 | #define GMBUS_SATOER (1<<10) | 2186 | #define GMBUS_SATOER (1<<10) |
2187 | #define GMBUS_ACTIVE (1<<9) | 2187 | #define GMBUS_ACTIVE (1<<9) |
2188 | #define GMBUS3 0x510c /* data buffer bytes 3-0 */ | 2188 | #define GMBUS3 (dev_priv->gpio_mmio_base + 0x510c) /* data buffer bytes 3-0 */ |
2189 | #define GMBUS4 0x5110 /* interrupt mask (Pineview+) */ | 2189 | #define GMBUS4 (dev_priv->gpio_mmio_base + 0x5110) /* interrupt mask (Pineview+) */ |
2190 | #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) | 2190 | #define GMBUS_SLAVE_TIMEOUT_EN (1<<4) |
2191 | #define GMBUS_NAK_EN (1<<3) | 2191 | #define GMBUS_NAK_EN (1<<3) |
2192 | #define GMBUS_IDLE_EN (1<<2) | 2192 | #define GMBUS_IDLE_EN (1<<2) |
2193 | #define GMBUS_HW_WAIT_EN (1<<1) | 2193 | #define GMBUS_HW_WAIT_EN (1<<1) |
2194 | #define GMBUS_HW_RDY_EN (1<<0) | 2194 | #define GMBUS_HW_RDY_EN (1<<0) |
2195 | #define GMBUS5 0x5120 /* byte index */ | 2195 | #define GMBUS5 (dev_priv->gpio_mmio_base + 0x5120) /* byte index */ |
2196 | #define GMBUS_2BYTE_INDEX_EN (1<<31) | 2196 | #define GMBUS_2BYTE_INDEX_EN (1<<31) |
2197 | 2197 | ||
2198 | /* | 2198 | /* |
@@ -2866,21 +2866,21 @@ enum skl_disp_power_wells { | |||
2866 | * doesn't need saving on GT1 | 2866 | * doesn't need saving on GT1 |
2867 | */ | 2867 | */ |
2868 | #define CXT_SIZE 0x21a0 | 2868 | #define CXT_SIZE 0x21a0 |
2869 | #define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) | 2869 | #define GEN6_CXT_POWER_SIZE(cxt_reg) (((cxt_reg) >> 24) & 0x3f) |
2870 | #define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) | 2870 | #define GEN6_CXT_RING_SIZE(cxt_reg) (((cxt_reg) >> 18) & 0x3f) |
2871 | #define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) | 2871 | #define GEN6_CXT_RENDER_SIZE(cxt_reg) (((cxt_reg) >> 12) & 0x3f) |
2872 | #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) | 2872 | #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) (((cxt_reg) >> 6) & 0x3f) |
2873 | #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) | 2873 | #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) (((cxt_reg) >> 0) & 0x3f) |
2874 | #define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ | 2874 | #define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ |
2875 | GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ | 2875 | GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ |
2876 | GEN6_CXT_PIPELINE_SIZE(cxt_reg)) | 2876 | GEN6_CXT_PIPELINE_SIZE(cxt_reg)) |
2877 | #define GEN7_CXT_SIZE 0x21a8 | 2877 | #define GEN7_CXT_SIZE 0x21a8 |
2878 | #define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f) | 2878 | #define GEN7_CXT_POWER_SIZE(ctx_reg) (((ctx_reg) >> 25) & 0x7f) |
2879 | #define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7) | 2879 | #define GEN7_CXT_RING_SIZE(ctx_reg) (((ctx_reg) >> 22) & 0x7) |
2880 | #define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f) | 2880 | #define GEN7_CXT_RENDER_SIZE(ctx_reg) (((ctx_reg) >> 16) & 0x3f) |
2881 | #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) | 2881 | #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) (((ctx_reg) >> 9) & 0x7f) |
2882 | #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) | 2882 | #define GEN7_CXT_GT1_SIZE(ctx_reg) (((ctx_reg) >> 6) & 0x7) |
2883 | #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) | 2883 | #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) (((ctx_reg) >> 0) & 0x3f) |
2884 | #define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ | 2884 | #define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ |
2885 | GEN7_CXT_VFSTATE_SIZE(ctx_reg)) | 2885 | GEN7_CXT_VFSTATE_SIZE(ctx_reg)) |
2886 | /* Haswell does have the CXT_SIZE register however it does not appear to be | 2886 | /* Haswell does have the CXT_SIZE register however it does not appear to be |
@@ -4284,7 +4284,7 @@ enum skl_disp_power_wells { | |||
4284 | #define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) | 4284 | #define DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL (1 << 14) |
4285 | #define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) | 4285 | #define DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL (1 << 13) |
4286 | #define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) | 4286 | #define DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL (1 << 12) |
4287 | #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (1f << 5) | 4287 | #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL_MASK (0x1f << 5) |
4288 | #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) | 4288 | #define DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(c) (((c) - 1) << 5) |
4289 | #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) | 4289 | #define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1) |
4290 | 4290 | ||
@@ -4846,10 +4846,10 @@ enum skl_disp_power_wells { | |||
4846 | #define PIPE_PIXEL_MASK 0x00ffffff | 4846 | #define PIPE_PIXEL_MASK 0x00ffffff |
4847 | #define PIPE_PIXEL_SHIFT 0 | 4847 | #define PIPE_PIXEL_SHIFT 0 |
4848 | /* GM45+ just has to be different */ | 4848 | /* GM45+ just has to be different */ |
4849 | #define _PIPEA_FRMCOUNT_GM45 0x70040 | 4849 | #define _PIPEA_FRMCOUNT_G4X 0x70040 |
4850 | #define _PIPEA_FLIPCOUNT_GM45 0x70044 | 4850 | #define _PIPEA_FLIPCOUNT_G4X 0x70044 |
4851 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_GM45) | 4851 | #define PIPE_FRMCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FRMCOUNT_G4X) |
4852 | #define PIPE_FLIPCOUNT_GM45(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_GM45) | 4852 | #define PIPE_FLIPCOUNT_G4X(pipe) _PIPE2(pipe, _PIPEA_FLIPCOUNT_G4X) |
4853 | 4853 | ||
4854 | /* Cursor A & B regs */ | 4854 | /* Cursor A & B regs */ |
4855 | #define _CURACNTR 0x70080 | 4855 | #define _CURACNTR 0x70080 |
@@ -4991,20 +4991,20 @@ enum skl_disp_power_wells { | |||
4991 | #define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) | 4991 | #define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) |
4992 | #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) | 4992 | #define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK) |
4993 | 4993 | ||
4994 | /* VBIOS flags */ | 4994 | /* |
4995 | #define SWF00 (dev_priv->info.display_mmio_offset + 0x71410) | 4995 | * VBIOS flags |
4996 | #define SWF01 (dev_priv->info.display_mmio_offset + 0x71414) | 4996 | * gen2: |
4997 | #define SWF02 (dev_priv->info.display_mmio_offset + 0x71418) | 4997 | * [00:06] alm,mgm |
4998 | #define SWF03 (dev_priv->info.display_mmio_offset + 0x7141c) | 4998 | * [10:16] all |
4999 | #define SWF04 (dev_priv->info.display_mmio_offset + 0x71420) | 4999 | * [30:32] alm,mgm |
5000 | #define SWF05 (dev_priv->info.display_mmio_offset + 0x71424) | 5000 | * gen3+: |
5001 | #define SWF06 (dev_priv->info.display_mmio_offset + 0x71428) | 5001 | * [00:0f] all |
5002 | #define SWF10 (dev_priv->info.display_mmio_offset + 0x70410) | 5002 | * [10:1f] all |
5003 | #define SWF11 (dev_priv->info.display_mmio_offset + 0x70414) | 5003 | * [30:32] all |
5004 | #define SWF14 (dev_priv->info.display_mmio_offset + 0x71420) | 5004 | */ |
5005 | #define SWF30 (dev_priv->info.display_mmio_offset + 0x72414) | 5005 | #define SWF0(i) (dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4) |
5006 | #define SWF31 (dev_priv->info.display_mmio_offset + 0x72418) | 5006 | #define SWF1(i) (dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4) |
5007 | #define SWF32 (dev_priv->info.display_mmio_offset + 0x7241c) | 5007 | #define SWF3(i) (dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4) |
5008 | 5008 | ||
5009 | /* Pipe B */ | 5009 | /* Pipe B */ |
5010 | #define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) | 5010 | #define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000) |
@@ -5012,8 +5012,8 @@ enum skl_disp_power_wells { | |||
5012 | #define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) | 5012 | #define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024) |
5013 | #define _PIPEBFRAMEHIGH 0x71040 | 5013 | #define _PIPEBFRAMEHIGH 0x71040 |
5014 | #define _PIPEBFRAMEPIXEL 0x71044 | 5014 | #define _PIPEBFRAMEPIXEL 0x71044 |
5015 | #define _PIPEB_FRMCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71040) | 5015 | #define _PIPEB_FRMCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71040) |
5016 | #define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info.display_mmio_offset + 0x71044) | 5016 | #define _PIPEB_FLIPCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71044) |
5017 | 5017 | ||
5018 | 5018 | ||
5019 | /* Display B control */ | 5019 | /* Display B control */ |
@@ -5223,18 +5223,18 @@ enum skl_disp_power_wells { | |||
5223 | #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) | 5223 | #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) |
5224 | #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) | 5224 | #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) |
5225 | 5225 | ||
5226 | #define SPCNTR(pipe, plane) _PIPE(pipe * 2 + plane, _SPACNTR, _SPBCNTR) | 5226 | #define SPCNTR(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR) |
5227 | #define SPLINOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPALINOFF, _SPBLINOFF) | 5227 | #define SPLINOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF) |
5228 | #define SPSTRIDE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASTRIDE, _SPBSTRIDE) | 5228 | #define SPSTRIDE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE) |
5229 | #define SPPOS(pipe, plane) _PIPE(pipe * 2 + plane, _SPAPOS, _SPBPOS) | 5229 | #define SPPOS(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS) |
5230 | #define SPSIZE(pipe, plane) _PIPE(pipe * 2 + plane, _SPASIZE, _SPBSIZE) | 5230 | #define SPSIZE(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE) |
5231 | #define SPKEYMINVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMINVAL, _SPBKEYMINVAL) | 5231 | #define SPKEYMINVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL) |
5232 | #define SPKEYMSK(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMSK, _SPBKEYMSK) | 5232 | #define SPKEYMSK(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK) |
5233 | #define SPSURF(pipe, plane) _PIPE(pipe * 2 + plane, _SPASURF, _SPBSURF) | 5233 | #define SPSURF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF) |
5234 | #define SPKEYMAXVAL(pipe, plane) _PIPE(pipe * 2 + plane, _SPAKEYMAXVAL, _SPBKEYMAXVAL) | 5234 | #define SPKEYMAXVAL(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL) |
5235 | #define SPTILEOFF(pipe, plane) _PIPE(pipe * 2 + plane, _SPATILEOFF, _SPBTILEOFF) | 5235 | #define SPTILEOFF(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF) |
5236 | #define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) | 5236 | #define SPCONSTALPHA(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA) |
5237 | #define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) | 5237 | #define SPGAMC(pipe, plane) _PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC) |
5238 | 5238 | ||
5239 | /* | 5239 | /* |
5240 | * CHV pipe B sprite CSC | 5240 | * CHV pipe B sprite CSC |
@@ -5580,7 +5580,7 @@ enum skl_disp_power_wells { | |||
5580 | #define PS_SCALER_MODE_DYN (0 << 28) | 5580 | #define PS_SCALER_MODE_DYN (0 << 28) |
5581 | #define PS_SCALER_MODE_HQ (1 << 28) | 5581 | #define PS_SCALER_MODE_HQ (1 << 28) |
5582 | #define PS_PLANE_SEL_MASK (7 << 25) | 5582 | #define PS_PLANE_SEL_MASK (7 << 25) |
5583 | #define PS_PLANE_SEL(plane) ((plane + 1) << 25) | 5583 | #define PS_PLANE_SEL(plane) (((plane) + 1) << 25) |
5584 | #define PS_FILTER_MASK (3 << 23) | 5584 | #define PS_FILTER_MASK (3 << 23) |
5585 | #define PS_FILTER_MEDIUM (0 << 23) | 5585 | #define PS_FILTER_MEDIUM (0 << 23) |
5586 | #define PS_FILTER_EDGE_ENHANCE (2 << 23) | 5586 | #define PS_FILTER_EDGE_ENHANCE (2 << 23) |
@@ -5745,7 +5745,7 @@ enum skl_disp_power_wells { | |||
5745 | #define DE_PLANEA_FLIP_DONE_IVB (1<<3) | 5745 | #define DE_PLANEA_FLIP_DONE_IVB (1<<3) |
5746 | #define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane))) | 5746 | #define DE_PLANE_FLIP_DONE_IVB(plane) (1<< (3 + 5*(plane))) |
5747 | #define DE_PIPEA_VBLANK_IVB (1<<0) | 5747 | #define DE_PIPEA_VBLANK_IVB (1<<0) |
5748 | #define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) | 5748 | #define DE_PIPE_VBLANK_IVB(pipe) (1 << ((pipe) * 5)) |
5749 | 5749 | ||
5750 | #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ | 5750 | #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ |
5751 | #define MASTER_INTERRUPT_ENABLE (1<<31) | 5751 | #define MASTER_INTERRUPT_ENABLE (1<<31) |
@@ -5769,7 +5769,7 @@ enum skl_disp_power_wells { | |||
5769 | #define GEN8_DE_PIPE_C_IRQ (1<<18) | 5769 | #define GEN8_DE_PIPE_C_IRQ (1<<18) |
5770 | #define GEN8_DE_PIPE_B_IRQ (1<<17) | 5770 | #define GEN8_DE_PIPE_B_IRQ (1<<17) |
5771 | #define GEN8_DE_PIPE_A_IRQ (1<<16) | 5771 | #define GEN8_DE_PIPE_A_IRQ (1<<16) |
5772 | #define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+pipe)) | 5772 | #define GEN8_DE_PIPE_IRQ(pipe) (1<<(16+(pipe))) |
5773 | #define GEN8_GT_VECS_IRQ (1<<6) | 5773 | #define GEN8_GT_VECS_IRQ (1<<6) |
5774 | #define GEN8_GT_PM_IRQ (1<<4) | 5774 | #define GEN8_GT_PM_IRQ (1<<4) |
5775 | #define GEN8_GT_VCS2_IRQ (1<<3) | 5775 | #define GEN8_GT_VCS2_IRQ (1<<3) |
@@ -5813,7 +5813,7 @@ enum skl_disp_power_wells { | |||
5813 | #define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5) | 5813 | #define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5) |
5814 | #define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4) | 5814 | #define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4) |
5815 | #define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3) | 5815 | #define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3) |
5816 | #define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p)) | 5816 | #define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + (p))) |
5817 | #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ | 5817 | #define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ |
5818 | (GEN8_PIPE_CURSOR_FAULT | \ | 5818 | (GEN8_PIPE_CURSOR_FAULT | \ |
5819 | GEN8_PIPE_SPRITE_FAULT | \ | 5819 | GEN8_PIPE_SPRITE_FAULT | \ |
@@ -6072,7 +6072,7 @@ enum skl_disp_power_wells { | |||
6072 | #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) | 6072 | #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) |
6073 | #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) | 6073 | #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) |
6074 | #define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) | 6074 | #define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) |
6075 | #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) | 6075 | #define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<((pipe)*3)) |
6076 | 6076 | ||
6077 | /* digital port hotplug */ | 6077 | /* digital port hotplug */ |
6078 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ | 6078 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ |
@@ -6183,9 +6183,9 @@ enum skl_disp_power_wells { | |||
6183 | #define PCH_SSC4_AUX_PARMS 0xc6214 | 6183 | #define PCH_SSC4_AUX_PARMS 0xc6214 |
6184 | 6184 | ||
6185 | #define PCH_DPLL_SEL 0xc7000 | 6185 | #define PCH_DPLL_SEL 0xc7000 |
6186 | #define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4)) | 6186 | #define TRANS_DPLLB_SEL(pipe) (1 << ((pipe) * 4)) |
6187 | #define TRANS_DPLLA_SEL(pipe) 0 | 6187 | #define TRANS_DPLLA_SEL(pipe) 0 |
6188 | #define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3)) | 6188 | #define TRANS_DPLL_ENABLE(pipe) (1 << ((pipe) * 4 + 3)) |
6189 | 6189 | ||
6190 | /* transcoder */ | 6190 | /* transcoder */ |
6191 | 6191 | ||
@@ -6286,16 +6286,16 @@ enum skl_disp_power_wells { | |||
6286 | 6286 | ||
6287 | #define HSW_TVIDEO_DIP_CTL(trans) \ | 6287 | #define HSW_TVIDEO_DIP_CTL(trans) \ |
6288 | _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A) | 6288 | _TRANSCODER2(trans, HSW_VIDEO_DIP_CTL_A) |
6289 | #define HSW_TVIDEO_DIP_AVI_DATA(trans) \ | 6289 | #define HSW_TVIDEO_DIP_AVI_DATA(trans, i) \ |
6290 | _TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) | 6290 | (_TRANSCODER2(trans, HSW_VIDEO_DIP_AVI_DATA_A) + (i) * 4) |
6291 | #define HSW_TVIDEO_DIP_VS_DATA(trans) \ | 6291 | #define HSW_TVIDEO_DIP_VS_DATA(trans, i) \ |
6292 | _TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) | 6292 | (_TRANSCODER2(trans, HSW_VIDEO_DIP_VS_DATA_A) + (i) * 4) |
6293 | #define HSW_TVIDEO_DIP_SPD_DATA(trans) \ | 6293 | #define HSW_TVIDEO_DIP_SPD_DATA(trans, i) \ |
6294 | _TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) | 6294 | (_TRANSCODER2(trans, HSW_VIDEO_DIP_SPD_DATA_A) + (i) * 4) |
6295 | #define HSW_TVIDEO_DIP_GCP(trans) \ | 6295 | #define HSW_TVIDEO_DIP_GCP(trans) \ |
6296 | _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A) | 6296 | _TRANSCODER2(trans, HSW_VIDEO_DIP_GCP_A) |
6297 | #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ | 6297 | #define HSW_TVIDEO_DIP_VSC_DATA(trans, i) \ |
6298 | _TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) | 6298 | (_TRANSCODER2(trans, HSW_VIDEO_DIP_VSC_DATA_A) + (i) * 4) |
6299 | 6299 | ||
6300 | #define HSW_STEREO_3D_CTL_A 0x70020 | 6300 | #define HSW_STEREO_3D_CTL_A 0x70020 |
6301 | #define S3D_ENABLE (1<<31) | 6301 | #define S3D_ENABLE (1<<31) |
@@ -6587,10 +6587,10 @@ enum skl_disp_power_wells { | |||
6587 | #define _BXT_PP_ON_DELAYS2 0xc7308 | 6587 | #define _BXT_PP_ON_DELAYS2 0xc7308 |
6588 | #define _BXT_PP_OFF_DELAYS2 0xc730c | 6588 | #define _BXT_PP_OFF_DELAYS2 0xc730c |
6589 | 6589 | ||
6590 | #define BXT_PP_STATUS(n) ((!n) ? PCH_PP_STATUS : _BXT_PP_STATUS2) | 6590 | #define BXT_PP_STATUS(n) _PIPE(n, PCH_PP_STATUS, _BXT_PP_STATUS2) |
6591 | #define BXT_PP_CONTROL(n) ((!n) ? PCH_PP_CONTROL : _BXT_PP_CONTROL2) | 6591 | #define BXT_PP_CONTROL(n) _PIPE(n, PCH_PP_CONTROL, _BXT_PP_CONTROL2) |
6592 | #define BXT_PP_ON_DELAYS(n) ((!n) ? PCH_PP_ON_DELAYS : _BXT_PP_ON_DELAYS2) | 6592 | #define BXT_PP_ON_DELAYS(n) _PIPE(n, PCH_PP_ON_DELAYS, _BXT_PP_ON_DELAYS2) |
6593 | #define BXT_PP_OFF_DELAYS(n) ((!n) ? PCH_PP_OFF_DELAYS : _BXT_PP_OFF_DELAYS2) | 6593 | #define BXT_PP_OFF_DELAYS(n) _PIPE(n, PCH_PP_OFF_DELAYS, _BXT_PP_OFF_DELAYS2) |
6594 | 6594 | ||
6595 | #define PCH_DP_B 0xe4100 | 6595 | #define PCH_DP_B 0xe4100 |
6596 | #define PCH_DPB_AUX_CH_CTL 0xe4110 | 6596 | #define PCH_DPB_AUX_CH_CTL 0xe4110 |
@@ -7348,7 +7348,7 @@ enum skl_disp_power_wells { | |||
7348 | #define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) | 7348 | #define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B) |
7349 | /* For each transcoder, we need to select the corresponding port clock */ | 7349 | /* For each transcoder, we need to select the corresponding port clock */ |
7350 | #define TRANS_CLK_SEL_DISABLED (0x0<<29) | 7350 | #define TRANS_CLK_SEL_DISABLED (0x0<<29) |
7351 | #define TRANS_CLK_SEL_PORT(x) ((x+1)<<29) | 7351 | #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29) |
7352 | 7352 | ||
7353 | #define TRANSA_MSA_MISC 0x60410 | 7353 | #define TRANSA_MSA_MISC 0x60410 |
7354 | #define TRANSB_MSA_MISC 0x61410 | 7354 | #define TRANSB_MSA_MISC 0x61410 |
@@ -7421,10 +7421,10 @@ enum skl_disp_power_wells { | |||
7421 | 7421 | ||
7422 | /* DPLL control2 */ | 7422 | /* DPLL control2 */ |
7423 | #define DPLL_CTRL2 0x6C05C | 7423 | #define DPLL_CTRL2 0x6C05C |
7424 | #define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15)) | 7424 | #define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<((port)+15)) |
7425 | #define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1)) | 7425 | #define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1)) |
7426 | #define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1) | 7426 | #define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1) |
7427 | #define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1)) | 7427 | #define DPLL_CTRL2_DDI_CLK_SEL(clk, port) ((clk)<<((port)*3+1)) |
7428 | #define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3)) | 7428 | #define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3)) |
7429 | 7429 | ||
7430 | /* DPLL Status */ | 7430 | /* DPLL Status */ |
@@ -7437,23 +7437,23 @@ enum skl_disp_power_wells { | |||
7437 | #define DPLL3_CFGCR1 0x6C050 | 7437 | #define DPLL3_CFGCR1 0x6C050 |
7438 | #define DPLL_CFGCR1_FREQ_ENABLE (1<<31) | 7438 | #define DPLL_CFGCR1_FREQ_ENABLE (1<<31) |
7439 | #define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9) | 7439 | #define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9) |
7440 | #define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9) | 7440 | #define DPLL_CFGCR1_DCO_FRACTION(x) ((x)<<9) |
7441 | #define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff) | 7441 | #define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff) |
7442 | 7442 | ||
7443 | #define DPLL1_CFGCR2 0x6C044 | 7443 | #define DPLL1_CFGCR2 0x6C044 |
7444 | #define DPLL2_CFGCR2 0x6C04C | 7444 | #define DPLL2_CFGCR2 0x6C04C |
7445 | #define DPLL3_CFGCR2 0x6C054 | 7445 | #define DPLL3_CFGCR2 0x6C054 |
7446 | #define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8) | 7446 | #define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8) |
7447 | #define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8) | 7447 | #define DPLL_CFGCR2_QDIV_RATIO(x) ((x)<<8) |
7448 | #define DPLL_CFGCR2_QDIV_MODE(x) (x<<7) | 7448 | #define DPLL_CFGCR2_QDIV_MODE(x) ((x)<<7) |
7449 | #define DPLL_CFGCR2_KDIV_MASK (3<<5) | 7449 | #define DPLL_CFGCR2_KDIV_MASK (3<<5) |
7450 | #define DPLL_CFGCR2_KDIV(x) (x<<5) | 7450 | #define DPLL_CFGCR2_KDIV(x) ((x)<<5) |
7451 | #define DPLL_CFGCR2_KDIV_5 (0<<5) | 7451 | #define DPLL_CFGCR2_KDIV_5 (0<<5) |
7452 | #define DPLL_CFGCR2_KDIV_2 (1<<5) | 7452 | #define DPLL_CFGCR2_KDIV_2 (1<<5) |
7453 | #define DPLL_CFGCR2_KDIV_3 (2<<5) | 7453 | #define DPLL_CFGCR2_KDIV_3 (2<<5) |
7454 | #define DPLL_CFGCR2_KDIV_1 (3<<5) | 7454 | #define DPLL_CFGCR2_KDIV_1 (3<<5) |
7455 | #define DPLL_CFGCR2_PDIV_MASK (7<<2) | 7455 | #define DPLL_CFGCR2_PDIV_MASK (7<<2) |
7456 | #define DPLL_CFGCR2_PDIV(x) (x<<2) | 7456 | #define DPLL_CFGCR2_PDIV(x) ((x)<<2) |
7457 | #define DPLL_CFGCR2_PDIV_1 (0<<2) | 7457 | #define DPLL_CFGCR2_PDIV_1 (0<<2) |
7458 | #define DPLL_CFGCR2_PDIV_2 (1<<2) | 7458 | #define DPLL_CFGCR2_PDIV_2 (1<<2) |
7459 | #define DPLL_CFGCR2_PDIV_3 (2<<2) | 7459 | #define DPLL_CFGCR2_PDIV_3 (2<<2) |
@@ -7979,7 +7979,7 @@ enum skl_disp_power_wells { | |||
7979 | #define VIRTUAL_CHANNEL_SHIFT 6 | 7979 | #define VIRTUAL_CHANNEL_SHIFT 6 |
7980 | #define VIRTUAL_CHANNEL_MASK (3 << 6) | 7980 | #define VIRTUAL_CHANNEL_MASK (3 << 6) |
7981 | #define DATA_TYPE_SHIFT 0 | 7981 | #define DATA_TYPE_SHIFT 0 |
7982 | #define DATA_TYPE_MASK (3f << 0) | 7982 | #define DATA_TYPE_MASK (0x3f << 0) |
7983 | /* data type values, see include/video/mipi_display.h */ | 7983 | /* data type values, see include/video/mipi_display.h */ |
7984 | 7984 | ||
7985 | #define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) | 7985 | #define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 1ccac618468e..2d9182189422 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -122,12 +122,24 @@ int i915_save_state(struct drm_device *dev) | |||
122 | dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); | 122 | dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); |
123 | 123 | ||
124 | /* Scratch space */ | 124 | /* Scratch space */ |
125 | for (i = 0; i < 16; i++) { | 125 | if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) { |
126 | dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2)); | 126 | for (i = 0; i < 7; i++) { |
127 | dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2)); | 127 | dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); |
128 | dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); | ||
129 | } | ||
130 | for (i = 0; i < 3; i++) | ||
131 | dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); | ||
132 | } else if (IS_GEN2(dev_priv)) { | ||
133 | for (i = 0; i < 7; i++) | ||
134 | dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); | ||
135 | } else if (HAS_GMCH_DISPLAY(dev_priv)) { | ||
136 | for (i = 0; i < 16; i++) { | ||
137 | dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); | ||
138 | dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); | ||
139 | } | ||
140 | for (i = 0; i < 3; i++) | ||
141 | dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); | ||
128 | } | 142 | } |
129 | for (i = 0; i < 3; i++) | ||
130 | dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2)); | ||
131 | 143 | ||
132 | mutex_unlock(&dev->struct_mutex); | 144 | mutex_unlock(&dev->struct_mutex); |
133 | 145 | ||
@@ -156,12 +168,25 @@ int i915_restore_state(struct drm_device *dev) | |||
156 | /* Memory arbitration state */ | 168 | /* Memory arbitration state */ |
157 | I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); | 169 | I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); |
158 | 170 | ||
159 | for (i = 0; i < 16; i++) { | 171 | /* Scratch space */ |
160 | I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]); | 172 | if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) { |
161 | I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]); | 173 | for (i = 0; i < 7; i++) { |
174 | I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); | ||
175 | I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); | ||
176 | } | ||
177 | for (i = 0; i < 3; i++) | ||
178 | I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); | ||
179 | } else if (IS_GEN2(dev_priv)) { | ||
180 | for (i = 0; i < 7; i++) | ||
181 | I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); | ||
182 | } else if (HAS_GMCH_DISPLAY(dev_priv)) { | ||
183 | for (i = 0; i < 16; i++) { | ||
184 | I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); | ||
185 | I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); | ||
186 | } | ||
187 | for (i = 0; i < 3; i++) | ||
188 | I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); | ||
162 | } | 189 | } |
163 | for (i = 0; i < 3; i++) | ||
164 | I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]); | ||
165 | 190 | ||
166 | mutex_unlock(&dev->struct_mutex); | 191 | mutex_unlock(&dev->struct_mutex); |
167 | 192 | ||
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 8b13b9d0373a..eb638a1e69d2 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c | |||
@@ -5,7 +5,6 @@ | |||
5 | */ | 5 | */ |
6 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
7 | #include <linux/acpi.h> | 7 | #include <linux/acpi.h> |
8 | #include <linux/vga_switcheroo.h> | ||
9 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
10 | #include "i915_drv.h" | 9 | #include "i915_drv.h" |
11 | 10 | ||
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 05b12032d262..f1975f267710 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
@@ -94,7 +94,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) | |||
94 | __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); | 94 | __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); |
95 | 95 | ||
96 | crtc_state->update_pipe = false; | 96 | crtc_state->update_pipe = false; |
97 | crtc_state->disable_lp_wm = false; | ||
98 | 97 | ||
99 | return &crtc_state->base; | 98 | return &crtc_state->base; |
100 | } | 99 | } |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 56c2f54801c4..4dccd9b003a1 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -61,21 +61,21 @@ static const struct { | |||
61 | int clock; | 61 | int clock; |
62 | u32 config; | 62 | u32 config; |
63 | } hdmi_audio_clock[] = { | 63 | } hdmi_audio_clock[] = { |
64 | { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 }, | 64 | { 25175, AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 }, |
65 | { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */ | 65 | { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */ |
66 | { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 }, | 66 | { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 }, |
67 | { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 }, | 67 | { 27027, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 }, |
68 | { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 }, | 68 | { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 }, |
69 | { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 }, | 69 | { 54054, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 }, |
70 | { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 }, | 70 | { 74176, AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 }, |
71 | { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, | 71 | { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, |
72 | { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, | 72 | { 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, |
73 | { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, | 73 | { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | /* HDMI N/CTS table */ | 76 | /* HDMI N/CTS table */ |
77 | #define TMDS_297M 297000 | 77 | #define TMDS_297M 297000 |
78 | #define TMDS_296M DIV_ROUND_UP(297000 * 1000, 1001) | 78 | #define TMDS_296M 296703 |
79 | static const struct { | 79 | static const struct { |
80 | int sample_rate; | 80 | int sample_rate; |
81 | int clock; | 81 | int clock; |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 68421c273c8c..ce82f9c7df24 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1231,20 +1231,13 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = { | |||
1231 | { } | 1231 | { } |
1232 | }; | 1232 | }; |
1233 | 1233 | ||
1234 | static const struct bdb_header *validate_vbt(const void __iomem *_base, | 1234 | static const struct bdb_header *validate_vbt(const void *base, |
1235 | size_t size, | 1235 | size_t size, |
1236 | const void __iomem *_vbt, | 1236 | const void *_vbt, |
1237 | const char *source) | 1237 | const char *source) |
1238 | { | 1238 | { |
1239 | /* | 1239 | size_t offset = _vbt - base; |
1240 | * This is the one place where we explicitly discard the address space | 1240 | const struct vbt_header *vbt = _vbt; |
1241 | * (__iomem) of the BIOS/VBT. (And this will cause a sparse complaint.) | ||
1242 | * From now on everything is based on 'base', and treated as regular | ||
1243 | * memory. | ||
1244 | */ | ||
1245 | const void *base = (const void *) _base; | ||
1246 | size_t offset = _vbt - _base; | ||
1247 | const struct vbt_header *vbt = base + offset; | ||
1248 | const struct bdb_header *bdb; | 1241 | const struct bdb_header *bdb; |
1249 | 1242 | ||
1250 | if (offset + sizeof(struct vbt_header) > size) { | 1243 | if (offset + sizeof(struct vbt_header) > size) { |
@@ -1282,7 +1275,15 @@ static const struct bdb_header *find_vbt(void __iomem *bios, size_t size) | |||
1282 | /* Scour memory looking for the VBT signature. */ | 1275 | /* Scour memory looking for the VBT signature. */ |
1283 | for (i = 0; i + 4 < size; i++) { | 1276 | for (i = 0; i + 4 < size; i++) { |
1284 | if (ioread32(bios + i) == *((const u32 *) "$VBT")) { | 1277 | if (ioread32(bios + i) == *((const u32 *) "$VBT")) { |
1285 | bdb = validate_vbt(bios, size, bios + i, "PCI ROM"); | 1278 | /* |
1279 | * This is the one place where we explicitly discard the | ||
1280 | * address space (__iomem) of the BIOS/VBT. From now on | ||
1281 | * everything is based on 'base', and treated as regular | ||
1282 | * memory. | ||
1283 | */ | ||
1284 | void *_bios = (void __force *) bios; | ||
1285 | |||
1286 | bdb = validate_vbt(_bios, size, _bios + i, "PCI ROM"); | ||
1286 | break; | 1287 | break; |
1287 | } | 1288 | } |
1288 | } | 1289 | } |
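This hunk moves the point where the __iomem address space of the BIOS mapping is discarded out of validate_vbt() and into the caller, via a __force cast. A minimal sketch of how those sparse annotations behave follows; when the code is not run through sparse the attributes expand to nothing, so the cast is purely a static-analysis marker. The helper and buffer here are invented for illustration.

/* Sketch of the sparse address-space annotations involved here.  Under
 * sparse, __iomem adds an address-space attribute and __force suppresses
 * the warning when it is dropped; without sparse both expand to nothing.
 * Illustrative user-space code, not the driver. */
#include <stddef.h>
#include <stdio.h>

#define __iomem        /* address-space marker under sparse, empty here */
#define __force        /* allows the address space to be discarded */

static size_t validate(const void *base, const void *vbt)
{
	/* plain pointer arithmetic: both arguments are ordinary memory now */
	return (size_t)((const char *)vbt - (const char *)base);
}

int main(void)
{
	char rom[64] = { 0 };
	const void __iomem *bios = rom;            /* pretend this came from ioremap */
	/* the one explicit drop of the __iomem qualifier, as in find_vbt() */
	const void *_bios = (const void __force *)bios;

	printf("offset=%zu\n", validate(_bios, (const char *)_bios + 16));
	return 0;
}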
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index cddb0c692334..82128b95785c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1157,12 +1157,10 @@ static const char *state_string(bool enabled) | |||
1157 | void assert_pll(struct drm_i915_private *dev_priv, | 1157 | void assert_pll(struct drm_i915_private *dev_priv, |
1158 | enum pipe pipe, bool state) | 1158 | enum pipe pipe, bool state) |
1159 | { | 1159 | { |
1160 | int reg; | ||
1161 | u32 val; | 1160 | u32 val; |
1162 | bool cur_state; | 1161 | bool cur_state; |
1163 | 1162 | ||
1164 | reg = DPLL(pipe); | 1163 | val = I915_READ(DPLL(pipe)); |
1165 | val = I915_READ(reg); | ||
1166 | cur_state = !!(val & DPLL_VCO_ENABLE); | 1164 | cur_state = !!(val & DPLL_VCO_ENABLE); |
1167 | I915_STATE_WARN(cur_state != state, | 1165 | I915_STATE_WARN(cur_state != state, |
1168 | "PLL state assertion failure (expected %s, current %s)\n", | 1166 | "PLL state assertion failure (expected %s, current %s)\n", |
@@ -1219,20 +1217,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, | |||
1219 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, | 1217 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, |
1220 | enum pipe pipe, bool state) | 1218 | enum pipe pipe, bool state) |
1221 | { | 1219 | { |
1222 | int reg; | ||
1223 | u32 val; | ||
1224 | bool cur_state; | 1220 | bool cur_state; |
1225 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 1221 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
1226 | pipe); | 1222 | pipe); |
1227 | 1223 | ||
1228 | if (HAS_DDI(dev_priv->dev)) { | 1224 | if (HAS_DDI(dev_priv->dev)) { |
1229 | /* DDI does not have a specific FDI_TX register */ | 1225 | /* DDI does not have a specific FDI_TX register */ |
1230 | reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); | 1226 | u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); |
1231 | val = I915_READ(reg); | ||
1232 | cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); | 1227 | cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); |
1233 | } else { | 1228 | } else { |
1234 | reg = FDI_TX_CTL(pipe); | 1229 | u32 val = I915_READ(FDI_TX_CTL(pipe)); |
1235 | val = I915_READ(reg); | ||
1236 | cur_state = !!(val & FDI_TX_ENABLE); | 1230 | cur_state = !!(val & FDI_TX_ENABLE); |
1237 | } | 1231 | } |
1238 | I915_STATE_WARN(cur_state != state, | 1232 | I915_STATE_WARN(cur_state != state, |
@@ -1245,12 +1239,10 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, | |||
1245 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, | 1239 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, |
1246 | enum pipe pipe, bool state) | 1240 | enum pipe pipe, bool state) |
1247 | { | 1241 | { |
1248 | int reg; | ||
1249 | u32 val; | 1242 | u32 val; |
1250 | bool cur_state; | 1243 | bool cur_state; |
1251 | 1244 | ||
1252 | reg = FDI_RX_CTL(pipe); | 1245 | val = I915_READ(FDI_RX_CTL(pipe)); |
1253 | val = I915_READ(reg); | ||
1254 | cur_state = !!(val & FDI_RX_ENABLE); | 1246 | cur_state = !!(val & FDI_RX_ENABLE); |
1255 | I915_STATE_WARN(cur_state != state, | 1247 | I915_STATE_WARN(cur_state != state, |
1256 | "FDI RX state assertion failure (expected %s, current %s)\n", | 1248 | "FDI RX state assertion failure (expected %s, current %s)\n", |
@@ -1262,7 +1254,6 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, | |||
1262 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | 1254 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, |
1263 | enum pipe pipe) | 1255 | enum pipe pipe) |
1264 | { | 1256 | { |
1265 | int reg; | ||
1266 | u32 val; | 1257 | u32 val; |
1267 | 1258 | ||
1268 | /* ILK FDI PLL is always enabled */ | 1259 | /* ILK FDI PLL is always enabled */ |
@@ -1273,20 +1264,17 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | |||
1273 | if (HAS_DDI(dev_priv->dev)) | 1264 | if (HAS_DDI(dev_priv->dev)) |
1274 | return; | 1265 | return; |
1275 | 1266 | ||
1276 | reg = FDI_TX_CTL(pipe); | 1267 | val = I915_READ(FDI_TX_CTL(pipe)); |
1277 | val = I915_READ(reg); | ||
1278 | I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); | 1268 | I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); |
1279 | } | 1269 | } |
1280 | 1270 | ||
1281 | void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, | 1271 | void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, |
1282 | enum pipe pipe, bool state) | 1272 | enum pipe pipe, bool state) |
1283 | { | 1273 | { |
1284 | int reg; | ||
1285 | u32 val; | 1274 | u32 val; |
1286 | bool cur_state; | 1275 | bool cur_state; |
1287 | 1276 | ||
1288 | reg = FDI_RX_CTL(pipe); | 1277 | val = I915_READ(FDI_RX_CTL(pipe)); |
1289 | val = I915_READ(reg); | ||
1290 | cur_state = !!(val & FDI_RX_PLL_ENABLE); | 1278 | cur_state = !!(val & FDI_RX_PLL_ENABLE); |
1291 | I915_STATE_WARN(cur_state != state, | 1279 | I915_STATE_WARN(cur_state != state, |
1292 | "FDI RX PLL assertion failure (expected %s, current %s)\n", | 1280 | "FDI RX PLL assertion failure (expected %s, current %s)\n", |
@@ -1356,8 +1344,6 @@ static void assert_cursor(struct drm_i915_private *dev_priv, | |||
1356 | void assert_pipe(struct drm_i915_private *dev_priv, | 1344 | void assert_pipe(struct drm_i915_private *dev_priv, |
1357 | enum pipe pipe, bool state) | 1345 | enum pipe pipe, bool state) |
1358 | { | 1346 | { |
1359 | int reg; | ||
1360 | u32 val; | ||
1361 | bool cur_state; | 1347 | bool cur_state; |
1362 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 1348 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
1363 | pipe); | 1349 | pipe); |
@@ -1371,8 +1357,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
1371 | POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { | 1357 | POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { |
1372 | cur_state = false; | 1358 | cur_state = false; |
1373 | } else { | 1359 | } else { |
1374 | reg = PIPECONF(cpu_transcoder); | 1360 | u32 val = I915_READ(PIPECONF(cpu_transcoder)); |
1375 | val = I915_READ(reg); | ||
1376 | cur_state = !!(val & PIPECONF_ENABLE); | 1361 | cur_state = !!(val & PIPECONF_ENABLE); |
1377 | } | 1362 | } |
1378 | 1363 | ||
@@ -1384,12 +1369,10 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
1384 | static void assert_plane(struct drm_i915_private *dev_priv, | 1369 | static void assert_plane(struct drm_i915_private *dev_priv, |
1385 | enum plane plane, bool state) | 1370 | enum plane plane, bool state) |
1386 | { | 1371 | { |
1387 | int reg; | ||
1388 | u32 val; | 1372 | u32 val; |
1389 | bool cur_state; | 1373 | bool cur_state; |
1390 | 1374 | ||
1391 | reg = DSPCNTR(plane); | 1375 | val = I915_READ(DSPCNTR(plane)); |
1392 | val = I915_READ(reg); | ||
1393 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); | 1376 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); |
1394 | I915_STATE_WARN(cur_state != state, | 1377 | I915_STATE_WARN(cur_state != state, |
1395 | "plane %c assertion failure (expected %s, current %s)\n", | 1378 | "plane %c assertion failure (expected %s, current %s)\n", |
@@ -1403,14 +1386,11 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, | |||
1403 | enum pipe pipe) | 1386 | enum pipe pipe) |
1404 | { | 1387 | { |
1405 | struct drm_device *dev = dev_priv->dev; | 1388 | struct drm_device *dev = dev_priv->dev; |
1406 | int reg, i; | 1389 | int i; |
1407 | u32 val; | ||
1408 | int cur_pipe; | ||
1409 | 1390 | ||
1410 | /* Primary planes are fixed to pipes on gen4+ */ | 1391 | /* Primary planes are fixed to pipes on gen4+ */ |
1411 | if (INTEL_INFO(dev)->gen >= 4) { | 1392 | if (INTEL_INFO(dev)->gen >= 4) { |
1412 | reg = DSPCNTR(pipe); | 1393 | u32 val = I915_READ(DSPCNTR(pipe)); |
1413 | val = I915_READ(reg); | ||
1414 | I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, | 1394 | I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, |
1415 | "plane %c assertion failure, should be disabled but not\n", | 1395 | "plane %c assertion failure, should be disabled but not\n", |
1416 | plane_name(pipe)); | 1396 | plane_name(pipe)); |
@@ -1419,9 +1399,8 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, | |||
1419 | 1399 | ||
1420 | /* Need to check both planes against the pipe */ | 1400 | /* Need to check both planes against the pipe */ |
1421 | for_each_pipe(dev_priv, i) { | 1401 | for_each_pipe(dev_priv, i) { |
1422 | reg = DSPCNTR(i); | 1402 | u32 val = I915_READ(DSPCNTR(i)); |
1423 | val = I915_READ(reg); | 1403 | enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> |
1424 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> | ||
1425 | DISPPLANE_SEL_PIPE_SHIFT; | 1404 | DISPPLANE_SEL_PIPE_SHIFT; |
1426 | I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, | 1405 | I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, |
1427 | "plane %c assertion failure, should be off on pipe %c but is still active\n", | 1406 | "plane %c assertion failure, should be off on pipe %c but is still active\n", |
@@ -1433,33 +1412,29 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1433 | enum pipe pipe) | 1412 | enum pipe pipe) |
1434 | { | 1413 | { |
1435 | struct drm_device *dev = dev_priv->dev; | 1414 | struct drm_device *dev = dev_priv->dev; |
1436 | int reg, sprite; | 1415 | int sprite; |
1437 | u32 val; | ||
1438 | 1416 | ||
1439 | if (INTEL_INFO(dev)->gen >= 9) { | 1417 | if (INTEL_INFO(dev)->gen >= 9) { |
1440 | for_each_sprite(dev_priv, pipe, sprite) { | 1418 | for_each_sprite(dev_priv, pipe, sprite) { |
1441 | val = I915_READ(PLANE_CTL(pipe, sprite)); | 1419 | u32 val = I915_READ(PLANE_CTL(pipe, sprite)); |
1442 | I915_STATE_WARN(val & PLANE_CTL_ENABLE, | 1420 | I915_STATE_WARN(val & PLANE_CTL_ENABLE, |
1443 | "plane %d assertion failure, should be off on pipe %c but is still active\n", | 1421 | "plane %d assertion failure, should be off on pipe %c but is still active\n", |
1444 | sprite, pipe_name(pipe)); | 1422 | sprite, pipe_name(pipe)); |
1445 | } | 1423 | } |
1446 | } else if (IS_VALLEYVIEW(dev)) { | 1424 | } else if (IS_VALLEYVIEW(dev)) { |
1447 | for_each_sprite(dev_priv, pipe, sprite) { | 1425 | for_each_sprite(dev_priv, pipe, sprite) { |
1448 | reg = SPCNTR(pipe, sprite); | 1426 | u32 val = I915_READ(SPCNTR(pipe, sprite)); |
1449 | val = I915_READ(reg); | ||
1450 | I915_STATE_WARN(val & SP_ENABLE, | 1427 | I915_STATE_WARN(val & SP_ENABLE, |
1451 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1428 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1452 | sprite_name(pipe, sprite), pipe_name(pipe)); | 1429 | sprite_name(pipe, sprite), pipe_name(pipe)); |
1453 | } | 1430 | } |
1454 | } else if (INTEL_INFO(dev)->gen >= 7) { | 1431 | } else if (INTEL_INFO(dev)->gen >= 7) { |
1455 | reg = SPRCTL(pipe); | 1432 | u32 val = I915_READ(SPRCTL(pipe)); |
1456 | val = I915_READ(reg); | ||
1457 | I915_STATE_WARN(val & SPRITE_ENABLE, | 1433 | I915_STATE_WARN(val & SPRITE_ENABLE, |
1458 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1434 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1459 | plane_name(pipe), pipe_name(pipe)); | 1435 | plane_name(pipe), pipe_name(pipe)); |
1460 | } else if (INTEL_INFO(dev)->gen >= 5) { | 1436 | } else if (INTEL_INFO(dev)->gen >= 5) { |
1461 | reg = DVSCNTR(pipe); | 1437 | u32 val = I915_READ(DVSCNTR(pipe)); |
1462 | val = I915_READ(reg); | ||
1463 | I915_STATE_WARN(val & DVS_ENABLE, | 1438 | I915_STATE_WARN(val & DVS_ENABLE, |
1464 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1439 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1465 | plane_name(pipe), pipe_name(pipe)); | 1440 | plane_name(pipe), pipe_name(pipe)); |
@@ -1488,12 +1463,10 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) | |||
1488 | static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, | 1463 | static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, |
1489 | enum pipe pipe) | 1464 | enum pipe pipe) |
1490 | { | 1465 | { |
1491 | int reg; | ||
1492 | u32 val; | 1466 | u32 val; |
1493 | bool enabled; | 1467 | bool enabled; |
1494 | 1468 | ||
1495 | reg = PCH_TRANSCONF(pipe); | 1469 | val = I915_READ(PCH_TRANSCONF(pipe)); |
1496 | val = I915_READ(reg); | ||
1497 | enabled = !!(val & TRANS_ENABLE); | 1470 | enabled = !!(val & TRANS_ENABLE); |
1498 | I915_STATE_WARN(enabled, | 1471 | I915_STATE_WARN(enabled, |
1499 | "transcoder assertion failed, should be off on pipe %c but is still active\n", | 1472 | "transcoder assertion failed, should be off on pipe %c but is still active\n", |
@@ -1600,21 +1573,18 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
1600 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | 1573 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, |
1601 | enum pipe pipe) | 1574 | enum pipe pipe) |
1602 | { | 1575 | { |
1603 | int reg; | ||
1604 | u32 val; | 1576 | u32 val; |
1605 | 1577 | ||
1606 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); | 1578 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1607 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); | 1579 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); |
1608 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); | 1580 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); |
1609 | 1581 | ||
1610 | reg = PCH_ADPA; | 1582 | val = I915_READ(PCH_ADPA); |
1611 | val = I915_READ(reg); | ||
1612 | I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), | 1583 | I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1613 | "PCH VGA enabled on transcoder %c, should be disabled\n", | 1584 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1614 | pipe_name(pipe)); | 1585 | pipe_name(pipe)); |
1615 | 1586 | ||
1616 | reg = PCH_LVDS; | 1587 | val = I915_READ(PCH_LVDS); |
1617 | val = I915_READ(reg); | ||
1618 | I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), | 1588 | I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1619 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1589 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1620 | pipe_name(pipe)); | 1590 | pipe_name(pipe)); |
@@ -4804,6 +4774,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc) | |||
4804 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; | 4774 | struct intel_crtc_atomic_commit *atomic = &crtc->atomic; |
4805 | struct drm_device *dev = crtc->base.dev; | 4775 | struct drm_device *dev = crtc->base.dev; |
4806 | struct drm_i915_private *dev_priv = dev->dev_private; | 4776 | struct drm_i915_private *dev_priv = dev->dev_private; |
4777 | struct drm_plane *plane; | ||
4807 | 4778 | ||
4808 | if (atomic->wait_vblank) | 4779 | if (atomic->wait_vblank) |
4809 | intel_wait_for_vblank(dev, crtc->pipe); | 4780 | intel_wait_for_vblank(dev, crtc->pipe); |
@@ -4822,6 +4793,10 @@ static void intel_post_plane_update(struct intel_crtc *crtc) | |||
4822 | if (atomic->post_enable_primary) | 4793 | if (atomic->post_enable_primary) |
4823 | intel_post_enable_primary(&crtc->base); | 4794 | intel_post_enable_primary(&crtc->base); |
4824 | 4795 | ||
4796 | drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks) | ||
4797 | intel_update_sprite_watermarks(plane, &crtc->base, | ||
4798 | 0, 0, 0, false, false); | ||
4799 | |||
4825 | memset(atomic, 0, sizeof(*atomic)); | 4800 | memset(atomic, 0, sizeof(*atomic)); |
4826 | } | 4801 | } |
4827 | 4802 | ||
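The added loop only visits planes whose bit is set in atomic->update_sprite_watermarks. A standalone illustration of walking such a plane bitmask (hand-rolled here rather than using the drm_for_each_plane_mask() helper, with made-up plane indices and callback):

/* Walking a dirty-plane bitmask; plane numbering and update_one() are
 * invented for the example. */
#include <stdint.h>
#include <stdio.h>

static void update_one(unsigned int plane)
{
	printf("update watermarks for plane %u\n", plane);
}

int main(void)
{
	uint32_t mask = (1u << 0) | (1u << 2);   /* planes 0 and 2 marked dirty */

	for (unsigned int i = 0; mask; i++, mask >>= 1)
		if (mask & 1)
			update_one(i);
	return 0;
}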
@@ -9952,7 +9927,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
9952 | } | 9927 | } |
9953 | cntl |= pipe << 28; /* Connect to correct pipe */ | 9928 | cntl |= pipe << 28; /* Connect to correct pipe */ |
9954 | 9929 | ||
9955 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 9930 | if (HAS_DDI(dev)) |
9956 | cntl |= CURSOR_PIPE_CSC_ENABLE; | 9931 | cntl |= CURSOR_PIPE_CSC_ENABLE; |
9957 | } | 9932 | } |
9958 | 9933 | ||
@@ -10822,7 +10797,7 @@ static bool page_flip_finished(struct intel_crtc *crtc) | |||
10822 | */ | 10797 | */ |
10823 | return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == | 10798 | return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == |
10824 | crtc->unpin_work->gtt_offset && | 10799 | crtc->unpin_work->gtt_offset && |
10825 | g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)), | 10800 | g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), |
10826 | crtc->unpin_work->flip_count); | 10801 | crtc->unpin_work->flip_count); |
10827 | } | 10802 | } |
10828 | 10803 | ||
@@ -10848,11 +10823,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
10848 | spin_unlock_irqrestore(&dev->event_lock, flags); | 10823 | spin_unlock_irqrestore(&dev->event_lock, flags); |
10849 | } | 10824 | } |
10850 | 10825 | ||
10851 | static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) | 10826 | static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) |
10852 | { | 10827 | { |
10853 | /* Ensure that the work item is consistent when activating it ... */ | 10828 | /* Ensure that the work item is consistent when activating it ... */ |
10854 | smp_wmb(); | 10829 | smp_wmb(); |
10855 | atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); | 10830 | atomic_set(&work->pending, INTEL_FLIP_PENDING); |
10856 | /* and that it is marked active as soon as the irq could fire. */ | 10831 | /* and that it is marked active as soon as the irq could fire. */ |
10857 | smp_wmb(); | 10832 | smp_wmb(); |
10858 | } | 10833 | } |
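intel_mark_page_flip_active() publishes the work item to the interrupt handler: one barrier so earlier stores to the work struct are visible before pending is set, and another so the flag itself is visible before the flip is armed. A user-space analogue of the first half of that pairing, using C11 release/acquire in place of smp_wmb() and the reader's paired barrier (illustrative, not the driver code):

/* Publish a work item, then set a "pending" flag with release semantics;
 * the consumer's acquire load guarantees it sees the item's fields.
 * Names and fields are invented for the example. */
#include <stdatomic.h>
#include <stdio.h>

struct work {
	unsigned long gtt_offset;
	atomic_int pending;
};

static void mark_flip_active(struct work *w, unsigned long offset)
{
	w->gtt_offset = offset;                  /* fill in the item first */
	atomic_store_explicit(&w->pending, 1, memory_order_release);
}

static int flip_pending(struct work *w, unsigned long *offset)
{
	if (!atomic_load_explicit(&w->pending, memory_order_acquire))
		return 0;
	*offset = w->gtt_offset;                 /* guaranteed to see the store above */
	return 1;
}

int main(void)
{
	struct work w = { .gtt_offset = 0 };
	unsigned long off;

	atomic_init(&w.pending, 0);
	mark_flip_active(&w, 0x1000);
	if (flip_pending(&w, &off))
		printf("flip pending, offset %#lx\n", off);
	return 0;
}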
@@ -10888,7 +10863,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, | |||
10888 | intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); | 10863 | intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); |
10889 | intel_ring_emit(ring, 0); /* aux display base address, unused */ | 10864 | intel_ring_emit(ring, 0); /* aux display base address, unused */ |
10890 | 10865 | ||
10891 | intel_mark_page_flip_active(intel_crtc); | 10866 | intel_mark_page_flip_active(intel_crtc->unpin_work); |
10892 | return 0; | 10867 | return 0; |
10893 | } | 10868 | } |
10894 | 10869 | ||
@@ -10920,7 +10895,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, | |||
10920 | intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); | 10895 | intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); |
10921 | intel_ring_emit(ring, MI_NOOP); | 10896 | intel_ring_emit(ring, MI_NOOP); |
10922 | 10897 | ||
10923 | intel_mark_page_flip_active(intel_crtc); | 10898 | intel_mark_page_flip_active(intel_crtc->unpin_work); |
10924 | return 0; | 10899 | return 0; |
10925 | } | 10900 | } |
10926 | 10901 | ||
@@ -10959,7 +10934,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, | |||
10959 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | 10934 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; |
10960 | intel_ring_emit(ring, pf | pipesrc); | 10935 | intel_ring_emit(ring, pf | pipesrc); |
10961 | 10936 | ||
10962 | intel_mark_page_flip_active(intel_crtc); | 10937 | intel_mark_page_flip_active(intel_crtc->unpin_work); |
10963 | return 0; | 10938 | return 0; |
10964 | } | 10939 | } |
10965 | 10940 | ||
@@ -10995,7 +10970,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, | |||
10995 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | 10970 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; |
10996 | intel_ring_emit(ring, pf | pipesrc); | 10971 | intel_ring_emit(ring, pf | pipesrc); |
10997 | 10972 | ||
10998 | intel_mark_page_flip_active(intel_crtc); | 10973 | intel_mark_page_flip_active(intel_crtc->unpin_work); |
10999 | return 0; | 10974 | return 0; |
11000 | } | 10975 | } |
11001 | 10976 | ||
@@ -11090,7 +11065,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, | |||
11090 | intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); | 11065 | intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); |
11091 | intel_ring_emit(ring, (MI_NOOP)); | 11066 | intel_ring_emit(ring, (MI_NOOP)); |
11092 | 11067 | ||
11093 | intel_mark_page_flip_active(intel_crtc); | 11068 | intel_mark_page_flip_active(intel_crtc->unpin_work); |
11094 | return 0; | 11069 | return 0; |
11095 | } | 11070 | } |
11096 | 11071 | ||
@@ -11121,7 +11096,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, | |||
11121 | return ring != i915_gem_request_get_ring(obj->last_write_req); | 11096 | return ring != i915_gem_request_get_ring(obj->last_write_req); |
11122 | } | 11097 | } |
11123 | 11098 | ||
11124 | static void skl_do_mmio_flip(struct intel_crtc *intel_crtc) | 11099 | static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, |
11100 | struct intel_unpin_work *work) | ||
11125 | { | 11101 | { |
11126 | struct drm_device *dev = intel_crtc->base.dev; | 11102 | struct drm_device *dev = intel_crtc->base.dev; |
11127 | struct drm_i915_private *dev_priv = dev->dev_private; | 11103 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -11162,11 +11138,12 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc) | |||
11162 | I915_WRITE(PLANE_CTL(pipe, 0), ctl); | 11138 | I915_WRITE(PLANE_CTL(pipe, 0), ctl); |
11163 | I915_WRITE(PLANE_STRIDE(pipe, 0), stride); | 11139 | I915_WRITE(PLANE_STRIDE(pipe, 0), stride); |
11164 | 11140 | ||
11165 | I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset); | 11141 | I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset); |
11166 | POSTING_READ(PLANE_SURF(pipe, 0)); | 11142 | POSTING_READ(PLANE_SURF(pipe, 0)); |
11167 | } | 11143 | } |
11168 | 11144 | ||
11169 | static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) | 11145 | static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, |
11146 | struct intel_unpin_work *work) | ||
11170 | { | 11147 | { |
11171 | struct drm_device *dev = intel_crtc->base.dev; | 11148 | struct drm_device *dev = intel_crtc->base.dev; |
11172 | struct drm_i915_private *dev_priv = dev->dev_private; | 11149 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -11186,31 +11163,36 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) | |||
11186 | 11163 | ||
11187 | I915_WRITE(reg, dspcntr); | 11164 | I915_WRITE(reg, dspcntr); |
11188 | 11165 | ||
11189 | I915_WRITE(DSPSURF(intel_crtc->plane), | 11166 | I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset); |
11190 | intel_crtc->unpin_work->gtt_offset); | ||
11191 | POSTING_READ(DSPSURF(intel_crtc->plane)); | 11167 | POSTING_READ(DSPSURF(intel_crtc->plane)); |
11192 | |||
11193 | } | 11168 | } |
11194 | 11169 | ||
11195 | /* | 11170 | /* |
11196 | * XXX: This is the temporary way to update the plane registers until we get | 11171 | * XXX: This is the temporary way to update the plane registers until we get |
11197 | * around to using the usual plane update functions for MMIO flips | 11172 | * around to using the usual plane update functions for MMIO flips |
11198 | */ | 11173 | */ |
11199 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | 11174 | static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip) |
11200 | { | 11175 | { |
11201 | struct drm_device *dev = intel_crtc->base.dev; | 11176 | struct intel_crtc *crtc = mmio_flip->crtc; |
11177 | struct intel_unpin_work *work; | ||
11202 | 11178 | ||
11203 | intel_mark_page_flip_active(intel_crtc); | 11179 | spin_lock_irq(&crtc->base.dev->event_lock); |
11180 | work = crtc->unpin_work; | ||
11181 | spin_unlock_irq(&crtc->base.dev->event_lock); | ||
11182 | if (work == NULL) | ||
11183 | return; | ||
11204 | 11184 | ||
11205 | intel_pipe_update_start(intel_crtc); | 11185 | intel_mark_page_flip_active(work); |
11206 | 11186 | ||
11207 | if (INTEL_INFO(dev)->gen >= 9) | 11187 | intel_pipe_update_start(crtc); |
11208 | skl_do_mmio_flip(intel_crtc); | 11188 | |
11189 | if (INTEL_INFO(mmio_flip->i915)->gen >= 9) | ||
11190 | skl_do_mmio_flip(crtc, work); | ||
11209 | else | 11191 | else |
11210 | /* use_mmio_flip() retricts MMIO flips to ilk+ */ | 11192 | /* use_mmio_flip() retricts MMIO flips to ilk+ */ |
11211 | ilk_do_mmio_flip(intel_crtc); | 11193 | ilk_do_mmio_flip(crtc, work); |
11212 | 11194 | ||
11213 | intel_pipe_update_end(intel_crtc); | 11195 | intel_pipe_update_end(crtc); |
11214 | } | 11196 | } |
11215 | 11197 | ||
11216 | static void intel_mmio_flip_work_func(struct work_struct *work) | 11198 | static void intel_mmio_flip_work_func(struct work_struct *work) |
@@ -11218,15 +11200,15 @@ static void intel_mmio_flip_work_func(struct work_struct *work) | |||
11218 | struct intel_mmio_flip *mmio_flip = | 11200 | struct intel_mmio_flip *mmio_flip = |
11219 | container_of(work, struct intel_mmio_flip, work); | 11201 | container_of(work, struct intel_mmio_flip, work); |
11220 | 11202 | ||
11221 | if (mmio_flip->req) | 11203 | if (mmio_flip->req) { |
11222 | WARN_ON(__i915_wait_request(mmio_flip->req, | 11204 | WARN_ON(__i915_wait_request(mmio_flip->req, |
11223 | mmio_flip->crtc->reset_counter, | 11205 | mmio_flip->crtc->reset_counter, |
11224 | false, NULL, | 11206 | false, NULL, |
11225 | &mmio_flip->i915->rps.mmioflips)); | 11207 | &mmio_flip->i915->rps.mmioflips)); |
11208 | i915_gem_request_unreference__unlocked(mmio_flip->req); | ||
11209 | } | ||
11226 | 11210 | ||
11227 | intel_do_mmio_flip(mmio_flip->crtc); | 11211 | intel_do_mmio_flip(mmio_flip); |
11228 | |||
11229 | i915_gem_request_unreference__unlocked(mmio_flip->req); | ||
11230 | kfree(mmio_flip); | 11212 | kfree(mmio_flip); |
11231 | } | 11213 | } |
11232 | 11214 | ||
@@ -11427,7 +11409,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
11427 | intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 11409 | intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
11428 | 11410 | ||
11429 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | 11411 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) |
11430 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1; | 11412 | work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; |
11431 | 11413 | ||
11432 | if (IS_VALLEYVIEW(dev)) { | 11414 | if (IS_VALLEYVIEW(dev)) { |
11433 | ring = &dev_priv->ring[BCS]; | 11415 | ring = &dev_priv->ring[BCS]; |
@@ -11577,30 +11559,16 @@ retry: | |||
11577 | static bool intel_wm_need_update(struct drm_plane *plane, | 11559 | static bool intel_wm_need_update(struct drm_plane *plane, |
11578 | struct drm_plane_state *state) | 11560 | struct drm_plane_state *state) |
11579 | { | 11561 | { |
11580 | struct intel_plane_state *new = to_intel_plane_state(state); | 11562 | /* Update watermarks on tiling changes. */ |
11581 | struct intel_plane_state *cur = to_intel_plane_state(plane->state); | ||
11582 | |||
11583 | /* Update watermarks on tiling or size changes. */ | ||
11584 | if (!plane->state->fb || !state->fb || | 11563 | if (!plane->state->fb || !state->fb || |
11585 | plane->state->fb->modifier[0] != state->fb->modifier[0] || | 11564 | plane->state->fb->modifier[0] != state->fb->modifier[0] || |
11586 | plane->state->rotation != state->rotation || | 11565 | plane->state->rotation != state->rotation) |
11587 | drm_rect_width(&new->src) != drm_rect_width(&cur->src) || | ||
11588 | drm_rect_height(&new->src) != drm_rect_height(&cur->src) || | ||
11589 | drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || | ||
11590 | drm_rect_height(&new->dst) != drm_rect_height(&cur->dst)) | ||
11591 | return true; | 11566 | return true; |
11592 | 11567 | ||
11593 | return false; | 11568 | if (plane->state->crtc_w != state->crtc_w) |
11594 | } | 11569 | return true; |
11595 | |||
11596 | static bool needs_scaling(struct intel_plane_state *state) | ||
11597 | { | ||
11598 | int src_w = drm_rect_width(&state->src) >> 16; | ||
11599 | int src_h = drm_rect_height(&state->src) >> 16; | ||
11600 | int dst_w = drm_rect_width(&state->dst); | ||
11601 | int dst_h = drm_rect_height(&state->dst); | ||
11602 | 11570 | ||
11603 | return (src_w != dst_w || src_h != dst_h); | 11571 | return false; |
11604 | } | 11572 | } |
11605 | 11573 | ||
11606 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | 11574 | int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, |
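The needs_scaling() helper removed here compared source and destination rectangle sizes, with the source kept in 16.16 fixed point and the destination in whole pixels. A small standalone sketch of that convention (a made-up rect type rather than the drm_rect helpers):

/* Plane source rectangles are 16.16 fixed point, destinations are whole
 * pixels, so a scaling check shifts the source down by 16 before
 * comparing.  The rect type here is invented for the example. */
#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static int width(const struct rect *r)  { return r->x2 - r->x1; }
static int height(const struct rect *r) { return r->y2 - r->y1; }

static bool needs_scaling(const struct rect *src_fixed, const struct rect *dst)
{
	int src_w = width(src_fixed) >> 16;      /* 16.16 -> pixels */
	int src_h = height(src_fixed) >> 16;

	return src_w != width(dst) || src_h != height(dst);
}

int main(void)
{
	struct rect src = { 0, 0, 1920 << 16, 1080 << 16 };
	struct rect dst = { 0, 0, 1280, 720 };

	printf("scaled: %d\n", needs_scaling(&src, &dst));   /* 1 */
	return 0;
}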
@@ -11618,6 +11586,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
11618 | bool mode_changed = needs_modeset(crtc_state); | 11586 | bool mode_changed = needs_modeset(crtc_state); |
11619 | bool was_crtc_enabled = crtc->state->active; | 11587 | bool was_crtc_enabled = crtc->state->active; |
11620 | bool is_crtc_enabled = crtc_state->active; | 11588 | bool is_crtc_enabled = crtc_state->active; |
11589 | |||
11621 | bool turn_off, turn_on, visible, was_visible; | 11590 | bool turn_off, turn_on, visible, was_visible; |
11622 | struct drm_framebuffer *fb = plane_state->fb; | 11591 | struct drm_framebuffer *fb = plane_state->fb; |
11623 | 11592 | ||
@@ -11735,23 +11704,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, | |||
11735 | case DRM_PLANE_TYPE_CURSOR: | 11704 | case DRM_PLANE_TYPE_CURSOR: |
11736 | break; | 11705 | break; |
11737 | case DRM_PLANE_TYPE_OVERLAY: | 11706 | case DRM_PLANE_TYPE_OVERLAY: |
11738 | /* | 11707 | if (turn_off && !mode_changed) { |
11739 | * WaCxSRDisabledForSpriteScaling:ivb | ||
11740 | * | ||
11741 | * cstate->update_wm was already set above, so this flag will | ||
11742 | * take effect when we commit and program watermarks. | ||
11743 | */ | ||
11744 | if (IS_IVYBRIDGE(dev) && | ||
11745 | needs_scaling(to_intel_plane_state(plane_state)) && | ||
11746 | !needs_scaling(old_plane_state)) { | ||
11747 | to_intel_crtc_state(crtc_state)->disable_lp_wm = true; | ||
11748 | } else if (turn_off && !mode_changed) { | ||
11749 | intel_crtc->atomic.wait_vblank = true; | 11708 | intel_crtc->atomic.wait_vblank = true; |
11750 | intel_crtc->atomic.update_sprite_watermarks |= | 11709 | intel_crtc->atomic.update_sprite_watermarks |= |
11751 | 1 << i; | 11710 | 1 << i; |
11752 | } | 11711 | } |
11753 | |||
11754 | break; | ||
11755 | } | 11712 | } |
11756 | return 0; | 11713 | return 0; |
11757 | } | 11714 | } |
@@ -14942,13 +14899,12 @@ intel_check_plane_mapping(struct intel_crtc *crtc) | |||
14942 | { | 14899 | { |
14943 | struct drm_device *dev = crtc->base.dev; | 14900 | struct drm_device *dev = crtc->base.dev; |
14944 | struct drm_i915_private *dev_priv = dev->dev_private; | 14901 | struct drm_i915_private *dev_priv = dev->dev_private; |
14945 | u32 reg, val; | 14902 | u32 val; |
14946 | 14903 | ||
14947 | if (INTEL_INFO(dev)->num_pipes == 1) | 14904 | if (INTEL_INFO(dev)->num_pipes == 1) |
14948 | return true; | 14905 | return true; |
14949 | 14906 | ||
14950 | reg = DSPCNTR(!crtc->plane); | 14907 | val = I915_READ(DSPCNTR(!crtc->plane)); |
14951 | val = I915_READ(reg); | ||
14952 | 14908 | ||
14953 | if ((val & DISPLAY_PLANE_ENABLE) && | 14909 | if ((val & DISPLAY_PLANE_ENABLE) && |
14954 | (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) | 14910 | (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 18bcfbe0b8ba..09bdd94ca3ba 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -574,8 +574,6 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, | |||
574 | edp_notifier); | 574 | edp_notifier); |
575 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | 575 | struct drm_device *dev = intel_dp_to_dev(intel_dp); |
576 | struct drm_i915_private *dev_priv = dev->dev_private; | 576 | struct drm_i915_private *dev_priv = dev->dev_private; |
577 | u32 pp_div; | ||
578 | u32 pp_ctrl_reg, pp_div_reg; | ||
579 | 577 | ||
580 | if (!is_edp(intel_dp) || code != SYS_RESTART) | 578 | if (!is_edp(intel_dp) || code != SYS_RESTART) |
581 | return 0; | 579 | return 0; |
@@ -584,6 +582,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, | |||
584 | 582 | ||
585 | if (IS_VALLEYVIEW(dev)) { | 583 | if (IS_VALLEYVIEW(dev)) { |
586 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); | 584 | enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); |
585 | u32 pp_ctrl_reg, pp_div_reg; | ||
586 | u32 pp_div; | ||
587 | 587 | ||
588 | pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); | 588 | pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); |
589 | pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); | 589 | pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); |
@@ -5536,7 +5536,6 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) | |||
5536 | struct intel_dp *intel_dp = dev_priv->drrs.dp; | 5536 | struct intel_dp *intel_dp = dev_priv->drrs.dp; |
5537 | struct intel_crtc_state *config = NULL; | 5537 | struct intel_crtc_state *config = NULL; |
5538 | struct intel_crtc *intel_crtc = NULL; | 5538 | struct intel_crtc *intel_crtc = NULL; |
5539 | u32 reg, val; | ||
5540 | enum drrs_refresh_rate_type index = DRRS_HIGH_RR; | 5539 | enum drrs_refresh_rate_type index = DRRS_HIGH_RR; |
5541 | 5540 | ||
5542 | if (refresh_rate <= 0) { | 5541 | if (refresh_rate <= 0) { |
@@ -5598,9 +5597,10 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) | |||
5598 | DRM_ERROR("Unsupported refreshrate type\n"); | 5597 | DRM_ERROR("Unsupported refreshrate type\n"); |
5599 | } | 5598 | } |
5600 | } else if (INTEL_INFO(dev)->gen > 6) { | 5599 | } else if (INTEL_INFO(dev)->gen > 6) { |
5601 | reg = PIPECONF(intel_crtc->config->cpu_transcoder); | 5600 | u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder); |
5602 | val = I915_READ(reg); | 5601 | u32 val; |
5603 | 5602 | ||
5603 | val = I915_READ(reg); | ||
5604 | if (index > DRRS_HIGH_RR) { | 5604 | if (index > DRRS_HIGH_RR) { |
5605 | if (IS_VALLEYVIEW(dev)) | 5605 | if (IS_VALLEYVIEW(dev)) |
5606 | val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; | 5606 | val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 91b6b4060333..0598932ce623 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -468,9 +468,6 @@ struct intel_crtc_state { | |||
468 | 468 | ||
469 | /* w/a for waiting 2 vblanks during crtc enable */ | 469 | /* w/a for waiting 2 vblanks during crtc enable */ |
470 | enum pipe hsw_workaround_pipe; | 470 | enum pipe hsw_workaround_pipe; |
471 | |||
472 | /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ | ||
473 | bool disable_lp_wm; | ||
474 | }; | 471 | }; |
475 | 472 | ||
476 | struct vlv_wm_state { | 473 | struct vlv_wm_state { |
@@ -1399,6 +1396,12 @@ void intel_init_clock_gating(struct drm_device *dev); | |||
1399 | void intel_suspend_hw(struct drm_device *dev); | 1396 | void intel_suspend_hw(struct drm_device *dev); |
1400 | int ilk_wm_max_level(const struct drm_device *dev); | 1397 | int ilk_wm_max_level(const struct drm_device *dev); |
1401 | void intel_update_watermarks(struct drm_crtc *crtc); | 1398 | void intel_update_watermarks(struct drm_crtc *crtc); |
1399 | void intel_update_sprite_watermarks(struct drm_plane *plane, | ||
1400 | struct drm_crtc *crtc, | ||
1401 | uint32_t sprite_width, | ||
1402 | uint32_t sprite_height, | ||
1403 | int pixel_size, | ||
1404 | bool enabled, bool scaled); | ||
1402 | void intel_init_pm(struct drm_device *dev); | 1405 | void intel_init_pm(struct drm_device *dev); |
1403 | void intel_pm_setup(struct drm_device *dev); | 1406 | void intel_pm_setup(struct drm_device *dev); |
1404 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 1407 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 3b28ed3237d3..9eafa191cee2 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -113,17 +113,18 @@ static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) | |||
113 | } | 113 | } |
114 | } | 114 | } |
115 | 115 | ||
116 | static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, | 116 | static u32 hsw_dip_data_reg(struct drm_i915_private *dev_priv, |
117 | enum transcoder cpu_transcoder, | 117 | enum transcoder cpu_transcoder, |
118 | struct drm_i915_private *dev_priv) | 118 | enum hdmi_infoframe_type type, |
119 | int i) | ||
119 | { | 120 | { |
120 | switch (type) { | 121 | switch (type) { |
121 | case HDMI_INFOFRAME_TYPE_AVI: | 122 | case HDMI_INFOFRAME_TYPE_AVI: |
122 | return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); | 123 | return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i); |
123 | case HDMI_INFOFRAME_TYPE_SPD: | 124 | case HDMI_INFOFRAME_TYPE_SPD: |
124 | return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); | 125 | return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder, i); |
125 | case HDMI_INFOFRAME_TYPE_VENDOR: | 126 | case HDMI_INFOFRAME_TYPE_VENDOR: |
126 | return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder); | 127 | return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder, i); |
127 | default: | 128 | default: |
128 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); | 129 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); |
129 | return 0; | 130 | return 0; |
@@ -365,14 +366,13 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, | |||
365 | struct drm_device *dev = encoder->dev; | 366 | struct drm_device *dev = encoder->dev; |
366 | struct drm_i915_private *dev_priv = dev->dev_private; | 367 | struct drm_i915_private *dev_priv = dev->dev_private; |
367 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 368 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
368 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); | 369 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; |
370 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); | ||
369 | u32 data_reg; | 371 | u32 data_reg; |
370 | int i; | 372 | int i; |
371 | u32 val = I915_READ(ctl_reg); | 373 | u32 val = I915_READ(ctl_reg); |
372 | 374 | ||
373 | data_reg = hsw_infoframe_data_reg(type, | 375 | data_reg = hsw_dip_data_reg(dev_priv, cpu_transcoder, type, 0); |
374 | intel_crtc->config->cpu_transcoder, | ||
375 | dev_priv); | ||
376 | if (data_reg == 0) | 376 | if (data_reg == 0) |
377 | return; | 377 | return; |
378 | 378 | ||
@@ -381,12 +381,14 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, | |||
381 | 381 | ||
382 | mmiowb(); | 382 | mmiowb(); |
383 | for (i = 0; i < len; i += 4) { | 383 | for (i = 0; i < len; i += 4) { |
384 | I915_WRITE(data_reg + i, *data); | 384 | I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, |
385 | type, i >> 2), *data); | ||
385 | data++; | 386 | data++; |
386 | } | 387 | } |
387 | /* Write every possible data byte to force correct ECC calculation. */ | 388 | /* Write every possible data byte to force correct ECC calculation. */ |
388 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) | 389 | for (; i < VIDEO_DIP_DATA_SIZE; i += 4) |
389 | I915_WRITE(data_reg + i, 0); | 390 | I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder, |
391 | type, i >> 2), 0); | ||
390 | mmiowb(); | 392 | mmiowb(); |
391 | 393 | ||
392 | val |= hsw_infoframe_enable(type); | 394 | val |= hsw_infoframe_enable(type); |
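The rewritten write loop addresses the DIP data registers per dword, turning the byte offset i into a register index with i >> 2 and zero-filling the rest of the payload so the hardware ECC is computed over known data. A standalone sketch of that loop shape, with write_dword() as an invented stand-in for I915_WRITE():

/* Step through an infoframe payload four bytes at a time, using i >> 2
 * as the per-dword register index and zero-filling the remaining DIP
 * data slots.  write_dword() and the payload are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VIDEO_DIP_DATA_SIZE 32

static void write_dword(int index, uint32_t val)
{
	printf("DIP data[%d] = 0x%08x\n", index, val);
}

int main(void)
{
	const uint8_t frame[9] = { 0x82, 0x02, 0x0d, 1, 2, 3, 4, 5, 6 };
	uint32_t len = sizeof(frame);
	uint32_t i;

	for (i = 0; i < len; i += 4) {
		uint32_t dw = 0;

		memcpy(&dw, frame + i, len - i < 4 ? len - i : 4);
		write_dword(i >> 2, dw);
	}
	/* write every remaining slot so the ECC covers known values */
	for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
		write_dword(i >> 2, 0);
	return 0;
}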
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index a64f26c670af..1369fc41d039 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -114,8 +114,8 @@ intel_i2c_reset(struct drm_device *dev) | |||
114 | { | 114 | { |
115 | struct drm_i915_private *dev_priv = dev->dev_private; | 115 | struct drm_i915_private *dev_priv = dev->dev_private; |
116 | 116 | ||
117 | I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); | 117 | I915_WRITE(GMBUS0, 0); |
118 | I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); | 118 | I915_WRITE(GMBUS4, 0); |
119 | } | 119 | } |
120 | 120 | ||
121 | static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) | 121 | static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) |
@@ -261,7 +261,6 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | |||
261 | u32 gmbus4_irq_en) | 261 | u32 gmbus4_irq_en) |
262 | { | 262 | { |
263 | int i; | 263 | int i; |
264 | int reg_offset = dev_priv->gpio_mmio_base; | ||
265 | u32 gmbus2 = 0; | 264 | u32 gmbus2 = 0; |
266 | DEFINE_WAIT(wait); | 265 | DEFINE_WAIT(wait); |
267 | 266 | ||
@@ -271,13 +270,13 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | |||
271 | /* Important: The hw handles only the first bit, so set only one! Since | 270 | /* Important: The hw handles only the first bit, so set only one! Since |
272 | * we also need to check for NAKs besides the hw ready/idle signal, we | 271 | * we also need to check for NAKs besides the hw ready/idle signal, we |
273 | * need to wake up periodically and check that ourselves. */ | 272 | * need to wake up periodically and check that ourselves. */ |
274 | I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en); | 273 | I915_WRITE(GMBUS4, gmbus4_irq_en); |
275 | 274 | ||
276 | for (i = 0; i < msecs_to_jiffies_timeout(50); i++) { | 275 | for (i = 0; i < msecs_to_jiffies_timeout(50); i++) { |
277 | prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, | 276 | prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, |
278 | TASK_UNINTERRUPTIBLE); | 277 | TASK_UNINTERRUPTIBLE); |
279 | 278 | ||
280 | gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset); | 279 | gmbus2 = I915_READ_NOTRACE(GMBUS2); |
281 | if (gmbus2 & (GMBUS_SATOER | gmbus2_status)) | 280 | if (gmbus2 & (GMBUS_SATOER | gmbus2_status)) |
282 | break; | 281 | break; |
283 | 282 | ||
@@ -285,7 +284,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | |||
285 | } | 284 | } |
286 | finish_wait(&dev_priv->gmbus_wait_queue, &wait); | 285 | finish_wait(&dev_priv->gmbus_wait_queue, &wait); |
287 | 286 | ||
288 | I915_WRITE(GMBUS4 + reg_offset, 0); | 287 | I915_WRITE(GMBUS4, 0); |
289 | 288 | ||
290 | if (gmbus2 & GMBUS_SATOER) | 289 | if (gmbus2 & GMBUS_SATOER) |
291 | return -ENXIO; | 290 | return -ENXIO; |
@@ -298,20 +297,19 @@ static int | |||
298 | gmbus_wait_idle(struct drm_i915_private *dev_priv) | 297 | gmbus_wait_idle(struct drm_i915_private *dev_priv) |
299 | { | 298 | { |
300 | int ret; | 299 | int ret; |
301 | int reg_offset = dev_priv->gpio_mmio_base; | ||
302 | 300 | ||
303 | #define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0) | 301 | #define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0) |
304 | 302 | ||
305 | if (!HAS_GMBUS_IRQ(dev_priv->dev)) | 303 | if (!HAS_GMBUS_IRQ(dev_priv->dev)) |
306 | return wait_for(C, 10); | 304 | return wait_for(C, 10); |
307 | 305 | ||
308 | /* Important: The hw handles only the first bit, so set only one! */ | 306 | /* Important: The hw handles only the first bit, so set only one! */ |
309 | I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN); | 307 | I915_WRITE(GMBUS4, GMBUS_IDLE_EN); |
310 | 308 | ||
311 | ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, | 309 | ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, |
312 | msecs_to_jiffies_timeout(10)); | 310 | msecs_to_jiffies_timeout(10)); |
313 | 311 | ||
314 | I915_WRITE(GMBUS4 + reg_offset, 0); | 312 | I915_WRITE(GMBUS4, 0); |
315 | 313 | ||
316 | if (ret) | 314 | if (ret) |
317 | return 0; | 315 | return 0; |
@@ -325,9 +323,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, | |||
325 | unsigned short addr, u8 *buf, unsigned int len, | 323 | unsigned short addr, u8 *buf, unsigned int len, |
326 | u32 gmbus1_index) | 324 | u32 gmbus1_index) |
327 | { | 325 | { |
328 | int reg_offset = dev_priv->gpio_mmio_base; | 326 | I915_WRITE(GMBUS1, |
329 | |||
330 | I915_WRITE(GMBUS1 + reg_offset, | ||
331 | gmbus1_index | | 327 | gmbus1_index | |
332 | GMBUS_CYCLE_WAIT | | 328 | GMBUS_CYCLE_WAIT | |
333 | (len << GMBUS_BYTE_COUNT_SHIFT) | | 329 | (len << GMBUS_BYTE_COUNT_SHIFT) | |
@@ -342,7 +338,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv, | |||
342 | if (ret) | 338 | if (ret) |
343 | return ret; | 339 | return ret; |
344 | 340 | ||
345 | val = I915_READ(GMBUS3 + reg_offset); | 341 | val = I915_READ(GMBUS3); |
346 | do { | 342 | do { |
347 | *buf++ = val & 0xff; | 343 | *buf++ = val & 0xff; |
348 | val >>= 8; | 344 | val >>= 8; |
@@ -380,7 +376,6 @@ static int | |||
380 | gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, | 376 | gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, |
381 | unsigned short addr, u8 *buf, unsigned int len) | 377 | unsigned short addr, u8 *buf, unsigned int len) |
382 | { | 378 | { |
383 | int reg_offset = dev_priv->gpio_mmio_base; | ||
384 | unsigned int chunk_size = len; | 379 | unsigned int chunk_size = len; |
385 | u32 val, loop; | 380 | u32 val, loop; |
386 | 381 | ||
@@ -390,8 +385,8 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, | |||
390 | len -= 1; | 385 | len -= 1; |
391 | } | 386 | } |
392 | 387 | ||
393 | I915_WRITE(GMBUS3 + reg_offset, val); | 388 | I915_WRITE(GMBUS3, val); |
394 | I915_WRITE(GMBUS1 + reg_offset, | 389 | I915_WRITE(GMBUS1, |
395 | GMBUS_CYCLE_WAIT | | 390 | GMBUS_CYCLE_WAIT | |
396 | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | | 391 | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | |
397 | (addr << GMBUS_SLAVE_ADDR_SHIFT) | | 392 | (addr << GMBUS_SLAVE_ADDR_SHIFT) | |
@@ -404,7 +399,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv, | |||
404 | val |= *buf++ << (8 * loop); | 399 | val |= *buf++ << (8 * loop); |
405 | } while (--len && ++loop < 4); | 400 | } while (--len && ++loop < 4); |
406 | 401 | ||
407 | I915_WRITE(GMBUS3 + reg_offset, val); | 402 | I915_WRITE(GMBUS3, val); |
408 | 403 | ||
409 | ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, | 404 | ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, |
410 | GMBUS_HW_RDY_EN); | 405 | GMBUS_HW_RDY_EN); |
@@ -452,7 +447,6 @@ gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) | |||
452 | static int | 447 | static int |
453 | gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) | 448 | gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) |
454 | { | 449 | { |
455 | int reg_offset = dev_priv->gpio_mmio_base; | ||
456 | u32 gmbus1_index = 0; | 450 | u32 gmbus1_index = 0; |
457 | u32 gmbus5 = 0; | 451 | u32 gmbus5 = 0; |
458 | int ret; | 452 | int ret; |
@@ -466,13 +460,13 @@ gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs) | |||
466 | 460 | ||
467 | /* GMBUS5 holds 16-bit index */ | 461 | /* GMBUS5 holds 16-bit index */ |
468 | if (gmbus5) | 462 | if (gmbus5) |
469 | I915_WRITE(GMBUS5 + reg_offset, gmbus5); | 463 | I915_WRITE(GMBUS5, gmbus5); |
470 | 464 | ||
471 | ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); | 465 | ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index); |
472 | 466 | ||
473 | /* Clear GMBUS5 after each index transfer */ | 467 | /* Clear GMBUS5 after each index transfer */ |
474 | if (gmbus5) | 468 | if (gmbus5) |
475 | I915_WRITE(GMBUS5 + reg_offset, 0); | 469 | I915_WRITE(GMBUS5, 0); |
476 | 470 | ||
477 | return ret; | 471 | return ret; |
478 | } | 472 | } |
@@ -486,7 +480,7 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
486 | struct intel_gmbus, | 480 | struct intel_gmbus, |
487 | adapter); | 481 | adapter); |
488 | struct drm_i915_private *dev_priv = bus->dev_priv; | 482 | struct drm_i915_private *dev_priv = bus->dev_priv; |
489 | int i = 0, inc, try = 0, reg_offset; | 483 | int i = 0, inc, try = 0; |
490 | int ret = 0; | 484 | int ret = 0; |
491 | 485 | ||
492 | intel_aux_display_runtime_get(dev_priv); | 486 | intel_aux_display_runtime_get(dev_priv); |
@@ -497,10 +491,8 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
497 | goto out; | 491 | goto out; |
498 | } | 492 | } |
499 | 493 | ||
500 | reg_offset = dev_priv->gpio_mmio_base; | ||
501 | |||
502 | retry: | 494 | retry: |
503 | I915_WRITE(GMBUS0 + reg_offset, bus->reg0); | 495 | I915_WRITE(GMBUS0, bus->reg0); |
504 | 496 | ||
505 | for (; i < num; i += inc) { | 497 | for (; i < num; i += inc) { |
506 | inc = 1; | 498 | inc = 1; |
@@ -530,7 +522,7 @@ retry: | |||
530 | * a STOP on the very first cycle. To simplify the code we | 522 | * a STOP on the very first cycle. To simplify the code we |
531 | * unconditionally generate the STOP condition with an additional gmbus | 523 | * unconditionally generate the STOP condition with an additional gmbus |
532 | * cycle. */ | 524 | * cycle. */ |
533 | I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); | 525 | I915_WRITE(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY); |
534 | 526 | ||
535 | /* Mark the GMBUS interface as disabled after waiting for idle. | 527 | /* Mark the GMBUS interface as disabled after waiting for idle. |
536 | * We will re-enable it at the start of the next xfer, | 528 | * We will re-enable it at the start of the next xfer, |
@@ -541,7 +533,7 @@ retry: | |||
541 | adapter->name); | 533 | adapter->name); |
542 | ret = -ETIMEDOUT; | 534 | ret = -ETIMEDOUT; |
543 | } | 535 | } |
544 | I915_WRITE(GMBUS0 + reg_offset, 0); | 536 | I915_WRITE(GMBUS0, 0); |
545 | ret = ret ?: i; | 537 | ret = ret ?: i; |
546 | goto out; | 538 | goto out; |
547 | 539 | ||
@@ -570,9 +562,9 @@ clear_err: | |||
570 | * of resetting the GMBUS controller and so clearing the | 562 | * of resetting the GMBUS controller and so clearing the |
571 | * BUS_ERROR raised by the slave's NAK. | 563 | * BUS_ERROR raised by the slave's NAK. |
572 | */ | 564 | */ |
573 | I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); | 565 | I915_WRITE(GMBUS1, GMBUS_SW_CLR_INT); |
574 | I915_WRITE(GMBUS1 + reg_offset, 0); | 566 | I915_WRITE(GMBUS1, 0); |
575 | I915_WRITE(GMBUS0 + reg_offset, 0); | 567 | I915_WRITE(GMBUS0, 0); |
576 | 568 | ||
577 | DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", | 569 | DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n", |
578 | adapter->name, msgs[i].addr, | 570 | adapter->name, msgs[i].addr, |
@@ -595,7 +587,7 @@ clear_err: | |||
595 | timeout: | 587 | timeout: |
596 | DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", | 588 | DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", |
597 | bus->adapter.name, bus->reg0 & 0xff); | 589 | bus->adapter.name, bus->reg0 & 0xff); |
598 | I915_WRITE(GMBUS0 + reg_offset, 0); | 590 | I915_WRITE(GMBUS0, 0); |
599 | 591 | ||
600 | /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ | 592 | /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ |
601 | bus->force_bit = 1; | 593 | bus->force_bit = 1; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 5e70acf944c3..7f39b8ad88ae 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -98,15 +98,11 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, | |||
98 | { | 98 | { |
99 | struct drm_device *dev = encoder->base.dev; | 99 | struct drm_device *dev = encoder->base.dev; |
100 | struct drm_i915_private *dev_priv = dev->dev_private; | 100 | struct drm_i915_private *dev_priv = dev->dev_private; |
101 | u32 lvds_reg, tmp, flags = 0; | 101 | struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
102 | u32 tmp, flags = 0; | ||
102 | int dotclock; | 103 | int dotclock; |
103 | 104 | ||
104 | if (HAS_PCH_SPLIT(dev)) | 105 | tmp = I915_READ(lvds_encoder->reg); |
105 | lvds_reg = PCH_LVDS; | ||
106 | else | ||
107 | lvds_reg = LVDS; | ||
108 | |||
109 | tmp = I915_READ(lvds_reg); | ||
110 | if (tmp & LVDS_HSYNC_POLARITY) | 106 | if (tmp & LVDS_HSYNC_POLARITY) |
111 | flags |= DRM_MODE_FLAG_NHSYNC; | 107 | flags |= DRM_MODE_FLAG_NHSYNC; |
112 | else | 108 | else |
@@ -943,6 +939,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
943 | struct drm_display_mode *downclock_mode = NULL; | 939 | struct drm_display_mode *downclock_mode = NULL; |
944 | struct edid *edid; | 940 | struct edid *edid; |
945 | struct drm_crtc *crtc; | 941 | struct drm_crtc *crtc; |
942 | u32 lvds_reg; | ||
946 | u32 lvds; | 943 | u32 lvds; |
947 | int pipe; | 944 | int pipe; |
948 | u8 pin; | 945 | u8 pin; |
@@ -965,8 +962,15 @@ void intel_lvds_init(struct drm_device *dev) | |||
965 | if (dmi_check_system(intel_no_lvds)) | 962 | if (dmi_check_system(intel_no_lvds)) |
966 | return; | 963 | return; |
967 | 964 | ||
965 | if (HAS_PCH_SPLIT(dev)) | ||
966 | lvds_reg = PCH_LVDS; | ||
967 | else | ||
968 | lvds_reg = LVDS; | ||
969 | |||
970 | lvds = I915_READ(lvds_reg); | ||
971 | |||
968 | if (HAS_PCH_SPLIT(dev)) { | 972 | if (HAS_PCH_SPLIT(dev)) { |
969 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 973 | if ((lvds & LVDS_DETECTED) == 0) |
970 | return; | 974 | return; |
971 | if (dev_priv->vbt.edp_support) { | 975 | if (dev_priv->vbt.edp_support) { |
972 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); | 976 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); |
@@ -976,8 +980,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
976 | 980 | ||
977 | pin = GMBUS_PIN_PANEL; | 981 | pin = GMBUS_PIN_PANEL; |
978 | if (!lvds_is_present_in_vbt(dev, &pin)) { | 982 | if (!lvds_is_present_in_vbt(dev, &pin)) { |
979 | u32 reg = HAS_PCH_SPLIT(dev) ? PCH_LVDS : LVDS; | 983 | if ((lvds & LVDS_PORT_EN) == 0) { |
980 | if ((I915_READ(reg) & LVDS_PORT_EN) == 0) { | ||
981 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); | 984 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
982 | return; | 985 | return; |
983 | } | 986 | } |
@@ -1054,11 +1057,7 @@ void intel_lvds_init(struct drm_device *dev) | |||
1054 | connector->interlace_allowed = false; | 1057 | connector->interlace_allowed = false; |
1055 | connector->doublescan_allowed = false; | 1058 | connector->doublescan_allowed = false; |
1056 | 1059 | ||
1057 | if (HAS_PCH_SPLIT(dev)) { | 1060 | lvds_encoder->reg = lvds_reg; |
1058 | lvds_encoder->reg = PCH_LVDS; | ||
1059 | } else { | ||
1060 | lvds_encoder->reg = LVDS; | ||
1061 | } | ||
1062 | 1061 | ||
1063 | /* create the scaling mode property */ | 1062 | /* create the scaling mode property */ |
1064 | drm_mode_create_scaling_mode_property(dev); | 1063 | drm_mode_create_scaling_mode_property(dev); |
@@ -1139,7 +1138,6 @@ void intel_lvds_init(struct drm_device *dev) | |||
1139 | if (HAS_PCH_SPLIT(dev)) | 1138 | if (HAS_PCH_SPLIT(dev)) |
1140 | goto failed; | 1139 | goto failed; |
1141 | 1140 | ||
1142 | lvds = I915_READ(LVDS); | ||
1143 | pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; | 1141 | pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; |
1144 | crtc = intel_get_crtc_for_pipe(dev, pipe); | 1142 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
1145 | 1143 | ||
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b706b4e750da..6dc13c02c28e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -239,7 +239,7 @@ struct opregion_asle { | |||
239 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | 239 | static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) |
240 | { | 240 | { |
241 | struct drm_i915_private *dev_priv = dev->dev_private; | 241 | struct drm_i915_private *dev_priv = dev->dev_private; |
242 | struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci; | 242 | struct opregion_swsci *swsci = dev_priv->opregion.swsci; |
243 | u32 main_function, sub_function, scic; | 243 | u32 main_function, sub_function, scic; |
244 | u16 pci_swsci; | 244 | u16 pci_swsci; |
245 | u32 dslp; | 245 | u32 dslp; |
@@ -264,7 +264,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
264 | } | 264 | } |
265 | 265 | ||
266 | /* Driver sleep timeout in ms. */ | 266 | /* Driver sleep timeout in ms. */ |
267 | dslp = ioread32(&swsci->dslp); | 267 | dslp = swsci->dslp; |
268 | if (!dslp) { | 268 | if (!dslp) { |
269 | /* The spec says 2ms should be the default, but it's too small | 269 | /* The spec says 2ms should be the default, but it's too small |
270 | * for some machines. */ | 270 | * for some machines. */ |
@@ -277,7 +277,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
277 | } | 277 | } |
278 | 278 | ||
279 | /* The spec tells us to do this, but we are the only user... */ | 279 | /* The spec tells us to do this, but we are the only user... */ |
280 | scic = ioread32(&swsci->scic); | 280 | scic = swsci->scic; |
281 | if (scic & SWSCI_SCIC_INDICATOR) { | 281 | if (scic & SWSCI_SCIC_INDICATOR) { |
282 | DRM_DEBUG_DRIVER("SWSCI request already in progress\n"); | 282 | DRM_DEBUG_DRIVER("SWSCI request already in progress\n"); |
283 | return -EBUSY; | 283 | return -EBUSY; |
@@ -285,8 +285,8 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
285 | 285 | ||
286 | scic = function | SWSCI_SCIC_INDICATOR; | 286 | scic = function | SWSCI_SCIC_INDICATOR; |
287 | 287 | ||
288 | iowrite32(parm, &swsci->parm); | 288 | swsci->parm = parm; |
289 | iowrite32(scic, &swsci->scic); | 289 | swsci->scic = scic; |
290 | 290 | ||
291 | /* Ensure SCI event is selected and event trigger is cleared. */ | 291 | /* Ensure SCI event is selected and event trigger is cleared. */ |
292 | pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci); | 292 | pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci); |
@@ -301,7 +301,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
301 | pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci); | 301 | pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci); |
302 | 302 | ||
303 | /* Poll for the result. */ | 303 | /* Poll for the result. */ |
304 | #define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0) | 304 | #define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) |
305 | if (wait_for(C, dslp)) { | 305 | if (wait_for(C, dslp)) { |
306 | DRM_DEBUG_DRIVER("SWSCI request timed out\n"); | 306 | DRM_DEBUG_DRIVER("SWSCI request timed out\n"); |
307 | return -ETIMEDOUT; | 307 | return -ETIMEDOUT; |
@@ -317,7 +317,7 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
317 | } | 317 | } |
318 | 318 | ||
319 | if (parm_out) | 319 | if (parm_out) |
320 | *parm_out = ioread32(&swsci->parm); | 320 | *parm_out = swsci->parm; |
321 | 321 | ||
322 | return 0; | 322 | return 0; |
323 | 323 | ||
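The swsci() hunks drop the ioread32()/iowrite32() accessors because the SWSCI mailbox now lives in ordinary write-back RAM (see the memremap() change further down in intel_opregion_setup()). The handshake itself is unchanged; a condensed sketch of what the function does after this patch, with error paths and the PCI config write abbreviated, using only the field and constant names visible in the hunks:

    swsci->parm = parm;                             /* 1. argument goes into the mailbox */
    swsci->scic = function | SWSCI_SCIC_INDICATOR;  /* 2. arm the request */
    /* 3. poke the PCI_SWSCI config register to raise the SCI, as in the hunk */
    if (wait_for((swsci->scic & SWSCI_SCIC_INDICATOR) == 0, dslp))
            return -ETIMEDOUT;                      /* 4. firmware clears the bit when done */
    if (parm_out)
            *parm_out = swsci->parm;                /* 5. the result comes back in parm */

Because the mapping is now cacheable normal memory rather than __iomem space, plain structure loads and stores are the idiomatic access style here.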
@@ -412,7 +412,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
412 | { | 412 | { |
413 | struct drm_i915_private *dev_priv = dev->dev_private; | 413 | struct drm_i915_private *dev_priv = dev->dev_private; |
414 | struct intel_connector *intel_connector; | 414 | struct intel_connector *intel_connector; |
415 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; | 415 | struct opregion_asle *asle = dev_priv->opregion.asle; |
416 | 416 | ||
417 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); | 417 | DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); |
418 | 418 | ||
@@ -437,7 +437,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
437 | DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); | 437 | DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); |
438 | list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) | 438 | list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) |
439 | intel_panel_set_backlight_acpi(intel_connector, bclp, 255); | 439 | intel_panel_set_backlight_acpi(intel_connector, bclp, 255); |
440 | iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); | 440 | asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID; |
441 | 441 | ||
442 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | 442 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
443 | 443 | ||
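The write to asle->cblv reports the applied backlight level back to the firmware as a percentage: a 0-255 bclp value becomes DIV_ROUND_UP(bclp * 100, 255), OR'd with ASLE_CBLV_VALID. A tiny standalone check of that arithmetic with illustrative values; the bit position of ASLE_CBLV_VALID is assumed here, the percentage math is the part the hunk relies on:

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define ASLE_CBLV_VALID     (1u << 31)  /* assumed bit position, for illustration only */

    int main(void)
    {
            uint32_t bclp = 128;    /* firmware-requested level, 0..255 */
            uint32_t cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;

            /* 128 * 100 = 12800; 12800 / 255 rounded up = 51 percent */
            printf("cblv = 0x%08x (%u%%)\n", cblv, cblv & 0xff);
            return 0;
    }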
@@ -524,14 +524,14 @@ static void asle_work(struct work_struct *work) | |||
524 | struct drm_i915_private *dev_priv = | 524 | struct drm_i915_private *dev_priv = |
525 | container_of(opregion, struct drm_i915_private, opregion); | 525 | container_of(opregion, struct drm_i915_private, opregion); |
526 | struct drm_device *dev = dev_priv->dev; | 526 | struct drm_device *dev = dev_priv->dev; |
527 | struct opregion_asle __iomem *asle = dev_priv->opregion.asle; | 527 | struct opregion_asle *asle = dev_priv->opregion.asle; |
528 | u32 aslc_stat = 0; | 528 | u32 aslc_stat = 0; |
529 | u32 aslc_req; | 529 | u32 aslc_req; |
530 | 530 | ||
531 | if (!asle) | 531 | if (!asle) |
532 | return; | 532 | return; |
533 | 533 | ||
534 | aslc_req = ioread32(&asle->aslc); | 534 | aslc_req = asle->aslc; |
535 | 535 | ||
536 | if (!(aslc_req & ASLC_REQ_MSK)) { | 536 | if (!(aslc_req & ASLC_REQ_MSK)) { |
537 | DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n", | 537 | DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n", |
@@ -540,34 +540,34 @@ static void asle_work(struct work_struct *work) | |||
540 | } | 540 | } |
541 | 541 | ||
542 | if (aslc_req & ASLC_SET_ALS_ILLUM) | 542 | if (aslc_req & ASLC_SET_ALS_ILLUM) |
543 | aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); | 543 | aslc_stat |= asle_set_als_illum(dev, asle->alsi); |
544 | 544 | ||
545 | if (aslc_req & ASLC_SET_BACKLIGHT) | 545 | if (aslc_req & ASLC_SET_BACKLIGHT) |
546 | aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); | 546 | aslc_stat |= asle_set_backlight(dev, asle->bclp); |
547 | 547 | ||
548 | if (aslc_req & ASLC_SET_PFIT) | 548 | if (aslc_req & ASLC_SET_PFIT) |
549 | aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); | 549 | aslc_stat |= asle_set_pfit(dev, asle->pfit); |
550 | 550 | ||
551 | if (aslc_req & ASLC_SET_PWM_FREQ) | 551 | if (aslc_req & ASLC_SET_PWM_FREQ) |
552 | aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); | 552 | aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb); |
553 | 553 | ||
554 | if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) | 554 | if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) |
555 | aslc_stat |= asle_set_supported_rotation_angles(dev, | 555 | aslc_stat |= asle_set_supported_rotation_angles(dev, |
556 | ioread32(&asle->srot)); | 556 | asle->srot); |
557 | 557 | ||
558 | if (aslc_req & ASLC_BUTTON_ARRAY) | 558 | if (aslc_req & ASLC_BUTTON_ARRAY) |
559 | aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer)); | 559 | aslc_stat |= asle_set_button_array(dev, asle->iuer); |
560 | 560 | ||
561 | if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) | 561 | if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) |
562 | aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer)); | 562 | aslc_stat |= asle_set_convertible(dev, asle->iuer); |
563 | 563 | ||
564 | if (aslc_req & ASLC_DOCKING_INDICATOR) | 564 | if (aslc_req & ASLC_DOCKING_INDICATOR) |
565 | aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer)); | 565 | aslc_stat |= asle_set_docking(dev, asle->iuer); |
566 | 566 | ||
567 | if (aslc_req & ASLC_ISCT_STATE_CHANGE) | 567 | if (aslc_req & ASLC_ISCT_STATE_CHANGE) |
568 | aslc_stat |= asle_isct_state(dev); | 568 | aslc_stat |= asle_isct_state(dev); |
569 | 569 | ||
570 | iowrite32(aslc_stat, &asle->aslc); | 570 | asle->aslc = aslc_stat; |
571 | } | 571 | } |
572 | 572 | ||
573 | void intel_opregion_asle_intr(struct drm_device *dev) | 573 | void intel_opregion_asle_intr(struct drm_device *dev) |
@@ -592,8 +592,8 @@ static int intel_opregion_video_event(struct notifier_block *nb, | |||
592 | Linux, these are handled by the dock, button and video drivers. | 592 | Linux, these are handled by the dock, button and video drivers. |
593 | */ | 593 | */ |
594 | 594 | ||
595 | struct opregion_acpi __iomem *acpi; | ||
596 | struct acpi_bus_event *event = data; | 595 | struct acpi_bus_event *event = data; |
596 | struct opregion_acpi *acpi; | ||
597 | int ret = NOTIFY_OK; | 597 | int ret = NOTIFY_OK; |
598 | 598 | ||
599 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) | 599 | if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) |
@@ -604,11 +604,10 @@ static int intel_opregion_video_event(struct notifier_block *nb, | |||
604 | 604 | ||
605 | acpi = system_opregion->acpi; | 605 | acpi = system_opregion->acpi; |
606 | 606 | ||
607 | if (event->type == 0x80 && | 607 | if (event->type == 0x80 && ((acpi->cevt & 1) == 0)) |
608 | (ioread32(&acpi->cevt) & 1) == 0) | ||
609 | ret = NOTIFY_BAD; | 608 | ret = NOTIFY_BAD; |
610 | 609 | ||
611 | iowrite32(0, &acpi->csts); | 610 | acpi->csts = 0; |
612 | 611 | ||
613 | return ret; | 612 | return ret; |
614 | } | 613 | } |
@@ -628,14 +627,14 @@ static u32 get_did(struct intel_opregion *opregion, int i) | |||
628 | u32 did; | 627 | u32 did; |
629 | 628 | ||
630 | if (i < ARRAY_SIZE(opregion->acpi->didl)) { | 629 | if (i < ARRAY_SIZE(opregion->acpi->didl)) { |
631 | did = ioread32(&opregion->acpi->didl[i]); | 630 | did = opregion->acpi->didl[i]; |
632 | } else { | 631 | } else { |
633 | i -= ARRAY_SIZE(opregion->acpi->didl); | 632 | i -= ARRAY_SIZE(opregion->acpi->didl); |
634 | 633 | ||
635 | if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) | 634 | if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) |
636 | return 0; | 635 | return 0; |
637 | 636 | ||
638 | did = ioread32(&opregion->acpi->did2[i]); | 637 | did = opregion->acpi->did2[i]; |
639 | } | 638 | } |
640 | 639 | ||
641 | return did; | 640 | return did; |
@@ -644,14 +643,14 @@ static u32 get_did(struct intel_opregion *opregion, int i) | |||
644 | static void set_did(struct intel_opregion *opregion, int i, u32 val) | 643 | static void set_did(struct intel_opregion *opregion, int i, u32 val) |
645 | { | 644 | { |
646 | if (i < ARRAY_SIZE(opregion->acpi->didl)) { | 645 | if (i < ARRAY_SIZE(opregion->acpi->didl)) { |
647 | iowrite32(val, &opregion->acpi->didl[i]); | 646 | opregion->acpi->didl[i] = val; |
648 | } else { | 647 | } else { |
649 | i -= ARRAY_SIZE(opregion->acpi->didl); | 648 | i -= ARRAY_SIZE(opregion->acpi->didl); |
650 | 649 | ||
651 | if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) | 650 | if (WARN_ON(i >= ARRAY_SIZE(opregion->acpi->did2))) |
652 | return; | 651 | return; |
653 | 652 | ||
654 | iowrite32(val, &opregion->acpi->did2[i]); | 653 | opregion->acpi->did2[i] = val; |
655 | } | 654 | } |
656 | } | 655 | } |
657 | 656 | ||
@@ -773,7 +772,7 @@ static void intel_setup_cadls(struct drm_device *dev) | |||
773 | * there are less than eight devices. */ | 772 | * there are less than eight devices. */ |
774 | do { | 773 | do { |
775 | disp_id = get_did(opregion, i); | 774 | disp_id = get_did(opregion, i); |
776 | iowrite32(disp_id, &opregion->acpi->cadl[i]); | 775 | opregion->acpi->cadl[i] = disp_id; |
777 | } while (++i < 8 && disp_id != 0); | 776 | } while (++i < 8 && disp_id != 0); |
778 | } | 777 | } |
779 | 778 | ||
@@ -792,16 +791,16 @@ void intel_opregion_init(struct drm_device *dev) | |||
792 | /* Notify BIOS we are ready to handle ACPI video ext notifs. | 791 | /* Notify BIOS we are ready to handle ACPI video ext notifs. |
793 | * Right now, all the events are handled by the ACPI video module. | 792 | * Right now, all the events are handled by the ACPI video module. |
794 | * We don't actually need to do anything with them. */ | 793 | * We don't actually need to do anything with them. */ |
795 | iowrite32(0, &opregion->acpi->csts); | 794 | opregion->acpi->csts = 0; |
796 | iowrite32(1, &opregion->acpi->drdy); | 795 | opregion->acpi->drdy = 1; |
797 | 796 | ||
798 | system_opregion = opregion; | 797 | system_opregion = opregion; |
799 | register_acpi_notifier(&intel_opregion_notifier); | 798 | register_acpi_notifier(&intel_opregion_notifier); |
800 | } | 799 | } |
801 | 800 | ||
802 | if (opregion->asle) { | 801 | if (opregion->asle) { |
803 | iowrite32(ASLE_TCHE_BLC_EN, &opregion->asle->tche); | 802 | opregion->asle->tche = ASLE_TCHE_BLC_EN; |
804 | iowrite32(ASLE_ARDY_READY, &opregion->asle->ardy); | 803 | opregion->asle->ardy = ASLE_ARDY_READY; |
805 | } | 804 | } |
806 | } | 805 | } |
807 | 806 | ||
@@ -814,19 +813,19 @@ void intel_opregion_fini(struct drm_device *dev) | |||
814 | return; | 813 | return; |
815 | 814 | ||
816 | if (opregion->asle) | 815 | if (opregion->asle) |
817 | iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); | 816 | opregion->asle->ardy = ASLE_ARDY_NOT_READY; |
818 | 817 | ||
819 | cancel_work_sync(&dev_priv->opregion.asle_work); | 818 | cancel_work_sync(&dev_priv->opregion.asle_work); |
820 | 819 | ||
821 | if (opregion->acpi) { | 820 | if (opregion->acpi) { |
822 | iowrite32(0, &opregion->acpi->drdy); | 821 | opregion->acpi->drdy = 0; |
823 | 822 | ||
824 | system_opregion = NULL; | 823 | system_opregion = NULL; |
825 | unregister_acpi_notifier(&intel_opregion_notifier); | 824 | unregister_acpi_notifier(&intel_opregion_notifier); |
826 | } | 825 | } |
827 | 826 | ||
828 | /* just clear all opregion memory pointers now */ | 827 | /* just clear all opregion memory pointers now */ |
829 | iounmap(opregion->header); | 828 | memunmap(opregion->header); |
830 | opregion->header = NULL; | 829 | opregion->header = NULL; |
831 | opregion->acpi = NULL; | 830 | opregion->acpi = NULL; |
832 | opregion->swsci = NULL; | 831 | opregion->swsci = NULL; |
@@ -899,10 +898,10 @@ int intel_opregion_setup(struct drm_device *dev) | |||
899 | { | 898 | { |
900 | struct drm_i915_private *dev_priv = dev->dev_private; | 899 | struct drm_i915_private *dev_priv = dev->dev_private; |
901 | struct intel_opregion *opregion = &dev_priv->opregion; | 900 | struct intel_opregion *opregion = &dev_priv->opregion; |
902 | void __iomem *base; | ||
903 | u32 asls, mboxes; | 901 | u32 asls, mboxes; |
904 | char buf[sizeof(OPREGION_SIGNATURE)]; | 902 | char buf[sizeof(OPREGION_SIGNATURE)]; |
905 | int err = 0; | 903 | int err = 0; |
904 | void *base; | ||
906 | 905 | ||
907 | BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100); | 906 | BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100); |
908 | BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); | 907 | BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100); |
@@ -920,11 +919,11 @@ int intel_opregion_setup(struct drm_device *dev) | |||
920 | INIT_WORK(&opregion->asle_work, asle_work); | 919 | INIT_WORK(&opregion->asle_work, asle_work); |
921 | #endif | 920 | #endif |
922 | 921 | ||
923 | base = acpi_os_ioremap(asls, OPREGION_SIZE); | 922 | base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB); |
924 | if (!base) | 923 | if (!base) |
925 | return -ENOMEM; | 924 | return -ENOMEM; |
926 | 925 | ||
927 | memcpy_fromio(buf, base, sizeof(buf)); | 926 | memcpy(buf, base, sizeof(buf)); |
928 | 927 | ||
929 | if (memcmp(buf, OPREGION_SIGNATURE, 16)) { | 928 | if (memcmp(buf, OPREGION_SIGNATURE, 16)) { |
930 | DRM_DEBUG_DRIVER("opregion signature mismatch\n"); | 929 | DRM_DEBUG_DRIVER("opregion signature mismatch\n"); |
@@ -936,7 +935,7 @@ int intel_opregion_setup(struct drm_device *dev) | |||
936 | 935 | ||
937 | opregion->lid_state = base + ACPI_CLID; | 936 | opregion->lid_state = base + ACPI_CLID; |
938 | 937 | ||
939 | mboxes = ioread32(&opregion->header->mboxes); | 938 | mboxes = opregion->header->mboxes; |
940 | if (mboxes & MBOX_ACPI) { | 939 | if (mboxes & MBOX_ACPI) { |
941 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 940 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
942 | opregion->acpi = base + OPREGION_ACPI_OFFSET; | 941 | opregion->acpi = base + OPREGION_ACPI_OFFSET; |
@@ -951,12 +950,12 @@ int intel_opregion_setup(struct drm_device *dev) | |||
951 | DRM_DEBUG_DRIVER("ASLE supported\n"); | 950 | DRM_DEBUG_DRIVER("ASLE supported\n"); |
952 | opregion->asle = base + OPREGION_ASLE_OFFSET; | 951 | opregion->asle = base + OPREGION_ASLE_OFFSET; |
953 | 952 | ||
954 | iowrite32(ASLE_ARDY_NOT_READY, &opregion->asle->ardy); | 953 | opregion->asle->ardy = ASLE_ARDY_NOT_READY; |
955 | } | 954 | } |
956 | 955 | ||
957 | return 0; | 956 | return 0; |
958 | 957 | ||
959 | err_out: | 958 | err_out: |
960 | iounmap(base); | 959 | memunmap(base); |
961 | return err; | 960 | return err; |
962 | } | 961 | } |
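Across intel_opregion.c the mapping strategy changes from acpi_os_ioremap()/iounmap() plus __iomem accessors to memremap(..., MEMREMAP_WB)/memunmap() plus plain pointers: the OpRegion is ACPI-provided system RAM, not device MMIO, so a cacheable normal-memory mapping is both legal and cheaper. A minimal sketch of the resulting setup/teardown pairing, error handling trimmed and names taken from the hunks:

    void *base;
    char buf[sizeof(OPREGION_SIGNATURE)];

    base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);  /* normal RAM, write-back */
    if (!base)
            return -ENOMEM;

    memcpy(buf, base, sizeof(buf));     /* plain memcpy replaces memcpy_fromio() */
    opregion->header = base;            /* pointers are now plain, not __iomem */

    /* later, in intel_opregion_fini(): */
    memunmap(opregion->header);         /* memremap() pairs with memunmap(), not iounmap() */

One consequence is that every ioread32()/iowrite32() on these structures elsewhere in the file has to be converted in the same patch, otherwise sparse flags address-space mismatches.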
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index f30c996e882c..b05c6d9b3be7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -381,7 +381,7 @@ intel_panel_detect(struct drm_device *dev) | |||
381 | 381 | ||
382 | /* Assume that the BIOS does not lie through the OpRegion... */ | 382 | /* Assume that the BIOS does not lie through the OpRegion... */ |
383 | if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { | 383 | if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { |
384 | return ioread32(dev_priv->opregion.lid_state) & 0x1 ? | 384 | return *dev_priv->opregion.lid_state & 0x1 ? |
385 | connector_status_connected : | 385 | connector_status_connected : |
386 | connector_status_disconnected; | 386 | connector_status_disconnected; |
387 | } | 387 | } |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d031d74abd27..9dda3eaebd12 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -52,56 +52,10 @@ | |||
52 | #define INTEL_RC6p_ENABLE (1<<1) | 52 | #define INTEL_RC6p_ENABLE (1<<1) |
53 | #define INTEL_RC6pp_ENABLE (1<<2) | 53 | #define INTEL_RC6pp_ENABLE (1<<2) |
54 | 54 | ||
55 | static void gen9_init_clock_gating(struct drm_device *dev) | ||
56 | { | ||
57 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
58 | |||
59 | /* WaEnableLbsSlaRetryTimerDecrement:skl */ | ||
60 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | | ||
61 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | ||
62 | |||
63 | /* WaDisableKillLogic:bxt,skl */ | ||
64 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
65 | ECOCHK_DIS_TLB); | ||
66 | } | ||
67 | |||
68 | static void skl_init_clock_gating(struct drm_device *dev) | ||
69 | { | ||
70 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
71 | |||
72 | gen9_init_clock_gating(dev); | ||
73 | |||
74 | if (INTEL_REVID(dev) <= SKL_REVID_D0) { | ||
75 | /* WaDisableHDCInvalidation:skl */ | ||
76 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
77 | BDW_DISABLE_HDC_INVALIDATION); | ||
78 | |||
79 | /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ | ||
80 | I915_WRITE(FF_SLICE_CS_CHICKEN2, | ||
81 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); | ||
82 | } | ||
83 | |||
84 | /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes | ||
85 | * involving this register should also be added to WA batch as required. | ||
86 | */ | ||
87 | if (INTEL_REVID(dev) <= SKL_REVID_E0) | ||
88 | /* WaDisableLSQCROPERFforOCL:skl */ | ||
89 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | ||
90 | GEN8_LQSC_RO_PERF_DIS); | ||
91 | |||
92 | /* WaEnableGapsTsvCreditFix:skl */ | ||
93 | if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) { | ||
94 | I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | | ||
95 | GEN9_GAPS_TSV_CREDIT_DISABLE)); | ||
96 | } | ||
97 | } | ||
98 | |||
99 | static void bxt_init_clock_gating(struct drm_device *dev) | 55 | static void bxt_init_clock_gating(struct drm_device *dev) |
100 | { | 56 | { |
101 | struct drm_i915_private *dev_priv = dev->dev_private; | 57 | struct drm_i915_private *dev_priv = dev->dev_private; |
102 | 58 | ||
103 | gen9_init_clock_gating(dev); | ||
104 | |||
105 | /* WaDisableSDEUnitClockGating:bxt */ | 59 | /* WaDisableSDEUnitClockGating:bxt */ |
106 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | 60 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | |
107 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); | 61 | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); |
@@ -112,17 +66,6 @@ static void bxt_init_clock_gating(struct drm_device *dev) | |||
112 | */ | 66 | */ |
113 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | | 67 | I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | |
114 | GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); | 68 | GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); |
115 | |||
116 | /* WaStoreMultiplePTEenable:bxt */ | ||
117 | /* This is a requirement according to Hardware specification */ | ||
118 | if (INTEL_REVID(dev) == BXT_REVID_A0) | ||
119 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); | ||
120 | |||
121 | /* WaSetClckGatingDisableMedia:bxt */ | ||
122 | if (INTEL_REVID(dev) == BXT_REVID_A0) { | ||
123 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & | ||
124 | ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); | ||
125 | } | ||
126 | } | 69 | } |
127 | 70 | ||
128 | static void i915_pineview_get_mem_freq(struct drm_device *dev) | 71 | static void i915_pineview_get_mem_freq(struct drm_device *dev) |
@@ -1765,6 +1708,13 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, | |||
1765 | return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; | 1708 | return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; |
1766 | } | 1709 | } |
1767 | 1710 | ||
1711 | struct skl_pipe_wm_parameters { | ||
1712 | bool active; | ||
1713 | uint32_t pipe_htotal; | ||
1714 | uint32_t pixel_rate; /* in KHz */ | ||
1715 | struct intel_plane_wm_parameters plane[I915_MAX_PLANES]; | ||
1716 | }; | ||
1717 | |||
1768 | struct ilk_wm_maximums { | 1718 | struct ilk_wm_maximums { |
1769 | uint16_t pri; | 1719 | uint16_t pri; |
1770 | uint16_t spr; | 1720 | uint16_t spr; |
@@ -2805,40 +2755,18 @@ static bool ilk_disable_lp_wm(struct drm_device *dev) | |||
2805 | #define SKL_DDB_SIZE 896 /* in blocks */ | 2755 | #define SKL_DDB_SIZE 896 /* in blocks */ |
2806 | #define BXT_DDB_SIZE 512 | 2756 | #define BXT_DDB_SIZE 512 |
2807 | 2757 | ||
2808 | /* | ||
2809 | * Return the index of a plane in the SKL DDB and wm result arrays. Primary | ||
2810 | * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and | ||
2811 | * other universal planes are in indices 1..n. Note that this may leave unused | ||
2812 | * indices between the top "sprite" plane and the cursor. | ||
2813 | */ | ||
2814 | static int | ||
2815 | skl_wm_plane_id(const struct intel_plane *plane) | ||
2816 | { | ||
2817 | switch (plane->base.type) { | ||
2818 | case DRM_PLANE_TYPE_PRIMARY: | ||
2819 | return 0; | ||
2820 | case DRM_PLANE_TYPE_CURSOR: | ||
2821 | return PLANE_CURSOR; | ||
2822 | case DRM_PLANE_TYPE_OVERLAY: | ||
2823 | return plane->plane + 1; | ||
2824 | default: | ||
2825 | MISSING_CASE(plane->base.type); | ||
2826 | return plane->plane; | ||
2827 | } | ||
2828 | } | ||
2829 | |||
2830 | static void | 2758 | static void |
2831 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, | 2759 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, |
2832 | const struct intel_crtc_state *cstate, | 2760 | struct drm_crtc *for_crtc, |
2833 | const struct intel_wm_config *config, | 2761 | const struct intel_wm_config *config, |
2762 | const struct skl_pipe_wm_parameters *params, | ||
2834 | struct skl_ddb_entry *alloc /* out */) | 2763 | struct skl_ddb_entry *alloc /* out */) |
2835 | { | 2764 | { |
2836 | struct drm_crtc *for_crtc = cstate->base.crtc; | ||
2837 | struct drm_crtc *crtc; | 2765 | struct drm_crtc *crtc; |
2838 | unsigned int pipe_size, ddb_size; | 2766 | unsigned int pipe_size, ddb_size; |
2839 | int nth_active_pipe; | 2767 | int nth_active_pipe; |
2840 | 2768 | ||
2841 | if (!cstate->base.active) { | 2769 | if (!params->active) { |
2842 | alloc->start = 0; | 2770 | alloc->start = 0; |
2843 | alloc->end = 0; | 2771 | alloc->end = 0; |
2844 | return; | 2772 | return; |
@@ -2904,29 +2832,19 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, | |||
2904 | } | 2832 | } |
2905 | 2833 | ||
2906 | static unsigned int | 2834 | static unsigned int |
2907 | skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, | 2835 | skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y) |
2908 | const struct drm_plane_state *pstate, | ||
2909 | int y) | ||
2910 | { | 2836 | { |
2911 | struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); | ||
2912 | struct drm_framebuffer *fb = pstate->fb; | ||
2913 | 2837 | ||
2914 | /* for planar format */ | 2838 | /* for planar format */ |
2915 | if (fb->pixel_format == DRM_FORMAT_NV12) { | 2839 | if (p->y_bytes_per_pixel) { |
2916 | if (y) /* y-plane data rate */ | 2840 | if (y) /* y-plane data rate */ |
2917 | return intel_crtc->config->pipe_src_w * | 2841 | return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel; |
2918 | intel_crtc->config->pipe_src_h * | ||
2919 | drm_format_plane_cpp(fb->pixel_format, 0); | ||
2920 | else /* uv-plane data rate */ | 2842 | else /* uv-plane data rate */ |
2921 | return (intel_crtc->config->pipe_src_w/2) * | 2843 | return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel; |
2922 | (intel_crtc->config->pipe_src_h/2) * | ||
2923 | drm_format_plane_cpp(fb->pixel_format, 1); | ||
2924 | } | 2844 | } |
2925 | 2845 | ||
2926 | /* for packed formats */ | 2846 | /* for packed formats */ |
2927 | return intel_crtc->config->pipe_src_w * | 2847 | return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel; |
2928 | intel_crtc->config->pipe_src_h * | ||
2929 | drm_format_plane_cpp(fb->pixel_format, 0); | ||
2930 | } | 2848 | } |
2931 | 2849 | ||
2932 | /* | 2850 | /* |
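skl_plane_relative_data_rate() now works purely from the snapshotted plane parameters: width x height x bytes per pixel, with NV12 split into a Y-plane rate (y_bytes_per_pixel) and a subsampled UV rate at quarter resolution. A small standalone example with illustrative numbers, a 1920x1080 NV12 plane with 1 byte per Y sample and 2 bytes per UV pair:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t w = 1920, h = 1080;
            uint32_t y_cpp = 1, uv_cpp = 2;

            /* y-plane data rate: full resolution at 1 byte per pixel */
            uint32_t y_rate = w * h * y_cpp;                /* 2,073,600 */

            /* uv-plane data rate: half width, half height, 2 bytes per sample */
            uint32_t uv_rate = (w / 2) * (h / 2) * uv_cpp;  /* 1,036,800 */

            printf("y=%u uv=%u total=%u\n", y_rate, uv_rate, y_rate + uv_rate);
            return 0;
    }

skl_get_total_relative_data_rate() then sums both contributions per enabled plane, which is why planar planes count twice in the DDB split below.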
@@ -2935,51 +2853,46 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, | |||
2935 | * 3 * 4096 * 8192 * 4 < 2^32 | 2853 | * 3 * 4096 * 8192 * 4 < 2^32 |
2936 | */ | 2854 | */ |
2937 | static unsigned int | 2855 | static unsigned int |
2938 | skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) | 2856 | skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc, |
2857 | const struct skl_pipe_wm_parameters *params) | ||
2939 | { | 2858 | { |
2940 | struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); | ||
2941 | struct drm_device *dev = intel_crtc->base.dev; | ||
2942 | const struct intel_plane *intel_plane; | ||
2943 | unsigned int total_data_rate = 0; | 2859 | unsigned int total_data_rate = 0; |
2860 | int plane; | ||
2944 | 2861 | ||
2945 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | 2862 | for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { |
2946 | const struct drm_plane_state *pstate = intel_plane->base.state; | 2863 | const struct intel_plane_wm_parameters *p; |
2947 | 2864 | ||
2948 | if (pstate->fb == NULL) | 2865 | p = ¶ms->plane[plane]; |
2866 | if (!p->enabled) | ||
2949 | continue; | 2867 | continue; |
2950 | 2868 | ||
2951 | /* packed/uv */ | 2869 | total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */ |
2952 | total_data_rate += skl_plane_relative_data_rate(cstate, | 2870 | if (p->y_bytes_per_pixel) { |
2953 | pstate, | 2871 | total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */ |
2954 | 0); | 2872 | } |
2955 | |||
2956 | if (pstate->fb->pixel_format == DRM_FORMAT_NV12) | ||
2957 | /* y-plane */ | ||
2958 | total_data_rate += skl_plane_relative_data_rate(cstate, | ||
2959 | pstate, | ||
2960 | 1); | ||
2961 | } | 2873 | } |
2962 | 2874 | ||
2963 | return total_data_rate; | 2875 | return total_data_rate; |
2964 | } | 2876 | } |
2965 | 2877 | ||
2966 | static void | 2878 | static void |
2967 | skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | 2879 | skl_allocate_pipe_ddb(struct drm_crtc *crtc, |
2968 | const struct intel_wm_config *config, | 2880 | const struct intel_wm_config *config, |
2881 | const struct skl_pipe_wm_parameters *params, | ||
2969 | struct skl_ddb_allocation *ddb /* out */) | 2882 | struct skl_ddb_allocation *ddb /* out */) |
2970 | { | 2883 | { |
2971 | struct drm_crtc *crtc = cstate->base.crtc; | ||
2972 | struct drm_device *dev = crtc->dev; | 2884 | struct drm_device *dev = crtc->dev; |
2885 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2973 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2886 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2974 | struct intel_plane *intel_plane; | ||
2975 | enum pipe pipe = intel_crtc->pipe; | 2887 | enum pipe pipe = intel_crtc->pipe; |
2976 | struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; | 2888 | struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; |
2977 | uint16_t alloc_size, start, cursor_blocks; | 2889 | uint16_t alloc_size, start, cursor_blocks; |
2978 | uint16_t minimum[I915_MAX_PLANES]; | 2890 | uint16_t minimum[I915_MAX_PLANES]; |
2979 | uint16_t y_minimum[I915_MAX_PLANES]; | 2891 | uint16_t y_minimum[I915_MAX_PLANES]; |
2980 | unsigned int total_data_rate; | 2892 | unsigned int total_data_rate; |
2893 | int plane; | ||
2981 | 2894 | ||
2982 | skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); | 2895 | skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc); |
2983 | alloc_size = skl_ddb_entry_size(alloc); | 2896 | alloc_size = skl_ddb_entry_size(alloc); |
2984 | if (alloc_size == 0) { | 2897 | if (alloc_size == 0) { |
2985 | memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); | 2898 | memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); |
@@ -2996,20 +2909,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
2996 | alloc->end -= cursor_blocks; | 2909 | alloc->end -= cursor_blocks; |
2997 | 2910 | ||
2998 | /* 1. Allocate the minimum required blocks for each active plane */ | 2911 | /* 1. Allocate the minimum required blocks for each active plane */ |
2999 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | 2912 | for_each_plane(dev_priv, pipe, plane) { |
3000 | struct drm_plane *plane = &intel_plane->base; | 2913 | const struct intel_plane_wm_parameters *p; |
3001 | struct drm_framebuffer *fb = plane->fb; | ||
3002 | int id = skl_wm_plane_id(intel_plane); | ||
3003 | 2914 | ||
3004 | if (fb == NULL) | 2915 | p = ¶ms->plane[plane]; |
3005 | continue; | 2916 | if (!p->enabled) |
3006 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | ||
3007 | continue; | 2917 | continue; |
3008 | 2918 | ||
3009 | minimum[id] = 8; | 2919 | minimum[plane] = 8; |
3010 | alloc_size -= minimum[id]; | 2920 | alloc_size -= minimum[plane]; |
3011 | y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; | 2921 | y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0; |
3012 | alloc_size -= y_minimum[id]; | 2922 | alloc_size -= y_minimum[plane]; |
3013 | } | 2923 | } |
3014 | 2924 | ||
3015 | /* | 2925 | /* |
@@ -3018,50 +2928,45 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
3018 | * | 2928 | * |
3019 | * FIXME: we may not allocate every single block here. | 2929 | * FIXME: we may not allocate every single block here. |
3020 | */ | 2930 | */ |
3021 | total_data_rate = skl_get_total_relative_data_rate(cstate); | 2931 | total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params); |
3022 | 2932 | ||
3023 | start = alloc->start; | 2933 | start = alloc->start; |
3024 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | 2934 | for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { |
3025 | struct drm_plane *plane = &intel_plane->base; | 2935 | const struct intel_plane_wm_parameters *p; |
3026 | struct drm_plane_state *pstate = intel_plane->base.state; | ||
3027 | unsigned int data_rate, y_data_rate; | 2936 | unsigned int data_rate, y_data_rate; |
3028 | uint16_t plane_blocks, y_plane_blocks = 0; | 2937 | uint16_t plane_blocks, y_plane_blocks = 0; |
3029 | int id = skl_wm_plane_id(intel_plane); | ||
3030 | 2938 | ||
3031 | if (pstate->fb == NULL) | 2939 | p = ¶ms->plane[plane]; |
3032 | continue; | 2940 | if (!p->enabled) |
3033 | if (plane->type == DRM_PLANE_TYPE_CURSOR) | ||
3034 | continue; | 2941 | continue; |
3035 | 2942 | ||
3036 | data_rate = skl_plane_relative_data_rate(cstate, pstate, 0); | 2943 | data_rate = skl_plane_relative_data_rate(p, 0); |
3037 | 2944 | ||
3038 | /* | 2945 | /* |
3039 | * allocation for (packed formats) or (uv-plane part of planar format): | 2946 | * allocation for (packed formats) or (uv-plane part of planar format): |
3040 | * promote the expression to 64 bits to avoid overflowing, the | 2947 | * promote the expression to 64 bits to avoid overflowing, the |
3041 | * result is < available as data_rate / total_data_rate < 1 | 2948 | * result is < available as data_rate / total_data_rate < 1 |
3042 | */ | 2949 | */ |
3043 | plane_blocks = minimum[id]; | 2950 | plane_blocks = minimum[plane]; |
3044 | plane_blocks += div_u64((uint64_t)alloc_size * data_rate, | 2951 | plane_blocks += div_u64((uint64_t)alloc_size * data_rate, |
3045 | total_data_rate); | 2952 | total_data_rate); |
3046 | 2953 | ||
3047 | ddb->plane[pipe][id].start = start; | 2954 | ddb->plane[pipe][plane].start = start; |
3048 | ddb->plane[pipe][id].end = start + plane_blocks; | 2955 | ddb->plane[pipe][plane].end = start + plane_blocks; |
3049 | 2956 | ||
3050 | start += plane_blocks; | 2957 | start += plane_blocks; |
3051 | 2958 | ||
3052 | /* | 2959 | /* |
3053 | * allocation for y_plane part of planar format: | 2960 | * allocation for y_plane part of planar format: |
3054 | */ | 2961 | */ |
3055 | if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { | 2962 | if (p->y_bytes_per_pixel) { |
3056 | y_data_rate = skl_plane_relative_data_rate(cstate, | 2963 | y_data_rate = skl_plane_relative_data_rate(p, 1); |
3057 | pstate, | 2964 | y_plane_blocks = y_minimum[plane]; |
3058 | 1); | ||
3059 | y_plane_blocks = y_minimum[id]; | ||
3060 | y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, | 2965 | y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, |
3061 | total_data_rate); | 2966 | total_data_rate); |
3062 | 2967 | ||
3063 | ddb->y_plane[pipe][id].start = start; | 2968 | ddb->y_plane[pipe][plane].start = start; |
3064 | ddb->y_plane[pipe][id].end = start + y_plane_blocks; | 2969 | ddb->y_plane[pipe][plane].end = start + y_plane_blocks; |
3065 | 2970 | ||
3066 | start += y_plane_blocks; | 2971 | start += y_plane_blocks; |
3067 | } | 2972 | } |
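The DDB split in skl_allocate_pipe_ddb() is proportional: each enabled plane first gets a fixed minimum of 8 blocks (plus 8 more for the Y plane when planar), and the remaining space is divided by data-rate share. A compact standalone model of that division, reusing the NV12 rates from the example above and an assumed 500 leftover blocks; the real numbers depend on pipe count and the cursor allocation:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t alloc_size = 500;                      /* blocks left after minimums, illustrative */
            uint32_t rates[] = { 2073600, 1036800 };        /* per-plane data rates */
            uint32_t total = rates[0] + rates[1];
            uint16_t minimum = 8;

            for (int i = 0; i < 2; i++) {
                    /* same formula as the hunk: minimum + alloc_size * rate / total */
                    uint16_t blocks = minimum +
                            (uint16_t)(((uint64_t)alloc_size * rates[i]) / total);
                    printf("plane %d: %u blocks\n", i, blocks);
            }
            return 0;
    }

The 64-bit promotion mirrors div_u64() in the kernel code; without it the product alloc_size * data_rate could overflow 32 bits for large planes.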
@@ -3148,21 +3053,87 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev, | |||
3148 | struct intel_wm_config *config) | 3053 | struct intel_wm_config *config) |
3149 | { | 3054 | { |
3150 | struct drm_crtc *crtc; | 3055 | struct drm_crtc *crtc; |
3056 | struct drm_plane *plane; | ||
3151 | 3057 | ||
3152 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 3058 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
3153 | config->num_pipes_active += to_intel_crtc(crtc)->active; | 3059 | config->num_pipes_active += to_intel_crtc(crtc)->active; |
3060 | |||
3061 | /* FIXME: I don't think we need those two global parameters on SKL */ | ||
3062 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | ||
3063 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
3064 | |||
3065 | config->sprites_enabled |= intel_plane->wm.enabled; | ||
3066 | config->sprites_scaled |= intel_plane->wm.scaled; | ||
3067 | } | ||
3068 | } | ||
3069 | |||
3070 | static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, | ||
3071 | struct skl_pipe_wm_parameters *p) | ||
3072 | { | ||
3073 | struct drm_device *dev = crtc->dev; | ||
3074 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3075 | enum pipe pipe = intel_crtc->pipe; | ||
3076 | struct drm_plane *plane; | ||
3077 | struct drm_framebuffer *fb; | ||
3078 | int i = 1; /* Index for sprite planes start */ | ||
3079 | |||
3080 | p->active = intel_crtc->active; | ||
3081 | if (p->active) { | ||
3082 | p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; | ||
3083 | p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); | ||
3084 | |||
3085 | fb = crtc->primary->state->fb; | ||
3086 | /* For planar: Bpp is for uv plane, y_Bpp is for y plane */ | ||
3087 | if (fb) { | ||
3088 | p->plane[0].enabled = true; | ||
3089 | p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? | ||
3090 | drm_format_plane_cpp(fb->pixel_format, 1) : | ||
3091 | drm_format_plane_cpp(fb->pixel_format, 0); | ||
3092 | p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? | ||
3093 | drm_format_plane_cpp(fb->pixel_format, 0) : 0; | ||
3094 | p->plane[0].tiling = fb->modifier[0]; | ||
3095 | } else { | ||
3096 | p->plane[0].enabled = false; | ||
3097 | p->plane[0].bytes_per_pixel = 0; | ||
3098 | p->plane[0].y_bytes_per_pixel = 0; | ||
3099 | p->plane[0].tiling = DRM_FORMAT_MOD_NONE; | ||
3100 | } | ||
3101 | p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; | ||
3102 | p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; | ||
3103 | p->plane[0].rotation = crtc->primary->state->rotation; | ||
3104 | |||
3105 | fb = crtc->cursor->state->fb; | ||
3106 | p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0; | ||
3107 | if (fb) { | ||
3108 | p->plane[PLANE_CURSOR].enabled = true; | ||
3109 | p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8; | ||
3110 | p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w; | ||
3111 | p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h; | ||
3112 | } else { | ||
3113 | p->plane[PLANE_CURSOR].enabled = false; | ||
3114 | p->plane[PLANE_CURSOR].bytes_per_pixel = 0; | ||
3115 | p->plane[PLANE_CURSOR].horiz_pixels = 64; | ||
3116 | p->plane[PLANE_CURSOR].vert_pixels = 64; | ||
3117 | } | ||
3118 | } | ||
3119 | |||
3120 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | ||
3121 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
3122 | |||
3123 | if (intel_plane->pipe == pipe && | ||
3124 | plane->type == DRM_PLANE_TYPE_OVERLAY) | ||
3125 | p->plane[i++] = intel_plane->wm; | ||
3126 | } | ||
3154 | } | 3127 | } |
3155 | 3128 | ||
3156 | static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | 3129 | static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, |
3157 | struct intel_crtc_state *cstate, | 3130 | struct skl_pipe_wm_parameters *p, |
3158 | struct intel_plane *intel_plane, | 3131 | struct intel_plane_wm_parameters *p_params, |
3159 | uint16_t ddb_allocation, | 3132 | uint16_t ddb_allocation, |
3160 | int level, | 3133 | int level, |
3161 | uint16_t *out_blocks, /* out */ | 3134 | uint16_t *out_blocks, /* out */ |
3162 | uint8_t *out_lines /* out */) | 3135 | uint8_t *out_lines /* out */) |
3163 | { | 3136 | { |
3164 | struct drm_plane *plane = &intel_plane->base; | ||
3165 | struct drm_framebuffer *fb = plane->state->fb; | ||
3166 | uint32_t latency = dev_priv->wm.skl_latency[level]; | 3137 | uint32_t latency = dev_priv->wm.skl_latency[level]; |
3167 | uint32_t method1, method2; | 3138 | uint32_t method1, method2; |
3168 | uint32_t plane_bytes_per_line, plane_blocks_per_line; | 3139 | uint32_t plane_bytes_per_line, plane_blocks_per_line; |
@@ -3170,35 +3141,31 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
3170 | uint32_t selected_result; | 3141 | uint32_t selected_result; |
3171 | uint8_t bytes_per_pixel; | 3142 | uint8_t bytes_per_pixel; |
3172 | 3143 | ||
3173 | if (latency == 0 || !cstate->base.active || !fb) | 3144 | if (latency == 0 || !p->active || !p_params->enabled) |
3174 | return false; | 3145 | return false; |
3175 | 3146 | ||
3176 | bytes_per_pixel = (fb->pixel_format == DRM_FORMAT_NV12) ? | 3147 | bytes_per_pixel = p_params->y_bytes_per_pixel ? |
3177 | drm_format_plane_cpp(DRM_FORMAT_NV12, 0) : | 3148 | p_params->y_bytes_per_pixel : |
3178 | drm_format_plane_cpp(DRM_FORMAT_NV12, 1); | 3149 | p_params->bytes_per_pixel; |
3179 | method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), | 3150 | method1 = skl_wm_method1(p->pixel_rate, |
3180 | bytes_per_pixel, | 3151 | bytes_per_pixel, |
3181 | latency); | 3152 | latency); |
3182 | method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), | 3153 | method2 = skl_wm_method2(p->pixel_rate, |
3183 | cstate->base.adjusted_mode.crtc_htotal, | 3154 | p->pipe_htotal, |
3184 | cstate->pipe_src_w, | 3155 | p_params->horiz_pixels, |
3185 | bytes_per_pixel, | 3156 | bytes_per_pixel, |
3186 | fb->modifier[0], | 3157 | p_params->tiling, |
3187 | latency); | 3158 | latency); |
3188 | 3159 | ||
3189 | plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel; | 3160 | plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel; |
3190 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | 3161 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
3191 | 3162 | ||
3192 | if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || | 3163 | if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || |
3193 | fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { | 3164 | p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { |
3194 | uint32_t min_scanlines = 4; | 3165 | uint32_t min_scanlines = 4; |
3195 | uint32_t y_tile_minimum; | 3166 | uint32_t y_tile_minimum; |
3196 | if (intel_rotation_90_or_270(plane->state->rotation)) { | 3167 | if (intel_rotation_90_or_270(p_params->rotation)) { |
3197 | int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ? | 3168 | switch (p_params->bytes_per_pixel) { |
3198 | drm_format_plane_cpp(fb->pixel_format, 1) : | ||
3199 | drm_format_plane_cpp(fb->pixel_format, 0); | ||
3200 | |||
3201 | switch (bpp) { | ||
3202 | case 1: | 3169 | case 1: |
3203 | min_scanlines = 16; | 3170 | min_scanlines = 16; |
3204 | break; | 3171 | break; |
@@ -3222,8 +3189,8 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
3222 | res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); | 3189 | res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); |
3223 | 3190 | ||
3224 | if (level >= 1 && level <= 7) { | 3191 | if (level >= 1 && level <= 7) { |
3225 | if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || | 3192 | if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || |
3226 | fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) | 3193 | p_params->tiling == I915_FORMAT_MOD_Yf_TILED) |
3227 | res_lines += 4; | 3194 | res_lines += 4; |
3228 | else | 3195 | else |
3229 | res_blocks++; | 3196 | res_blocks++; |
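Two of the quantities in skl_compute_plane_wm() are simple per-line arithmetic on the snapshotted parameters: plane_bytes_per_line is horiz_pixels * bytes_per_pixel, and plane_blocks_per_line is that value rounded up to 512-byte DDB blocks. A quick worked check with illustrative values, 1920 pixels wide at 4 bytes per pixel:

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            uint32_t horiz_pixels = 1920, bytes_per_pixel = 4;
            uint32_t plane_bytes_per_line = horiz_pixels * bytes_per_pixel;            /* 7680 */
            uint32_t plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);  /* 15 */

            printf("%u bytes/line -> %u blocks/line\n",
                   plane_bytes_per_line, plane_blocks_per_line);
            return 0;
    }

method1 and method2 then pick a result in blocks or lines from latency, pixel rate and these per-line figures; the Y-tile branch additionally enforces a minimum number of scanlines based on bytes per pixel and rotation, exactly as shown in the hunk.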
@@ -3240,80 +3207,84 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
3240 | 3207 | ||
3241 | static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, | 3208 | static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, |
3242 | struct skl_ddb_allocation *ddb, | 3209 | struct skl_ddb_allocation *ddb, |
3243 | struct intel_crtc_state *cstate, | 3210 | struct skl_pipe_wm_parameters *p, |
3211 | enum pipe pipe, | ||
3244 | int level, | 3212 | int level, |
3213 | int num_planes, | ||
3245 | struct skl_wm_level *result) | 3214 | struct skl_wm_level *result) |
3246 | { | 3215 | { |
3247 | struct drm_device *dev = dev_priv->dev; | ||
3248 | struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); | ||
3249 | struct intel_plane *intel_plane; | ||
3250 | uint16_t ddb_blocks; | 3216 | uint16_t ddb_blocks; |
3251 | enum pipe pipe = intel_crtc->pipe; | 3217 | int i; |
3252 | |||
3253 | for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { | ||
3254 | int i = skl_wm_plane_id(intel_plane); | ||
3255 | 3218 | ||
3219 | for (i = 0; i < num_planes; i++) { | ||
3256 | ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); | 3220 | ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); |
3257 | 3221 | ||
3258 | result->plane_en[i] = skl_compute_plane_wm(dev_priv, | 3222 | result->plane_en[i] = skl_compute_plane_wm(dev_priv, |
3259 | cstate, | 3223 | p, &p->plane[i], |
3260 | intel_plane, | ||
3261 | ddb_blocks, | 3224 | ddb_blocks, |
3262 | level, | 3225 | level, |
3263 | &result->plane_res_b[i], | 3226 | &result->plane_res_b[i], |
3264 | &result->plane_res_l[i]); | 3227 | &result->plane_res_l[i]); |
3265 | } | 3228 | } |
3229 | |||
3230 | ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]); | ||
3231 | result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p, | ||
3232 | &p->plane[PLANE_CURSOR], | ||
3233 | ddb_blocks, level, | ||
3234 | &result->plane_res_b[PLANE_CURSOR], | ||
3235 | &result->plane_res_l[PLANE_CURSOR]); | ||
3266 | } | 3236 | } |
3267 | 3237 | ||
3268 | static uint32_t | 3238 | static uint32_t |
3269 | skl_compute_linetime_wm(struct intel_crtc_state *cstate) | 3239 | skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) |
3270 | { | 3240 | { |
3271 | if (!cstate->base.active) | 3241 | if (!to_intel_crtc(crtc)->active) |
3272 | return 0; | 3242 | return 0; |
3273 | 3243 | ||
3274 | if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0)) | 3244 | if (WARN_ON(p->pixel_rate == 0)) |
3275 | return 0; | 3245 | return 0; |
3276 | 3246 | ||
3277 | return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, | 3247 | return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); |
3278 | skl_pipe_pixel_rate(cstate)); | ||
3279 | } | 3248 | } |
3280 | 3249 | ||
3281 | static void skl_compute_transition_wm(struct intel_crtc_state *cstate, | 3250 | static void skl_compute_transition_wm(struct drm_crtc *crtc, |
3251 | struct skl_pipe_wm_parameters *params, | ||
3282 | struct skl_wm_level *trans_wm /* out */) | 3252 | struct skl_wm_level *trans_wm /* out */) |
3283 | { | 3253 | { |
3284 | struct drm_crtc *crtc = cstate->base.crtc; | ||
3285 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3254 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3286 | struct intel_plane *intel_plane; | 3255 | int i; |
3287 | 3256 | ||
3288 | if (!cstate->base.active) | 3257 | if (!params->active) |
3289 | return; | 3258 | return; |
3290 | 3259 | ||
3291 | /* Until we know more, just disable transition WMs */ | 3260 | /* Until we know more, just disable transition WMs */ |
3292 | for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) { | 3261 | for (i = 0; i < intel_num_planes(intel_crtc); i++) |
3293 | int i = skl_wm_plane_id(intel_plane); | ||
3294 | |||
3295 | trans_wm->plane_en[i] = false; | 3262 | trans_wm->plane_en[i] = false; |
3296 | } | 3263 | trans_wm->plane_en[PLANE_CURSOR] = false; |
3297 | } | 3264 | } |
3298 | 3265 | ||
3299 | static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, | 3266 | static void skl_compute_pipe_wm(struct drm_crtc *crtc, |
3300 | struct skl_ddb_allocation *ddb, | 3267 | struct skl_ddb_allocation *ddb, |
3268 | struct skl_pipe_wm_parameters *params, | ||
3301 | struct skl_pipe_wm *pipe_wm) | 3269 | struct skl_pipe_wm *pipe_wm) |
3302 | { | 3270 | { |
3303 | struct drm_device *dev = cstate->base.crtc->dev; | 3271 | struct drm_device *dev = crtc->dev; |
3304 | const struct drm_i915_private *dev_priv = dev->dev_private; | 3272 | const struct drm_i915_private *dev_priv = dev->dev_private; |
3273 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3305 | int level, max_level = ilk_wm_max_level(dev); | 3274 | int level, max_level = ilk_wm_max_level(dev); |
3306 | 3275 | ||
3307 | for (level = 0; level <= max_level; level++) { | 3276 | for (level = 0; level <= max_level; level++) { |
3308 | skl_compute_wm_level(dev_priv, ddb, cstate, | 3277 | skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe, |
3309 | level, &pipe_wm->wm[level]); | 3278 | level, intel_num_planes(intel_crtc), |
3279 | &pipe_wm->wm[level]); | ||
3310 | } | 3280 | } |
3311 | pipe_wm->linetime = skl_compute_linetime_wm(cstate); | 3281 | pipe_wm->linetime = skl_compute_linetime_wm(crtc, params); |
3312 | 3282 | ||
3313 | skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); | 3283 | skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm); |
3314 | } | 3284 | } |
3315 | 3285 | ||
3316 | static void skl_compute_wm_results(struct drm_device *dev, | 3286 | static void skl_compute_wm_results(struct drm_device *dev, |
3287 | struct skl_pipe_wm_parameters *p, | ||
3317 | struct skl_pipe_wm *p_wm, | 3288 | struct skl_pipe_wm *p_wm, |
3318 | struct skl_wm_values *r, | 3289 | struct skl_wm_values *r, |
3319 | struct intel_crtc *intel_crtc) | 3290 | struct intel_crtc *intel_crtc) |
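skl_compute_linetime_wm() reduces to one expression once the parameters are snapshotted: DIV_ROUND_UP(8 * pipe_htotal * 1000, pixel_rate), with pixel_rate in kHz. A worked example with illustrative timing, htotal 2200 and a 148500 kHz pixel rate (a 1080p-like mode):

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            uint32_t pipe_htotal = 2200;
            uint32_t pixel_rate = 148500;   /* kHz */

            /* 8 * 2200 * 1000 = 17,600,000; / 148500 = 118.5..., rounded up to 119 */
            uint32_t linetime = DIV_ROUND_UP(8 * pipe_htotal * 1000, pixel_rate);

            printf("linetime = %u\n", linetime);
            return 0;
    }

The guards in the hunk (inactive pipe, zero pixel rate) simply keep this division from being attempted with meaningless inputs.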
@@ -3557,15 +3528,16 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv, | |||
3557 | } | 3528 | } |
3558 | 3529 | ||
3559 | static bool skl_update_pipe_wm(struct drm_crtc *crtc, | 3530 | static bool skl_update_pipe_wm(struct drm_crtc *crtc, |
3531 | struct skl_pipe_wm_parameters *params, | ||
3560 | struct intel_wm_config *config, | 3532 | struct intel_wm_config *config, |
3561 | struct skl_ddb_allocation *ddb, /* out */ | 3533 | struct skl_ddb_allocation *ddb, /* out */ |
3562 | struct skl_pipe_wm *pipe_wm /* out */) | 3534 | struct skl_pipe_wm *pipe_wm /* out */) |
3563 | { | 3535 | { |
3564 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3536 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3565 | struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); | ||
3566 | 3537 | ||
3567 | skl_allocate_pipe_ddb(cstate, config, ddb); | 3538 | skl_compute_wm_pipe_parameters(crtc, params); |
3568 | skl_compute_pipe_wm(cstate, ddb, pipe_wm); | 3539 | skl_allocate_pipe_ddb(crtc, config, params, ddb); |
3540 | skl_compute_pipe_wm(crtc, ddb, params, pipe_wm); | ||
3569 | 3541 | ||
3570 | if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm))) | 3542 | if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm))) |
3571 | return false; | 3543 | return false; |
@@ -3598,6 +3570,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, | |||
3598 | */ | 3570 | */ |
3599 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, | 3571 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, |
3600 | base.head) { | 3572 | base.head) { |
3573 | struct skl_pipe_wm_parameters params = {}; | ||
3601 | struct skl_pipe_wm pipe_wm = {}; | 3574 | struct skl_pipe_wm pipe_wm = {}; |
3602 | bool wm_changed; | 3575 | bool wm_changed; |
3603 | 3576 | ||
@@ -3607,7 +3580,8 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, | |||
3607 | if (!intel_crtc->active) | 3580 | if (!intel_crtc->active) |
3608 | continue; | 3581 | continue; |
3609 | 3582 | ||
3610 | wm_changed = skl_update_pipe_wm(&intel_crtc->base, config, | 3583 | wm_changed = skl_update_pipe_wm(&intel_crtc->base, |
3584 | ¶ms, config, | ||
3611 | &r->ddb, &pipe_wm); | 3585 | &r->ddb, &pipe_wm); |
3612 | 3586 | ||
3613 | /* | 3587 | /* |
@@ -3617,7 +3591,7 @@ static void skl_update_other_pipe_wm(struct drm_device *dev, | |||
3617 | */ | 3591 | */ |
3618 | WARN_ON(!wm_changed); | 3592 | WARN_ON(!wm_changed); |
3619 | 3593 | ||
3620 | skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); | 3594 | skl_compute_wm_results(dev, ¶ms, &pipe_wm, r, intel_crtc); |
3621 | r->dirty[intel_crtc->pipe] = true; | 3595 | r->dirty[intel_crtc->pipe] = true; |
3622 | } | 3596 | } |
3623 | } | 3597 | } |
@@ -3647,6 +3621,7 @@ static void skl_update_wm(struct drm_crtc *crtc) | |||
3647 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3621 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3648 | struct drm_device *dev = crtc->dev; | 3622 | struct drm_device *dev = crtc->dev; |
3649 | struct drm_i915_private *dev_priv = dev->dev_private; | 3623 | struct drm_i915_private *dev_priv = dev->dev_private; |
3624 | struct skl_pipe_wm_parameters params = {}; | ||
3650 | struct skl_wm_values *results = &dev_priv->wm.skl_results; | 3625 | struct skl_wm_values *results = &dev_priv->wm.skl_results; |
3651 | struct skl_pipe_wm pipe_wm = {}; | 3626 | struct skl_pipe_wm pipe_wm = {}; |
3652 | struct intel_wm_config config = {}; | 3627 | struct intel_wm_config config = {}; |
@@ -3659,10 +3634,11 @@ static void skl_update_wm(struct drm_crtc *crtc) | |||
3659 | 3634 | ||
3660 | skl_compute_wm_global_parameters(dev, &config); | 3635 | skl_compute_wm_global_parameters(dev, &config); |
3661 | 3636 | ||
3662 | if (!skl_update_pipe_wm(crtc, &config, &results->ddb, &pipe_wm)) | 3637 | if (!skl_update_pipe_wm(crtc, ¶ms, &config, |
3638 | &results->ddb, &pipe_wm)) | ||
3663 | return; | 3639 | return; |
3664 | 3640 | ||
3665 | skl_compute_wm_results(dev, &pipe_wm, results, intel_crtc); | 3641 | skl_compute_wm_results(dev, ¶ms, &pipe_wm, results, intel_crtc); |
3666 | results->dirty[intel_crtc->pipe] = true; | 3642 | results->dirty[intel_crtc->pipe] = true; |
3667 | 3643 | ||
3668 | skl_update_other_pipe_wm(dev, crtc, &config, results); | 3644 | skl_update_other_pipe_wm(dev, crtc, &config, results); |
@@ -3673,6 +3649,39 @@ static void skl_update_wm(struct drm_crtc *crtc) | |||
3673 | dev_priv->wm.skl_hw = *results; | 3649 | dev_priv->wm.skl_hw = *results; |
3674 | } | 3650 | } |
3675 | 3651 | ||
3652 | static void | ||
3653 | skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc, | ||
3654 | uint32_t sprite_width, uint32_t sprite_height, | ||
3655 | int pixel_size, bool enabled, bool scaled) | ||
3656 | { | ||
3657 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
3658 | struct drm_framebuffer *fb = plane->state->fb; | ||
3659 | |||
3660 | intel_plane->wm.enabled = enabled; | ||
3661 | intel_plane->wm.scaled = scaled; | ||
3662 | intel_plane->wm.horiz_pixels = sprite_width; | ||
3663 | intel_plane->wm.vert_pixels = sprite_height; | ||
3664 | intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE; | ||
3665 | |||
3666 | /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */ | ||
3667 | intel_plane->wm.bytes_per_pixel = | ||
3668 | (fb && fb->pixel_format == DRM_FORMAT_NV12) ? | ||
3669 | drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size; | ||
3670 | intel_plane->wm.y_bytes_per_pixel = | ||
3671 | (fb && fb->pixel_format == DRM_FORMAT_NV12) ? | ||
3672 | drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0; | ||
3673 | |||
3674 | /* | ||
3675 | * Framebuffer can be NULL on plane disable, but it does not | ||
3676 | * matter for watermarks if we assume no tiling in that case. | ||
3677 | */ | ||
3678 | if (fb) | ||
3679 | intel_plane->wm.tiling = fb->modifier[0]; | ||
3680 | intel_plane->wm.rotation = plane->state->rotation; | ||
3681 | |||
3682 | skl_update_wm(crtc); | ||
3683 | } | ||
3684 | |||
3676 | static void ilk_update_wm(struct drm_crtc *crtc) | 3685 | static void ilk_update_wm(struct drm_crtc *crtc) |
3677 | { | 3686 | { |
3678 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3687 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -3688,18 +3697,6 @@ static void ilk_update_wm(struct drm_crtc *crtc) | |||
3688 | 3697 | ||
3689 | WARN_ON(cstate->base.active != intel_crtc->active); | 3698 | WARN_ON(cstate->base.active != intel_crtc->active); |
3690 | 3699 | ||
3691 | /* | ||
3692 | * IVB workaround: must disable low power watermarks for at least | ||
3693 | * one frame before enabling scaling. LP watermarks can be re-enabled | ||
3694 | * when scaling is disabled. | ||
3695 | * | ||
3696 | * WaCxSRDisabledForSpriteScaling:ivb | ||
3697 | */ | ||
3698 | if (cstate->disable_lp_wm) { | ||
3699 | ilk_disable_lp_wm(dev); | ||
3700 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
3701 | } | ||
3702 | |||
3703 | intel_compute_pipe_wm(cstate, &pipe_wm); | 3700 | intel_compute_pipe_wm(cstate, &pipe_wm); |
3704 | 3701 | ||
3705 | if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) | 3702 | if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) |
@@ -3731,6 +3728,28 @@ static void ilk_update_wm(struct drm_crtc *crtc) | |||
3731 | ilk_write_wm_values(dev_priv, &results); | 3728 | ilk_write_wm_values(dev_priv, &results); |
3732 | } | 3729 | } |
3733 | 3730 | ||
3731 | static void | ||
3732 | ilk_update_sprite_wm(struct drm_plane *plane, | ||
3733 | struct drm_crtc *crtc, | ||
3734 | uint32_t sprite_width, uint32_t sprite_height, | ||
3735 | int pixel_size, bool enabled, bool scaled) | ||
3736 | { | ||
3737 | struct drm_device *dev = plane->dev; | ||
3738 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
3739 | |||
3740 | /* | ||
3741 | * IVB workaround: must disable low power watermarks for at least | ||
3742 | * one frame before enabling scaling. LP watermarks can be re-enabled | ||
3743 | * when scaling is disabled. | ||
3744 | * | ||
3745 | * WaCxSRDisabledForSpriteScaling:ivb | ||
3746 | */ | ||
3747 | if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) | ||
3748 | intel_wait_for_vblank(dev, intel_plane->pipe); | ||
3749 | |||
3750 | ilk_update_wm(crtc); | ||
3751 | } | ||
3752 | |||
3734 | static void skl_pipe_wm_active_state(uint32_t val, | 3753 | static void skl_pipe_wm_active_state(uint32_t val, |
3735 | struct skl_pipe_wm *active, | 3754 | struct skl_pipe_wm *active, |
3736 | bool is_transwm, | 3755 | bool is_transwm, |
@@ -4108,6 +4127,21 @@ void intel_update_watermarks(struct drm_crtc *crtc) | |||
4108 | dev_priv->display.update_wm(crtc); | 4127 | dev_priv->display.update_wm(crtc); |
4109 | } | 4128 | } |
4110 | 4129 | ||
4130 | void intel_update_sprite_watermarks(struct drm_plane *plane, | ||
4131 | struct drm_crtc *crtc, | ||
4132 | uint32_t sprite_width, | ||
4133 | uint32_t sprite_height, | ||
4134 | int pixel_size, | ||
4135 | bool enabled, bool scaled) | ||
4136 | { | ||
4137 | struct drm_i915_private *dev_priv = plane->dev->dev_private; | ||
4138 | |||
4139 | if (dev_priv->display.update_sprite_wm) | ||
4140 | dev_priv->display.update_sprite_wm(plane, crtc, | ||
4141 | sprite_width, sprite_height, | ||
4142 | pixel_size, enabled, scaled); | ||
4143 | } | ||
4144 | |||
4111 | /** | 4145 | /** |
4112 | * Lock protecting IPS related data structures | 4146 | * Lock protecting IPS related data structures |
4113 | */ | 4147 | */ |
@@ -7018,10 +7052,8 @@ void intel_init_pm(struct drm_device *dev) | |||
7018 | if (IS_BROXTON(dev)) | 7052 | if (IS_BROXTON(dev)) |
7019 | dev_priv->display.init_clock_gating = | 7053 | dev_priv->display.init_clock_gating = |
7020 | bxt_init_clock_gating; | 7054 | bxt_init_clock_gating; |
7021 | else if (IS_SKYLAKE(dev)) | ||
7022 | dev_priv->display.init_clock_gating = | ||
7023 | skl_init_clock_gating; | ||
7024 | dev_priv->display.update_wm = skl_update_wm; | 7055 | dev_priv->display.update_wm = skl_update_wm; |
7056 | dev_priv->display.update_sprite_wm = skl_update_sprite_wm; | ||
7025 | } else if (HAS_PCH_SPLIT(dev)) { | 7057 | } else if (HAS_PCH_SPLIT(dev)) { |
7026 | ilk_setup_wm_latency(dev); | 7058 | ilk_setup_wm_latency(dev); |
7027 | 7059 | ||
@@ -7030,6 +7062,7 @@ void intel_init_pm(struct drm_device *dev) | |||
7030 | (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && | 7062 | (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && |
7031 | dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { | 7063 | dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { |
7032 | dev_priv->display.update_wm = ilk_update_wm; | 7064 | dev_priv->display.update_wm = ilk_update_wm; |
7065 | dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; | ||
7033 | } else { | 7066 | } else { |
7034 | DRM_DEBUG_KMS("Failed to read display plane latency. " | 7067 | DRM_DEBUG_KMS("Failed to read display plane latency. " |
7035 | "Disable CxSR\n"); | 7068 | "Disable CxSR\n"); |
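
The intel_pm.c hunks above bring back update_sprite_wm as an optional per-platform hook: intel_update_sprite_watermarks() only dispatches when the pointer is set, and intel_init_pm() fills it in for the ILK and SKL paths. A minimal, simplified C sketch of that NULL-checked vfunc dispatch (the struct and names below are stand-ins, not the actual i915 types):

#include <stdbool.h>
#include <stdint.h>

struct plane;				/* opaque stand-ins for the drm objects */
struct crtc;

struct display_funcs {
	/* optional per-platform hook, e.g. ilk_update_sprite_wm */
	void (*update_sprite_wm)(struct plane *plane, struct crtc *crtc,
				 uint32_t width, uint32_t height,
				 int pixel_size, bool enabled, bool scaled);
};

static void update_sprite_watermarks(const struct display_funcs *disp,
				     struct plane *plane, struct crtc *crtc,
				     uint32_t width, uint32_t height,
				     int pixel_size, bool enabled, bool scaled)
{
	if (disp->update_sprite_wm)	/* platforms without the hook: no-op */
		disp->update_sprite_wm(plane, crtc, width, height,
				       pixel_size, enabled, scaled);
}

Note that ilk_update_sprite_wm() is also where the WaCxSRDisabledForSpriteScaling:ivb dance now lives: LP watermarks are disabled and a vblank is waited for before ilk_update_wm() runs, instead of doing that from ilk_update_wm() itself.
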
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index a04b4dc5ed9b..213581c215b3 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -73,14 +73,14 @@ static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) | |||
73 | } | 73 | } |
74 | 74 | ||
75 | static void intel_psr_write_vsc(struct intel_dp *intel_dp, | 75 | static void intel_psr_write_vsc(struct intel_dp *intel_dp, |
76 | struct edp_vsc_psr *vsc_psr) | 76 | const struct edp_vsc_psr *vsc_psr) |
77 | { | 77 | { |
78 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 78 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
79 | struct drm_device *dev = dig_port->base.base.dev; | 79 | struct drm_device *dev = dig_port->base.base.dev; |
80 | struct drm_i915_private *dev_priv = dev->dev_private; | 80 | struct drm_i915_private *dev_priv = dev->dev_private; |
81 | struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); | 81 | struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); |
82 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config->cpu_transcoder); | 82 | enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; |
83 | u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config->cpu_transcoder); | 83 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); |
84 | uint32_t *data = (uint32_t *) vsc_psr; | 84 | uint32_t *data = (uint32_t *) vsc_psr; |
85 | unsigned int i; | 85 | unsigned int i; |
86 | 86 | ||
@@ -90,12 +90,14 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp, | |||
90 | I915_WRITE(ctl_reg, 0); | 90 | I915_WRITE(ctl_reg, 0); |
91 | POSTING_READ(ctl_reg); | 91 | POSTING_READ(ctl_reg); |
92 | 92 | ||
93 | for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { | 93 | for (i = 0; i < sizeof(*vsc_psr); i += 4) { |
94 | if (i < sizeof(struct edp_vsc_psr)) | 94 | I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, |
95 | I915_WRITE(data_reg + i, *data++); | 95 | i >> 2), *data); |
96 | else | 96 | data++; |
97 | I915_WRITE(data_reg + i, 0); | ||
98 | } | 97 | } |
98 | for (; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) | ||
99 | I915_WRITE(HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, | ||
100 | i >> 2), 0); | ||
99 | 101 | ||
100 | I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); | 102 | I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); |
101 | POSTING_READ(ctl_reg); | 103 | POSTING_READ(ctl_reg); |
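
The intel_psr.c change splits the single bounds-checked loop into two: write the VSC SDP payload one dword at a time, indexing the per-dword data register directly, then zero-fill the rest of the DIP data space. A simplified, self-contained sketch of that pattern; write_dword() and DIP_DATA_SIZE stand in for the per-dword I915_WRITE() of HSW_TVIDEO_DIP_VSC_DATA and for VIDEO_DIP_VSC_DATA_SIZE, and the payload length is assumed to be dword-aligned:

#include <stddef.h>
#include <stdint.h>

#define DIP_DATA_SIZE	36			/* illustrative size in bytes */

static uint32_t dip_data[DIP_DATA_SIZE / 4];	/* models the data registers */

static void write_dword(size_t index, uint32_t val)
{
	dip_data[index] = val;			/* stands in for I915_WRITE() */
}

static void write_vsc_payload(const void *payload, size_t len)
{
	const uint32_t *data = payload;
	size_t i;

	for (i = 0; i < len; i += 4)		/* copy the payload dwords */
		write_dword(i >> 2, *data++);
	for (; i < DIP_DATA_SIZE; i += 4)	/* zero-pad the remainder */
		write_dword(i >> 2, 0);
}

Compared with the old single loop, this drops the per-iteration bounds branch and computes the register index from the dword offset rather than adding a byte offset to a base register.
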
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 654ae991ea13..0359736fe979 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -906,6 +906,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) | |||
906 | struct drm_i915_private *dev_priv = dev->dev_private; | 906 | struct drm_i915_private *dev_priv = dev->dev_private; |
907 | uint32_t tmp; | 907 | uint32_t tmp; |
908 | 908 | ||
909 | /* WaEnableLbsSlaRetryTimerDecrement:skl */ | ||
910 | I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | | ||
911 | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); | ||
912 | |||
913 | /* WaDisableKillLogic:bxt,skl */ | ||
914 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
915 | ECOCHK_DIS_TLB); | ||
916 | |||
909 | /* WaDisablePartialInstShootdown:skl,bxt */ | 917 | /* WaDisablePartialInstShootdown:skl,bxt */ |
910 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, | 918 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, |
911 | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); | 919 | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); |
@@ -1018,7 +1026,6 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring) | |||
1018 | return 0; | 1026 | return 0; |
1019 | } | 1027 | } |
1020 | 1028 | ||
1021 | |||
1022 | static int skl_init_workarounds(struct intel_engine_cs *ring) | 1029 | static int skl_init_workarounds(struct intel_engine_cs *ring) |
1023 | { | 1030 | { |
1024 | int ret; | 1031 | int ret; |
@@ -1029,6 +1036,30 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) | |||
1029 | if (ret) | 1036 | if (ret) |
1030 | return ret; | 1037 | return ret; |
1031 | 1038 | ||
1039 | if (INTEL_REVID(dev) <= SKL_REVID_D0) { | ||
1040 | /* WaDisableHDCInvalidation:skl */ | ||
1041 | I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | | ||
1042 | BDW_DISABLE_HDC_INVALIDATION); | ||
1043 | |||
1044 | /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ | ||
1045 | I915_WRITE(FF_SLICE_CS_CHICKEN2, | ||
1046 | _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); | ||
1047 | } | ||
1048 | |||
1049 | /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes | ||
1050 | * involving this register should also be added to WA batch as required. | ||
1051 | */ | ||
1052 | if (INTEL_REVID(dev) <= SKL_REVID_E0) | ||
1053 | /* WaDisableLSQCROPERFforOCL:skl */ | ||
1054 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | ||
1055 | GEN8_LQSC_RO_PERF_DIS); | ||
1056 | |||
1057 | /* WaEnableGapsTsvCreditFix:skl */ | ||
1058 | if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) { | ||
1059 | I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | | ||
1060 | GEN9_GAPS_TSV_CREDIT_DISABLE)); | ||
1061 | } | ||
1062 | |||
1032 | /* WaDisablePowerCompilerClockGating:skl */ | 1063 | /* WaDisablePowerCompilerClockGating:skl */ |
1033 | if (INTEL_REVID(dev) == SKL_REVID_B0) | 1064 | if (INTEL_REVID(dev) == SKL_REVID_B0) |
1034 | WA_SET_BIT_MASKED(HIZ_CHICKEN, | 1065 | WA_SET_BIT_MASKED(HIZ_CHICKEN, |
@@ -1072,6 +1103,17 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring) | |||
1072 | if (ret) | 1103 | if (ret) |
1073 | return ret; | 1104 | return ret; |
1074 | 1105 | ||
1106 | /* WaStoreMultiplePTEenable:bxt */ | ||
1107 | /* This is a requirement according to Hardware specification */ | ||
1108 | if (INTEL_REVID(dev) == BXT_REVID_A0) | ||
1109 | I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); | ||
1110 | |||
1111 | /* WaSetClckGatingDisableMedia:bxt */ | ||
1112 | if (INTEL_REVID(dev) == BXT_REVID_A0) { | ||
1113 | I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & | ||
1114 | ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); | ||
1115 | } | ||
1116 | |||
1075 | /* WaDisableThreadStallDopClockGating:bxt */ | 1117 | /* WaDisableThreadStallDopClockGating:bxt */ |
1076 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, | 1118 | WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, |
1077 | STALL_DOP_GATING_DISABLE); | 1119 | STALL_DOP_GATING_DISABLE); |
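
Several of the workarounds added in intel_ringbuffer.c go through WA_SET_BIT_MASKED() or _MASKED_BIT_ENABLE(), i.e. the "masked" register convention where the top 16 bits of the written dword select which of the low 16 bits the hardware actually updates. A simplified sketch of that convention (the macros and the bit value are local stand-ins, not the kernel's definitions):

#include <stdint.h>

/* masked write: bits 31:16 are the write-enable mask for bits 15:0 */
#define MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))
#define MASKED_BIT_DISABLE(bit)	((bit) << 16)

#define TSG_BARRIER_ACK_DISABLE	(1u << 8)	/* illustrative bit position */

int main(void)
{
	/* 0x01000100: set bit 8, and tell the hardware that bit 8 is the
	 * only bit this write is allowed to change */
	uint32_t val = MASKED_BIT_ENABLE(TSG_BARRIER_ACK_DISABLE);

	return val == 0x01000100 ? 0 : 1;
}

Registers written this way (e.g. FF_SLICE_CS_CHICKEN2 above) do not need the read-modify-write sequence that the GAM_ECOCHK, GEN8_L3SQCREG4 and GEN8_GARBCNTL updates use.
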
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index dd2d5683fcb1..56dc132e8e20 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -192,6 +192,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, | |||
192 | const int pipe = intel_plane->pipe; | 192 | const int pipe = intel_plane->pipe; |
193 | const int plane = intel_plane->plane + 1; | 193 | const int plane = intel_plane->plane + 1; |
194 | u32 plane_ctl, stride_div, stride; | 194 | u32 plane_ctl, stride_div, stride; |
195 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | ||
195 | const struct drm_intel_sprite_colorkey *key = | 196 | const struct drm_intel_sprite_colorkey *key = |
196 | &to_intel_plane_state(drm_plane->state)->ckey; | 197 | &to_intel_plane_state(drm_plane->state)->ckey; |
197 | unsigned long surf_addr; | 198 | unsigned long surf_addr; |
@@ -202,6 +203,7 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, | |||
202 | int scaler_id; | 203 | int scaler_id; |
203 | 204 | ||
204 | plane_ctl = PLANE_CTL_ENABLE | | 205 | plane_ctl = PLANE_CTL_ENABLE | |
206 | PLANE_CTL_PIPE_GAMMA_ENABLE | | ||
205 | PLANE_CTL_PIPE_CSC_ENABLE; | 207 | PLANE_CTL_PIPE_CSC_ENABLE; |
206 | 208 | ||
207 | plane_ctl |= skl_plane_ctl_format(fb->pixel_format); | 209 | plane_ctl |= skl_plane_ctl_format(fb->pixel_format); |
@@ -210,6 +212,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, | |||
210 | rotation = drm_plane->state->rotation; | 212 | rotation = drm_plane->state->rotation; |
211 | plane_ctl |= skl_plane_ctl_rotation(rotation); | 213 | plane_ctl |= skl_plane_ctl_rotation(rotation); |
212 | 214 | ||
215 | intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h, | ||
216 | pixel_size, true, | ||
217 | src_w != crtc_w || src_h != crtc_h); | ||
218 | |||
213 | stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], | 219 | stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], |
214 | fb->pixel_format); | 220 | fb->pixel_format); |
215 | 221 | ||
@@ -291,6 +297,8 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) | |||
291 | 297 | ||
292 | I915_WRITE(PLANE_SURF(pipe, plane), 0); | 298 | I915_WRITE(PLANE_SURF(pipe, plane), 0); |
293 | POSTING_READ(PLANE_SURF(pipe, plane)); | 299 | POSTING_READ(PLANE_SURF(pipe, plane)); |
300 | |||
301 | intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false); | ||
294 | } | 302 | } |
295 | 303 | ||
296 | static void | 304 | static void |
@@ -533,6 +541,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
533 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 541 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
534 | sprctl |= SPRITE_PIPE_CSC_ENABLE; | 542 | sprctl |= SPRITE_PIPE_CSC_ENABLE; |
535 | 543 | ||
544 | intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size, | ||
545 | true, | ||
546 | src_w != crtc_w || src_h != crtc_h); | ||
547 | |||
536 | /* Sizes are 0 based */ | 548 | /* Sizes are 0 based */ |
537 | src_w--; | 549 | src_w--; |
538 | src_h--; | 550 | src_h--; |
@@ -601,7 +613,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) | |||
601 | struct intel_plane *intel_plane = to_intel_plane(plane); | 613 | struct intel_plane *intel_plane = to_intel_plane(plane); |
602 | int pipe = intel_plane->pipe; | 614 | int pipe = intel_plane->pipe; |
603 | 615 | ||
604 | I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); | 616 | I915_WRITE(SPRCTL(pipe), 0); |
605 | /* Can't leave the scaler enabled... */ | 617 | /* Can't leave the scaler enabled... */ |
606 | if (intel_plane->can_scale) | 618 | if (intel_plane->can_scale) |
607 | I915_WRITE(SPRSCALE(pipe), 0); | 619 | I915_WRITE(SPRSCALE(pipe), 0); |
@@ -666,6 +678,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
666 | if (IS_GEN6(dev)) | 678 | if (IS_GEN6(dev)) |
667 | dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ | 679 | dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ |
668 | 680 | ||
681 | intel_update_sprite_watermarks(plane, crtc, src_w, src_h, | ||
682 | pixel_size, true, | ||
683 | src_w != crtc_w || src_h != crtc_h); | ||
684 | |||
669 | /* Sizes are 0 based */ | 685 | /* Sizes are 0 based */ |
670 | src_w--; | 686 | src_w--; |
671 | src_h--; | 687 | src_h--; |
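
In each of the intel_sprite.c update paths above, the watermark call is made before the sizes are converted to the hardware's 0-based form, and the scaled argument is simply whether the source rectangle differs from the destination rectangle. A tiny sketch of that predicate (names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

/* a sprite is "scaled" when its source size differs from the size it is
 * displayed at; compare before any 0-based size adjustment */
static bool sprite_is_scaled(uint32_t src_w, uint32_t src_h,
			     uint32_t crtc_w, uint32_t crtc_h)
{
	return src_w != crtc_w || src_h != crtc_h;
}

On Ivybridge that flag is what triggers the WaCxSRDisabledForSpriteScaling vblank wait in ilk_update_sprite_wm().
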
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index b43c6d025ac3..1663ea55e37c 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -525,7 +525,7 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) | |||
525 | } | 525 | } |
526 | 526 | ||
527 | /* We give fast paths for the really cool registers */ | 527 | /* We give fast paths for the really cool registers */ |
528 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | 528 | #define NEEDS_FORCE_WAKE(reg) \ |
529 | ((reg) < 0x40000 && (reg) != FORCEWAKE) | 529 | ((reg) < 0x40000 && (reg) != FORCEWAKE) |
530 | 530 | ||
531 | #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) | 531 | #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) |
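
NEEDS_FORCE_WAKE() and SKL_NEEDS_FORCE_WAKE() lose their unused dev_priv argument here, leaving pure predicates on the register offset. A trivial sketch of the resulting usage (the offsets below are illustrative, not taken from i915_reg.h):

#include <stdint.h>

#define FORCEWAKE_REG		0xA18C		/* illustrative forcewake offset */
#define NEEDS_FORCE_WAKE(reg)	((reg) < 0x40000 && (reg) != FORCEWAKE_REG)

int main(void)
{
	/* low GT registers need a forcewake reference, high ones do not */
	return (NEEDS_FORCE_WAKE(0x2030) && !NEEDS_FORCE_WAKE(0x140000)) ? 0 : 1;
}
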
@@ -727,7 +727,7 @@ static u##x \ | |||
727 | gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | 727 | gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ |
728 | GEN6_READ_HEADER(x); \ | 728 | GEN6_READ_HEADER(x); \ |
729 | hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ | 729 | hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ |
730 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \ | 730 | if (NEEDS_FORCE_WAKE(reg)) \ |
731 | __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ | 731 | __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ |
732 | val = __raw_i915_read##x(dev_priv, reg); \ | 732 | val = __raw_i915_read##x(dev_priv, reg); \ |
733 | hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ | 733 | hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \ |
@@ -761,7 +761,7 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | |||
761 | GEN6_READ_FOOTER; \ | 761 | GEN6_READ_FOOTER; \ |
762 | } | 762 | } |
763 | 763 | ||
764 | #define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \ | 764 | #define SKL_NEEDS_FORCE_WAKE(reg) \ |
765 | ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) | 765 | ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) |
766 | 766 | ||
767 | #define __gen9_read(x) \ | 767 | #define __gen9_read(x) \ |
@@ -770,9 +770,9 @@ gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | |||
770 | enum forcewake_domains fw_engine; \ | 770 | enum forcewake_domains fw_engine; \ |
771 | GEN6_READ_HEADER(x); \ | 771 | GEN6_READ_HEADER(x); \ |
772 | hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ | 772 | hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \ |
773 | if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \ | 773 | if (!SKL_NEEDS_FORCE_WAKE(reg)) \ |
774 | fw_engine = 0; \ | 774 | fw_engine = 0; \ |
775 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ | 775 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ |
776 | fw_engine = FORCEWAKE_RENDER; \ | 776 | fw_engine = FORCEWAKE_RENDER; \ |
777 | else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \ | 777 | else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \ |
778 | fw_engine = FORCEWAKE_MEDIA; \ | 778 | fw_engine = FORCEWAKE_MEDIA; \ |
@@ -868,7 +868,7 @@ static void \ | |||
868 | gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ | 868 | gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
869 | u32 __fifo_ret = 0; \ | 869 | u32 __fifo_ret = 0; \ |
870 | GEN6_WRITE_HEADER; \ | 870 | GEN6_WRITE_HEADER; \ |
871 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 871 | if (NEEDS_FORCE_WAKE(reg)) { \ |
872 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 872 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
873 | } \ | 873 | } \ |
874 | __raw_i915_write##x(dev_priv, reg, val); \ | 874 | __raw_i915_write##x(dev_priv, reg, val); \ |
@@ -883,7 +883,7 @@ static void \ | |||
883 | hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ | 883 | hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
884 | u32 __fifo_ret = 0; \ | 884 | u32 __fifo_ret = 0; \ |
885 | GEN6_WRITE_HEADER; \ | 885 | GEN6_WRITE_HEADER; \ |
886 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 886 | if (NEEDS_FORCE_WAKE(reg)) { \ |
887 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 887 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
888 | } \ | 888 | } \ |
889 | hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ | 889 | hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ |
@@ -985,7 +985,7 @@ gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \ | |||
985 | enum forcewake_domains fw_engine; \ | 985 | enum forcewake_domains fw_engine; \ |
986 | GEN6_WRITE_HEADER; \ | 986 | GEN6_WRITE_HEADER; \ |
987 | hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ | 987 | hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \ |
988 | if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \ | 988 | if (!SKL_NEEDS_FORCE_WAKE(reg) || \ |
989 | is_gen9_shadowed(dev_priv, reg)) \ | 989 | is_gen9_shadowed(dev_priv, reg)) \ |
990 | fw_engine = 0; \ | 990 | fw_engine = 0; \ |
991 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ | 991 | else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \ |