author	Dave Airlie <airlied@redhat.com>	2010-11-08 22:26:13 -0500
committer	Dave Airlie <airlied@redhat.com>	2010-11-08 22:26:13 -0500
commit	91839fd577abc5fb39fb2238e05e847c70c9dec3 (patch)
tree	86238c628c368aab28e96d61de99b7d739eec1ff /drivers/gpu/drm
parent	a7bcf21e60c73cb7f7c13fad928967d7e47c3cac (diff)
parent	3f8ff0e72d75fdbe7f2cba2c4015fd9fdd9e13fd (diff)
Merge branch 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel
* 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel:
drm/i915: Fix LVDS fixed-mode regression from 219adae1
drm/i915/ringbuffer: Use the HEAD auto-reporting mechanism
drm/i915: Avoid might_fault during pwrite whilst holding our mutex
agp/intel: fix cache control for sandybridge
agp/intel: restore cache behavior on sandybridge
drm/i915: Don't apply Ironlake FDI clock workaround to Sandybridge
drm/i915: Fix KMS regression on Sandybridge/CPT
i915: reprogram power monitoring registers on resume
drm/i915: SNB BLT workaround
drm/i915: Fix the graphics frequency clamping at init and when IPS is active.
drm/i915: Allow powersave modparam to be adjusted at runtime.
drm/i915: Apply big hammer to serialise buffer access between rings
drm/i915: opregion_setup: iounmap correct address
drm/i915: Flush read-only buffers from the active list upon idle as well
i915: signedness bug in check_overlay_src()
drm/i915: Fix typo from "Enable DisplayPort Audio"
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	115
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_evict.c	8
-rw-r--r--	drivers/gpu/drm/i915/i915_suspend.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	70
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	1
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c	16
-rw-r--r--	drivers/gpu/drm/i915/intel_opregion.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_overlay.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	129
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	3
13 files changed, 264 insertions, 93 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd420760..80745f85902c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
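[Note: the only change above is the sysfs mode of the powersave parameter: 0400 (root read-only) becomes 0600 (root read-write), which is what lets "powersave modparam to be adjusted at runtime" via /sys/module/i915/parameters/powersave. A minimal sketch of the pattern; the "demo" module and variable names are hypothetical, only the mode bits mirror the patch:

    /* Runtime-adjustable module parameter, sketch only. */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static unsigned int demo_powersave = 1;
    /* 0600 exposes /sys/module/<name>/parameters/powersave as root
     * read-write, so the value can be changed after module load;
     * the previous 0400 mode made it read-only. */
    module_param_named(powersave, demo_powersave, int, 0600);
    MODULE_PARM_DESC(powersave, "Enable powersaving features (sketch)");
    MODULE_LICENSE("GPL");
]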
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285e..90414ae86afc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..eba9b1615228 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
 {
-        if (list_empty(&ring->gpu_write_list))
+        if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
                 return 0;
 
         i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev)
         int ret;
 
         lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       list_empty(&dev_priv->bsd_ring.active_list) &&
-                       list_empty(&dev_priv->blt_ring.active_list));
+                       list_empty(&dev_priv->mm.active_list));
         if (lists_empty)
                 return 0;
 
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
          * write domain
          */
         if (obj->write_domain &&
-            obj->write_domain != obj->pending_read_domains) {
+            (obj->write_domain != obj->pending_read_domains ||
+             obj_priv->ring != ring)) {
                 flush_domains |= obj->write_domain;
                 invalidate_domains |=
                         obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
         return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+                                struct drm_file *file,
+                                struct intel_ring_buffer *ring,
+                                struct drm_gem_object **objects,
+                                int count)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        int ret, i;
+
+        /* Zero the global flush/invalidate flags. These
+         * will be modified as new domains are computed
+         * for each object
+         */
+        dev->invalidate_domains = 0;
+        dev->flush_domains = 0;
+        dev_priv->mm.flush_rings = 0;
+        for (i = 0; i < count; i++)
+                i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+        if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+                         __func__,
+                         dev->invalidate_domains,
+                         dev->flush_domains);
+#endif
+                i915_gem_flush(dev, file,
+                               dev->invalidate_domains,
+                               dev->flush_domains,
+                               dev_priv->mm.flush_rings);
+        }
+
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+                /* XXX replace with semaphores */
+                if (obj->ring && ring != obj->ring) {
+                        ret = i915_gem_object_wait_rendering(&obj->base, true);
+                        if (ret)
+                                return ret;
+                }
+        }
+
+        return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 goto err;
         }
 
-        /* Zero the global flush/invalidate flags. These
-         * will be modified as new domains are computed
-         * for each object
-         */
-        dev->invalidate_domains = 0;
-        dev->flush_domains = 0;
-        dev_priv->mm.flush_rings = 0;
-
-        for (i = 0; i < args->buffer_count; i++) {
-                struct drm_gem_object *obj = object_list[i];
-
-                /* Compute new gpu domains and update invalidate/flush */
-                i915_gem_object_set_to_gpu_domain(obj, ring);
-        }
-
-        if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-                         __func__,
-                         dev->invalidate_domains,
-                         dev->flush_domains);
-#endif
-                i915_gem_flush(dev, file,
-                               dev->invalidate_domains,
-                               dev->flush_domains,
-                               dev_priv->mm.flush_rings);
-        }
+        ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+                                              object_list, args->buffer_count);
+        if (ret)
+                goto err;
 
         for (i = 0; i < args->buffer_count; i++) {
                 struct drm_gem_object *obj = object_list[i];
@@ -4856,17 +4878,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                      struct drm_file *file_priv)
 {
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-        void *obj_addr;
-        int ret;
-        char __user *user_data;
+        void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+        char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-        user_data = (char __user *) (uintptr_t) args->data_ptr;
-        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+        DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-        DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-        ret = copy_from_user(obj_addr, user_data, args->size);
-        if (ret)
-                return -EFAULT;
+        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+                unsigned long unwritten;
+
+                /* The physical object once assigned is fixed for the lifetime
+                 * of the obj, so we can safely drop the lock and continue
+                 * to access vaddr.
+                 */
+                mutex_unlock(&dev->struct_mutex);
+                unwritten = copy_from_user(vaddr, user_data, args->size);
+                mutex_lock(&dev->struct_mutex);
+                if (unwritten)
+                        return -EFAULT;
+        }
 
         drm_agp_chipset_flush(dev);
         return 0;
@@ -4900,9 +4929,7 @@ i915_gpu_is_active(struct drm_device *dev)
         int lists_empty;
 
         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list);
+                      list_empty(&dev_priv->mm.active_list);
 
         return !lists_empty;
 }
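[Note: the i915_gem_phys_pwrite hunk above is the "Avoid might_fault during pwrite whilst holding our mutex" fix. It first tries __copy_from_user_inatomic_nocache(), which carries no might_fault() annotation, and only if that copy fails does it drop struct_mutex around a plain copy_from_user(), since faulting on the user buffer while holding the mutex could recurse into the driver's own fault handler. A condensed sketch of the pattern, with hypothetical names, assuming the destination stays valid without the lock:

    /* Try-fast-then-drop-the-lock user copy, sketch only. */
    static int locked_write(struct mutex *lock, void *dst,
                            const void __user *src, size_t len)
    {
            unsigned long unwritten;

            /* Fast path: returns non-zero bytes-uncopied on failure. */
            if (__copy_from_user_inatomic_nocache(dst, src, len) == 0)
                    return 0;

            /* Slow path: release the lock so the copy may fault safely. */
            mutex_unlock(lock);
            unwritten = copy_from_user(dst, src, len);
            mutex_lock(lock);

            return unwritten ? -EFAULT : 0;
    }
]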
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53fa..d8ae7d1d0cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                        list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       list_empty(&dev_priv->bsd_ring.active_list) &&
-                       list_empty(&dev_priv->blt_ring.active_list));
+                       list_empty(&dev_priv->mm.active_list));
         if (lists_empty)
                 return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                        list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       list_empty(&dev_priv->bsd_ring.active_list) &&
-                       list_empty(&dev_priv->blt_ring.active_list));
+                       list_empty(&dev_priv->mm.active_list));
         BUG_ON(!lists_empty);
 
         return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d959..454c064f8ef7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
         /* Clock gating state */
         intel_init_clock_gating(dev);
 
-        if (HAS_PCH_SPLIT(dev))
+        if (HAS_PCH_SPLIT(dev)) {
                 ironlake_enable_drps(dev);
+                intel_init_emon(dev);
+        }
 
         /* Cache mode state */
         I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b2..48d8fd686ea9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
         udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+        struct drm_device *dev = crtc->dev;
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+        int pipe = intel_crtc->pipe;
+        u32 reg, temp;
+
+        /* enable normal train */
+        reg = FDI_TX_CTL(pipe);
+        temp = I915_READ(reg);
+        temp &= ~FDI_LINK_TRAIN_NONE;
+        temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+        I915_WRITE(reg, temp);
+
+        reg = FDI_RX_CTL(pipe);
+        temp = I915_READ(reg);
+        if (HAS_PCH_CPT(dev)) {
+                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+                temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+        } else {
+                temp &= ~FDI_LINK_TRAIN_NONE;
+                temp |= FDI_LINK_TRAIN_NONE;
+        }
+        I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+        /* wait one idle pattern time */
+        POSTING_READ(reg);
+        udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 
         DRM_DEBUG_KMS("FDI train done\n");
 
-        /* enable normal train */
-        reg = FDI_TX_CTL(pipe);
-        temp = I915_READ(reg);
-        temp &= ~FDI_LINK_TRAIN_NONE;
-        temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-        I915_WRITE(reg, temp);
-
-        reg = FDI_RX_CTL(pipe);
-        temp = I915_READ(reg);
-        if (HAS_PCH_CPT(dev)) {
-                temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-                temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-        } else {
-                temp &= ~FDI_LINK_TRAIN_NONE;
-                temp |= FDI_LINK_TRAIN_NONE;
-        }
-        I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-        /* wait one idle pattern time */
-        POSTING_READ(reg);
-        udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
         I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
 
+        intel_fdi_normal_train(crtc);
+
         /* For PCH DP, enable TRANS_DP_CTL */
         if (HAS_PCH_CPT(dev) &&
             intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
         udelay(100);
 
         /* Ironlake workaround, disable clock pointer after downing FDI */
-        I915_WRITE(FDI_RX_CHICKEN(pipe),
-                   I915_READ(FDI_RX_CHICKEN(pipe) &
-                             ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+        if (HAS_PCH_IBX(dev))
+                I915_WRITE(FDI_RX_CHICKEN(pipe),
+                           I915_READ(FDI_RX_CHICKEN(pipe) &
+                                     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
         /* still set train pattern 1 */
         reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev)
         fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
         fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
                 MEMMODE_FSTART_SHIFT;
-        fstart = fmax;
 
         vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
                 PXVFREQ_PX_SHIFT;
 
-        dev_priv->fmax = fstart; /* IPS callback will increase this */
+        dev_priv->fmax = fmax; /* IPS callback will increase this */
         dev_priv->fstart = fstart;
 
-        dev_priv->max_delay = fmax;
+        dev_priv->max_delay = fstart;
         dev_priv->min_delay = fmin;
         dev_priv->cur_delay = fstart;
 
-        DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-                         fstart);
+        DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+                         fmax, fmin, fstart);
 
         I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b1..c8e005553310 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
                 status = connector_status_connected;
         }
 
-        return bit;
+        return status;
 }
 
 /**
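[Note: the one-liner above makes g4x_dp_detect() return the computed connector status rather than the hotplug bit mask it was testing. C's silent integer-to-enum conversion is why the original compiled without a warning; a minimal illustration with made-up names:

    /* Illustration only: an int converts silently to an enum return
     * type, so returning the wrong variable type-checks cleanly in C. */
    enum demo_status { DEMO_DISCONNECTED, DEMO_CONNECTED };

    static enum demo_status demo_detect(unsigned int bit)
    {
            enum demo_status status = DEMO_CONNECTED;
            return bit;     /* compiles fine, returns the wrong value */
    }
]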
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a8765..21551fe74541 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
                                       struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea9..4324a326f98e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
         struct drm_device *dev = connector->dev;
         struct drm_display_mode *mode;
 
-        if (intel_lvds->edid) {
-                drm_mode_connector_update_edid_property(connector,
-                                                        intel_lvds->edid);
+        if (intel_lvds->edid)
                 return drm_add_edid_modes(connector, intel_lvds->edid);
-        }
 
         mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
         if (mode == 0)
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev)
          */
         intel_lvds->edid = drm_get_edid(connector,
                                         &dev_priv->gmbus[pin].adapter);
-
+        if (intel_lvds->edid) {
+                if (drm_add_edid_modes(connector,
+                                       intel_lvds->edid)) {
+                        drm_mode_connector_update_edid_property(connector,
+                                                                intel_lvds->edid);
+                } else {
+                        kfree(intel_lvds->edid);
+                        intel_lvds->edid = NULL;
+                }
+        }
         if (!intel_lvds->edid) {
                 /* Didn't get an EDID, so
                  * Set wide sync ranges so we get all modes
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6b..9b0d9a867aea 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
         return 0;
 
 err_out:
-        iounmap(opregion->header);
+        iounmap(base);
         return err;
 }
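[Note: the error path previously unmapped opregion->header, a struct field that is not guaranteed to hold the cookie returned by the mapping call when setup bails out early; the fix unmaps base, the address that was actually mapped. A sketch of the general idiom, with assumed names and constants that are not from the patch:

    /* Error-path idiom: always iounmap() the exact cookie ioremap()
     * returned, never a pointer published later. Sketch only. */
    static int demo_opregion_setup(phys_addr_t phys, size_t size)
    {
            void __iomem *base;
            int err = -EINVAL;

            base = ioremap(phys, size);
            if (base == NULL)
                    return -ENOMEM;

            if (memcmp_fromio(base, "IntelGraphicsMem", 16) != 0)
                    goto err_out;

            /* publish pointers into the mapping only after validation */
            return 0;

    err_out:
            iounmap(base);  /* not a possibly stale struct field */
            return err;
    }
]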
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219a..02ff0a481f47 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
 {
         int uv_hscale = uv_hsubsampling(rec->flags);
         int uv_vscale = uv_vsubsampling(rec->flags);
-        u32 stride_mask, depth, tmp;
+        u32 stride_mask;
+        int depth;
+        u32 tmp;
 
         /* check src dimensions */
         if (IS_845G(dev) || IS_I830(dev)) {
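[Note: this is the "signedness bug in check_overlay_src()" fix from the commit list. depth was declared u32 but receives values from helpers that can return a negative errno, so any later depth < 0 check could never fire; splitting the declaration gives depth a signed type. A tiny illustration of the bug class, with a hypothetical helper:

    /* packed_depth() is hypothetical, standing in for the real helper. */
    static int packed_depth(unsigned int flags)
    {
            return flags ? 16 : -EINVAL;    /* may return a negative errno */
    }

    static int check_depth_buggy(unsigned int flags)
    {
            u32 depth = packed_depth(flags);  /* -EINVAL wraps to a huge value */
            if (depth < 0)                    /* always false for u32 */
                    return -EINVAL;
            return 0;
    }
]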
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..b83306f9244b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev,
 
         I915_WRITE_CTL(ring,
                         ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-                        | RING_NO_REPORT | RING_VALID);
+                        | RING_REPORT_64K | RING_VALID);
 
         head = I915_READ_HEAD(ring) & HEAD_ADDR;
         /* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
         i915_gem_object_unpin(ring->gem_object);
         drm_gem_object_unreference(ring->gem_object);
         ring->gem_object = NULL;
+
+        if (ring->cleanup)
+                ring->cleanup(ring);
+
         cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 {
         unsigned long end;
         drm_i915_private_t *dev_priv = dev->dev_private;
+        u32 head;
+
+        head = intel_read_status_page(ring, 4);
+        if (head) {
+                ring->head = head & HEAD_ADDR;
+                ring->space = ring->head - (ring->tail + 8);
+                if (ring->space < 0)
+                        ring->space += ring->size;
+                if (ring->space >= n)
+                        return 0;
+        }
 
         trace_i915_ring_wait_begin (dev);
         end = jiffies + 3 * HZ;
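[Note: this pairs with the RING_REPORT_64K change in the first hunk of this file: the hardware now auto-reports the ring HEAD into the status page, so intel_read_status_page(ring, 4) above can often satisfy the wait from cacheable memory instead of an uncached MMIO register read. The free-space arithmetic is the standard circular-buffer computation; a standalone sketch:

    /* Circular-ring free space as computed above; the 8-byte slack keeps
     * a completely full ring distinguishable from an empty one. */
    static int ring_space(u32 head, u32 tail, u32 size)
    {
            int space = head - (tail + 8);

            if (space < 0)          /* head has wrapped behind tail */
                    space += size;
            return space;
    }
]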
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
         /* do nothing */
 }
 
+
+/* Workaround for some stepping of SNB,
+ * each time when BLT engine ring tail moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+        (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+        return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+                         struct intel_ring_buffer *ring)
+{
+        if (NEED_BLT_WORKAROUND(dev)) {
+                struct drm_i915_gem_object *obj;
+                u32 __iomem *ptr;
+                int ret;
+
+                obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+                if (obj == NULL)
+                        return -ENOMEM;
+
+                ret = i915_gem_object_pin(&obj->base, 4096);
+                if (ret) {
+                        drm_gem_object_unreference(&obj->base);
+                        return ret;
+                }
+
+                ptr = kmap(obj->pages[0]);
+                iowrite32(MI_BATCH_BUFFER_END, ptr);
+                iowrite32(MI_NOOP, ptr+1);
+                kunmap(obj->pages[0]);
+
+                ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+                if (ret) {
+                        i915_gem_object_unpin(&obj->base);
+                        drm_gem_object_unreference(&obj->base);
+                        return ret;
+                }
+
+                ring->private = obj;
+        }
+
+        return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+                           struct intel_ring_buffer *ring,
+                           int num_dwords)
+{
+        if (ring->private) {
+                intel_ring_begin(dev, ring, num_dwords+2);
+                intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+                intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+        } else
+                intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+                           struct intel_ring_buffer *ring,
+                           u32 invalidate_domains,
+                           u32 flush_domains)
+{
+        blt_ring_begin(dev, ring, 4);
+        intel_ring_emit(dev, ring, MI_FLUSH_DW);
+        intel_ring_emit(dev, ring, 0);
+        intel_ring_emit(dev, ring, 0);
+        intel_ring_emit(dev, ring, 0);
+        intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+                     struct intel_ring_buffer *ring,
+                     u32 flush_domains)
+{
+        u32 seqno = i915_gem_get_seqno(dev);
+
+        blt_ring_begin(dev, ring, 4);
+        intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+        intel_ring_emit(dev, ring,
+                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+        intel_ring_emit(dev, ring, seqno);
+        intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+        intel_ring_advance(dev, ring);
+
+        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+        return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+        if (!ring->private)
+                return;
+
+        i915_gem_object_unpin(ring->private);
+        drm_gem_object_unreference(ring->private);
+        ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
         .name                   = "blt ring",
         .id                     = RING_BLT,
         .mmio_base              = BLT_RING_BASE,
         .size                   = 32 * PAGE_SIZE,
-        .init                   = init_ring_common,
+        .init                   = blt_ring_init,
         .write_tail             = ring_write_tail,
-        .flush                  = gen6_ring_flush,
-        .add_request            = ring_add_request,
+        .flush                  = blt_ring_flush,
+        .add_request            = blt_ring_add_request,
         .get_seqno              = ring_status_page_get_seqno,
         .user_irq_get           = blt_ring_get_user_irq,
         .user_irq_put           = blt_ring_put_user_irq,
         .dispatch_gem_execbuffer        = gen6_ring_dispatch_gem_execbuffer,
+        .cleanup                = blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
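[Note: the bulk of this hunk is the "SNB BLT workaround": on early Sandybridge steppings (PCI revision < 8), the first command the BLT ring parses after any tail move must be MI_BATCH_BUFFER_START, so blt_ring_begin() prepends every emission with a jump to a pinned 4 KiB dummy batch that immediately ends. The resulting layout of each submission, sketched as a comment rather than new code:

    /*
     * Each BLT ring submission with the workaround active:
     *
     *   MI_BATCH_BUFFER_START        <- must be parsed first after a tail move
     *   <GTT offset of dummy batch>  <- 4 KiB object holding
     *                                   MI_BATCH_BUFFER_END, MI_NOOP
     *   ... the real commands (MI_FLUSH_DW, MI_STORE_DWORD_INDEX, ...) ...
     */

The dummy object is torn down through the new ring->cleanup hook, which intel_cleanup_ring_buffer() now invokes (see the earlier hunk and the intel_ringbuffer.h diff below).]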
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e5764..3126c2681983 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
                         struct drm_i915_gem_execbuffer2 *exec,
                         struct drm_clip_rect *cliprects,
                         uint64_t exec_offset);
+        void            (*cleanup)(struct intel_ring_buffer *ring);
 
         /**
          * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
 
         wait_queue_head_t irq_queue;
         drm_local_map_t map;
+
+        void *private;
 };
 
 static inline u32