Diffstat (limited to 'drivers/gpu/drm')
38 files changed, 493 insertions, 267 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98f195..f7af91cb273 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, | |||
276 | struct drm_crtc *tmp; | 276 | struct drm_crtc *tmp; |
277 | int crtc_mask = 1; | 277 | int crtc_mask = 1; |
278 | 278 | ||
279 | WARN(!crtc, "checking null crtc?"); | 279 | WARN(!crtc, "checking null crtc?\n"); |
280 | 280 | ||
281 | dev = crtc->dev; | 281 | dev = crtc->dev; |
282 | 282 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a26217a53..a245d17165a 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, | |||
240 | .addr = DDC_ADDR, | 240 | .addr = DDC_ADDR, |
241 | .flags = I2C_M_RD, | 241 | .flags = I2C_M_RD, |
242 | .len = len, | 242 | .len = len, |
243 | .buf = buf + start, | 243 | .buf = buf, |
244 | } | 244 | } |
245 | }; | 245 | }; |
246 | 246 | ||
@@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, | |||
253 | static u8 * | 253 | static u8 * |
254 | drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | 254 | drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
255 | { | 255 | { |
256 | int i, j = 0; | 256 | int i, j = 0, valid_extensions = 0; |
257 | u8 *block, *new; | 257 | u8 *block, *new; |
258 | 258 | ||
259 | if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) | 259 | if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) |
@@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | |||
280 | 280 | ||
281 | for (j = 1; j <= block[0x7e]; j++) { | 281 | for (j = 1; j <= block[0x7e]; j++) { |
282 | for (i = 0; i < 4; i++) { | 282 | for (i = 0; i < 4; i++) { |
283 | if (drm_do_probe_ddc_edid(adapter, block, j, | 283 | if (drm_do_probe_ddc_edid(adapter, |
284 | EDID_LENGTH)) | 284 | block + (valid_extensions + 1) * EDID_LENGTH, |
285 | j, EDID_LENGTH)) | ||
285 | goto out; | 286 | goto out; |
286 | if (drm_edid_block_valid(block + j * EDID_LENGTH)) | 287 | if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) { |
288 | valid_extensions++; | ||
287 | break; | 289 | break; |
290 | } | ||
288 | } | 291 | } |
289 | if (i == 4) | 292 | if (i == 4) |
290 | goto carp; | 293 | dev_warn(connector->dev->dev, |
294 | "%s: Ignoring invalid EDID block %d.\n", | ||
295 | drm_get_connector_name(connector), j); | ||
296 | } | ||
297 | |||
298 | if (valid_extensions != block[0x7e]) { | ||
299 | block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; | ||
300 | block[0x7e] = valid_extensions; | ||
301 | new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); | ||
302 | if (!new) | ||
303 | goto out; | ||
304 | block = new; | ||
291 | } | 305 | } |
292 | 306 | ||
293 | return block; | 307 | return block; |
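A stand-alone sketch of the compaction logic the drm_edid.c hunk above introduces may make it easier to follow: each advertised extension block is read into the slot just past the last block kept, only blocks that validate are counted, and the base block's extension count and checksum are patched before the allocation is shrunk. The read/validate helpers and the four-try retry limit below are illustrative stand-ins for the kernel's DDC probe and drm_edid_block_valid(), not a copy of them.

#include <stdlib.h>

#define EDID_LENGTH 128

/* placeholders for the DDC read and block validation used in drm_edid.c */
extern int read_ext_block(unsigned char *buf, int index);   /* 0 on success */
extern int ext_block_valid(const unsigned char *buf);       /* nonzero if valid */

static unsigned char *compact_edid(unsigned char *block)
{
        int advertised = block[0x7e];
        int valid = 0;

        for (int j = 1; j <= advertised; j++) {
                /* read into the slot right after the last block we kept */
                unsigned char *slot = block + (valid + 1) * EDID_LENGTH;
                int try;

                for (try = 0; try < 4; try++) {
                        if (read_ext_block(slot, j)) {
                                free(block);            /* DDC failure is fatal */
                                return NULL;
                        }
                        if (ext_block_valid(slot)) {
                                valid++;
                                break;
                        }
                }
                /* a block that never validates is simply dropped */
        }

        if (valid != advertised) {
                unsigned char *smaller;

                /* raising byte 127 by the amount byte 0x7e drops keeps the
                 * base block's 128-byte checksum summing to zero */
                block[EDID_LENGTH - 1] += advertised - valid;
                block[0x7e] = valid;

                smaller = realloc(block, (valid + 1) * EDID_LENGTH);
                if (smaller)
                        block = smaller;
        }
        return block;
}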
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd42076..80745f85902 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0; | |||
44 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | 44 | module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); |
45 | 45 | ||
46 | unsigned int i915_powersave = 1; | 46 | unsigned int i915_powersave = 1; |
47 | module_param_named(powersave, i915_powersave, int, 0400); | 47 | module_param_named(powersave, i915_powersave, int, 0600); |
48 | 48 | ||
49 | unsigned int i915_lvds_downclock = 0; | 49 | unsigned int i915_lvds_downclock = 0; |
50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 50 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285..90414ae86af 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, | |||
1321 | 1321 | ||
1322 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) | 1322 | #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) |
1323 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | 1323 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
1324 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | ||
1324 | 1325 | ||
1325 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) | 1326 | #define PRIMARY_RINGBUFFER_SIZE (128*1024) |
1326 | 1327 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b..ef188e39140 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2172 | static int i915_ring_idle(struct drm_device *dev, | 2172 | static int i915_ring_idle(struct drm_device *dev, |
2173 | struct intel_ring_buffer *ring) | 2173 | struct intel_ring_buffer *ring) |
2174 | { | 2174 | { |
2175 | if (list_empty(&ring->gpu_write_list)) | 2175 | if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) |
2176 | return 0; | 2176 | return 0; |
2177 | 2177 | ||
2178 | i915_gem_flush_ring(dev, NULL, ring, | 2178 | i915_gem_flush_ring(dev, NULL, ring, |
@@ -2190,9 +2190,7 @@ i915_gpu_idle(struct drm_device *dev) | |||
2190 | int ret; | 2190 | int ret; |
2191 | 2191 | ||
2192 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | 2192 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && |
2193 | list_empty(&dev_priv->render_ring.active_list) && | 2193 | list_empty(&dev_priv->mm.active_list)); |
2194 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
2195 | list_empty(&dev_priv->blt_ring.active_list)); | ||
2196 | if (lists_empty) | 2194 | if (lists_empty) |
2197 | return 0; | 2195 | return 0; |
2198 | 2196 | ||
@@ -3108,7 +3106,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, | |||
3108 | * write domain | 3106 | * write domain |
3109 | */ | 3107 | */ |
3110 | if (obj->write_domain && | 3108 | if (obj->write_domain && |
3111 | obj->write_domain != obj->pending_read_domains) { | 3109 | (obj->write_domain != obj->pending_read_domains || |
3110 | obj_priv->ring != ring)) { | ||
3112 | flush_domains |= obj->write_domain; | 3111 | flush_domains |= obj->write_domain; |
3113 | invalidate_domains |= | 3112 | invalidate_domains |= |
3114 | obj->pending_read_domains & ~obj->write_domain; | 3113 | obj->pending_read_domains & ~obj->write_domain; |
@@ -3497,6 +3496,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev, | |||
3497 | return 0; | 3496 | return 0; |
3498 | } | 3497 | } |
3499 | 3498 | ||
3499 | static int | ||
3500 | i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, | ||
3501 | struct drm_file *file, | ||
3502 | struct intel_ring_buffer *ring, | ||
3503 | struct drm_gem_object **objects, | ||
3504 | int count) | ||
3505 | { | ||
3506 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3507 | int ret, i; | ||
3508 | |||
3509 | /* Zero the global flush/invalidate flags. These | ||
3510 | * will be modified as new domains are computed | ||
3511 | * for each object | ||
3512 | */ | ||
3513 | dev->invalidate_domains = 0; | ||
3514 | dev->flush_domains = 0; | ||
3515 | dev_priv->mm.flush_rings = 0; | ||
3516 | for (i = 0; i < count; i++) | ||
3517 | i915_gem_object_set_to_gpu_domain(objects[i], ring); | ||
3518 | |||
3519 | if (dev->invalidate_domains | dev->flush_domains) { | ||
3520 | #if WATCH_EXEC | ||
3521 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
3522 | __func__, | ||
3523 | dev->invalidate_domains, | ||
3524 | dev->flush_domains); | ||
3525 | #endif | ||
3526 | i915_gem_flush(dev, file, | ||
3527 | dev->invalidate_domains, | ||
3528 | dev->flush_domains, | ||
3529 | dev_priv->mm.flush_rings); | ||
3530 | } | ||
3531 | |||
3532 | for (i = 0; i < count; i++) { | ||
3533 | struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); | ||
3534 | /* XXX replace with semaphores */ | ||
3535 | if (obj->ring && ring != obj->ring) { | ||
3536 | ret = i915_gem_object_wait_rendering(&obj->base, true); | ||
3537 | if (ret) | ||
3538 | return ret; | ||
3539 | } | ||
3540 | } | ||
3541 | |||
3542 | return 0; | ||
3543 | } | ||
3544 | |||
3500 | /* Throttle our rendering by waiting until the ring has completed our requests | 3545 | /* Throttle our rendering by waiting until the ring has completed our requests |
3501 | * emitted over 20 msec ago. | 3546 | * emitted over 20 msec ago. |
3502 | * | 3547 | * |
@@ -3757,33 +3802,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3757 | goto err; | 3802 | goto err; |
3758 | } | 3803 | } |
3759 | 3804 | ||
3760 | /* Zero the global flush/invalidate flags. These | 3805 | ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, |
3761 | * will be modified as new domains are computed | 3806 | object_list, args->buffer_count); |
3762 | * for each object | 3807 | if (ret) |
3763 | */ | 3808 | goto err; |
3764 | dev->invalidate_domains = 0; | ||
3765 | dev->flush_domains = 0; | ||
3766 | dev_priv->mm.flush_rings = 0; | ||
3767 | |||
3768 | for (i = 0; i < args->buffer_count; i++) { | ||
3769 | struct drm_gem_object *obj = object_list[i]; | ||
3770 | |||
3771 | /* Compute new gpu domains and update invalidate/flush */ | ||
3772 | i915_gem_object_set_to_gpu_domain(obj, ring); | ||
3773 | } | ||
3774 | |||
3775 | if (dev->invalidate_domains | dev->flush_domains) { | ||
3776 | #if WATCH_EXEC | ||
3777 | DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
3778 | __func__, | ||
3779 | dev->invalidate_domains, | ||
3780 | dev->flush_domains); | ||
3781 | #endif | ||
3782 | i915_gem_flush(dev, file, | ||
3783 | dev->invalidate_domains, | ||
3784 | dev->flush_domains, | ||
3785 | dev_priv->mm.flush_rings); | ||
3786 | } | ||
3787 | 3809 | ||
3788 | for (i = 0; i < args->buffer_count; i++) { | 3810 | for (i = 0; i < args->buffer_count; i++) { |
3789 | struct drm_gem_object *obj = object_list[i]; | 3811 | struct drm_gem_object *obj = object_list[i]; |
@@ -4043,8 +4065,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
4043 | alignment = i915_gem_get_gtt_alignment(obj); | 4065 | alignment = i915_gem_get_gtt_alignment(obj); |
4044 | if (obj_priv->gtt_offset & (alignment - 1)) { | 4066 | if (obj_priv->gtt_offset & (alignment - 1)) { |
4045 | WARN(obj_priv->pin_count, | 4067 | WARN(obj_priv->pin_count, |
4046 | "bo is already pinned with incorrect alignment:" | 4068 | "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n", |
4047 | " offset=%x, req.alignment=%x\n", | ||
4048 | obj_priv->gtt_offset, alignment); | 4069 | obj_priv->gtt_offset, alignment); |
4049 | ret = i915_gem_object_unbind(obj); | 4070 | ret = i915_gem_object_unbind(obj); |
4050 | if (ret) | 4071 | if (ret) |
@@ -4856,17 +4877,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | |||
4856 | struct drm_file *file_priv) | 4877 | struct drm_file *file_priv) |
4857 | { | 4878 | { |
4858 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); | 4879 | struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); |
4859 | void *obj_addr; | 4880 | void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; |
4860 | int ret; | 4881 | char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; |
4861 | char __user *user_data; | ||
4862 | 4882 | ||
4863 | user_data = (char __user *) (uintptr_t) args->data_ptr; | 4883 | DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); |
4864 | obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; | ||
4865 | 4884 | ||
4866 | DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); | 4885 | if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
4867 | ret = copy_from_user(obj_addr, user_data, args->size); | 4886 | unsigned long unwritten; |
4868 | if (ret) | 4887 | |
4869 | return -EFAULT; | 4888 | /* The physical object once assigned is fixed for the lifetime |
4889 | * of the obj, so we can safely drop the lock and continue | ||
4890 | * to access vaddr. | ||
4891 | */ | ||
4892 | mutex_unlock(&dev->struct_mutex); | ||
4893 | unwritten = copy_from_user(vaddr, user_data, args->size); | ||
4894 | mutex_lock(&dev->struct_mutex); | ||
4895 | if (unwritten) | ||
4896 | return -EFAULT; | ||
4897 | } | ||
4870 | 4898 | ||
4871 | drm_agp_chipset_flush(dev); | 4899 | drm_agp_chipset_flush(dev); |
4872 | return 0; | 4900 | return 0; |
@@ -4900,9 +4928,7 @@ i915_gpu_is_active(struct drm_device *dev) | |||
4900 | int lists_empty; | 4928 | int lists_empty; |
4901 | 4929 | ||
4902 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | 4930 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && |
4903 | list_empty(&dev_priv->render_ring.active_list) && | 4931 | list_empty(&dev_priv->mm.active_list); |
4904 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
4905 | list_empty(&dev_priv->blt_ring.active_list); | ||
4906 | 4932 | ||
4907 | return !lists_empty; | 4933 | return !lists_empty; |
4908 | } | 4934 | } |
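The i915_gem_phys_pwrite() rework above is an instance of a fast-path/slow-path copy: first try a copy routine that cannot sleep while still holding the mutex, and only when that attempt fails drop the lock for a copy that may fault, which is safe because the physical object's backing store stays fixed for the object's lifetime. A minimal userspace-flavoured sketch of that shape, with the lock type and fast_copy() helper as assumed stand-ins rather than the kernel primitives:

#include <pthread.h>
#include <string.h>

/* stand-in for __copy_from_user_inatomic_nocache(): may refuse the copy
 * (nonzero return) but never blocks or faults */
extern int fast_copy(void *dst, const void *src, size_t n);

/* caller holds *lock on entry and on return */
static int write_phys_object(pthread_mutex_t *lock, void *vaddr,
                             const void *user_data, size_t size)
{
        if (fast_copy(vaddr, user_data, size) == 0)
                return 0;                       /* fast path, done under the lock */

        /*
         * Slow path: the copy may block, so release the lock first; the
         * destination stays valid because it is fixed for the lifetime
         * of the object.
         */
        pthread_mutex_unlock(lock);
        memcpy(vaddr, user_data, size);         /* stands in for copy_from_user() */
        pthread_mutex_lock(lock);
        return 0;
}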
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53f..d8ae7d1d0cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
165 | 165 | ||
166 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 166 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
167 | list_empty(&dev_priv->mm.flushing_list) && | 167 | list_empty(&dev_priv->mm.flushing_list) && |
168 | list_empty(&dev_priv->render_ring.active_list) && | 168 | list_empty(&dev_priv->mm.active_list)); |
169 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
170 | list_empty(&dev_priv->blt_ring.active_list)); | ||
171 | if (lists_empty) | 169 | if (lists_empty) |
172 | return -ENOSPC; | 170 | return -ENOSPC; |
173 | 171 | ||
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
184 | 182 | ||
185 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 183 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && |
186 | list_empty(&dev_priv->mm.flushing_list) && | 184 | list_empty(&dev_priv->mm.flushing_list) && |
187 | list_empty(&dev_priv->render_ring.active_list) && | 185 | list_empty(&dev_priv->mm.active_list)); |
188 | list_empty(&dev_priv->bsd_ring.active_list) && | ||
189 | list_empty(&dev_priv->blt_ring.active_list)); | ||
190 | BUG_ON(!lists_empty); | 186 | BUG_ON(!lists_empty); |
191 | 187 | ||
192 | return 0; | 188 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d95..454c064f8ef 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev) | |||
862 | /* Clock gating state */ | 862 | /* Clock gating state */ |
863 | intel_init_clock_gating(dev); | 863 | intel_init_clock_gating(dev); |
864 | 864 | ||
865 | if (HAS_PCH_SPLIT(dev)) | 865 | if (HAS_PCH_SPLIT(dev)) { |
866 | ironlake_enable_drps(dev); | 866 | ironlake_enable_drps(dev); |
867 | intel_init_emon(dev); | ||
868 | } | ||
867 | 869 | ||
868 | /* Cache mode state */ | 870 | /* Cache mode state */ |
869 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); | 871 | I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b..48d8fd686ea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) | |||
1681 | udelay(500); | 1681 | udelay(500); |
1682 | } | 1682 | } |
1683 | 1683 | ||
1684 | static void intel_fdi_normal_train(struct drm_crtc *crtc) | ||
1685 | { | ||
1686 | struct drm_device *dev = crtc->dev; | ||
1687 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1688 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1689 | int pipe = intel_crtc->pipe; | ||
1690 | u32 reg, temp; | ||
1691 | |||
1692 | /* enable normal train */ | ||
1693 | reg = FDI_TX_CTL(pipe); | ||
1694 | temp = I915_READ(reg); | ||
1695 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1696 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
1697 | I915_WRITE(reg, temp); | ||
1698 | |||
1699 | reg = FDI_RX_CTL(pipe); | ||
1700 | temp = I915_READ(reg); | ||
1701 | if (HAS_PCH_CPT(dev)) { | ||
1702 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1703 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
1704 | } else { | ||
1705 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1706 | temp |= FDI_LINK_TRAIN_NONE; | ||
1707 | } | ||
1708 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1709 | |||
1710 | /* wait one idle pattern time */ | ||
1711 | POSTING_READ(reg); | ||
1712 | udelay(1000); | ||
1713 | } | ||
1714 | |||
1684 | /* The FDI link training functions for ILK/Ibexpeak. */ | 1715 | /* The FDI link training functions for ILK/Ibexpeak. */ |
1685 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | 1716 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) |
1686 | { | 1717 | { |
@@ -1767,27 +1798,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1767 | 1798 | ||
1768 | DRM_DEBUG_KMS("FDI train done\n"); | 1799 | DRM_DEBUG_KMS("FDI train done\n"); |
1769 | 1800 | ||
1770 | /* enable normal train */ | ||
1771 | reg = FDI_TX_CTL(pipe); | ||
1772 | temp = I915_READ(reg); | ||
1773 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1774 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
1775 | I915_WRITE(reg, temp); | ||
1776 | |||
1777 | reg = FDI_RX_CTL(pipe); | ||
1778 | temp = I915_READ(reg); | ||
1779 | if (HAS_PCH_CPT(dev)) { | ||
1780 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1781 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
1782 | } else { | ||
1783 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1784 | temp |= FDI_LINK_TRAIN_NONE; | ||
1785 | } | ||
1786 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1787 | |||
1788 | /* wait one idle pattern time */ | ||
1789 | POSTING_READ(reg); | ||
1790 | udelay(1000); | ||
1791 | } | 1801 | } |
1792 | 1802 | ||
1793 | static const int const snb_b_fdi_train_param [] = { | 1803 | static const int const snb_b_fdi_train_param [] = { |
@@ -2090,6 +2100,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2090 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | 2100 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); |
2091 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | 2101 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
2092 | 2102 | ||
2103 | intel_fdi_normal_train(crtc); | ||
2104 | |||
2093 | /* For PCH DP, enable TRANS_DP_CTL */ | 2105 | /* For PCH DP, enable TRANS_DP_CTL */ |
2094 | if (HAS_PCH_CPT(dev) && | 2106 | if (HAS_PCH_CPT(dev) && |
2095 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 2107 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
@@ -2200,9 +2212,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2200 | udelay(100); | 2212 | udelay(100); |
2201 | 2213 | ||
2202 | /* Ironlake workaround, disable clock pointer after downing FDI */ | 2214 | /* Ironlake workaround, disable clock pointer after downing FDI */ |
2203 | I915_WRITE(FDI_RX_CHICKEN(pipe), | 2215 | if (HAS_PCH_IBX(dev)) |
2204 | I915_READ(FDI_RX_CHICKEN(pipe) & | 2216 | I915_WRITE(FDI_RX_CHICKEN(pipe), |
2205 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | 2217 | I915_READ(FDI_RX_CHICKEN(pipe) & |
2218 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | ||
2206 | 2219 | ||
2207 | /* still set train pattern 1 */ | 2220 | /* still set train pattern 1 */ |
2208 | reg = FDI_TX_CTL(pipe); | 2221 | reg = FDI_TX_CTL(pipe); |
@@ -5581,20 +5594,19 @@ void ironlake_enable_drps(struct drm_device *dev) | |||
5581 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | 5594 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); |
5582 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | 5595 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> |
5583 | MEMMODE_FSTART_SHIFT; | 5596 | MEMMODE_FSTART_SHIFT; |
5584 | fstart = fmax; | ||
5585 | 5597 | ||
5586 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> | 5598 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> |
5587 | PXVFREQ_PX_SHIFT; | 5599 | PXVFREQ_PX_SHIFT; |
5588 | 5600 | ||
5589 | dev_priv->fmax = fstart; /* IPS callback will increase this */ | 5601 | dev_priv->fmax = fmax; /* IPS callback will increase this */ |
5590 | dev_priv->fstart = fstart; | 5602 | dev_priv->fstart = fstart; |
5591 | 5603 | ||
5592 | dev_priv->max_delay = fmax; | 5604 | dev_priv->max_delay = fstart; |
5593 | dev_priv->min_delay = fmin; | 5605 | dev_priv->min_delay = fmin; |
5594 | dev_priv->cur_delay = fstart; | 5606 | dev_priv->cur_delay = fstart; |
5595 | 5607 | ||
5596 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, | 5608 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", |
5597 | fstart); | 5609 | fmax, fmin, fstart); |
5598 | 5610 | ||
5599 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); | 5611 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); |
5600 | 5612 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b..c8e00555331 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1517 | status = connector_status_connected; | 1517 | status = connector_status_connected; |
1518 | } | 1518 | } |
1519 | 1519 | ||
1520 | return bit; | 1520 | return status; |
1521 | } | 1521 | } |
1522 | 1522 | ||
1523 | /** | 1523 | /** |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a876..21551fe7454 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, | |||
296 | extern void intel_init_clock_gating(struct drm_device *dev); | 296 | extern void intel_init_clock_gating(struct drm_device *dev); |
297 | extern void ironlake_enable_drps(struct drm_device *dev); | 297 | extern void ironlake_enable_drps(struct drm_device *dev); |
298 | extern void ironlake_disable_drps(struct drm_device *dev); | 298 | extern void ironlake_disable_drps(struct drm_device *dev); |
299 | extern void intel_init_emon(struct drm_device *dev); | ||
299 | 300 | ||
300 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, | 301 | extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
301 | struct drm_gem_object *obj, | 302 | struct drm_gem_object *obj, |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea..4324a326f98 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
481 | struct drm_device *dev = connector->dev; | 481 | struct drm_device *dev = connector->dev; |
482 | struct drm_display_mode *mode; | 482 | struct drm_display_mode *mode; |
483 | 483 | ||
484 | if (intel_lvds->edid) { | 484 | if (intel_lvds->edid) |
485 | drm_mode_connector_update_edid_property(connector, | ||
486 | intel_lvds->edid); | ||
487 | return drm_add_edid_modes(connector, intel_lvds->edid); | 485 | return drm_add_edid_modes(connector, intel_lvds->edid); |
488 | } | ||
489 | 486 | ||
490 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); | 487 | mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); |
491 | if (mode == 0) | 488 | if (mode == 0) |
@@ -939,7 +936,16 @@ void intel_lvds_init(struct drm_device *dev) | |||
939 | */ | 936 | */ |
940 | intel_lvds->edid = drm_get_edid(connector, | 937 | intel_lvds->edid = drm_get_edid(connector, |
941 | &dev_priv->gmbus[pin].adapter); | 938 | &dev_priv->gmbus[pin].adapter); |
942 | 939 | if (intel_lvds->edid) { | |
940 | if (drm_add_edid_modes(connector, | ||
941 | intel_lvds->edid)) { | ||
942 | drm_mode_connector_update_edid_property(connector, | ||
943 | intel_lvds->edid); | ||
944 | } else { | ||
945 | kfree(intel_lvds->edid); | ||
946 | intel_lvds->edid = NULL; | ||
947 | } | ||
948 | } | ||
943 | if (!intel_lvds->edid) { | 949 | if (!intel_lvds->edid) { |
944 | /* Didn't get an EDID, so | 950 | /* Didn't get an EDID, so |
945 | * Set wide sync ranges so we get all modes | 951 | * Set wide sync ranges so we get all modes |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6..9b0d9a867ae 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev) | |||
512 | return 0; | 512 | return 0; |
513 | 513 | ||
514 | err_out: | 514 | err_out: |
515 | iounmap(opregion->header); | 515 | iounmap(base); |
516 | return err; | 516 | return err; |
517 | } | 517 | } |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219..02ff0a481f4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev, | |||
946 | { | 946 | { |
947 | int uv_hscale = uv_hsubsampling(rec->flags); | 947 | int uv_hscale = uv_hsubsampling(rec->flags); |
948 | int uv_vscale = uv_vsubsampling(rec->flags); | 948 | int uv_vscale = uv_vsubsampling(rec->flags); |
949 | u32 stride_mask, depth, tmp; | 949 | u32 stride_mask; |
950 | int depth; | ||
951 | u32 tmp; | ||
950 | 952 | ||
951 | /* check src dimensions */ | 953 | /* check src dimensions */ |
952 | if (IS_845G(dev) || IS_I830(dev)) { | 954 | if (IS_845G(dev) || IS_I830(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae..b83306f9244 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@ static int init_ring_common(struct drm_device *dev, | |||
177 | 177 | ||
178 | I915_WRITE_CTL(ring, | 178 | I915_WRITE_CTL(ring, |
179 | ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | 179 | ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) |
180 | | RING_NO_REPORT | RING_VALID); | 180 | | RING_REPORT_64K | RING_VALID); |
181 | 181 | ||
182 | head = I915_READ_HEAD(ring) & HEAD_ADDR; | 182 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
183 | /* If the head is still not zero, the ring is dead */ | 183 | /* If the head is still not zero, the ring is dead */ |
@@ -654,6 +654,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev, | |||
654 | i915_gem_object_unpin(ring->gem_object); | 654 | i915_gem_object_unpin(ring->gem_object); |
655 | drm_gem_object_unreference(ring->gem_object); | 655 | drm_gem_object_unreference(ring->gem_object); |
656 | ring->gem_object = NULL; | 656 | ring->gem_object = NULL; |
657 | |||
658 | if (ring->cleanup) | ||
659 | ring->cleanup(ring); | ||
660 | |||
657 | cleanup_status_page(dev, ring); | 661 | cleanup_status_page(dev, ring); |
658 | } | 662 | } |
659 | 663 | ||
@@ -688,6 +692,17 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
688 | { | 692 | { |
689 | unsigned long end; | 693 | unsigned long end; |
690 | drm_i915_private_t *dev_priv = dev->dev_private; | 694 | drm_i915_private_t *dev_priv = dev->dev_private; |
695 | u32 head; | ||
696 | |||
697 | head = intel_read_status_page(ring, 4); | ||
698 | if (head) { | ||
699 | ring->head = head & HEAD_ADDR; | ||
700 | ring->space = ring->head - (ring->tail + 8); | ||
701 | if (ring->space < 0) | ||
702 | ring->space += ring->size; | ||
703 | if (ring->space >= n) | ||
704 | return 0; | ||
705 | } | ||
691 | 706 | ||
692 | trace_i915_ring_wait_begin (dev); | 707 | trace_i915_ring_wait_begin (dev); |
693 | end = jiffies + 3 * HZ; | 708 | end = jiffies + 3 * HZ; |
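The head probe added to intel_wait_ring_buffer() above feeds the usual circular-buffer free-space calculation: space is head minus tail, less a small guard so a completely full ring is never mistaken for an empty one, wrapped by the ring size when negative. A minimal illustration of that arithmetic, using the 8-byte guard from the hunk above:

#include <stdint.h>

/* free bytes in a ring of 'size' bytes; the 8-byte gap keeps the writer
 * from filling the ring completely, so head == tail only ever means empty */
static int ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
        int space = (int)head - (int)(tail + 8);

        if (space < 0)
                space += size;
        return space;
}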
@@ -854,19 +869,125 @@ blt_ring_put_user_irq(struct drm_device *dev, | |||
854 | /* do nothing */ | 869 | /* do nothing */ |
855 | } | 870 | } |
856 | 871 | ||
872 | |||
873 | /* Workaround for some stepping of SNB, | ||
874 | * each time when BLT engine ring tail moved, | ||
875 | * the first command in the ring to be parsed | ||
876 | * should be MI_BATCH_BUFFER_START | ||
877 | */ | ||
878 | #define NEED_BLT_WORKAROUND(dev) \ | ||
879 | (IS_GEN6(dev) && (dev->pdev->revision < 8)) | ||
880 | |||
881 | static inline struct drm_i915_gem_object * | ||
882 | to_blt_workaround(struct intel_ring_buffer *ring) | ||
883 | { | ||
884 | return ring->private; | ||
885 | } | ||
886 | |||
887 | static int blt_ring_init(struct drm_device *dev, | ||
888 | struct intel_ring_buffer *ring) | ||
889 | { | ||
890 | if (NEED_BLT_WORKAROUND(dev)) { | ||
891 | struct drm_i915_gem_object *obj; | ||
892 | u32 __iomem *ptr; | ||
893 | int ret; | ||
894 | |||
895 | obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); | ||
896 | if (obj == NULL) | ||
897 | return -ENOMEM; | ||
898 | |||
899 | ret = i915_gem_object_pin(&obj->base, 4096); | ||
900 | if (ret) { | ||
901 | drm_gem_object_unreference(&obj->base); | ||
902 | return ret; | ||
903 | } | ||
904 | |||
905 | ptr = kmap(obj->pages[0]); | ||
906 | iowrite32(MI_BATCH_BUFFER_END, ptr); | ||
907 | iowrite32(MI_NOOP, ptr+1); | ||
908 | kunmap(obj->pages[0]); | ||
909 | |||
910 | ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); | ||
911 | if (ret) { | ||
912 | i915_gem_object_unpin(&obj->base); | ||
913 | drm_gem_object_unreference(&obj->base); | ||
914 | return ret; | ||
915 | } | ||
916 | |||
917 | ring->private = obj; | ||
918 | } | ||
919 | |||
920 | return init_ring_common(dev, ring); | ||
921 | } | ||
922 | |||
923 | static void blt_ring_begin(struct drm_device *dev, | ||
924 | struct intel_ring_buffer *ring, | ||
925 | int num_dwords) | ||
926 | { | ||
927 | if (ring->private) { | ||
928 | intel_ring_begin(dev, ring, num_dwords+2); | ||
929 | intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); | ||
930 | intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); | ||
931 | } else | ||
932 | intel_ring_begin(dev, ring, 4); | ||
933 | } | ||
934 | |||
935 | static void blt_ring_flush(struct drm_device *dev, | ||
936 | struct intel_ring_buffer *ring, | ||
937 | u32 invalidate_domains, | ||
938 | u32 flush_domains) | ||
939 | { | ||
940 | blt_ring_begin(dev, ring, 4); | ||
941 | intel_ring_emit(dev, ring, MI_FLUSH_DW); | ||
942 | intel_ring_emit(dev, ring, 0); | ||
943 | intel_ring_emit(dev, ring, 0); | ||
944 | intel_ring_emit(dev, ring, 0); | ||
945 | intel_ring_advance(dev, ring); | ||
946 | } | ||
947 | |||
948 | static u32 | ||
949 | blt_ring_add_request(struct drm_device *dev, | ||
950 | struct intel_ring_buffer *ring, | ||
951 | u32 flush_domains) | ||
952 | { | ||
953 | u32 seqno = i915_gem_get_seqno(dev); | ||
954 | |||
955 | blt_ring_begin(dev, ring, 4); | ||
956 | intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); | ||
957 | intel_ring_emit(dev, ring, | ||
958 | I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
959 | intel_ring_emit(dev, ring, seqno); | ||
960 | intel_ring_emit(dev, ring, MI_USER_INTERRUPT); | ||
961 | intel_ring_advance(dev, ring); | ||
962 | |||
963 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | ||
964 | return seqno; | ||
965 | } | ||
966 | |||
967 | static void blt_ring_cleanup(struct intel_ring_buffer *ring) | ||
968 | { | ||
969 | if (!ring->private) | ||
970 | return; | ||
971 | |||
972 | i915_gem_object_unpin(ring->private); | ||
973 | drm_gem_object_unreference(ring->private); | ||
974 | ring->private = NULL; | ||
975 | } | ||
976 | |||
857 | static const struct intel_ring_buffer gen6_blt_ring = { | 977 | static const struct intel_ring_buffer gen6_blt_ring = { |
858 | .name = "blt ring", | 978 | .name = "blt ring", |
859 | .id = RING_BLT, | 979 | .id = RING_BLT, |
860 | .mmio_base = BLT_RING_BASE, | 980 | .mmio_base = BLT_RING_BASE, |
861 | .size = 32 * PAGE_SIZE, | 981 | .size = 32 * PAGE_SIZE, |
862 | .init = init_ring_common, | 982 | .init = blt_ring_init, |
863 | .write_tail = ring_write_tail, | 983 | .write_tail = ring_write_tail, |
864 | .flush = gen6_ring_flush, | 984 | .flush = blt_ring_flush, |
865 | .add_request = ring_add_request, | 985 | .add_request = blt_ring_add_request, |
866 | .get_seqno = ring_status_page_get_seqno, | 986 | .get_seqno = ring_status_page_get_seqno, |
867 | .user_irq_get = blt_ring_get_user_irq, | 987 | .user_irq_get = blt_ring_get_user_irq, |
868 | .user_irq_put = blt_ring_put_user_irq, | 988 | .user_irq_put = blt_ring_put_user_irq, |
869 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, | 989 | .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, |
990 | .cleanup = blt_ring_cleanup, | ||
870 | }; | 991 | }; |
871 | 992 | ||
872 | int intel_init_render_ring_buffer(struct drm_device *dev) | 993 | int intel_init_render_ring_buffer(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e576..3126c268198 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer { | |||
63 | struct drm_i915_gem_execbuffer2 *exec, | 63 | struct drm_i915_gem_execbuffer2 *exec, |
64 | struct drm_clip_rect *cliprects, | 64 | struct drm_clip_rect *cliprects, |
65 | uint64_t exec_offset); | 65 | uint64_t exec_offset); |
66 | void (*cleanup)(struct intel_ring_buffer *ring); | ||
66 | 67 | ||
67 | /** | 68 | /** |
68 | * List of objects currently involved in rendering from the | 69 | * List of objects currently involved in rendering from the |
@@ -98,6 +99,8 @@ struct intel_ring_buffer { | |||
98 | 99 | ||
99 | wait_queue_head_t irq_queue; | 100 | wait_queue_head_t irq_queue; |
100 | drm_local_map_t map; | 101 | drm_local_map_t map; |
102 | |||
103 | void *private; | ||
101 | }; | 104 | }; |
102 | 105 | ||
103 | static inline u32 | 106 | static inline u32 |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3ec05..488c36c8f5e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2033,7 +2033,7 @@ int evergreen_irq_set(struct radeon_device *rdev) | |||
2033 | u32 grbm_int_cntl = 0; | 2033 | u32 grbm_int_cntl = 0; |
2034 | 2034 | ||
2035 | if (!rdev->irq.installed) { | 2035 | if (!rdev->irq.installed) { |
2036 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 2036 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
2037 | return -EINVAL; | 2037 | return -EINVAL; |
2038 | } | 2038 | } |
2039 | /* don't enable anything if the ih is disabled */ | 2039 | /* don't enable anything if the ih is disabled */ |
@@ -2295,6 +2295,7 @@ restart_ih: | |||
2295 | case 0: /* D1 vblank */ | 2295 | case 0: /* D1 vblank */ |
2296 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | 2296 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { |
2297 | drm_handle_vblank(rdev->ddev, 0); | 2297 | drm_handle_vblank(rdev->ddev, 0); |
2298 | rdev->pm.vblank_sync = true; | ||
2298 | wake_up(&rdev->irq.vblank_queue); | 2299 | wake_up(&rdev->irq.vblank_queue); |
2299 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | 2300 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; |
2300 | DRM_DEBUG("IH: D1 vblank\n"); | 2301 | DRM_DEBUG("IH: D1 vblank\n"); |
@@ -2316,6 +2317,7 @@ restart_ih: | |||
2316 | case 0: /* D2 vblank */ | 2317 | case 0: /* D2 vblank */ |
2317 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { | 2318 | if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { |
2318 | drm_handle_vblank(rdev->ddev, 1); | 2319 | drm_handle_vblank(rdev->ddev, 1); |
2320 | rdev->pm.vblank_sync = true; | ||
2319 | wake_up(&rdev->irq.vblank_queue); | 2321 | wake_up(&rdev->irq.vblank_queue); |
2320 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; | 2322 | disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; |
2321 | DRM_DEBUG("IH: D2 vblank\n"); | 2323 | DRM_DEBUG("IH: D2 vblank\n"); |
@@ -2337,6 +2339,7 @@ restart_ih: | |||
2337 | case 0: /* D3 vblank */ | 2339 | case 0: /* D3 vblank */ |
2338 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { | 2340 | if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { |
2339 | drm_handle_vblank(rdev->ddev, 2); | 2341 | drm_handle_vblank(rdev->ddev, 2); |
2342 | rdev->pm.vblank_sync = true; | ||
2340 | wake_up(&rdev->irq.vblank_queue); | 2343 | wake_up(&rdev->irq.vblank_queue); |
2341 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; | 2344 | disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; |
2342 | DRM_DEBUG("IH: D3 vblank\n"); | 2345 | DRM_DEBUG("IH: D3 vblank\n"); |
@@ -2358,6 +2361,7 @@ restart_ih: | |||
2358 | case 0: /* D4 vblank */ | 2361 | case 0: /* D4 vblank */ |
2359 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { | 2362 | if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { |
2360 | drm_handle_vblank(rdev->ddev, 3); | 2363 | drm_handle_vblank(rdev->ddev, 3); |
2364 | rdev->pm.vblank_sync = true; | ||
2361 | wake_up(&rdev->irq.vblank_queue); | 2365 | wake_up(&rdev->irq.vblank_queue); |
2362 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; | 2366 | disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; |
2363 | DRM_DEBUG("IH: D4 vblank\n"); | 2367 | DRM_DEBUG("IH: D4 vblank\n"); |
@@ -2379,6 +2383,7 @@ restart_ih: | |||
2379 | case 0: /* D5 vblank */ | 2383 | case 0: /* D5 vblank */ |
2380 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { | 2384 | if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { |
2381 | drm_handle_vblank(rdev->ddev, 4); | 2385 | drm_handle_vblank(rdev->ddev, 4); |
2386 | rdev->pm.vblank_sync = true; | ||
2382 | wake_up(&rdev->irq.vblank_queue); | 2387 | wake_up(&rdev->irq.vblank_queue); |
2383 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; | 2388 | disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; |
2384 | DRM_DEBUG("IH: D5 vblank\n"); | 2389 | DRM_DEBUG("IH: D5 vblank\n"); |
@@ -2400,6 +2405,7 @@ restart_ih: | |||
2400 | case 0: /* D6 vblank */ | 2405 | case 0: /* D6 vblank */ |
2401 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { | 2406 | if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { |
2402 | drm_handle_vblank(rdev->ddev, 5); | 2407 | drm_handle_vblank(rdev->ddev, 5); |
2408 | rdev->pm.vblank_sync = true; | ||
2403 | wake_up(&rdev->irq.vblank_queue); | 2409 | wake_up(&rdev->irq.vblank_queue); |
2404 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; | 2410 | disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; |
2405 | DRM_DEBUG("IH: D6 vblank\n"); | 2411 | DRM_DEBUG("IH: D6 vblank\n"); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a6892..8e10aa9f74b 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
442 | int r; | 442 | int r; |
443 | 443 | ||
444 | if (rdev->gart.table.ram.ptr) { | 444 | if (rdev->gart.table.ram.ptr) { |
445 | WARN(1, "R100 PCI GART already initialized.\n"); | 445 | WARN(1, "R100 PCI GART already initialized\n"); |
446 | return 0; | 446 | return 0; |
447 | } | 447 | } |
448 | /* Initialize common gart structure */ | 448 | /* Initialize common gart structure */ |
@@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev) | |||
516 | uint32_t tmp = 0; | 516 | uint32_t tmp = 0; |
517 | 517 | ||
518 | if (!rdev->irq.installed) { | 518 | if (!rdev->irq.installed) { |
519 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 519 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
520 | WREG32(R_000040_GEN_INT_CNTL, 0); | 520 | WREG32(R_000040_GEN_INT_CNTL, 0); |
521 | return -EINVAL; | 521 | return -EINVAL; |
522 | } | 522 | } |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e600fe..cde1d3480d9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) | |||
91 | int r; | 91 | int r; |
92 | 92 | ||
93 | if (rdev->gart.table.vram.robj) { | 93 | if (rdev->gart.table.vram.robj) { |
94 | WARN(1, "RV370 PCIE GART already initialized.\n"); | 94 | WARN(1, "RV370 PCIE GART already initialized\n"); |
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | /* Initialize common gart structure */ | 97 | /* Initialize common gart structure */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a12f0a..0f806cc7dc7 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev) | |||
97 | { | 97 | { |
98 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> | 98 | u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> |
99 | ASIC_T_SHIFT; | 99 | ASIC_T_SHIFT; |
100 | u32 actual_temp = 0; | ||
101 | 100 | ||
102 | if ((temp >> 7) & 1) | 101 | return temp * 1000; |
103 | actual_temp = 0; | ||
104 | else | ||
105 | actual_temp = (temp >> 1) & 0xff; | ||
106 | |||
107 | return actual_temp * 1000; | ||
108 | } | 102 | } |
109 | 103 | ||
110 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) | 104 | void r600_pm_get_dynpm_state(struct radeon_device *rdev) |
@@ -919,7 +913,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) | |||
919 | int r; | 913 | int r; |
920 | 914 | ||
921 | if (rdev->gart.table.vram.robj) { | 915 | if (rdev->gart.table.vram.robj) { |
922 | WARN(1, "R600 PCIE GART already initialized.\n"); | 916 | WARN(1, "R600 PCIE GART already initialized\n"); |
923 | return 0; | 917 | return 0; |
924 | } | 918 | } |
925 | /* Initialize common gart structure */ | 919 | /* Initialize common gart structure */ |
@@ -2995,7 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev) | |||
2995 | u32 hdmi1, hdmi2; | 2989 | u32 hdmi1, hdmi2; |
2996 | 2990 | ||
2997 | if (!rdev->irq.installed) { | 2991 | if (!rdev->irq.installed) { |
2998 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 2992 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
2999 | return -EINVAL; | 2993 | return -EINVAL; |
3000 | } | 2994 | } |
3001 | /* don't enable anything if the ih is disabled */ | 2995 | /* don't enable anything if the ih is disabled */ |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7ec903..87ead090c7d 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
526 | if (crev < 2) | 526 | if (crev < 2) |
527 | return false; | 527 | return false; |
528 | 528 | ||
529 | router.valid = false; | ||
530 | |||
531 | obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); | 529 | obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); |
532 | path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) | 530 | path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) |
533 | (ctx->bios + data_offset + | 531 | (ctx->bios + data_offset + |
@@ -624,6 +622,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
624 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) | 622 | if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
625 | continue; | 623 | continue; |
626 | 624 | ||
625 | router.ddc_valid = false; | ||
626 | router.cd_valid = false; | ||
627 | for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { | 627 | for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { |
628 | uint8_t grph_obj_id, grph_obj_num, grph_obj_type; | 628 | uint8_t grph_obj_id, grph_obj_num, grph_obj_type; |
629 | 629 | ||
@@ -647,9 +647,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
647 | usDeviceTag)); | 647 | usDeviceTag)); |
648 | 648 | ||
649 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { | 649 | } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { |
650 | router.valid = false; | ||
651 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { | 650 | for (k = 0; k < router_obj->ucNumberOfObjects; k++) { |
652 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID); | 651 | u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); |
653 | if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { | 652 | if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { |
654 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) | 653 | ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) |
655 | (ctx->bios + data_offset + | 654 | (ctx->bios + data_offset + |
@@ -657,6 +656,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
657 | ATOM_I2C_RECORD *i2c_record; | 656 | ATOM_I2C_RECORD *i2c_record; |
658 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; | 657 | ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; |
659 | ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; | 658 | ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; |
659 | ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path; | ||
660 | ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = | 660 | ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = |
661 | (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) | 661 | (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) |
662 | (ctx->bios + data_offset + | 662 | (ctx->bios + data_offset + |
@@ -690,10 +690,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
690 | case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: | 690 | case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: |
691 | ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) | 691 | ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) |
692 | record; | 692 | record; |
693 | router.valid = true; | 693 | router.ddc_valid = true; |
694 | router.mux_type = ddc_path->ucMuxType; | 694 | router.ddc_mux_type = ddc_path->ucMuxType; |
695 | router.mux_control_pin = ddc_path->ucMuxControlPin; | 695 | router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; |
696 | router.mux_state = ddc_path->ucMuxState[enum_id]; | 696 | router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; |
697 | break; | ||
698 | case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: | ||
699 | cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) | ||
700 | record; | ||
701 | router.cd_valid = true; | ||
702 | router.cd_mux_type = cd_path->ucMuxType; | ||
703 | router.cd_mux_control_pin = cd_path->ucMuxControlPin; | ||
704 | router.cd_mux_state = cd_path->ucMuxState[enum_id]; | ||
697 | break; | 705 | break; |
698 | } | 706 | } |
699 | record = (ATOM_COMMON_RECORD_HEADER *) | 707 | record = (ATOM_COMMON_RECORD_HEADER *) |
@@ -860,7 +868,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
860 | size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; | 868 | size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; |
861 | struct radeon_router router; | 869 | struct radeon_router router; |
862 | 870 | ||
863 | router.valid = false; | 871 | router.ddc_valid = false; |
872 | router.cd_valid = false; | ||
864 | 873 | ||
865 | bios_connectors = kzalloc(bc_size, GFP_KERNEL); | 874 | bios_connectors = kzalloc(bc_size, GFP_KERNEL); |
866 | if (!bios_connectors) | 875 | if (!bios_connectors) |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0a02e..fe6c74780f1 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, | |||
183 | continue; | 183 | continue; |
184 | 184 | ||
185 | if (priority == true) { | 185 | if (priority == true) { |
186 | DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); | 186 | DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
187 | DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); | 187 | DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector)); |
188 | conflict->status = connector_status_disconnected; | 188 | conflict->status = connector_status_disconnected; |
189 | radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); | 189 | radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); |
190 | } else { | 190 | } else { |
191 | DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); | 191 | DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); |
192 | DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); | 192 | DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict)); |
193 | current_status = connector_status_disconnected; | 193 | current_status = connector_status_disconnected; |
194 | } | 194 | } |
195 | break; | 195 | break; |
@@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, | |||
432 | mode->vdisplay == native_mode->vdisplay) { | 432 | mode->vdisplay == native_mode->vdisplay) { |
433 | *native_mode = *mode; | 433 | *native_mode = *mode; |
434 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); | 434 | drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); |
435 | DRM_INFO("Determined LVDS native mode details from EDID\n"); | 435 | DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n"); |
436 | break; | 436 | break; |
437 | } | 437 | } |
438 | } | 438 | } |
439 | } | 439 | } |
440 | if (!native_mode->clock) { | 440 | if (!native_mode->clock) { |
441 | DRM_INFO("No LVDS native mode details, disabling RMX\n"); | 441 | DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); |
442 | radeon_encoder->rmx_type = RMX_OFF; | 442 | radeon_encoder->rmx_type = RMX_OFF; |
443 | } | 443 | } |
444 | } | 444 | } |
@@ -1116,7 +1116,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1116 | radeon_connector->shared_ddc = true; | 1116 | radeon_connector->shared_ddc = true; |
1117 | shared_ddc = true; | 1117 | shared_ddc = true; |
1118 | } | 1118 | } |
1119 | if (radeon_connector->router_bus && router->valid && | 1119 | if (radeon_connector->router_bus && router->ddc_valid && |
1120 | (radeon_connector->router.router_id == router->router_id)) { | 1120 | (radeon_connector->router.router_id == router->router_id)) { |
1121 | radeon_connector->shared_ddc = false; | 1121 | radeon_connector->shared_ddc = false; |
1122 | shared_ddc = false; | 1122 | shared_ddc = false; |
@@ -1136,7 +1136,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1136 | radeon_connector->connector_object_id = connector_object_id; | 1136 | radeon_connector->connector_object_id = connector_object_id; |
1137 | radeon_connector->hpd = *hpd; | 1137 | radeon_connector->hpd = *hpd; |
1138 | radeon_connector->router = *router; | 1138 | radeon_connector->router = *router; |
1139 | if (router->valid) { | 1139 | if (router->ddc_valid || router->cd_valid) { |
1140 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); | 1140 | radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); |
1141 | if (!radeon_connector->router_bus) | 1141 | if (!radeon_connector->router_bus) |
1142 | goto failed; | 1142 | goto failed; |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631da69..1df4dc6c063 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
315 | radeon_connector->ddc_bus->rec.en_data_reg, | 315 | radeon_connector->ddc_bus->rec.en_data_reg, |
316 | radeon_connector->ddc_bus->rec.y_clk_reg, | 316 | radeon_connector->ddc_bus->rec.y_clk_reg, |
317 | radeon_connector->ddc_bus->rec.y_data_reg); | 317 | radeon_connector->ddc_bus->rec.y_data_reg); |
318 | if (radeon_connector->router_bus) | 318 | if (radeon_connector->router.ddc_valid) |
319 | DRM_INFO(" DDC Router 0x%x/0x%x\n", | 319 | DRM_INFO(" DDC Router 0x%x/0x%x\n", |
320 | radeon_connector->router.mux_control_pin, | 320 | radeon_connector->router.ddc_mux_control_pin, |
321 | radeon_connector->router.mux_state); | 321 | radeon_connector->router.ddc_mux_state); |
322 | if (radeon_connector->router.cd_valid) | ||
323 | DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", | ||
324 | radeon_connector->router.cd_mux_control_pin, | ||
325 | radeon_connector->router.cd_mux_state); | ||
322 | } else { | 326 | } else { |
323 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || | 327 | if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || |
324 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || | 328 | connector->connector_type == DRM_MODE_CONNECTOR_DVII || |
@@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
398 | int ret = 0; | 402 | int ret = 0; |
399 | 403 | ||
400 | /* on hw with routers, select right port */ | 404 | /* on hw with routers, select right port */ |
401 | if (radeon_connector->router.valid) | 405 | if (radeon_connector->router.ddc_valid) |
402 | radeon_router_select_port(radeon_connector); | 406 | radeon_router_select_ddc_port(radeon_connector); |
403 | 407 | ||
404 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || | 408 | if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || |
405 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { | 409 | (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { |
@@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
432 | int ret = 0; | 436 | int ret = 0; |
433 | 437 | ||
434 | /* on hw with routers, select right port */ | 438 | /* on hw with routers, select right port */ |
435 | if (radeon_connector->router.valid) | 439 | if (radeon_connector->router.ddc_valid) |
436 | radeon_router_select_port(radeon_connector); | 440 | radeon_router_select_ddc_port(radeon_connector); |
437 | 441 | ||
438 | if (!radeon_connector->ddc_bus) | 442 | if (!radeon_connector->ddc_bus) |
439 | return -1; | 443 | return -1; |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b6849a2..f678257c42e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1520,6 +1520,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec | |||
1520 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | 1520 | static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) |
1521 | { | 1521 | { |
1522 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1522 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1523 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1523 | 1524 | ||
1524 | if (radeon_encoder->active_device & | 1525 | if (radeon_encoder->active_device & |
1525 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { | 1526 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { |
@@ -1531,6 +1532,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) | |||
1531 | radeon_atom_output_lock(encoder, true); | 1532 | radeon_atom_output_lock(encoder, true); |
1532 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1533 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1533 | 1534 | ||
1535 | /* select the clock/data port if it uses a router */ | ||
1536 | if (connector) { | ||
1537 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
1538 | if (radeon_connector->router.cd_valid) | ||
1539 | radeon_router_select_cd_port(radeon_connector); | ||
1540 | } | ||
1541 | |||
1534 | /* this is needed for the pll/ss setup to work correctly in some cases */ | 1542 | /* this is needed for the pll/ss setup to work correctly in some cases */ |
1535 | atombios_set_encoder_crtc_source(encoder); | 1543 | atombios_set_encoder_crtc_source(encoder); |
1536 | } | 1544 | } |
@@ -1547,6 +1555,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1547 | struct radeon_device *rdev = dev->dev_private; | 1555 | struct radeon_device *rdev = dev->dev_private; |
1548 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1556 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1549 | struct radeon_encoder_atom_dig *dig; | 1557 | struct radeon_encoder_atom_dig *dig; |
1558 | |||
1559 | /* check for pre-DCE3 cards with shared encoders; | ||
1560 | * can't really use the links individually, so don't disable | ||
1561 | * the encoder if it's in use by another connector | ||
1562 | */ | ||
1563 | if (!ASIC_IS_DCE3(rdev)) { | ||
1564 | struct drm_encoder *other_encoder; | ||
1565 | struct radeon_encoder *other_radeon_encoder; | ||
1566 | |||
1567 | list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { | ||
1568 | other_radeon_encoder = to_radeon_encoder(other_encoder); | ||
1569 | if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && | ||
1570 | drm_helper_encoder_in_use(other_encoder)) | ||
1571 | goto disable_done; | ||
1572 | } | ||
1573 | } | ||
1574 | |||
1550 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | 1575 | radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
1551 | 1576 | ||
1552 | switch (radeon_encoder->encoder_id) { | 1577 | switch (radeon_encoder->encoder_id) { |
@@ -1586,6 +1611,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) | |||
1586 | break; | 1611 | break; |
1587 | } | 1612 | } |
1588 | 1613 | ||
1614 | disable_done: | ||
1589 | if (radeon_encoder_is_digital(encoder)) { | 1615 | if (radeon_encoder_is_digital(encoder)) { |
1590 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | 1616 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) |
1591 | r600_hdmi_disable(encoder); | 1617 | r600_hdmi_disable(encoder); |
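The radeon_atom_encoder_disable() change above guards the teardown path on pre-DCE3 hardware, where one physical encoder block can be wired to several connectors: the encoder is only powered down when no other in-use encoder shares its encoder_id. A minimal userspace sketch of that guard, with hypothetical encoder bookkeeping standing in for the driver's mode_config encoder list:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's encoder bookkeeping. */
struct encoder {
	int id;        /* hardware encoder block, shared by several connectors */
	bool in_use;   /* another connector currently drives this block        */
};

/*
 * Mirror of the pre-DCE3 guard: only power the block down when no
 * other enabled encoder maps onto the same hardware id.
 */
static bool may_disable(const struct encoder *self,
			const struct encoder *all, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		const struct encoder *other = &all[i];

		if (other == self)
			continue;
		if (other->id == self->id && other->in_use)
			return false;	/* shared block still active: skip */
	}
	return true;
}

int main(void)
{
	struct encoder encs[] = { { 1, true }, { 1, true }, { 2, false } };

	printf("disable enc0: %s\n",
	       may_disable(&encs[0], encs, 3) ? "yes" : "no");	/* no  */
	printf("disable enc2: %s\n",
	       may_disable(&encs[2], encs, 3) ? "yes" : "no");	/* yes */
	return 0;
}

In the patch itself the "skip" case jumps to the new disable_done label so the HDMI/digital cleanup at the end of the function still runs.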
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 216392d0353..daacb281dfa 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -240,7 +240,8 @@ retry: | |||
240 | */ | 240 | */ |
241 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { | 241 | if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { |
242 | /* good news we believe it's a lockup */ | 242 | /* good news we believe it's a lockup */ |
243 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); | 243 | WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", |
244 | fence->seq, seq); | ||
244 | /* FIXME: what should we do ? marking everyone | 245 | /* FIXME: what should we do ? marking everyone |
245 | * as signaled for now | 246 | * as signaled for now |
246 | */ | 247 | */ |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6a13ee38a5b..0cfbba02c4d 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | /* on hw with routers, select right port */ | 55 | /* on hw with routers, select right port */ |
56 | if (radeon_connector->router.valid) | 56 | if (radeon_connector->router.ddc_valid) |
57 | radeon_router_select_port(radeon_connector); | 57 | radeon_router_select_ddc_port(radeon_connector); |
58 | 58 | ||
59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); | 59 | ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); |
60 | if (ret == 2) | 60 | if (ret == 2) |
@@ -1084,26 +1084,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, | |||
1084 | addr, val); | 1084 | addr, val); |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | /* router switching */ | 1087 | /* ddc router switching */ |
1088 | void radeon_router_select_port(struct radeon_connector *radeon_connector) | 1088 | void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector) |
1089 | { | 1089 | { |
1090 | u8 val; | 1090 | u8 val; |
1091 | 1091 | ||
1092 | if (!radeon_connector->router.valid) | 1092 | if (!radeon_connector->router.ddc_valid) |
1093 | return; | 1093 | return; |
1094 | 1094 | ||
1095 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1095 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1096 | radeon_connector->router.i2c_addr, | 1096 | radeon_connector->router.i2c_addr, |
1097 | 0x3, &val); | 1097 | 0x3, &val); |
1098 | val &= radeon_connector->router.mux_control_pin; | 1098 | val &= ~radeon_connector->router.ddc_mux_control_pin; |
1099 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1099 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1100 | radeon_connector->router.i2c_addr, | 1100 | radeon_connector->router.i2c_addr, |
1101 | 0x3, val); | 1101 | 0x3, val); |
1102 | radeon_i2c_get_byte(radeon_connector->router_bus, | 1102 | radeon_i2c_get_byte(radeon_connector->router_bus, |
1103 | radeon_connector->router.i2c_addr, | 1103 | radeon_connector->router.i2c_addr, |
1104 | 0x1, &val); | 1104 | 0x1, &val); |
1105 | val &= radeon_connector->router.mux_control_pin; | 1105 | val &= ~radeon_connector->router.ddc_mux_control_pin; |
1106 | val |= radeon_connector->router.mux_state; | 1106 | val |= radeon_connector->router.ddc_mux_state; |
1107 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1108 | radeon_connector->router.i2c_addr, | ||
1109 | 0x1, val); | ||
1110 | } | ||
1111 | |||
1112 | /* clock/data router switching */ | ||
1113 | void radeon_router_select_cd_port(struct radeon_connector *radeon_connector) | ||
1114 | { | ||
1115 | u8 val; | ||
1116 | |||
1117 | if (!radeon_connector->router.cd_valid) | ||
1118 | return; | ||
1119 | |||
1120 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1121 | radeon_connector->router.i2c_addr, | ||
1122 | 0x3, &val); | ||
1123 | val &= ~radeon_connector->router.cd_mux_control_pin; | ||
1124 | radeon_i2c_put_byte(radeon_connector->router_bus, | ||
1125 | radeon_connector->router.i2c_addr, | ||
1126 | 0x3, val); | ||
1127 | radeon_i2c_get_byte(radeon_connector->router_bus, | ||
1128 | radeon_connector->router.i2c_addr, | ||
1129 | 0x1, &val); | ||
1130 | val &= ~radeon_connector->router.cd_mux_control_pin; | ||
1131 | val |= radeon_connector->router.cd_mux_state; | ||
1107 | radeon_i2c_put_byte(radeon_connector->router_bus, | 1132 | radeon_i2c_put_byte(radeon_connector->router_bus, |
1108 | radeon_connector->router.i2c_addr, | 1133 | radeon_connector->router.i2c_addr, |
1109 | 0x1, val); | 1134 | 0x1, val); |
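Both router helpers above program the mux with the same read-modify-write sequence over I2C, and the functional fix is in the masking: the control-pin bits are now cleared with the complement of the mask (val &= ~pin) before the new state is OR-ed in, where the old code AND-ed the raw mask and clobbered the unrelated bits. A small, self-contained sketch of that bit manipulation (field names are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mux descriptor; only the bit pattern matters here. */
struct mux {
	uint8_t control_pin;	/* bits owned by this mux in the register */
	uint8_t state;		/* value to program into those bits        */
};

/* Read-modify-write as done by the DDC and clock/data port selection:
 * clear only the bits the mux owns, then OR in the requested state,
 * leaving every other bit in the register untouched. */
static uint8_t select_port(uint8_t reg, const struct mux *m)
{
	reg &= (uint8_t)~m->control_pin;
	reg |= m->state;
	return reg;
}

int main(void)
{
	struct mux m = { .control_pin = 0x0c, .state = 0x08 };
	uint8_t reg = 0xf5;	/* pretend this was just read over I2C */

	printf("0x%02x -> 0x%02x\n", reg, select_port(reg, &m));  /* 0xf5 -> 0xf9 */
	return 0;
}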
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 92457163d07..680f57644e8 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -401,13 +401,19 @@ struct radeon_hpd { | |||
401 | }; | 401 | }; |
402 | 402 | ||
403 | struct radeon_router { | 403 | struct radeon_router { |
404 | bool valid; | ||
405 | u32 router_id; | 404 | u32 router_id; |
406 | struct radeon_i2c_bus_rec i2c_info; | 405 | struct radeon_i2c_bus_rec i2c_info; |
407 | u8 i2c_addr; | 406 | u8 i2c_addr; |
408 | u8 mux_type; | 407 | /* i2c mux */ |
409 | u8 mux_control_pin; | 408 | bool ddc_valid; |
410 | u8 mux_state; | 409 | u8 ddc_mux_type; |
410 | u8 ddc_mux_control_pin; | ||
411 | u8 ddc_mux_state; | ||
412 | /* clock/data mux */ | ||
413 | bool cd_valid; | ||
414 | u8 cd_mux_type; | ||
415 | u8 cd_mux_control_pin; | ||
416 | u8 cd_mux_state; | ||
411 | }; | 417 | }; |
412 | 418 | ||
413 | struct radeon_connector { | 419 | struct radeon_connector { |
@@ -488,7 +494,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, | |||
488 | u8 slave_addr, | 494 | u8 slave_addr, |
489 | u8 addr, | 495 | u8 addr, |
490 | u8 val); | 496 | u8 val); |
491 | extern void radeon_router_select_port(struct radeon_connector *radeon_connector); | 497 | extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); |
498 | extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); | ||
492 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 499 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
493 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 500 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
494 | 501 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d7ab9141641..8eb18346601 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
102 | type = ttm_bo_type_device; | 102 | type = ttm_bo_type_device; |
103 | } | 103 | } |
104 | *bo_ptr = NULL; | 104 | *bo_ptr = NULL; |
105 | |||
106 | retry: | ||
105 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); | 107 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
106 | if (bo == NULL) | 108 | if (bo == NULL) |
107 | return -ENOMEM; | 109 | return -ENOMEM; |
@@ -109,8 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, | |||
109 | bo->gobj = gobj; | 111 | bo->gobj = gobj; |
110 | bo->surface_reg = -1; | 112 | bo->surface_reg = -1; |
111 | INIT_LIST_HEAD(&bo->list); | 113 | INIT_LIST_HEAD(&bo->list); |
112 | |||
113 | retry: | ||
114 | radeon_ttm_placement_from_domain(bo, domain); | 114 | radeon_ttm_placement_from_domain(bo, domain); |
115 | /* Kernel allocation are uninterruptible */ | 115 | /* Kernel allocation are uninterruptible */ |
116 | mutex_lock(&rdev->vram_mutex); | 116 | mutex_lock(&rdev->vram_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index fe95bb35317..01c2c736a1d 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, | |||
689 | gtt = container_of(backend, struct radeon_ttm_backend, backend); | 689 | gtt = container_of(backend, struct radeon_ttm_backend, backend); |
690 | gtt->offset = bo_mem->start << PAGE_SHIFT; | 690 | gtt->offset = bo_mem->start << PAGE_SHIFT; |
691 | if (!gtt->num_pages) { | 691 | if (!gtt->num_pages) { |
692 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend); | 692 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
693 | gtt->num_pages, bo_mem, backend); | ||
693 | } | 694 | } |
694 | r = radeon_gart_bind(gtt->rdev, gtt->offset, | 695 | r = radeon_gart_bind(gtt->rdev, gtt->offset, |
695 | gtt->num_pages, gtt->pages); | 696 | gtt->num_pages, gtt->pages); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index f683e51a2a0..5512e4e5e63 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev) | |||
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.ram.ptr) { | 80 | if (rdev->gart.table.ram.ptr) { |
81 | WARN(1, "RS400 GART already initialized.\n"); | 81 | WARN(1, "RS400 GART already initialized\n"); |
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | /* Check gart size */ | 84 | /* Check gart size */ |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index b091a1f6fa4..f1c6e02c2e6 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev) | |||
375 | int r; | 375 | int r; |
376 | 376 | ||
377 | if (rdev->gart.table.vram.robj) { | 377 | if (rdev->gart.table.vram.robj) { |
378 | WARN(1, "RS600 GART already initialized.\n"); | 378 | WARN(1, "RS600 GART already initialized\n"); |
379 | return 0; | 379 | return 0; |
380 | } | 380 | } |
381 | /* Initialize common gart structure */ | 381 | /* Initialize common gart structure */ |
@@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
505 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | 505 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); |
506 | 506 | ||
507 | if (!rdev->irq.installed) { | 507 | if (!rdev->irq.installed) { |
508 | WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); | 508 | WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); |
509 | WREG32(R_000040_GEN_INT_CNTL, 0); | 509 | WREG32(R_000040_GEN_INT_CNTL, 0); |
510 | return -EINVAL; | 510 | return -EINVAL; |
511 | } | 511 | } |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a1cb783c713..3ca77dc0391 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -27,14 +27,6 @@ | |||
27 | /* | 27 | /* |
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | /* Notes: | ||
31 | * | ||
32 | * We store bo pointer in drm_mm_node struct so we know which bo own a | ||
33 | * specific node. There is no protection on the pointer, thus to make | ||
34 | * sure things don't go berserk you have to access this pointer while | ||
35 | * holding the global lru lock and make sure anytime you free a node you | ||
36 | * reset the pointer to NULL. | ||
37 | */ | ||
38 | 30 | ||
39 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
40 | #include "ttm/ttm_bo_driver.h" | 32 | #include "ttm/ttm_bo_driver.h" |
@@ -45,6 +37,7 @@ | |||
45 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
46 | #include <linux/file.h> | 38 | #include <linux/file.h> |
47 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <asm/atomic.h> | ||
48 | 41 | ||
49 | #define TTM_ASSERT_LOCKED(param) | 42 | #define TTM_ASSERT_LOCKED(param) |
50 | #define TTM_DEBUG(fmt, arg...) | 43 | #define TTM_DEBUG(fmt, arg...) |
@@ -452,6 +445,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | |||
452 | ttm_bo_mem_put(bo, &bo->mem); | 445 | ttm_bo_mem_put(bo, &bo->mem); |
453 | 446 | ||
454 | atomic_set(&bo->reserved, 0); | 447 | atomic_set(&bo->reserved, 0); |
448 | |||
449 | /* | ||
450 | * Make processes trying to reserve really pick it up. | ||
451 | */ | ||
452 | smp_mb__after_atomic_dec(); | ||
455 | wake_up_all(&bo->event_queue); | 453 | wake_up_all(&bo->event_queue); |
456 | } | 454 | } |
457 | 455 | ||
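The new smp_mb__after_atomic_dec() in ttm_bo_cleanup_memtype_use() orders the atomic_set(&bo->reserved, 0) against the wake-up, so a process woken from the event queue is guaranteed to observe the cleared reservation. A rough userspace analogy using C11 atomics and a condition variable (the kernel path is lockless and relies on the explicit barrier, which the seq_cst store stands in for here):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int reserved = 1;
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

/* Release path: publish reserved = 0 before waking anyone, so a woken
 * waiter is guaranteed to see the cleared flag. */
static void release(void)
{
	atomic_store(&reserved, 0);
	pthread_mutex_lock(&m);
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&m);
}

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	while (atomic_load(&reserved))
		pthread_cond_wait(&cv, &m);
	pthread_mutex_unlock(&m);
	printf("picked up the buffer\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	release();
	pthread_join(t, NULL);
	return 0;
}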
@@ -460,7 +458,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
460 | struct ttm_bo_device *bdev = bo->bdev; | 458 | struct ttm_bo_device *bdev = bo->bdev; |
461 | struct ttm_bo_global *glob = bo->glob; | 459 | struct ttm_bo_global *glob = bo->glob; |
462 | struct ttm_bo_driver *driver; | 460 | struct ttm_bo_driver *driver; |
463 | void *sync_obj; | 461 | void *sync_obj = NULL; |
464 | void *sync_obj_arg; | 462 | void *sync_obj_arg; |
465 | int put_count; | 463 | int put_count; |
466 | int ret; | 464 | int ret; |
@@ -495,17 +493,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |||
495 | spin_lock(&glob->lru_lock); | 493 | spin_lock(&glob->lru_lock); |
496 | } | 494 | } |
497 | queue: | 495 | queue: |
498 | sync_obj = bo->sync_obj; | ||
499 | sync_obj_arg = bo->sync_obj_arg; | ||
500 | driver = bdev->driver; | 496 | driver = bdev->driver; |
497 | if (bo->sync_obj) | ||
498 | sync_obj = driver->sync_obj_ref(bo->sync_obj); | ||
499 | sync_obj_arg = bo->sync_obj_arg; | ||
501 | 500 | ||
502 | kref_get(&bo->list_kref); | 501 | kref_get(&bo->list_kref); |
503 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); | 502 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); |
504 | spin_unlock(&glob->lru_lock); | 503 | spin_unlock(&glob->lru_lock); |
505 | spin_unlock(&bo->lock); | 504 | spin_unlock(&bo->lock); |
506 | 505 | ||
507 | if (sync_obj) | 506 | if (sync_obj) { |
508 | driver->sync_obj_flush(sync_obj, sync_obj_arg); | 507 | driver->sync_obj_flush(sync_obj, sync_obj_arg); |
508 | driver->sync_obj_unref(&sync_obj); | ||
509 | } | ||
509 | schedule_delayed_work(&bdev->wq, | 510 | schedule_delayed_work(&bdev->wq, |
510 | ((HZ / 100) < 1) ? 1 : HZ / 100); | 511 | ((HZ / 100) < 1) ? 1 : HZ / 100); |
511 | } | 512 | } |
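In the queue: path above, the fence is now referenced through driver->sync_obj_ref() while the locks are still held, flushed only after the unlock, and then dropped with sync_obj_unref(); previously the raw bo->sync_obj pointer was used after the locks were released and could be freed underneath the flush. A compact sketch of that reference-before-unlock pattern, with a toy refcounted object in place of the driver's sync object:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for driver->sync_obj_*(). */
struct sync_obj {
	atomic_int refs;
};

static struct sync_obj *sync_obj_ref(struct sync_obj *s)
{
	atomic_fetch_add(&s->refs, 1);
	return s;
}

static void sync_obj_unref(struct sync_obj **s)
{
	if (atomic_fetch_sub(&(*s)->refs, 1) == 1)
		free(*s);
	*s = NULL;
}

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct sync_obj *current_fence;	/* protected by 'lock' */

/* Grab a private reference while the lock is held, drop the lock, and
 * only then do the (possibly slow) flush on the private reference. The
 * shared pointer may be replaced or dropped by another thread in the
 * meantime without invalidating ours. */
static void queue_delayed_destroy(void)
{
	struct sync_obj *fence = NULL;

	pthread_mutex_lock(&lock);
	if (current_fence)
		fence = sync_obj_ref(current_fence);
	pthread_mutex_unlock(&lock);

	if (fence) {
		/* flush would go here */
		sync_obj_unref(&fence);
	}
}

int main(void)
{
	current_fence = calloc(1, sizeof(*current_fence));
	atomic_init(&current_fence->refs, 1);
	queue_delayed_destroy();
	sync_obj_unref(&current_fence);
	printf("done\n");
	return 0;
}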
@@ -822,7 +823,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
822 | bool no_wait_gpu) | 823 | bool no_wait_gpu) |
823 | { | 824 | { |
824 | struct ttm_bo_device *bdev = bo->bdev; | 825 | struct ttm_bo_device *bdev = bo->bdev; |
825 | struct ttm_bo_global *glob = bdev->glob; | ||
826 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 826 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
827 | int ret; | 827 | int ret; |
828 | 828 | ||
@@ -832,12 +832,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
832 | return ret; | 832 | return ret; |
833 | if (mem->mm_node) | 833 | if (mem->mm_node) |
834 | break; | 834 | break; |
835 | spin_lock(&glob->lru_lock); | ||
836 | if (list_empty(&man->lru)) { | ||
837 | spin_unlock(&glob->lru_lock); | ||
838 | break; | ||
839 | } | ||
840 | spin_unlock(&glob->lru_lock); | ||
841 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, | 835 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, |
842 | no_wait_reserve, no_wait_gpu); | 836 | no_wait_reserve, no_wait_gpu); |
843 | if (unlikely(ret != 0)) | 837 | if (unlikely(ret != 0)) |
@@ -1125,35 +1119,9 @@ EXPORT_SYMBOL(ttm_bo_validate); | |||
1125 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, | 1119 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
1126 | struct ttm_placement *placement) | 1120 | struct ttm_placement *placement) |
1127 | { | 1121 | { |
1128 | int i; | 1122 | BUG_ON((placement->fpfn || placement->lpfn) && |
1123 | (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); | ||
1129 | 1124 | ||
1130 | if (placement->fpfn || placement->lpfn) { | ||
1131 | if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) { | ||
1132 | printk(KERN_ERR TTM_PFX "Page number range to small " | ||
1133 | "Need %lu pages, range is [%u, %u]\n", | ||
1134 | bo->mem.num_pages, placement->fpfn, | ||
1135 | placement->lpfn); | ||
1136 | return -EINVAL; | ||
1137 | } | ||
1138 | } | ||
1139 | for (i = 0; i < placement->num_placement; i++) { | ||
1140 | if (!capable(CAP_SYS_ADMIN)) { | ||
1141 | if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1142 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1143 | "modify NO_EVICT status.\n"); | ||
1144 | return -EINVAL; | ||
1145 | } | ||
1146 | } | ||
1147 | } | ||
1148 | for (i = 0; i < placement->num_busy_placement; i++) { | ||
1149 | if (!capable(CAP_SYS_ADMIN)) { | ||
1150 | if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1151 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1152 | "modify NO_EVICT status.\n"); | ||
1153 | return -EINVAL; | ||
1154 | } | ||
1155 | } | ||
1156 | } | ||
1157 | return 0; | 1125 | return 0; |
1158 | } | 1126 | } |
1159 | 1127 | ||
@@ -1176,6 +1144,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1176 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1144 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1177 | if (num_pages == 0) { | 1145 | if (num_pages == 0) { |
1178 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); | 1146 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); |
1147 | if (destroy) | ||
1148 | (*destroy)(bo); | ||
1149 | else | ||
1150 | kfree(bo); | ||
1179 | return -EINVAL; | 1151 | return -EINVAL; |
1180 | } | 1152 | } |
1181 | bo->destroy = destroy; | 1153 | bo->destroy = destroy; |
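The added error path matters because ttm_bo_init() takes ownership of the buffer object: when the size check fails it has to release the object itself, through the caller's destroy callback when one was supplied and kfree() otherwise, or the allocation leaks. A minimal illustration of the same ownership rule (hypothetical names, plain malloc/free in place of the kernel allocators):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	size_t size;
};

/* An init function that owns the object on entry must hand it back
 * through the caller-supplied destroy callback when one exists, and
 * fall back to a plain free otherwise. */
static int obj_init(struct obj *o, size_t size, void (*destroy)(struct obj *))
{
	if (size == 0) {
		if (destroy)
			destroy(o);
		else
			free(o);
		return -1;	/* -EINVAL in the kernel version */
	}
	o->size = size;
	return 0;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (obj_init(o, 0, NULL))	/* obj_init already released 'o' here */
		printf("init rejected zero size\n");
	return 0;
}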
@@ -1369,18 +1341,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1369 | int ret = -EINVAL; | 1341 | int ret = -EINVAL; |
1370 | struct ttm_mem_type_manager *man; | 1342 | struct ttm_mem_type_manager *man; |
1371 | 1343 | ||
1372 | if (type >= TTM_NUM_MEM_TYPES) { | 1344 | BUG_ON(type >= TTM_NUM_MEM_TYPES); |
1373 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type); | ||
1374 | return ret; | ||
1375 | } | ||
1376 | |||
1377 | man = &bdev->man[type]; | 1345 | man = &bdev->man[type]; |
1378 | if (man->has_type) { | 1346 | BUG_ON(man->has_type); |
1379 | printk(KERN_ERR TTM_PFX | ||
1380 | "Memory manager already initialized for type %d\n", | ||
1381 | type); | ||
1382 | return ret; | ||
1383 | } | ||
1384 | 1347 | ||
1385 | ret = bdev->driver->init_mem_type(bdev, type, man); | 1348 | ret = bdev->driver->init_mem_type(bdev, type, man); |
1386 | if (ret) | 1349 | if (ret) |
@@ -1389,13 +1352,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1389 | 1352 | ||
1390 | ret = 0; | 1353 | ret = 0; |
1391 | if (type != TTM_PL_SYSTEM) { | 1354 | if (type != TTM_PL_SYSTEM) { |
1392 | if (!p_size) { | ||
1393 | printk(KERN_ERR TTM_PFX | ||
1394 | "Zero size memory manager type %d\n", | ||
1395 | type); | ||
1396 | return ret; | ||
1397 | } | ||
1398 | |||
1399 | ret = (*man->func->init)(man, p_size); | 1355 | ret = (*man->func->init)(man, p_size); |
1400 | if (ret) | 1356 | if (ret) |
1401 | return ret; | 1357 | return ret; |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 7410c190c89..038e947d00f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /************************************************************************** | 1 | /************************************************************************** |
2 | * | 2 | * |
3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | 3 | * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
@@ -31,20 +31,29 @@ | |||
31 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
32 | #include "ttm/ttm_bo_driver.h" | 32 | #include "ttm/ttm_bo_driver.h" |
33 | #include "ttm/ttm_placement.h" | 33 | #include "ttm/ttm_placement.h" |
34 | #include <linux/jiffies.h> | 34 | #include "drm_mm.h" |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/sched.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/mm.h> | ||
38 | #include <linux/file.h> | ||
39 | #include <linux/module.h> | 37 | #include <linux/module.h> |
40 | 38 | ||
39 | /** | ||
40 | * Currently we use a spinlock for the lock, but a mutex *may* be | ||
41 | * more appropriate to reduce scheduling latency if the range manager | ||
42 | * ends up with very fragmented allocation patterns. | ||
43 | */ | ||
44 | |||
45 | struct ttm_range_manager { | ||
46 | struct drm_mm mm; | ||
47 | spinlock_t lock; | ||
48 | }; | ||
49 | |||
41 | static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | 50 | static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, |
42 | struct ttm_buffer_object *bo, | 51 | struct ttm_buffer_object *bo, |
43 | struct ttm_placement *placement, | 52 | struct ttm_placement *placement, |
44 | struct ttm_mem_reg *mem) | 53 | struct ttm_mem_reg *mem) |
45 | { | 54 | { |
46 | struct ttm_bo_global *glob = man->bdev->glob; | 55 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
47 | struct drm_mm *mm = man->priv; | 56 | struct drm_mm *mm = &rman->mm; |
48 | struct drm_mm_node *node = NULL; | 57 | struct drm_mm_node *node = NULL; |
49 | unsigned long lpfn; | 58 | unsigned long lpfn; |
50 | int ret; | 59 | int ret; |
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | |||
57 | if (unlikely(ret)) | 66 | if (unlikely(ret)) |
58 | return ret; | 67 | return ret; |
59 | 68 | ||
60 | spin_lock(&glob->lru_lock); | 69 | spin_lock(&rman->lock); |
61 | node = drm_mm_search_free_in_range(mm, | 70 | node = drm_mm_search_free_in_range(mm, |
62 | mem->num_pages, mem->page_alignment, | 71 | mem->num_pages, mem->page_alignment, |
63 | placement->fpfn, lpfn, 1); | 72 | placement->fpfn, lpfn, 1); |
64 | if (unlikely(node == NULL)) { | 73 | if (unlikely(node == NULL)) { |
65 | spin_unlock(&glob->lru_lock); | 74 | spin_unlock(&rman->lock); |
66 | return 0; | 75 | return 0; |
67 | } | 76 | } |
68 | node = drm_mm_get_block_atomic_range(node, mem->num_pages, | 77 | node = drm_mm_get_block_atomic_range(node, mem->num_pages, |
69 | mem->page_alignment, | 78 | mem->page_alignment, |
70 | placement->fpfn, | 79 | placement->fpfn, |
71 | lpfn); | 80 | lpfn); |
72 | spin_unlock(&glob->lru_lock); | 81 | spin_unlock(&rman->lock); |
73 | } while (node == NULL); | 82 | } while (node == NULL); |
74 | 83 | ||
75 | mem->mm_node = node; | 84 | mem->mm_node = node; |
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | |||
80 | static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, | 89 | static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, |
81 | struct ttm_mem_reg *mem) | 90 | struct ttm_mem_reg *mem) |
82 | { | 91 | { |
83 | struct ttm_bo_global *glob = man->bdev->glob; | 92 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
84 | 93 | ||
85 | if (mem->mm_node) { | 94 | if (mem->mm_node) { |
86 | spin_lock(&glob->lru_lock); | 95 | spin_lock(&rman->lock); |
87 | drm_mm_put_block(mem->mm_node); | 96 | drm_mm_put_block(mem->mm_node); |
88 | spin_unlock(&glob->lru_lock); | 97 | spin_unlock(&rman->lock); |
89 | mem->mm_node = NULL; | 98 | mem->mm_node = NULL; |
90 | } | 99 | } |
91 | } | 100 | } |
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, | |||
93 | static int ttm_bo_man_init(struct ttm_mem_type_manager *man, | 102 | static int ttm_bo_man_init(struct ttm_mem_type_manager *man, |
94 | unsigned long p_size) | 103 | unsigned long p_size) |
95 | { | 104 | { |
96 | struct drm_mm *mm; | 105 | struct ttm_range_manager *rman; |
97 | int ret; | 106 | int ret; |
98 | 107 | ||
99 | mm = kzalloc(sizeof(*mm), GFP_KERNEL); | 108 | rman = kzalloc(sizeof(*rman), GFP_KERNEL); |
100 | if (!mm) | 109 | if (!rman) |
101 | return -ENOMEM; | 110 | return -ENOMEM; |
102 | 111 | ||
103 | ret = drm_mm_init(mm, 0, p_size); | 112 | ret = drm_mm_init(&rman->mm, 0, p_size); |
104 | if (ret) { | 113 | if (ret) { |
105 | kfree(mm); | 114 | kfree(rman); |
106 | return ret; | 115 | return ret; |
107 | } | 116 | } |
108 | 117 | ||
109 | man->priv = mm; | 118 | spin_lock_init(&rman->lock); |
119 | man->priv = rman; | ||
110 | return 0; | 120 | return 0; |
111 | } | 121 | } |
112 | 122 | ||
113 | static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) | 123 | static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) |
114 | { | 124 | { |
115 | struct ttm_bo_global *glob = man->bdev->glob; | 125 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
116 | struct drm_mm *mm = man->priv; | 126 | struct drm_mm *mm = &rman->mm; |
117 | int ret = 0; | ||
118 | 127 | ||
119 | spin_lock(&glob->lru_lock); | 128 | spin_lock(&rman->lock); |
120 | if (drm_mm_clean(mm)) { | 129 | if (drm_mm_clean(mm)) { |
121 | drm_mm_takedown(mm); | 130 | drm_mm_takedown(mm); |
122 | kfree(mm); | 131 | spin_unlock(&rman->lock); |
132 | kfree(rman); | ||
123 | man->priv = NULL; | 133 | man->priv = NULL; |
124 | } else | 134 | return 0; |
125 | ret = -EBUSY; | 135 | } |
126 | spin_unlock(&glob->lru_lock); | 136 | spin_unlock(&rman->lock); |
127 | return ret; | 137 | return -EBUSY; |
128 | } | 138 | } |
129 | 139 | ||
130 | static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, | 140 | static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, |
131 | const char *prefix) | 141 | const char *prefix) |
132 | { | 142 | { |
133 | struct ttm_bo_global *glob = man->bdev->glob; | 143 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; |
134 | struct drm_mm *mm = man->priv; | ||
135 | 144 | ||
136 | spin_lock(&glob->lru_lock); | 145 | spin_lock(&rman->lock); |
137 | drm_mm_debug_table(mm, prefix); | 146 | drm_mm_debug_table(&rman->mm, prefix); |
138 | spin_unlock(&glob->lru_lock); | 147 | spin_unlock(&rman->lock); |
139 | } | 148 | } |
140 | 149 | ||
141 | const struct ttm_mem_type_manager_func ttm_bo_manager_func = { | 150 | const struct ttm_mem_type_manager_func ttm_bo_manager_func = { |
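The rewritten range manager stops borrowing the global LRU lock: the drm_mm instance and a spinlock that protects it now live together in a private ttm_range_manager, so node allocation and release only contend with other users of the same manager. A small userspace sketch of that "state and lock travel together" layout, with a trivial bump allocator standing in for drm_mm and a pthread mutex for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Very small stand-in for drm_mm: a bump allocator over a page range. */
struct range_mm {
	unsigned long next, end;
};

/* Counterpart of ttm_range_manager: allocator state and its lock are
 * one private object hanging off the memory-type manager. */
struct range_manager {
	struct range_mm mm;
	pthread_mutex_t lock;
};

static struct range_manager *range_manager_init(unsigned long p_size)
{
	struct range_manager *rman = calloc(1, sizeof(*rman));

	if (!rman)
		return NULL;
	rman->mm.end = p_size;
	pthread_mutex_init(&rman->lock, NULL);
	return rman;
}

static long range_manager_get(struct range_manager *rman, unsigned long pages)
{
	long start = -1;

	pthread_mutex_lock(&rman->lock);
	if (rman->mm.next + pages <= rman->mm.end) {
		start = (long)rman->mm.next;
		rman->mm.next += pages;
	}
	pthread_mutex_unlock(&rman->lock);
	return start;
}

int main(void)
{
	struct range_manager *rman = range_manager_init(1024);

	printf("node at %ld\n", range_manager_get(rman, 16));	/* 0  */
	printf("node at %ld\n", range_manager_get(rman, 16));	/* 16 */
	pthread_mutex_destroy(&rman->lock);
	free(rman);
	return 0;
}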
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a7bab87a548..af789dc869b 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | |||
440 | return ret; | 440 | return ret; |
441 | 441 | ||
442 | ret = be->func->bind(be, bo_mem); | 442 | ret = be->func->bind(be, bo_mem); |
443 | if (ret) { | 443 | if (unlikely(ret != 0)) |
444 | printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n"); | ||
445 | return ret; | 444 | return ret; |
446 | } | ||
447 | 445 | ||
448 | ttm->state = tt_bound; | 446 | ttm->state = tt_bound; |
449 | 447 | ||
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 9b5b4d9dd62..3e038a394c5 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c | |||
@@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) | |||
235 | vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - | 235 | vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - |
236 | first_pfn + 1; | 236 | first_pfn + 1; |
237 | 237 | ||
238 | if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) | 238 | vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages); |
239 | if (NULL == vsg->pages) | ||
239 | return -ENOMEM; | 240 | return -ENOMEM; |
240 | memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); | ||
241 | down_read(¤t->mm->mmap_sem); | 241 | down_read(¤t->mm->mmap_sem); |
242 | ret = get_user_pages(current, current->mm, | 242 | ret = get_user_pages(current, current->mm, |
243 | (unsigned long)xfer->mem_addr, | 243 | (unsigned long)xfer->mem_addr, |
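The via_dmablit change is a straightforward cleanup: vzalloc() returns already-zeroed memory, replacing the vmalloc() plus explicit memset() pair. The userspace equivalent, with calloc() playing the role of vzalloc():

#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n = 128 * sizeof(void *);

	/* before: allocate, check, then clear by hand */
	void *a = malloc(n);
	if (a)
		memset(a, 0, n);

	/* after: allocate already-cleared memory in one call */
	void *b = calloc(1, n);

	free(a);
	free(b);
	return 0;
}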
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 51d9f9f1d7f..76954e3528c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
691 | 691 | ||
692 | fence_rep.error = ret; | 692 | fence_rep.error = ret; |
693 | fence_rep.fence_seq = (uint64_t) sequence; | 693 | fence_rep.fence_seq = (uint64_t) sequence; |
694 | fence_rep.pad64 = 0; | ||
694 | 695 | ||
695 | user_fence_rep = (struct drm_vmw_fence_rep __user *) | 696 | user_fence_rep = (struct drm_vmw_fence_rep __user *) |
696 | (unsigned long)arg->fence_rep; | 697 | (unsigned long)arg->fence_rep; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 87c6e6156d7..cceeb42789b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) | |||
720 | &vmw_vram_ne_placement, | 720 | &vmw_vram_ne_placement, |
721 | false, &vmw_dmabuf_bo_free); | 721 | false, &vmw_dmabuf_bo_free); |
722 | vmw_overlay_resume_all(dev_priv); | 722 | vmw_overlay_resume_all(dev_priv); |
723 | if (unlikely(ret != 0)) | ||
724 | vfbs->buffer = NULL; | ||
723 | 725 | ||
724 | return ret; | 726 | return ret; |
725 | } | 727 | } |
@@ -730,6 +732,9 @@ static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) | |||
730 | struct vmw_framebuffer_surface *vfbs = | 732 | struct vmw_framebuffer_surface *vfbs = |
731 | vmw_framebuffer_to_vfbs(&vfb->base); | 733 | vmw_framebuffer_to_vfbs(&vfb->base); |
732 | 734 | ||
735 | if (unlikely(vfbs->buffer == NULL)) | ||
736 | return 0; | ||
737 | |||
733 | bo = &vfbs->buffer->base; | 738 | bo = &vfbs->buffer->base; |
734 | ttm_bo_unref(&bo); | 739 | ttm_bo_unref(&bo); |
735 | vfbs->buffer = NULL; | 740 | vfbs->buffer = NULL; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index a01c47ddb5b..29113c9b26a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | |||
557 | return -EINVAL; | 557 | return -EINVAL; |
558 | } | 558 | } |
559 | 559 | ||
560 | dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); | 560 | dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); |
561 | 561 | ||
562 | if (!dev_priv->ldu_priv) | 562 | if (!dev_priv->ldu_priv) |
563 | return -ENOMEM; | 563 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index df2036ed18d..f1a52f9e729 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
@@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv) | |||
585 | return -ENOSYS; | 585 | return -ENOSYS; |
586 | } | 586 | } |
587 | 587 | ||
588 | overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); | 588 | overlay = kmalloc(sizeof(*overlay), GFP_KERNEL); |
589 | if (!overlay) | 589 | if (!overlay) |
590 | return -ENOMEM; | 590 | return -ENOMEM; |
591 | 591 | ||
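The two vmwgfx fixes above correct a swapped-argument bug: kmalloc() takes the size first and the GFP flags second, so kmalloc(GFP_KERNEL, sizeof(*x)) compiles silently but allocates the wrong number of bytes and passes the intended size as the flags word. A toy allocator with the same parameter order shows why the compiler accepts the swap (the GFP_KERNEL value below is illustrative only):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL ((gfp_t)0xd0)   /* illustrative value, not the kernel's definition */

/* Toy allocator with kmalloc's parameter order: size first, flags second. */
static void *toy_kmalloc(size_t size, gfp_t flags)
{
	(void)flags;
	return malloc(size);
}

struct overlay { char payload[4096]; };

int main(void)
{
	/* Swapping the arguments still compiles because both parameters are
	 * integer types, but it allocates sizeof-the-flag-value bytes and
	 * hands the intended size to the allocator as flags. */
	struct overlay *wrong = toy_kmalloc(GFP_KERNEL, sizeof(struct overlay));
	struct overlay *right = toy_kmalloc(sizeof(struct overlay), GFP_KERNEL);

	printf("wrong=%p right=%p\n", (void *)wrong, (void *)right);
	free(wrong);
	free(right);
	return 0;
}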