diff options
| author | Dave Airlie <airlied@redhat.com> | 2013-12-22 19:35:57 -0500 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2013-12-22 19:35:57 -0500 |
| commit | 418cb50bd6f977249b38f8888359e0adca6fc8ea (patch) | |
| tree | aef53788d2fc1da1331513bc0e751e6be701a48d | |
| parent | 73e33c11b64256560b0ce54ed5fb3440446bbffd (diff) | |
| parent | a885b3ccc74d8e38074e1c43a47c354c5ea0b01e (diff) | |
Merge tag 'drm-intel-fixes-2013-12-18' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes
Besides the 2 fixes for tricky corner cases in gem from Chris I've
promised already two patches from Paulo to fix pc8 warnings (both ported
from -next, bug report from Dave Jones) and one patch to fix vga
enable/disable on snb+. That one is a really old bug, but apparently it
can cause machine hangs if you try hard enough with vgacon/efifb handover.
* tag 'drm-intel-fixes-2013-12-18' of git://people.freedesktop.org/~danvet/drm-intel:
drm/i915: Use the correct GMCH_CTRL register for Sandybridge+
drm/i915: get a PC8 reference when enabling the power well
drm/i915: change CRTC assertion on LCPLL disable
drm/i915: Fix erroneous dereference of batch_obj inside reset_status
drm/i915: Prevent double unref following alloc failure during execbuffer
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 34 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem_execbuffer.c | 28 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 7 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 14 |
4 files changed, 60 insertions, 23 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 621c7c67a643..76d3d1ab73c6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) | |||
| 2343 | kfree(request); | 2343 | kfree(request); |
| 2344 | } | 2344 | } |
| 2345 | 2345 | ||
| 2346 | static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | 2346 | static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, |
| 2347 | struct intel_ring_buffer *ring) | 2347 | struct intel_ring_buffer *ring) |
| 2348 | { | 2348 | { |
| 2349 | u32 completed_seqno; | 2349 | u32 completed_seqno = ring->get_seqno(ring, false); |
| 2350 | u32 acthd; | 2350 | u32 acthd = intel_ring_get_active_head(ring); |
| 2351 | struct drm_i915_gem_request *request; | ||
| 2352 | |||
| 2353 | list_for_each_entry(request, &ring->request_list, list) { | ||
| 2354 | if (i915_seqno_passed(completed_seqno, request->seqno)) | ||
| 2355 | continue; | ||
| 2351 | 2356 | ||
| 2352 | acthd = intel_ring_get_active_head(ring); | 2357 | i915_set_reset_status(ring, request, acthd); |
| 2353 | completed_seqno = ring->get_seqno(ring, false); | 2358 | } |
| 2359 | } | ||
| 2354 | 2360 | ||
| 2361 | static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | ||
| 2362 | struct intel_ring_buffer *ring) | ||
| 2363 | { | ||
| 2355 | while (!list_empty(&ring->request_list)) { | 2364 | while (!list_empty(&ring->request_list)) { |
| 2356 | struct drm_i915_gem_request *request; | 2365 | struct drm_i915_gem_request *request; |
| 2357 | 2366 | ||
| @@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
| 2359 | struct drm_i915_gem_request, | 2368 | struct drm_i915_gem_request, |
| 2360 | list); | 2369 | list); |
| 2361 | 2370 | ||
| 2362 | if (request->seqno > completed_seqno) | ||
| 2363 | i915_set_reset_status(ring, request, acthd); | ||
| 2364 | |||
| 2365 | i915_gem_free_request(request); | 2371 | i915_gem_free_request(request); |
| 2366 | } | 2372 | } |
| 2367 | 2373 | ||
| @@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev) | |||
| 2403 | struct intel_ring_buffer *ring; | 2409 | struct intel_ring_buffer *ring; |
| 2404 | int i; | 2410 | int i; |
| 2405 | 2411 | ||
| 2412 | /* | ||
| 2413 | * Before we free the objects from the requests, we need to inspect | ||
| 2414 | * them for finding the guilty party. As the requests only borrow | ||
| 2415 | * their reference to the objects, the inspection must be done first. | ||
| 2416 | */ | ||
| 2417 | for_each_ring(ring, dev_priv, i) | ||
| 2418 | i915_gem_reset_ring_status(dev_priv, ring); | ||
| 2419 | |||
| 2406 | for_each_ring(ring, dev_priv, i) | 2420 | for_each_ring(ring, dev_priv, i) |
| 2407 | i915_gem_reset_ring_lists(dev_priv, ring); | 2421 | i915_gem_reset_ring_cleanup(dev_priv, ring); |
| 2408 | 2422 | ||
| 2409 | i915_gem_cleanup_ringbuffer(dev); | 2423 | i915_gem_cleanup_ringbuffer(dev); |
| 2410 | 2424 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b7e787fb4649..a3ba9a8cd687 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 93 | { | 93 | { |
| 94 | struct drm_i915_gem_object *obj; | 94 | struct drm_i915_gem_object *obj; |
| 95 | struct list_head objects; | 95 | struct list_head objects; |
| 96 | int i, ret = 0; | 96 | int i, ret; |
| 97 | 97 | ||
| 98 | INIT_LIST_HEAD(&objects); | 98 | INIT_LIST_HEAD(&objects); |
| 99 | spin_lock(&file->table_lock); | 99 | spin_lock(&file->table_lock); |
| @@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 106 | DRM_DEBUG("Invalid object handle %d at index %d\n", | 106 | DRM_DEBUG("Invalid object handle %d at index %d\n", |
| 107 | exec[i].handle, i); | 107 | exec[i].handle, i); |
| 108 | ret = -ENOENT; | 108 | ret = -ENOENT; |
| 109 | goto out; | 109 | goto err; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | if (!list_empty(&obj->obj_exec_link)) { | 112 | if (!list_empty(&obj->obj_exec_link)) { |
| @@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 114 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", | 114 | DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", |
| 115 | obj, exec[i].handle, i); | 115 | obj, exec[i].handle, i); |
| 116 | ret = -EINVAL; | 116 | ret = -EINVAL; |
| 117 | goto out; | 117 | goto err; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | drm_gem_object_reference(&obj->base); | 120 | drm_gem_object_reference(&obj->base); |
| @@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 123 | spin_unlock(&file->table_lock); | 123 | spin_unlock(&file->table_lock); |
| 124 | 124 | ||
| 125 | i = 0; | 125 | i = 0; |
| 126 | list_for_each_entry(obj, &objects, obj_exec_link) { | 126 | while (!list_empty(&objects)) { |
| 127 | struct i915_vma *vma; | 127 | struct i915_vma *vma; |
| 128 | 128 | ||
| 129 | obj = list_first_entry(&objects, | ||
| 130 | struct drm_i915_gem_object, | ||
| 131 | obj_exec_link); | ||
| 132 | |||
| 129 | /* | 133 | /* |
| 130 | * NOTE: We can leak any vmas created here when something fails | 134 | * NOTE: We can leak any vmas created here when something fails |
| 131 | * later on. But that's no issue since vma_unbind can deal with | 135 | * later on. But that's no issue since vma_unbind can deal with |
| @@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 138 | if (IS_ERR(vma)) { | 142 | if (IS_ERR(vma)) { |
| 139 | DRM_DEBUG("Failed to lookup VMA\n"); | 143 | DRM_DEBUG("Failed to lookup VMA\n"); |
| 140 | ret = PTR_ERR(vma); | 144 | ret = PTR_ERR(vma); |
| 141 | goto out; | 145 | goto err; |
| 142 | } | 146 | } |
| 143 | 147 | ||
| 148 | /* Transfer ownership from the objects list to the vmas list. */ | ||
| 144 | list_add_tail(&vma->exec_list, &eb->vmas); | 149 | list_add_tail(&vma->exec_list, &eb->vmas); |
| 150 | list_del_init(&obj->obj_exec_link); | ||
| 145 | 151 | ||
| 146 | vma->exec_entry = &exec[i]; | 152 | vma->exec_entry = &exec[i]; |
| 147 | if (eb->and < 0) { | 153 | if (eb->and < 0) { |
| @@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb, | |||
| 155 | ++i; | 161 | ++i; |
| 156 | } | 162 | } |
| 157 | 163 | ||
| 164 | return 0; | ||
| 165 | |||
| 158 | 166 | ||
| 159 | out: | 167 | err: |
| 160 | while (!list_empty(&objects)) { | 168 | while (!list_empty(&objects)) { |
| 161 | obj = list_first_entry(&objects, | 169 | obj = list_first_entry(&objects, |
| 162 | struct drm_i915_gem_object, | 170 | struct drm_i915_gem_object, |
| 163 | obj_exec_link); | 171 | obj_exec_link); |
| 164 | list_del_init(&obj->obj_exec_link); | 172 | list_del_init(&obj->obj_exec_link); |
| 165 | if (ret) | 173 | drm_gem_object_unreference(&obj->base); |
| 166 | drm_gem_object_unreference(&obj->base); | ||
| 167 | } | 174 | } |
| 175 | /* | ||
| 176 | * Objects already transfered to the vmas list will be unreferenced by | ||
| 177 | * eb_destroy. | ||
| 178 | */ | ||
| 179 | |||
| 168 | return ret; | 180 | return ret; |
| 169 | } | 181 | } |
| 170 | 182 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8b8bde7dce53..54e82a80cf50 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | |||
| 6303 | uint32_t val; | 6303 | uint32_t val; |
| 6304 | 6304 | ||
| 6305 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) | 6305 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) |
| 6306 | WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", | 6306 | WARN(crtc->active, "CRTC for pipe %c enabled\n", |
| 6307 | pipe_name(crtc->pipe)); | 6307 | pipe_name(crtc->pipe)); |
| 6308 | 6308 | ||
| 6309 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); | 6309 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); |
| @@ -11126,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector, | |||
| 11126 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | 11126 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) |
| 11127 | { | 11127 | { |
| 11128 | struct drm_i915_private *dev_priv = dev->dev_private; | 11128 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 11129 | unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; | ||
| 11129 | u16 gmch_ctrl; | 11130 | u16 gmch_ctrl; |
| 11130 | 11131 | ||
| 11131 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); | 11132 | pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); |
| 11132 | if (state) | 11133 | if (state) |
| 11133 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; | 11134 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; |
| 11134 | else | 11135 | else |
| 11135 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; | 11136 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; |
| 11136 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | 11137 | pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); |
| 11137 | return 0; | 11138 | return 0; |
| 11138 | } | 11139 | } |
| 11139 | 11140 | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3657ab43c8fd..26c29c173221 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -5688,6 +5688,8 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5688 | unsigned long irqflags; | 5688 | unsigned long irqflags; |
| 5689 | uint32_t tmp; | 5689 | uint32_t tmp; |
| 5690 | 5690 | ||
| 5691 | WARN_ON(dev_priv->pc8.enabled); | ||
| 5692 | |||
| 5691 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); | 5693 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
| 5692 | is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; | 5694 | is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; |
| 5693 | enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; | 5695 | enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; |
| @@ -5747,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
| 5747 | static void __intel_power_well_get(struct drm_device *dev, | 5749 | static void __intel_power_well_get(struct drm_device *dev, |
| 5748 | struct i915_power_well *power_well) | 5750 | struct i915_power_well *power_well) |
| 5749 | { | 5751 | { |
| 5750 | if (!power_well->count++) | 5752 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5753 | |||
| 5754 | if (!power_well->count++) { | ||
| 5755 | hsw_disable_package_c8(dev_priv); | ||
| 5751 | __intel_set_power_well(dev, true); | 5756 | __intel_set_power_well(dev, true); |
| 5757 | } | ||
| 5752 | } | 5758 | } |
| 5753 | 5759 | ||
| 5754 | static void __intel_power_well_put(struct drm_device *dev, | 5760 | static void __intel_power_well_put(struct drm_device *dev, |
| 5755 | struct i915_power_well *power_well) | 5761 | struct i915_power_well *power_well) |
| 5756 | { | 5762 | { |
| 5763 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 5764 | |||
| 5757 | WARN_ON(!power_well->count); | 5765 | WARN_ON(!power_well->count); |
| 5758 | if (!--power_well->count && i915_disable_power_well) | 5766 | if (!--power_well->count && i915_disable_power_well) { |
| 5759 | __intel_set_power_well(dev, false); | 5767 | __intel_set_power_well(dev, false); |
| 5768 | hsw_enable_package_c8(dev_priv); | ||
| 5769 | } | ||
| 5760 | } | 5770 | } |
| 5761 | 5771 | ||
| 5762 | void intel_display_power_get(struct drm_device *dev, | 5772 | void intel_display_power_get(struct drm_device *dev, |
