author	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-01-16 16:06:30 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-01-16 16:06:30 -0500
commit	0d9d349d8788d30f3fc3bb39279c370f94d9dbec (patch)
tree	874f301d180bd2a80dee68ec4caf79ff64f9bed9 /drivers/gpu/drm/i915
parent	cba1c07377132fb87b2c73b395ef386da7e03f60 (diff)
parent	145830dfb005961cb507a578c9d2e7622f0b3716 (diff)
Merge commit origin/master into drm-intel-next
Conflicts are getting out of hand, and now we have to shuffle even more in
-next which was also shuffled in -fixes (the call for drm_mode_config_reset
needs to move yet again). So do a proper backmerge. I wanted to wait with
this for the 3.13 release, but alas let's just do this now.

Conflicts:
	drivers/gpu/drm/i915/i915_reg.h
	drivers/gpu/drm/i915/intel_ddi.c
	drivers/gpu/drm/i915/intel_display.c
	drivers/gpu/drm/i915/intel_pm.c

Besides the conflict around the forcewake get/put code (where we changed the
called function in -fixes and added a new parameter in -next) all the
current conflicts are of the adjacent-lines-changed type.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
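To make the forcewake resolution concrete, here is a minimal sketch of what
that conflict and its resolution look like, reconstructed from the
hsw_restore_lcpll hunk further down; the exact pre-merge lines on each branch
are an assumption for illustration, not quotes from either tree:

	<<<<<<< drm-intel-next (added the FORCEWAKE_ALL parameter)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	=======
		gen6_gt_force_wake_get(dev_priv);
	>>>>>>> origin/master (-fixes renamed the called function)

	/* resolution: keep the -fixes function name with the -next parameter */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);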
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c            | 20
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c            |  3
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h            |  2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c            | 41
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c    | 16
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 13
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_evict.c      | 14
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c | 88
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c        |  8
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c            |  2
-rw-r--r--	drivers/gpu/drm/i915/intel_ddi.c           | 13
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c       | 45
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c            | 34
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h           |  3
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c            | 24
15 files changed, 200 insertions(+), 126 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1a25f9eaca59..35542eaabe89 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -85,6 +85,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1492,16 +1500,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
 
@@ -1605,7 +1606,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1851,8 +1851,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 31ffe39d2b79..bb27f0dde03d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -540,8 +540,10 @@ static int i915_drm_freeze(struct drm_device *dev)
 		 * Disable CRTCs directly since we want to preserve sw state
 		 * for _thaw.
 		 */
+		mutex_lock(&dev->mode_config.mutex);
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 			dev_priv->display.crtc_disable(crtc);
+		mutex_unlock(&dev->mode_config.mutex);
 
 		intel_modeset_suspend_hw(dev);
 	}
@@ -655,6 +657,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 	intel_modeset_init_hw(dev);
 
 	drm_modeset_lock_all(dev);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, true);
 	drm_modeset_unlock_all(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cf7922bdf87c..ff6f870d6621 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1937,9 +1937,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c5a99c46ca9c..32636a470367 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2370,15 +2370,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 				       struct intel_ring_buffer *ring)
 {
-	u32 completed_seqno;
-	u32 acthd;
+	u32 completed_seqno = ring->get_seqno(ring, false);
+	u32 acthd = intel_ring_get_active_head(ring);
+	struct drm_i915_gem_request *request;
+
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_seqno_passed(completed_seqno, request->seqno))
+			continue;
 
-	acthd = intel_ring_get_active_head(ring);
-	completed_seqno = ring->get_seqno(ring, false);
+		i915_set_reset_status(ring, request, acthd);
+	}
+}
 
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_ring_buffer *ring)
+{
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2386,9 +2395,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (request->seqno > completed_seqno)
-			i915_set_reset_status(ring, request, acthd);
-
 		i915_gem_free_request(request);
 	}
 
@@ -2430,8 +2436,16 @@ void i915_gem_reset(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_status(dev_priv, ring);
+
 	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_lists(dev_priv, ring);
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_cleanup_ringbuffer(dev);
 
@@ -4477,10 +4491,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
-	if (IS_HSW_GT3(dev))
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
-	else
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+	if (IS_HASWELL(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 41877045a1a0..e08acaba5402 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -345,10 +345,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -421,11 +419,21 @@ static int do_switch(struct i915_hw_context *to)
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		goto error;
+		goto err;
+
+	i915_gem_object_pin_pages(obj);
 
 	ret = -ENOMEM;
 
 	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
-		goto error;
+		goto err_unpin;
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 		drm_free_large(pages);
 
 	if (!obj->dma_buf_vmapping)
-		goto error;
+		goto err_unpin;
 
 	obj->vmapping_count = 1;
-	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
 
-error:
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err:
 	mutex_unlock(&dev->struct_mutex);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bceddf5a04bc..8d795626a25e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define  __EXEC_OBJECT_HAS_PIN (1<<31)
+#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -90,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
-	int i, ret = 0;
+	int i, ret;
 
 	INIT_LIST_HEAD(&objects);
 	spin_lock(&file->table_lock);
@@ -103,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
-			goto out;
+			goto err;
 		}
 
 		if (!list_empty(&obj->obj_exec_link)) {
@@ -111,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
 			ret = -EINVAL;
-			goto out;
+			goto err;
 		}
 
 		drm_gem_object_reference(&obj->base);
@@ -120,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	spin_unlock(&file->table_lock);
 
 	i = 0;
-	list_for_each_entry(obj, &objects, obj_exec_link) {
+	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
 
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+
 		/*
 		 * NOTE: We can leak any vmas created here when something fails
 		 * later on. But that's no issue since vma_unbind can deal with
@@ -135,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
-			goto out;
+			goto err;
 		}
 
+		/* Transfer ownership from the objects list to the vmas list. */
 		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
@@ -152,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		++i;
 	}
 
+	return 0;
 
-out:
+
+err:
 	while (!list_empty(&objects)) {
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
 		list_del_init(&obj->obj_exec_link);
-		if (ret)
-			drm_gem_object_unreference(&obj->base);
+		drm_gem_object_unreference(&obj->base);
 	}
+	/*
+	 * Objects already transfered to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
+
 	return ret;
 }
 
@@ -187,7 +202,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
 
@@ -195,6 +231,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 				       struct i915_vma,
 				       exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
@@ -477,9 +514,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 	return ret;
 }
 
-#define  __EXEC_OBJECT_HAS_PIN (1<<31)
-#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -551,26 +585,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -669,13 +683,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			goto err;
 	}
 
-err:		/* Decrement pin count for bound objects */
-	list_for_each_entry(vma, vmas, exec_list)
-		i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 	if (ret != -ENOSPC || retry++)
 		return ret;
 
+	/* Decrement pin count for bound objects */
+	list_for_each_entry(vma, vmas, exec_list)
+		i915_gem_execbuffer_unreserve_vma(vma);
+
 	ret = i915_gem_evict_vm(vm, true);
 	if (ret)
 		return ret;
@@ -707,6 +722,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8c7ebfa3bd56..6c3a6e60aeac 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define HSW_WB_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x2)
 #define HSW_WB_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x3)
 #define HSW_WB_ELLC_LLC_AGE0	HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3	HSW_CACHEABILITY_CONTROL(0x8)
 #define HSW_WT_ELLC_LLC_AGE0	HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3	HSW_CACHEABILITY_CONTROL(0x7)
 
 #define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	case I915_CACHE_NONE:
 		break;
 	case I915_CACHE_WT:
-		pte |= HSW_WT_ELLC_LLC_AGE0;
+		pte |= HSW_WT_ELLC_LLC_AGE3;
 		break;
 	default:
-		pte |= HSW_WB_ELLC_LLC_AGE0;
+		pte |= HSW_WB_ELLC_LLC_AGE3;
 		break;
 	}
 
@@ -918,14 +920,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 		WARN_ON(readq(&gtt_entries[i-1])
 			!= gen8_pte_encode(addr, level, true));
 
-#if 0 /* TODO: Still needed on GEN8? */
 	/* This next bit makes the above posting read even more important. We
 	 * want to flush the TLBs only after we're certain all the PTE updates
 	 * have finished.
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
 }
 
 /*
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d64da4fe36e5..6d11e253218a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2715,6 +2715,8 @@ static void gen8_irq_preinstall(struct drm_device *dev)
 #undef GEN8_IRQ_INIT_NDX
 
 	POSTING_READ(GEN8_PCU_IIR);
+
+	ibx_irq_preinstall(dev);
 }
 
 static void ibx_hpd_irq_setup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index cec06a5453cc..74749c6f897e 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations = ddi_translations_dp;
 		break;
 	case PORT_D:
-		if (intel_dpd_is_edp(dev))
+		if (intel_dp_is_edp(dev, PORT_D))
 			ddi_translations = ddi_translations_edp;
 		else
 			ddi_translations = ddi_translations_dp;
@@ -1136,12 +1136,18 @@ void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
 	enum pipe pipe;
 	struct intel_crtc *intel_crtc;
 
+	dev_priv->ddi_plls.spll_refcount = 0;
+	dev_priv->ddi_plls.wrpll1_refcount = 0;
+	dev_priv->ddi_plls.wrpll2_refcount = 0;
+
 	for_each_pipe(pipe) {
 		intel_crtc =
 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
-		if (!intel_crtc->active)
+		if (!intel_crtc->active) {
+			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
 			continue;
+		}
 
 		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
 								 pipe);
@@ -1235,9 +1241,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	if (wait)
 		intel_wait_ddi_buf_idle(dev_priv, port);
 
-	if (type == INTEL_OUTPUT_EDP) {
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 		ironlake_edp_panel_off(intel_dp);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9db009c55c88..e77d4b8856a7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6027,7 +6027,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
 	uint16_t postoff = 0;
 
 	if (intel_crtc->config.limited_color_range)
-		postoff = (16 * (1 << 13) / 255) & 0x1fff;
+		postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
 	I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
 	I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6614,7 +6614,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 
 	/* Make sure we're not on PC8 state before disabling PC8, otherwise
 	 * we'll hang the machine! */
-	dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6648,7 +6648,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
 
-	dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
 void hsw_enable_pc8_work(struct work_struct *__work)
@@ -8581,7 +8581,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					DERRMR_PIPEB_PRI_FLIP_DONE |
 					DERRMR_PIPEC_PRI_FLIP_DONE));
-		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+				      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit(ring, DERRMR);
 		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	}
@@ -9363,7 +9364,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
-	if (!IS_HASWELL(dev)) {
+	if (!HAS_DDI(dev)) {
 		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 	}
@@ -10330,7 +10331,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_ddi_init(dev, PORT_D);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
-		dpd_is_edp = intel_dpd_is_edp(dev);
+		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
 
 		if (has_edp_a(dev))
 			intel_dp_init(dev, DP_A, PORT_A);
@@ -10367,8 +10368,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
 		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
-				      PORT_C);
+			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
 	}
 
 	intel_dsi_init(dev);
@@ -10816,11 +10816,20 @@ static struct intel_quirk intel_quirks[] = {
 	/* Sony Vaio Y cannot use SSC on LVDS */
 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
-	/*
-	 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
-	 * seem to use inverted backlight PWM.
-	 */
-	{ 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
+	/* Acer Aspire 5734Z must invert backlight brightness */
+	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+	/* Acer/eMachines G725 */
+	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+	/* Acer/eMachines e725 */
+	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+	/* Acer/Packard Bell NCL20 */
+	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+	/* Acer Aspire 4736Z */
+	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -11302,8 +11311,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	}
 
 	intel_modeset_check_state(dev);
-
-	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -11312,7 +11319,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
+	mutex_lock(&dev->mode_config.mutex);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, false);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -11390,14 +11400,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
 	u16 gmch_ctrl;
 
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
 	if (state)
 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
 	else
 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8f17f8fbd0b1..9b40113f4fa1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3324,11 +3324,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 }
 
 /* check the VBT to see whether the eDP is on DP-D port */
-bool intel_dpd_is_edp(struct drm_device *dev)
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	union child_device_config *p_child;
 	int i;
+	static const short port_mapping[] = {
+		[PORT_B] = PORT_IDPB,
+		[PORT_C] = PORT_IDPC,
+		[PORT_D] = PORT_IDPD,
+	};
+
+	if (port == PORT_A)
+		return true;
 
 	if (!dev_priv->vbt.child_dev_num)
 		return false;
@@ -3336,7 +3344,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		p_child = dev_priv->vbt.child_dev + i;
 
-		if (p_child->common.dvo_port == PORT_IDPD &&
+		if (p_child->common.dvo_port == port_mapping[port] &&
 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
 			return true;
@@ -3614,26 +3622,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
 
-	type = DRM_MODE_CONNECTOR_DisplayPort;
-	/*
-	 * FIXME : We need to initialize built-in panels before external panels.
-	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
-	 */
-	switch (port) {
-	case PORT_A:
+	if (intel_dp_is_edp(dev, port))
 		type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_C:
-		if (IS_VALLEYVIEW(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_D:
-		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	default: /* silence GCC warning */
-		break;
-	}
+	else
+		type = DRM_MODE_CONNECTOR_DisplayPort;
 
 	/*
 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 4cbf49051b9c..8754db9e3d52 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -722,7 +722,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 void intel_dp_check_link_status(struct intel_dp *intel_dp);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_config *pipe_config);
-bool intel_dpd_is_edp(struct drm_device *dev);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
 void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
 void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@ -839,6 +839,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    uint32_t sprite_width, int pixel_size,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
 bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9998185fdb22..d77cc81900f9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1113,7 +1113,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
@@ -1200,7 +1200,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 	crtc = intel_get_crtc_for_plane(dev, plane);
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
@@ -1431,7 +1431,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 		const struct drm_display_mode *adjusted_mode =
 			&to_intel_crtc(crtc)->config.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
-		int htotal = adjusted_mode->htotal;
+		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
@@ -1557,7 +1557,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		const struct drm_display_mode *adjusted_mode =
 			&to_intel_crtc(enabled)->config.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
-		int htotal = adjusted_mode->htotal;
+		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
@@ -1985,8 +1985,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 	/* The WM are computed with base on how long it takes to fill a single
 	 * row at the given clock rate, multiplied by 8.
 	 * */
-	linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
-	ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+				     mode->crtc_clock);
+	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
 					 intel_ddi_get_cdclk_freq(dev_priv));
 
 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -5722,10 +5723,19 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
 }
 
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	mutex_init(&dev_priv->rps.hw_lock);
+
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
 }