Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c            |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |  7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c    | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c      | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 60
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h            |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c           |  5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c            | 34
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h           |  3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c         | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c            | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c        |  1
18 files changed, 187 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d045135..5c648425c1e0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 
+	/*
+	 * The dri breadcrumb update races against the drm master disappearing.
+	 * Instead of trying to fix this (this is by far not the only ums issue)
+	 * just don't do the update in kms mode.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return;
+
 	if (dev->primary->master) {
 		master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->uncore.lock);
 	spin_lock_init(&dev_priv->mm.object_stat_lock);
 	mutex_init(&dev_priv->dpio_lock);
-	mutex_init(&dev_priv->rps.hw_lock);
 	mutex_init(&dev_priv->modeset_restore_lock);
 
-	mutex_init(&dev_priv->pc8.lock);
-	dev_priv->pc8.requirements_met = false;
-	dev_priv->pc8.gpu_idle = false;
-	dev_priv->pc8.irqs_disabled = false;
-	dev_priv->pc8.enabled = false;
-	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
-	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+	intel_pm_setup(dev);
 
 	intel_display_crc_init(dev);
 
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	intel_irq_init(dev);
-	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
 
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
+	mutex_lock(&dev->struct_mutex);
 	i915_gem_context_close(dev, file_priv);
 	i915_gem_release(dev, file_priv);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 989be12cdd6e..5b7b7e06cb3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -534,8 +534,10 @@ static int i915_drm_freeze(struct drm_device *dev)
 		 * Disable CRTCs directly since we want to preserve sw state
 		 * for _thaw.
 		 */
+		mutex_lock(&dev->mode_config.mutex);
 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 			dev_priv->display.crtc_disable(crtc);
+		mutex_unlock(&dev->mode_config.mutex);
 
 		intel_modeset_suspend_hw(dev);
 	}
@@ -649,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 		intel_modeset_init_hw(dev);
 
 		drm_modeset_lock_all(dev);
+		drm_mode_config_reset(dev);
 		intel_modeset_setup_hw_state(dev, true);
 		drm_modeset_unlock_all(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ccdbecca070d..90fcccba17b0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
+				 (((dev)->pdev->device & 0xf) == 0x2 || \
+				 ((dev)->pdev->device & 0xf) == 0x6 || \
+				 ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
 				 ((dev)->pdev->device & 0x00F0) == 0x0020)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
 void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 12bbd5eac70d..621c7c67a643 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4442,10 +4442,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (dev_priv->ellc_size)
 		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
-	if (IS_HSW_GT3(dev))
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
-	else
-		I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+	if (IS_HASWELL(dev))
+		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
+			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
 	if (HAS_PCH_NOP(dev)) {
 		u32 temp = I915_READ(GEN7_MSG_CTL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df32292f..b0f42b9ca037 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
 	idr_destroy(&file_priv->context_idr);
-	mutex_unlock(&dev->struct_mutex);
 }
 
 static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
 	if (ret)
 		return ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out. Note
+	/*
+	 * Pin can switch back to the default context if we end up calling into
+	 * evict_everything - as a last ditch gtt defrag effort that also
+	 * switches to the default context. Hence we need to reload from here.
+	 */
+	from = ring->last_context;
+
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
 	 * write domains when putting a context object onto the active list
 	 * (when switching away from it), this won't block.
-	 * XXX: We need a real interface to do this instead of trickery. */
+	 *
+	 * XXX: We need a real interface to do this instead of trickery.
+	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
 		i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 7d5752fda5f1..9bb533e0d762 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -125,13 +125,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		goto error;
+		goto err;
+
+	i915_gem_object_pin_pages(obj);
 
 	ret = -ENOMEM;
 
 	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
-		goto error;
+		goto err_unpin;
 
 	i = 0;
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
@@ -141,15 +143,16 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	drm_free_large(pages);
 
 	if (!obj->dma_buf_vmapping)
-		goto error;
+		goto err_unpin;
 
 	obj->vmapping_count = 1;
-	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
 
-error:
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+err:
 	mutex_unlock(&dev->struct_mutex);
 	return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b7376533633d..8f3adc7d0dc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
+search_again:
 	/* First see if there is a large enough contiguous idle region... */
 	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
 		if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
 		list_del_init(&vma->exec_list);
 	}
 
-	/* We expect the caller to unpin, evict all and try again, or give up.
-	 * So calling i915_gem_evict_vm() is unnecessary.
+	/* Can we unpin some objects such as idle hw contents,
+	 * or pending flips?
 	 */
-	return -ENOSPC;
+	ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	/* Only idle the GPU and repeat the search once */
+	i915_gem_retire_requests(dev);
+	nonblocking = true;
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 885d595e0e02..b7e787fb4649 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
 
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 				       struct i915_vma,
 				       exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 				goto err;
 		}
 
-err:		/* Decrement pin count for bound objects */
-		list_for_each_entry(vma, vmas, exec_list)
-			i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 		if (ret != -ENOSPC || retry++)
 			return ret;
 
+		/* Decrement pin count for bound objects */
+		list_for_each_entry(vma, vmas, exec_list)
+			i915_gem_execbuffer_unreserve_vma(vma);
+
 		ret = i915_gem_evict_vm(vm, true);
 		if (ret)
 			return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3620a1b0a73c..c79dd2b1f70e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -57,7 +57,9 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
 #define HSW_WB_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x2)
 #define HSW_WB_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x3)
 #define HSW_WB_ELLC_LLC_AGE0	HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WB_ELLC_LLC_AGE3	HSW_CACHEABILITY_CONTROL(0x8)
 #define HSW_WT_ELLC_LLC_AGE0	HSW_CACHEABILITY_CONTROL(0x6)
+#define HSW_WT_ELLC_LLC_AGE3	HSW_CACHEABILITY_CONTROL(0x7)
 
 #define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
 #define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
@@ -185,10 +187,10 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	case I915_CACHE_NONE:
 		break;
 	case I915_CACHE_WT:
-		pte |= HSW_WT_ELLC_LLC_AGE0;
+		pte |= HSW_WT_ELLC_LLC_AGE3;
 		break;
 	default:
-		pte |= HSW_WB_ELLC_LLC_AGE0;
+		pte |= HSW_WB_ELLC_LLC_AGE3;
 		break;
 	}
 
@@ -335,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
 		kfree(ppgtt->gen8_pt_dma_addr[i]);
 	}
 
-	__free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
-	__free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+	__free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+	__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
 }
 
 /**
@@ -1239,6 +1241,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
 	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
 	if (bdw_gmch_ctl)
 		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+	if (bdw_gmch_ctl > 4) {
+		WARN_ON(!i915_preliminary_hw_support);
+		return 4<<20;
+	}
+
 	return bdw_gmch_ctl << 20;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f9eafb6ed523..ee2742122a02 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -235,6 +235,7 @@
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
 #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1)
+#define   MI_SRM_LRM_GLOBAL_GTT		(1<<22)
 #define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
 #define   MI_FLUSH_DW_STORE_INDEX	(1<<21)
 #define   MI_INVALIDATE_TLB		(1<<18)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 330077bcd0bd..526c8ded16b0 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -173,7 +173,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations = ddi_translations_dp;
 		break;
 	case PORT_D:
-		if (intel_dpd_is_edp(dev))
+		if (intel_dp_is_edp(dev, PORT_D))
 			ddi_translations = ddi_translations_edp;
 		else
 			ddi_translations = ddi_translations_dp;
@@ -1158,9 +1158,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 	if (wait)
 		intel_wait_ddi_buf_idle(dev_priv, port);
 
-	if (type == INTEL_OUTPUT_EDP) {
+	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 		ironlake_edp_panel_vdd_on(intel_dp);
+		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 		ironlake_edp_panel_off(intel_dp);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7ec8b488bb1d..8b8bde7dce53 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5815,7 +5815,7 @@ static void intel_set_pipe_csc(struct drm_crtc *crtc)
 	uint16_t postoff = 0;
 
 	if (intel_crtc->config.limited_color_range)
-		postoff = (16 * (1 << 13) / 255) & 0x1fff;
+		postoff = (16 * (1 << 12) / 255) & 0x1fff;
 
 	I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
 	I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
@@ -6402,7 +6402,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 
 	/* Make sure we're not on PC8 state before disabling PC8, otherwise
 	 * we'll hang the machine! */
-	dev_priv->uncore.funcs.force_wake_get(dev_priv);
+	gen6_gt_force_wake_get(dev_priv);
 
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -6436,7 +6436,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
 
-	dev_priv->uncore.funcs.force_wake_put(dev_priv);
+	gen6_gt_force_wake_put(dev_priv);
 }
 
 void hsw_enable_pc8_work(struct work_struct *__work)
@@ -8354,7 +8354,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					DERRMR_PIPEB_PRI_FLIP_DONE |
 					DERRMR_PIPEC_PRI_FLIP_DONE));
-		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
+		intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
+				      MI_SRM_LRM_GLOBAL_GTT);
 		intel_ring_emit(ring, DERRMR);
 		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
 	}
@@ -9134,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
 		PIPE_CONF_CHECK_I(pipe_bpp);
 
-	if (!IS_HASWELL(dev)) {
+	if (!HAS_DDI(dev)) {
 		PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
 	}
@@ -10049,7 +10050,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_ddi_init(dev, PORT_D);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
-		dpd_is_edp = intel_dpd_is_edp(dev);
+		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
 
 		if (has_edp_a(dev))
 			intel_dp_init(dev, DP_A, PORT_A);
@@ -10086,8 +10087,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
 					PORT_C);
 		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
-			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
-				      PORT_C);
+			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
 	}
 
 	intel_dsi_init(dev);
@@ -11036,8 +11036,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 	}
 
 	intel_modeset_check_state(dev);
-
-	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11044,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
+	drm_modeset_lock_all(dev);
+	drm_mode_config_reset(dev);
 	intel_modeset_setup_hw_state(dev, false);
+	drm_modeset_unlock_all(dev);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0b2e842fef01..30c627c7b7ba 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3326,11 +3326,19 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 }
 
 /* check the VBT to see whether the eDP is on DP-D port */
-bool intel_dpd_is_edp(struct drm_device *dev)
+bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	union child_device_config *p_child;
 	int i;
+	static const short port_mapping[] = {
+		[PORT_B] = PORT_IDPB,
+		[PORT_C] = PORT_IDPC,
+		[PORT_D] = PORT_IDPD,
+	};
+
+	if (port == PORT_A)
+		return true;
 
 	if (!dev_priv->vbt.child_dev_num)
 		return false;
@@ -3338,7 +3346,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		p_child = dev_priv->vbt.child_dev + i;
 
-		if (p_child->common.dvo_port == PORT_IDPD &&
+		if (p_child->common.dvo_port == port_mapping[port] &&
 		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
 		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
 			return true;
@@ -3616,26 +3624,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
 	intel_dp->attached_connector = intel_connector;
 
-	type = DRM_MODE_CONNECTOR_DisplayPort;
-	/*
-	 * FIXME : We need to initialize built-in panels before external panels.
-	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
-	 */
-	switch (port) {
-	case PORT_A:
+	if (intel_dp_is_edp(dev, port))
 		type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_C:
-		if (IS_VALLEYVIEW(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	case PORT_D:
-		if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
-			type = DRM_MODE_CONNECTOR_eDP;
-		break;
-	default:	/* silence GCC warning */
-		break;
-	}
+	else
+		type = DRM_MODE_CONNECTOR_DisplayPort;
 
 	/*
 	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1e49aa8f5377..79f91f26e288 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -708,7 +708,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 void intel_dp_check_link_status(struct intel_dp *intel_dp);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_config *pipe_config);
-bool intel_dpd_is_edp(struct drm_device *dev);
+bool intel_dp_is_edp(struct drm_device *dev, enum port port);
 void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
 void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
 void ironlake_edp_panel_on(struct intel_dp *intel_dp);
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
 				    uint32_t sprite_width, int pixel_size,
 				    bool enabled, bool scaled);
 void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
 bool intel_fbc_enabled(struct drm_device *dev);
 void intel_update_fbc(struct drm_device *dev);
 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac02c4f6..e6f782d1c669 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
 
 	spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_BROADWELL(dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+	} else if (HAS_PCH_SPLIT(dev)) {
 		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
 	} else {
 		if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
 	return val;
 }
 
+static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
 static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
 	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
 	level = intel_panel_compute_brightness(dev, pipe, level);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_BROADWELL(dev))
+		return intel_bdw_panel_set_backlight(dev, level);
+	else if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
 
 	if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 	POSTING_READ(reg);
 	I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
-	if (HAS_PCH_SPLIT(dev) &&
+	if (IS_BROADWELL(dev)) {
+		/*
+		 * Broadwell requires PCH override to drive the PCH
+		 * backlight pin. The above will configure the CPU
+		 * backlight pin, which we don't plan to use.
+		 */
+		tmp = I915_READ(BLC_PWM_PCH_CTL1);
+		tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
+		I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+	} else if (HAS_PCH_SPLIT(dev) &&
 	    !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
 		tmp = I915_READ(BLC_PWM_PCH_CTL1);
 		tmp |= BLM_PCH_PWM_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index caf2ee4e5441..3657ab43c8fd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1180,7 +1180,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
@@ -1267,7 +1267,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 	crtc = intel_get_crtc_for_plane(dev, plane);
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
@@ -1498,7 +1498,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 		const struct drm_display_mode *adjusted_mode =
 			&to_intel_crtc(crtc)->config.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
-		int htotal = adjusted_mode->htotal;
+		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 		int pixel_size = crtc->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
@@ -1624,7 +1624,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 		const struct drm_display_mode *adjusted_mode =
 			&to_intel_crtc(enabled)->config.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
-		int htotal = adjusted_mode->htotal;
+		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
 		int pixel_size = enabled->fb->bits_per_pixel / 8;
 		unsigned long line_time_us;
@@ -1776,7 +1776,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
 	crtc = intel_get_crtc_for_plane(dev, plane);
 	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
 	clock = adjusted_mode->crtc_clock;
-	htotal = adjusted_mode->htotal;
+	htotal = adjusted_mode->crtc_htotal;
 	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
@@ -2469,8 +2469,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 	/* The WM are computed with base on how long it takes to fill a single
 	 * row at the given clock rate, multiplied by 8.
 	 * */
-	linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
-	ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
+	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+				     mode->crtc_clock);
+	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
 					 intel_ddi_get_cdclk_freq(dev_priv));
 
 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -5684,6 +5685,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool is_enabled, enable_requested;
+	unsigned long irqflags;
 	uint32_t tmp;
 
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
@@ -5701,9 +5703,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 				      HSW_PWR_WELL_STATE_ENABLED), 20))
 				DRM_ERROR("Timeout enabling power well\n");
 		}
+
+		if (IS_BROADWELL(dev)) {
+			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+				   dev_priv->de_irq_mask[PIPE_B]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+				   ~dev_priv->de_irq_mask[PIPE_B] |
+				   GEN8_PIPE_VBLANK);
+			I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+				   dev_priv->de_irq_mask[PIPE_C]);
+			I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+				   ~dev_priv->de_irq_mask[PIPE_C] |
+				   GEN8_PIPE_VBLANK);
+			POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+		}
 	} else {
 		if (enable_requested) {
-			unsigned long irqflags;
 			enum pipe p;
 
 			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -6129,10 +6146,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
 	return val;
 }
 
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	mutex_init(&dev_priv->rps.hw_lock);
+
+	mutex_init(&dev_priv->pc8.lock);
+	dev_priv->pc8.requirements_met = false;
+	dev_priv->pc8.gpu_idle = false;
+	dev_priv->pc8.irqs_disabled = false;
+	dev_priv->pc8.enabled = false;
+	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
 			  intel_gen6_powersave_work);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337e6d67..c2f09d456300 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	} else if (IS_GEN6(ring->dev)) {
 		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
 	} else {
+		/* XXX: gen8 returns to sanity */
 		mmio = RING_HWS_PGA(ring->mmio_base);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b02078a0b84..25cbe073c388 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
 int intel_gpu_reset(struct drm_device *dev)
 {
 	switch (INTEL_INFO(dev)->gen) {
+	case 8:
 	case 7:
 	case 6: return gen6_do_reset(dev);
 	case 5: return ironlake_do_reset(dev);