-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 400
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 1692
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1839
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 377
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 528
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 83
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 124
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 311
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 503
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 586
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 45
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 388
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 775
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 607
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 75
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 85
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 72
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 369
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 109
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 310
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 926
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 135
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 76
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_sideband.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 113
-rw-r--r--  include/drm/i915_pciids.h | 10
-rw-r--r--  include/uapi/drm/i915_drm.h | 3
72 files changed, 6344 insertions(+), 5415 deletions(-)
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 8f404103341d..cee87bfd10c4 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -18,6 +18,9 @@ config DRM_I915_WERROR
 config DRM_I915_DEBUG
 	bool "Enable additional driver debugging"
 	depends on DRM_I915
+	select PREEMPT_COUNT
+	select X86_MSR # used by igt/pm_rpm
+	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
 	default n
 	help
 	  Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 276abf1cac2b..684fc1cd08fa 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
 i915-y := i915_drv.o \
 	  i915_irq.o \
 	  i915_params.o \
+	  i915_pci.o \
 	  i915_suspend.o \
 	  i915_sysfs.o \
 	  intel_csr.o \
+	  intel_device_info.o \
 	  intel_pm.o \
 	  intel_runtime_pm.o
 
@@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_userptr.o \
 	  i915_gpu_error.o \
 	  i915_trace_points.o \
+	  intel_breadcrumbs.o \
 	  intel_lrc.o \
 	  intel_mocs.o \
 	  intel_ringbuffer.o \
@@ -101,9 +104,6 @@ i915-y += dvo_ch7017.o \
 # virtual gpu code
 i915-y += i915_vgpu.o
 
-# legacy horrors
-i915-y += i915_dma.o
-
 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
 include $(src)/gvt/Makefile
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5b7526697838..844fea795bae 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -265,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
 	LIST_HEAD(stolen);
@@ -440,15 +440,15 @@ static void print_context_stats(struct seq_file *m,
 
 	memset(&stats, 0, sizeof(stats));
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	if (dev_priv->kernel_context)
 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
 
-	list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
+	list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
 		struct drm_i915_file_private *fpriv = file->driver_priv;
 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
 	}
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	print_file_stats(m, "[k]contexts", stats);
 }
@@ -591,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
 	uintptr_t list = (uintptr_t) node->info_ent->data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
 	int count, ret;
@@ -625,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc;
 	int ret;
 
@@ -662,8 +662,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 				   engine->name,
 				   i915_gem_request_get_seqno(work->flip_queued_req),
 				   dev_priv->next_seqno,
-				   engine->get_seqno(engine),
-				   i915_gem_request_completed(work->flip_queued_req, true));
+				   intel_engine_get_seqno(engine),
+				   i915_gem_request_completed(work->flip_queued_req));
 		} else
 			seq_printf(m, "Flip not associated with any ring\n");
 		seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -695,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 	struct intel_engine_cs *engine;
 	int total = 0;
@@ -740,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	struct drm_i915_gem_request *req;
 	int ret, any;
@@ -788,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 static void i915_ring_seqno_info(struct seq_file *m,
 				 struct intel_engine_cs *engine)
 {
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	struct rb_node *rb;
+
 	seq_printf(m, "Current sequence (%s): %x\n",
-		   engine->name, engine->get_seqno(engine));
-	seq_printf(m, "Current user interrupts (%s): %x\n",
-		   engine->name, READ_ONCE(engine->user_interrupts));
+		   engine->name, intel_engine_get_seqno(engine));
+	seq_printf(m, "Current user interrupts (%s): %lx\n",
+		   engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
+
+	spin_lock(&b->lock);
+	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+		struct intel_wait *w = container_of(rb, typeof(*w), node);
+
+		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
+			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
+	}
+	spin_unlock(&b->lock);
 }
 
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	int ret;
 
@@ -821,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	int ret, i, pipe;
 
@@ -1012,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i, ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1040,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	const u32 *hws;
 	int i;
@@ -1151,7 +1163,7 @@ static int
 i915_next_seqno_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1188,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret = 0;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1391,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	u64 acthd[I915_NUM_ENGINES];
 	u32 seqno[I915_NUM_ENGINES];
@@ -1408,7 +1420,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 
 	for_each_engine_id(engine, dev_priv, id) {
 		acthd[id] = intel_ring_get_active_head(engine);
-		seqno[id] = engine->get_seqno(engine);
+		seqno[id] = intel_engine_get_seqno(engine);
 	}
 
 	i915_get_extra_instdone(dev_priv, instdone);
@@ -1428,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 			   engine->hangcheck.seqno,
 			   seqno[id],
 			   engine->last_submitted_seqno);
-		seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+		seq_printf(m, "\twaiters? %d\n",
+			   intel_engine_has_waiter(engine));
+		seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
 			   engine->hangcheck.user_interrupts,
-			   READ_ONCE(engine->user_interrupts));
+			   READ_ONCE(engine->breadcrumbs.irq_wakeups));
 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
 			   (long long)engine->hangcheck.acthd,
 			   (long long)acthd[id]);
@@ -1460,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 rgvmodectl, rstdbyctl;
 	u16 crstandvid;
 	int ret;
@@ -1528,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_uncore_forcewake_domain *fw_domain;
 
 	spin_lock_irq(&dev_priv->uncore.lock);
@@ -1546,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 rpmodectl1, rcctl1, pw_status;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1586,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
 	unsigned forcewake_count;
 	int count = 0, ret;
@@ -1698,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
 		   dev_priv->fb_tracking.busy_bits);
@@ -1713,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (!HAS_FBC(dev)) {
 		seq_puts(m, "FBC unsupported on this chipset\n");
@@ -1743,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 static int i915_fbc_fc_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
 		return -ENODEV;
@@ -1756,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
 static int i915_fbc_fc_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 reg;
 
 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
@@ -1783,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (!HAS_IPS(dev)) {
 		seq_puts(m, "not supported\n");
@@ -1813,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool sr_enabled = false;
 
 	intel_runtime_pm_get(dev_priv);
@@ -1842,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	unsigned long temp, chipset, gfx;
 	int ret;
 
@@ -1870,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret = 0;
 	int gpu_freq, ia_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
@@ -1925,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	int ret;
 
@@ -1946,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_opregion *opregion = &dev_priv->opregion;
 
 	if (opregion->vbt)
@@ -1968,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		return ret;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (to_i915(dev)->fbdev) {
 		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
 
 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
 			   fbdev_fb->base.width,
 			   fbdev_fb->base.height,
 			   fbdev_fb->base.depth,
 			   fbdev_fb->base.bits_per_pixel,
 			   fbdev_fb->base.modifier[0],
 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
 		describe_obj(m, fbdev_fb->obj);
 		seq_putc(m, '\n');
 	}
 #endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
@@ -2017,7 +2031,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2114,7 +2128,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	int ret;
@@ -2141,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	u32 status_pointer;
 	u8 read_pointer;
@@ -2244,7 +2258,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2317,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
 
 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 	int i;
@@ -2338,7 +2352,7 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 
 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 
 	if (IS_GEN6(dev_priv))
@@ -2372,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_file *file;
 
 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2415,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
 	int count = 0;
 
 	for_each_engine(engine, i915)
-		count += engine->irq_refcount;
+		count += intel_engine_has_waiter(engine);
 
 	return count;
 }
@@ -2424,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_file *file;
 
 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
-	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
+	seq_printf(m, "GPU busy? %s [%x]\n",
+		   yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
 		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2469,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const bool edram = INTEL_GEN(dev_priv) > 8;
 
 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
@@ -2482,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data)
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
-	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 	u32 tmp, i;
 
@@ -2544,9 +2559,9 @@ static void i915_guc_client_info(struct seq_file *m,
 
 	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "\tSubmissions: %llu %s\n",
-			   client->submissions[engine->guc_id],
+			   client->submissions[engine->id],
 			   engine->name);
-		tot += client->submissions[engine->guc_id];
+		tot += client->submissions[engine->id];
 	}
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2555,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_guc guc;
 	struct i915_guc_client client = {};
 	struct intel_engine_cs *engine;
@@ -2587,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
 	seq_printf(m, "\nGuC submissions:\n");
 	for_each_engine(engine, dev_priv) {
 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-			   engine->name, guc.submissions[engine->guc_id],
-			   guc.last_seqno[engine->guc_id]);
-		total += guc.submissions[engine->guc_id];
+			   engine->name, guc.submissions[engine->id],
+			   guc.last_seqno[engine->id]);
+		total += guc.submissions[engine->id];
 	}
 	seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2605,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
 	u32 *log;
 	int i = 0, pg;
@@ -2633,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 psrperf = 0;
 	u32 stat[3];
 	enum pipe pipe;
@@ -2701,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct intel_encoder *encoder;
 	struct intel_connector *connector;
 	struct intel_dp *intel_dp = NULL;
 	int ret;
@@ -2709,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data)
 
 	drm_modeset_lock_all(dev);
 	for_each_intel_connector(dev, connector) {
+		struct drm_crtc *crtc;
 
-		if (connector->base.dpms != DRM_MODE_DPMS_ON)
+		if (!connector->base.state->best_encoder)
 			continue;
 
-		if (!connector->base.encoder)
+		crtc = connector->base.state->crtc;
+		if (!crtc->state->active)
 			continue;
 
-		encoder = to_intel_encoder(connector->base.encoder);
-		if (encoder->type != INTEL_OUTPUT_EDP)
+		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
 			continue;
 
-		intel_dp = enc_to_intel_dp(&encoder->base);
+		intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
 
 		ret = intel_dp_sink_crc(intel_dp, crc);
 		if (ret)
@@ -2741,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u64 power;
 	u32 units;
 
@@ -2767,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (!HAS_RUNTIME_PM(dev_priv))
 		seq_puts(m, "Runtime power management not supported\n");
 
-	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
+	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
 	seq_printf(m, "IRQs disabled: %s\n",
 		   yesno(!intel_irqs_enabled(dev_priv)));
 #ifdef CONFIG_PM
@@ -2782,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
 		seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
 #endif
 	seq_printf(m, "PCI device power state: %s [%d]\n",
-		   pci_power_name(dev_priv->dev->pdev->current_state),
-		   dev_priv->dev->pdev->current_state);
+		   pci_power_name(dev_priv->drm.pdev->current_state),
+		   dev_priv->drm.pdev->current_state);
 
 	return 0;
 }
@@ -2792,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 	int i;
 
@@ -2827,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_csr *csr;
 
 	if (!HAS_CSR(dev)) {
@@ -2950,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m,
 
 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
-	if (intel_encoder->type == INTEL_OUTPUT_EDP)
+	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
 		intel_panel_info(m, &intel_connector->panel);
 }
 
@@ -2989,14 +3004,26 @@ static void intel_connector_info(struct seq_file *m,
 		seq_printf(m, "\tCEA rev: %d\n",
 			   connector->display_info.cea_rev);
 	}
-	if (intel_encoder) {
-		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
-		    intel_encoder->type == INTEL_OUTPUT_EDP)
-			intel_dp_info(m, intel_connector);
-		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
-			intel_hdmi_info(m, intel_connector);
-		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+
+	if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+		return;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_eDP:
+		intel_dp_info(m, intel_connector);
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
 			intel_lvds_info(m, intel_connector);
+		break;
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
+		    intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
+			intel_hdmi_info(m, intel_connector);
+		break;
+	default:
+		break;
 	}
 
 	seq_printf(m, "\tmodes:\n");
@@ -3006,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m,
 
 static bool cursor_active(struct drm_device *dev, int pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 state;
 
 	if (IS_845G(dev) || IS_I865G(dev))
@@ -3019,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
 
 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pos;
 
 	pos = I915_READ(CURPOS(pipe));
@@ -3140,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc;
 	struct drm_connector *connector;
 
@@ -3195,7 +3222,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
 	enum intel_engine_id id;
@@ -3268,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;
 
 	drm_modeset_lock_all(dev);
@@ -3298,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 	struct intel_engine_cs *engine;
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
 	enum intel_engine_id id;
 
@@ -3336,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_allocation *ddb;
 	struct skl_ddb_entry *entry;
 	enum pipe pipe;
@@ -3374,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 static void drrs_status_per_crtc(struct seq_file *m,
 				 struct drm_device *dev, struct intel_crtc *intel_crtc)
 {
-	struct intel_encoder *intel_encoder;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_drrs *drrs = &dev_priv->drrs;
 	int vrefresh = 0;
+	struct drm_connector *connector;
 
-	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
-		/* Encoder connected on this CRTC */
-		switch (intel_encoder->type) {
-		case INTEL_OUTPUT_EDP:
-			seq_puts(m, "eDP:\n");
-			break;
-		case INTEL_OUTPUT_DSI:
-			seq_puts(m, "DSI:\n");
-			break;
-		case INTEL_OUTPUT_HDMI:
-			seq_puts(m, "HDMI:\n");
-			break;
-		case INTEL_OUTPUT_DISPLAYPORT:
-			seq_puts(m, "DP:\n");
-			break;
-		default:
-			seq_printf(m, "Other encoder (id=%d).\n",
-				   intel_encoder->type);
-			return;
-		}
+	drm_for_each_connector(connector, dev) {
+		if (connector->state->crtc != &intel_crtc->base)
+			continue;
+
+		seq_printf(m, "%s:\n", connector->name);
 	}
 
 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
@@ -3461,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
 	struct intel_crtc *intel_crtc;
 	int active_crtc_cnt = 0;
 
+	drm_modeset_lock_all(dev);
 	for_each_intel_crtc(dev, intel_crtc) {
-		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
-
 		if (intel_crtc->base.state->active) {
 			active_crtc_cnt++;
 			seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
 
 			drrs_status_per_crtc(m, dev, intel_crtc);
 		}
-
-		drm_modeset_unlock(&intel_crtc->base.mutex);
 	}
+	drm_modeset_unlock_all(dev);
 
 	if (!active_crtc_cnt)
 		seq_puts(m, "No active crtc found\n");
@@ -3490,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_encoder *encoder;
 	struct intel_encoder *intel_encoder;
 	struct intel_digital_port *intel_dig_port;
+	struct drm_connector *connector;
+
 	drm_modeset_lock_all(dev);
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		intel_encoder = to_intel_encoder(encoder);
-		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+	drm_for_each_connector(connector, dev) {
+		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+			continue;
+
+		intel_encoder = intel_attached_encoder(connector);
+		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
 			continue;
-		intel_dig_port = enc_to_dig_port(encoder);
+
+		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
 		if (!intel_dig_port->dp.can_mst)
 			continue;
+
 		seq_printf(m, "MST Source Port %c\n",
 			   port_name(intel_dig_port->port));
 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
@@ -3512,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
-	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(info->dev);
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
 	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
@@ -3536,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
 {
 	struct pipe_crc_info *info = inode->i_private;
-	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(info->dev);
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 
 	spin_lock_irq(&pipe_crc->lock);
@@ -3564,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
 {
 	struct pipe_crc_info *info = filep->private_data;
 	struct drm_device *dev = info->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
 	char buf[PIPE_CRC_BUFFER_LEN];
 	int n_entries;
@@ -3697,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
 static int display_crc_ctl_show(struct seq_file *m, void *data)
 {
 	struct drm_device *dev = m->private;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;
 
 	for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3758,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
 	case INTEL_OUTPUT_TVOUT:
 		*source = INTEL_PIPE_CRC_SOURCE_TV;
 		break;
-	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_EDP:
 		dig_port = enc_to_dig_port(&encoder->base);
 		switch (dig_port->port) {
@@ -3791,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
 			       enum intel_pipe_crc_source *source,
 			       uint32_t *val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool need_stable_symbols = false;
 
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3862,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
 			       enum intel_pipe_crc_source *source,
 			       uint32_t *val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool need_stable_symbols = false;
 
 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3936,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
 					 enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
 	switch (pipe) {
@@ -3961,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
 					 enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
 	if (pipe == PIPE_A)
@@ -4004,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
 
 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc =
 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
 	struct intel_crtc_state *pipe_config;
@@ -4072,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 			       enum intel_pipe_crc_source source)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
 	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
 									pipe));
@@ -4579,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
4579static int pri_wm_latency_show(struct seq_file *m, void *data) 4595static int pri_wm_latency_show(struct seq_file *m, void *data)
4580{ 4596{
4581 struct drm_device *dev = m->private; 4597 struct drm_device *dev = m->private;
4582 struct drm_i915_private *dev_priv = dev->dev_private; 4598 struct drm_i915_private *dev_priv = to_i915(dev);
4583 const uint16_t *latencies; 4599 const uint16_t *latencies;
4584 4600
4585 if (INTEL_INFO(dev)->gen >= 9) 4601 if (INTEL_INFO(dev)->gen >= 9)
@@ -4595,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
4595static int spr_wm_latency_show(struct seq_file *m, void *data) 4611static int spr_wm_latency_show(struct seq_file *m, void *data)
4596{ 4612{
4597 struct drm_device *dev = m->private; 4613 struct drm_device *dev = m->private;
4598 struct drm_i915_private *dev_priv = dev->dev_private; 4614 struct drm_i915_private *dev_priv = to_i915(dev);
4599 const uint16_t *latencies; 4615 const uint16_t *latencies;
4600 4616
4601 if (INTEL_INFO(dev)->gen >= 9) 4617 if (INTEL_INFO(dev)->gen >= 9)
@@ -4611,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
4611static int cur_wm_latency_show(struct seq_file *m, void *data) 4627static int cur_wm_latency_show(struct seq_file *m, void *data)
4612{ 4628{
4613 struct drm_device *dev = m->private; 4629 struct drm_device *dev = m->private;
4614 struct drm_i915_private *dev_priv = dev->dev_private; 4630 struct drm_i915_private *dev_priv = to_i915(dev);
4615 const uint16_t *latencies; 4631 const uint16_t *latencies;
4616 4632
4617 if (INTEL_INFO(dev)->gen >= 9) 4633 if (INTEL_INFO(dev)->gen >= 9)
@@ -4702,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4702{ 4718{
4703 struct seq_file *m = file->private_data; 4719 struct seq_file *m = file->private_data;
4704 struct drm_device *dev = m->private; 4720 struct drm_device *dev = m->private;
4705 struct drm_i915_private *dev_priv = dev->dev_private; 4721 struct drm_i915_private *dev_priv = to_i915(dev);
4706 uint16_t *latencies; 4722 uint16_t *latencies;
4707 4723
4708 if (INTEL_INFO(dev)->gen >= 9) 4724 if (INTEL_INFO(dev)->gen >= 9)
@@ -4718,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4718{ 4734{
4719 struct seq_file *m = file->private_data; 4735 struct seq_file *m = file->private_data;
4720 struct drm_device *dev = m->private; 4736 struct drm_device *dev = m->private;
4721 struct drm_i915_private *dev_priv = dev->dev_private; 4737 struct drm_i915_private *dev_priv = to_i915(dev);
4722 uint16_t *latencies; 4738 uint16_t *latencies;
4723 4739
4724 if (INTEL_INFO(dev)->gen >= 9) 4740 if (INTEL_INFO(dev)->gen >= 9)
@@ -4734,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4734{ 4750{
4735 struct seq_file *m = file->private_data; 4751 struct seq_file *m = file->private_data;
4736 struct drm_device *dev = m->private; 4752 struct drm_device *dev = m->private;
4737 struct drm_i915_private *dev_priv = dev->dev_private; 4753 struct drm_i915_private *dev_priv = to_i915(dev);
4738 uint16_t *latencies; 4754 uint16_t *latencies;
4739 4755
4740 if (INTEL_INFO(dev)->gen >= 9) 4756 if (INTEL_INFO(dev)->gen >= 9)
@@ -4776,7 +4792,7 @@ static int
4776i915_wedged_get(void *data, u64 *val) 4792i915_wedged_get(void *data, u64 *val)
4777{ 4793{
4778 struct drm_device *dev = data; 4794 struct drm_device *dev = data;
4779 struct drm_i915_private *dev_priv = dev->dev_private; 4795 struct drm_i915_private *dev_priv = to_i915(dev);
4780 4796
4781 *val = i915_terminally_wedged(&dev_priv->gpu_error); 4797 *val = i915_terminally_wedged(&dev_priv->gpu_error);
4782 4798
@@ -4787,7 +4803,7 @@ static int
4787i915_wedged_set(void *data, u64 val) 4803i915_wedged_set(void *data, u64 val)
4788{ 4804{
4789 struct drm_device *dev = data; 4805 struct drm_device *dev = data;
4790 struct drm_i915_private *dev_priv = dev->dev_private; 4806 struct drm_i915_private *dev_priv = to_i915(dev);
4791 4807
4792 /* 4808 /*
4793 * There is no safeguard against this debugfs entry colliding 4809 * There is no safeguard against this debugfs entry colliding
@@ -4815,44 +4831,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
 			"%llu\n");
 
 static int
-i915_ring_stop_get(void *data, u64 *val)
-{
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	*val = dev_priv->gpu_error.stop_rings;
-
-	return 0;
-}
-
-static int
-i915_ring_stop_set(void *data, u64 val)
-{
-	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	dev_priv->gpu_error.stop_rings = val;
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
-			i915_ring_stop_get, i915_ring_stop_set,
-			"0x%08llx\n");
-
-static int
 i915_ring_missed_irq_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	*val = dev_priv->gpu_error.missed_irq_rings;
 	return 0;
@@ -4862,7 +4844,7 @@ static int
 i915_ring_missed_irq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	/* Lock against concurrent debugfs callers */
@@ -4883,7 +4865,7 @@ static int
 i915_ring_test_irq_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	*val = dev_priv->gpu_error.test_irq_rings;
 
@@ -4894,18 +4876,11 @@ static int
 i915_ring_test_irq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
+	val &= INTEL_INFO(dev_priv)->ring_mask;
 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
-
-	/* Lock against concurrent debugfs callers */
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
 	dev_priv->gpu_error.test_irq_rings = val;
-	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
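All of the i915_*_fops in this file follow the kernel's simple-attribute debugfs pattern, which is why the test_irq write path above can shrink to a plain assignment once the mutex is dropped. A hedged sketch with hypothetical example_* names:

#include <linux/fs.h>

static int example_get(void *data, u64 *val)
{
	*val = 0;	/* report the current value */
	return 0;
}

static int example_set(void *data, u64 val)
{
	return 0;	/* apply the value written to the file */
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%08llx\n");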
@@ -4934,7 +4909,7 @@ static int
 i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4946,7 +4921,7 @@ i915_drop_caches_set(void *data, u64 val)
 		return ret;
 
 	if (val & DROP_ACTIVE) {
-		ret = i915_gpu_idle(dev);
+		ret = i915_gem_wait_for_idle(dev_priv);
 		if (ret)
 			goto unlock;
 	}
@@ -4974,7 +4949,7 @@ static int
 i915_max_freq_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
@@ -4996,7 +4971,7 @@ static int
 i915_max_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 hw_max, hw_min;
 	int ret;
 
@@ -5041,7 +5016,7 @@ static int
 i915_min_freq_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	if (INTEL_INFO(dev)->gen < 6)
@@ -5063,7 +5038,7 @@ static int
 i915_min_freq_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 hw_max, hw_min;
 	int ret;
 
@@ -5108,7 +5083,7 @@ static int
 i915_cache_sharing_get(void *data, u64 *val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 snpcr;
 	int ret;
 
@@ -5123,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 
 	intel_runtime_pm_put(dev_priv);
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
 
@@ -5134,7 +5109,7 @@ static int
 i915_cache_sharing_set(void *data, u64 val)
 {
 	struct drm_device *dev = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 snpcr;
 
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
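The matching set path sits outside this hunk, but it is a standard read-modify-write of the same SNPCR field; a sketch under that assumption, with a hypothetical helper name:

static void example_set_snpcr(struct drm_i915_private *dev_priv, u32 val)
{
	u32 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	snpcr &= ~GEN6_MBC_SNPCR_MASK;		/* clear the old field */
	snpcr |= val << GEN6_MBC_SNPCR_SHIFT;	/* splice in the new value */
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
}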
@@ -5171,7 +5146,7 @@ struct sseu_dev_status {
 static void cherryview_sseu_device_status(struct drm_device *dev,
 					  struct sseu_dev_status *stat)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ss_max = 2;
 	int ss;
 	u32 sig1[ss_max], sig2[ss_max];
@@ -5203,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
 static void gen9_sseu_device_status(struct drm_device *dev,
 				    struct sseu_dev_status *stat)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int s_max = 3, ss_max = 4;
 	int s, ss;
 	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
@@ -5268,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
 static void broadwell_sseu_device_status(struct drm_device *dev,
 					 struct sseu_dev_status *stat)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int s;
 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
 
@@ -5347,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
@@ -5361,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return 0;
@@ -5477,7 +5452,6 @@ static const struct i915_debugfs_files {
 	{"i915_max_freq", &i915_max_freq_fops},
 	{"i915_min_freq", &i915_min_freq_fops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
-	{"i915_ring_stop", &i915_ring_stop_fops},
 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -5495,7 +5469,7 @@ static const struct i915_debugfs_files {
 
 void intel_display_crc_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
@@ -5507,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev)
 	}
 }
 
-int i915_debugfs_init(struct drm_minor *minor)
+int i915_debugfs_register(struct drm_i915_private *dev_priv)
 {
+	struct drm_minor *minor = dev_priv->drm.primary;
 	int ret, i;
 
 	ret = i915_forcewake_create(minor->debugfs_root, minor);
@@ -5534,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor)
 				minor->debugfs_root, minor);
 }
 
-void i915_debugfs_cleanup(struct drm_minor *minor)
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
 {
+	struct drm_minor *minor = dev_priv->drm.primary;
 	int i;
 
 	drm_debugfs_remove_files(i915_debugfs_list,
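These entries surface under debugfs; a small userspace sketch reading one of the attributes registered above, assuming the default debugfs mount point and card 0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	int fd = open("/sys/kernel/debug/dri/0/i915_wedged", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("wedged: %s", buf);	/* formatted by the "%llu\n" fops */
	}
	close(fd);
	return 0;
}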
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
deleted file mode 100644
index d15a461fa84a..000000000000
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ /dev/null
@@ -1,1692 +0,0 @@
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h>
33#include <drm/drm_fb_helper.h>
34#include <drm/drm_legacy.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38#include "i915_vgpu.h"
39#include "i915_trace.h"
40#include <linux/pci.h>
41#include <linux/console.h>
42#include <linux/vt.h>
43#include <linux/vgaarb.h>
44#include <linux/acpi.h>
45#include <linux/pnp.h>
46#include <linux/vga_switcheroo.h>
47#include <linux/slab.h>
48#include <acpi/video.h>
49#include <linux/pm.h>
50#include <linux/pm_runtime.h>
51#include <linux/oom.h>
52
53static unsigned int i915_load_fail_count;
54
55bool __i915_inject_load_failure(const char *func, int line)
56{
57 if (i915_load_fail_count >= i915.inject_load_failure)
58 return false;
59
60 if (++i915_load_fail_count == i915.inject_load_failure) {
61 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
62 i915.inject_load_failure, func, line);
63 return true;
64 }
65
66 return false;
67}
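The i915_inject_load_failure() calls later in this file pair with a wrapper of roughly this shape (a sketch; the real definition lives in a driver header, not in this file):

#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

Each call site becomes one numbered checkpoint, so booting with i915.inject_load_failure=N makes the Nth checkpoint report a failure.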
68
69#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
70#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
71 "providing the dmesg log by booting with drm.debug=0xf"
72
73void
74__i915_printk(struct drm_i915_private *dev_priv, const char *level,
75 const char *fmt, ...)
76{
77 static bool shown_bug_once;
78 struct device *dev = dev_priv->dev->dev;
79 bool is_error = level[1] <= KERN_ERR[1];
80 bool is_debug = level[1] == KERN_DEBUG[1];
81 struct va_format vaf;
82 va_list args;
83
84 if (is_debug && !(drm_debug & DRM_UT_DRIVER))
85 return;
86
87 va_start(args, fmt);
88
89 vaf.fmt = fmt;
90 vaf.va = &args;
91
92 dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
93 __builtin_return_address(0), &vaf);
94
95 if (is_error && !shown_bug_once) {
96 dev_notice(dev, "%s", FDO_BUG_MSG);
97 shown_bug_once = true;
98 }
99
100 va_end(args);
101}
102
103static bool i915_error_injected(struct drm_i915_private *dev_priv)
104{
105 return i915.inject_load_failure &&
106 i915_load_fail_count == i915.inject_load_failure;
107}
108
109#define i915_load_error(dev_priv, fmt, ...) \
110 __i915_printk(dev_priv, \
111 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
112 fmt, ##__VA_ARGS__)
113
114static int i915_getparam(struct drm_device *dev, void *data,
115 struct drm_file *file_priv)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118 drm_i915_getparam_t *param = data;
119 int value;
120
121 switch (param->param) {
122 case I915_PARAM_IRQ_ACTIVE:
123 case I915_PARAM_ALLOW_BATCHBUFFER:
124 case I915_PARAM_LAST_DISPATCH:
125 /* Reject all old ums/dri params. */
126 return -ENODEV;
127 case I915_PARAM_CHIPSET_ID:
128 value = dev->pdev->device;
129 break;
130 case I915_PARAM_REVISION:
131 value = dev->pdev->revision;
132 break;
133 case I915_PARAM_HAS_GEM:
134 value = 1;
135 break;
136 case I915_PARAM_NUM_FENCES_AVAIL:
137 value = dev_priv->num_fence_regs;
138 break;
139 case I915_PARAM_HAS_OVERLAY:
140 value = dev_priv->overlay ? 1 : 0;
141 break;
142 case I915_PARAM_HAS_PAGEFLIPPING:
143 value = 1;
144 break;
145 case I915_PARAM_HAS_EXECBUF2:
146 /* depends on GEM */
147 value = 1;
148 break;
149 case I915_PARAM_HAS_BSD:
150 value = intel_engine_initialized(&dev_priv->engine[VCS]);
151 break;
152 case I915_PARAM_HAS_BLT:
153 value = intel_engine_initialized(&dev_priv->engine[BCS]);
154 break;
155 case I915_PARAM_HAS_VEBOX:
156 value = intel_engine_initialized(&dev_priv->engine[VECS]);
157 break;
158 case I915_PARAM_HAS_BSD2:
159 value = intel_engine_initialized(&dev_priv->engine[VCS2]);
160 break;
161 case I915_PARAM_HAS_RELAXED_FENCING:
162 value = 1;
163 break;
164 case I915_PARAM_HAS_COHERENT_RINGS:
165 value = 1;
166 break;
167 case I915_PARAM_HAS_EXEC_CONSTANTS:
168 value = INTEL_INFO(dev)->gen >= 4;
169 break;
170 case I915_PARAM_HAS_RELAXED_DELTA:
171 value = 1;
172 break;
173 case I915_PARAM_HAS_GEN7_SOL_RESET:
174 value = 1;
175 break;
176 case I915_PARAM_HAS_LLC:
177 value = HAS_LLC(dev);
178 break;
179 case I915_PARAM_HAS_WT:
180 value = HAS_WT(dev);
181 break;
182 case I915_PARAM_HAS_ALIASING_PPGTT:
183 value = USES_PPGTT(dev);
184 break;
185 case I915_PARAM_HAS_WAIT_TIMEOUT:
186 value = 1;
187 break;
188 case I915_PARAM_HAS_SEMAPHORES:
189 value = i915_semaphore_is_enabled(dev_priv);
190 break;
191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
192 value = 1;
193 break;
194 case I915_PARAM_HAS_SECURE_BATCHES:
195 value = capable(CAP_SYS_ADMIN);
196 break;
197 case I915_PARAM_HAS_PINNED_BATCHES:
198 value = 1;
199 break;
200 case I915_PARAM_HAS_EXEC_NO_RELOC:
201 value = 1;
202 break;
203 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
204 value = 1;
205 break;
206 case I915_PARAM_CMD_PARSER_VERSION:
207 value = i915_cmd_parser_get_version(dev_priv);
208 break;
209 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
210 value = 1;
211 break;
212 case I915_PARAM_MMAP_VERSION:
213 value = 1;
214 break;
215 case I915_PARAM_SUBSLICE_TOTAL:
216 value = INTEL_INFO(dev)->subslice_total;
217 if (!value)
218 return -ENODEV;
219 break;
220 case I915_PARAM_EU_TOTAL:
221 value = INTEL_INFO(dev)->eu_total;
222 if (!value)
223 return -ENODEV;
224 break;
225 case I915_PARAM_HAS_GPU_RESET:
226 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
227 break;
228 case I915_PARAM_HAS_RESOURCE_STREAMER:
229 value = HAS_RESOURCE_STREAMER(dev);
230 break;
231 case I915_PARAM_HAS_EXEC_SOFTPIN:
232 value = 1;
233 break;
234 default:
235 DRM_DEBUG("Unknown parameter %d\n", param->param);
236 return -EINVAL;
237 }
238
239 if (copy_to_user(param->value, &value, sizeof(int))) {
240 DRM_ERROR("copy_to_user failed\n");
241 return -EFAULT;
242 }
243
244 return 0;
245}
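From userspace this handler is reached through the GETPARAM ioctl; a minimal sketch assuming libdrm headers and an already-open DRM fd:

#include <xf86drm.h>
#include <i915_drm.h>

static int query_chipset_id(int fd, int *id)
{
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = id,	/* the handler copy_to_user()s into this */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
}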
246
247static int i915_get_bridge_dev(struct drm_device *dev)
248{
249 struct drm_i915_private *dev_priv = dev->dev_private;
250
251 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
252 if (!dev_priv->bridge_dev) {
253 DRM_ERROR("bridge device not found\n");
254 return -1;
255 }
256 return 0;
257}
258
259/* Allocate space for the MCH regs if needed, return nonzero on error */
260static int
261intel_alloc_mchbar_resource(struct drm_device *dev)
262{
263 struct drm_i915_private *dev_priv = dev->dev_private;
264 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
265 u32 temp_lo, temp_hi = 0;
266 u64 mchbar_addr;
267 int ret;
268
269 if (INTEL_INFO(dev)->gen >= 4)
270 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
271 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
272 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
273
274 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
275#ifdef CONFIG_PNP
276 if (mchbar_addr &&
277 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
278 return 0;
279#endif
280
281 /* Get some space for it */
282 dev_priv->mch_res.name = "i915 MCHBAR";
283 dev_priv->mch_res.flags = IORESOURCE_MEM;
284 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
285 &dev_priv->mch_res,
286 MCHBAR_SIZE, MCHBAR_SIZE,
287 PCIBIOS_MIN_MEM,
288 0, pcibios_align_resource,
289 dev_priv->bridge_dev);
290 if (ret) {
291 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
292 dev_priv->mch_res.start = 0;
293 return ret;
294 }
295
296 if (INTEL_INFO(dev)->gen >= 4)
297 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
298 upper_32_bits(dev_priv->mch_res.start));
299
300 pci_write_config_dword(dev_priv->bridge_dev, reg,
301 lower_32_bits(dev_priv->mch_res.start));
302 return 0;
303}
304
305/* Setup MCHBAR if possible; note in mchbar_need_disable whether it must be disabled again on teardown */
306static void
307intel_setup_mchbar(struct drm_device *dev)
308{
309 struct drm_i915_private *dev_priv = dev->dev_private;
310 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
311 u32 temp;
312 bool enabled;
313
314 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
315 return;
316
317 dev_priv->mchbar_need_disable = false;
318
319 if (IS_I915G(dev) || IS_I915GM(dev)) {
320 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
321 enabled = !!(temp & DEVEN_MCHBAR_EN);
322 } else {
323 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
324 enabled = temp & 1;
325 }
326
327 /* If it's already enabled, don't have to do anything */
328 if (enabled)
329 return;
330
331 if (intel_alloc_mchbar_resource(dev))
332 return;
333
334 dev_priv->mchbar_need_disable = true;
335
336 /* Space is allocated or reserved, so enable it. */
337 if (IS_I915G(dev) || IS_I915GM(dev)) {
338 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
339 temp | DEVEN_MCHBAR_EN);
340 } else {
341 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
342 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
343 }
344}
345
346static void
347intel_teardown_mchbar(struct drm_device *dev)
348{
349 struct drm_i915_private *dev_priv = dev->dev_private;
350 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
351
352 if (dev_priv->mchbar_need_disable) {
353 if (IS_I915G(dev) || IS_I915GM(dev)) {
354 u32 deven_val;
355
356 pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
357 &deven_val);
358 deven_val &= ~DEVEN_MCHBAR_EN;
359 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
360 deven_val);
361 } else {
362 u32 mchbar_val;
363
364 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
365 &mchbar_val);
366 mchbar_val &= ~1;
367 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
368 mchbar_val);
369 }
370 }
371
372 if (dev_priv->mch_res.start)
373 release_resource(&dev_priv->mch_res);
374}
375
376/* true = enable decode, false = disable decode */
377static unsigned int i915_vga_set_decode(void *cookie, bool state)
378{
379 struct drm_device *dev = cookie;
380
381 intel_modeset_vga_set_state(dev, state);
382 if (state)
383 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
384 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
385 else
386 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
387}
388
389static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
390{
391 struct drm_device *dev = pci_get_drvdata(pdev);
392 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
393
394 if (state == VGA_SWITCHEROO_ON) {
395 pr_info("switched on\n");
396 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
397 /* i915 resume handler doesn't set to D0 */
398 pci_set_power_state(dev->pdev, PCI_D0);
399 i915_resume_switcheroo(dev);
400 dev->switch_power_state = DRM_SWITCH_POWER_ON;
401 } else {
402 pr_info("switched off\n");
403 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
404 i915_suspend_switcheroo(dev, pmm);
405 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
406 }
407}
408
409static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
410{
411 struct drm_device *dev = pci_get_drvdata(pdev);
412
413 /*
414 * FIXME: open_count is protected by drm_global_mutex but that would lead to
415 * locking inversion with the driver load path. And the access here is
416 * completely racy anyway. So don't bother with locking for now.
417 */
418 return dev->open_count == 0;
419}
420
421static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
422 .set_gpu_state = i915_switcheroo_set_state,
423 .reprobe = NULL,
424 .can_switch = i915_switcheroo_can_switch,
425};
426
427static void i915_gem_fini(struct drm_device *dev)
428{
429 struct drm_i915_private *dev_priv = to_i915(dev);
430
431 /*
432 * Neither the BIOS, ourselves or any other kernel
433 * expects the system to be in execlists mode on startup,
434 * so we need to reset the GPU back to legacy mode. And the only
435 * known way to disable logical contexts is through a GPU reset.
436 *
437 * So in order to leave the system in a known default configuration,
438 * always reset the GPU upon unload. Afterwards we then clean up the
439 * GEM state tracking, flushing off the requests and leaving the
440 * system in a known idle state.
441 *
442 * Note that it is of the utmost importance that the GPU is idle and
443 * all stray writes are flushed *before* we dismantle the backing
444 * storage for the pinned objects.
445 *
446 * However, since we are uncertain that resetting the GPU on older
447 * machines is a good idea, we don't - just in case it leaves the
448 * machine in an unusable condition.
449 */
450 if (HAS_HW_CONTEXTS(dev)) {
451 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
452 WARN_ON(reset && reset != -ENODEV);
453 }
454
455 mutex_lock(&dev->struct_mutex);
456 i915_gem_reset(dev);
457 i915_gem_cleanup_engines(dev);
458 i915_gem_context_fini(dev);
459 mutex_unlock(&dev->struct_mutex);
460
461 WARN_ON(!list_empty(&to_i915(dev)->context_list));
462}
463
464static int i915_load_modeset_init(struct drm_device *dev)
465{
466 struct drm_i915_private *dev_priv = dev->dev_private;
467 int ret;
468
469 if (i915_inject_load_failure())
470 return -ENODEV;
471
472 ret = intel_bios_init(dev_priv);
473 if (ret)
474 DRM_INFO("failed to find VBIOS tables\n");
475
476 /* If we have > 1 VGA cards, then we need to arbitrate access
477 * to the common VGA resources.
478 *
479 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
480 * then we do not take part in VGA arbitration and the
481 * vga_client_register() fails with -ENODEV.
482 */
483 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
484 if (ret && ret != -ENODEV)
485 goto out;
486
487 intel_register_dsm_handler();
488
489 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
490 if (ret)
491 goto cleanup_vga_client;
492
493 /* must happen before intel_power_domains_init_hw() on VLV/CHV */
494 intel_update_rawclk(dev_priv);
495
496 intel_power_domains_init_hw(dev_priv, false);
497
498 intel_csr_ucode_init(dev_priv);
499
500 ret = intel_irq_install(dev_priv);
501 if (ret)
502 goto cleanup_csr;
503
504 intel_setup_gmbus(dev);
505
506 /* Important: The output setup functions called by modeset_init need
507 * working irqs for e.g. gmbus and dp aux transfers. */
508 intel_modeset_init(dev);
509
510 intel_guc_init(dev);
511
512 ret = i915_gem_init(dev);
513 if (ret)
514 goto cleanup_irq;
515
516 intel_modeset_gem_init(dev);
517
518 if (INTEL_INFO(dev)->num_pipes == 0)
519 return 0;
520
521 ret = intel_fbdev_init(dev);
522 if (ret)
523 goto cleanup_gem;
524
525 /* Only enable hotplug handling once the fbdev is fully set up. */
526 intel_hpd_init(dev_priv);
527
528 /*
529 * Some ports require correctly set-up hpd registers for detection to
530 * work properly (leading to ghost connected connector status), e.g. VGA
531 * on gm45. Hence we can only set up the initial fbdev config after hpd
532 * irqs are fully enabled. Now we should scan for the initial config
533 * only once hotplug handling is enabled, but due to screwed-up locking
534 * around kms/fbdev init we can't protect the fbdev initial config
535 * scanning against hotplug events. Hence do this first and ignore the
536 * tiny window where we will lose hotplug notifications.
537 */
538 intel_fbdev_initial_config_async(dev);
539
540 drm_kms_helper_poll_init(dev);
541
542 return 0;
543
544cleanup_gem:
545 i915_gem_fini(dev);
546cleanup_irq:
547 intel_guc_fini(dev);
548 drm_irq_uninstall(dev);
549 intel_teardown_gmbus(dev);
550cleanup_csr:
551 intel_csr_ucode_fini(dev_priv);
552 intel_power_domains_fini(dev_priv);
553 vga_switcheroo_unregister_client(dev->pdev);
554cleanup_vga_client:
555 vga_client_register(dev->pdev, NULL, NULL, NULL);
556out:
557 return ret;
558}
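The cleanup ladder above is the kernel's usual goto-unwind idiom: each label undoes exactly the steps that succeeded before the failure. A stripped-down sketch with hypothetical step_a/step_b helpers:

static int step_a(void);
static int step_b(void);
static void undo_step_a(void);

static int example_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto cleanup_a;		/* undo only what already succeeded */

	return 0;

cleanup_a:
	undo_step_a();
	return ret;
}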
559
560#if IS_ENABLED(CONFIG_FB)
561static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
562{
563 struct apertures_struct *ap;
564 struct pci_dev *pdev = dev_priv->dev->pdev;
565 struct i915_ggtt *ggtt = &dev_priv->ggtt;
566 bool primary;
567 int ret;
568
569 ap = alloc_apertures(1);
570 if (!ap)
571 return -ENOMEM;
572
573 ap->ranges[0].base = ggtt->mappable_base;
574 ap->ranges[0].size = ggtt->mappable_end;
575
576 primary =
577 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
578
579 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
580
581 kfree(ap);
582
583 return ret;
584}
585#else
586static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
587{
588 return 0;
589}
590#endif
591
592#if !defined(CONFIG_VGA_CONSOLE)
593static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
594{
595 return 0;
596}
597#elif !defined(CONFIG_DUMMY_CONSOLE)
598static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
599{
600 return -ENODEV;
601}
602#else
603static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
604{
605 int ret = 0;
606
607 DRM_INFO("Replacing VGA console driver\n");
608
609 console_lock();
610 if (con_is_bound(&vga_con))
611 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
612 if (ret == 0) {
613 ret = do_unregister_con_driver(&vga_con);
614
615 /* Ignore "already unregistered". */
616 if (ret == -ENODEV)
617 ret = 0;
618 }
619 console_unlock();
620
621 return ret;
622}
623#endif
624
625static void i915_dump_device_info(struct drm_i915_private *dev_priv)
626{
627 const struct intel_device_info *info = &dev_priv->info;
628
629#define PRINT_S(name) "%s"
630#define SEP_EMPTY
631#define PRINT_FLAG(name) info->name ? #name "," : ""
632#define SEP_COMMA ,
633 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
634 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
635 info->gen,
636 dev_priv->dev->pdev->device,
637 dev_priv->dev->pdev->revision,
638 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
639#undef PRINT_S
640#undef SEP_EMPTY
641#undef PRINT_FLAG
642#undef SEP_COMMA
643}
644
645static void cherryview_sseu_info_init(struct drm_device *dev)
646{
647 struct drm_i915_private *dev_priv = dev->dev_private;
648 struct intel_device_info *info;
649 u32 fuse, eu_dis;
650
651 info = (struct intel_device_info *)&dev_priv->info;
652 fuse = I915_READ(CHV_FUSE_GT);
653
654 info->slice_total = 1;
655
656 if (!(fuse & CHV_FGT_DISABLE_SS0)) {
657 info->subslice_per_slice++;
658 eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
659 CHV_FGT_EU_DIS_SS0_R1_MASK);
660 info->eu_total += 8 - hweight32(eu_dis);
661 }
662
663 if (!(fuse & CHV_FGT_DISABLE_SS1)) {
664 info->subslice_per_slice++;
665 eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
666 CHV_FGT_EU_DIS_SS1_R1_MASK);
667 info->eu_total += 8 - hweight32(eu_dis);
668 }
669
670 info->subslice_total = info->subslice_per_slice;
671 /*
672 * CHV is expected to always have a uniform distribution of EUs
673 * across subslices.
674 */
675 info->eu_per_subslice = info->subslice_total ?
676 info->eu_total / info->subslice_total :
677 0;
678 /*
679 * CHV supports subslice power gating on devices with more than
680 * one subslice, and supports EU power gating on devices with
681 * more than one EU pair per subslice.
682 */
683 info->has_slice_pg = 0;
684 info->has_subslice_pg = (info->subslice_total > 1);
685 info->has_eu_pg = (info->eu_per_subslice > 2);
686}
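The EU accounting above is popcount arithmetic on fuse masks; a worked sketch with a made-up mask value:

#include <linux/bitops.h>
#include <linux/types.h>

/* With a hypothetical eu_dis mask of 0x3 (two EUs fused off),
 * hweight32() counts the disabled bits and 8 - 2 = 6 EUs remain. */
static unsigned int example_enabled_eus(u32 eu_dis)
{
	return 8 - hweight32(eu_dis);
}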
687
688static void gen9_sseu_info_init(struct drm_device *dev)
689{
690 struct drm_i915_private *dev_priv = dev->dev_private;
691 struct intel_device_info *info;
692 int s_max = 3, ss_max = 4, eu_max = 8;
693 int s, ss;
694 u32 fuse2, s_enable, ss_disable, eu_disable;
695 u8 eu_mask = 0xff;
696
697 info = (struct intel_device_info *)&dev_priv->info;
698 fuse2 = I915_READ(GEN8_FUSE2);
699 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
700 GEN8_F2_S_ENA_SHIFT;
701 ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
702 GEN9_F2_SS_DIS_SHIFT;
703
704 info->slice_total = hweight32(s_enable);
705 /*
706 * The subslice disable field is global, i.e. it applies
707 * to each of the enabled slices.
708 */
709 info->subslice_per_slice = ss_max - hweight32(ss_disable);
710 info->subslice_total = info->slice_total *
711 info->subslice_per_slice;
712
713 /*
714 * Iterate through enabled slices and subslices to
715 * count the total enabled EU.
716 */
717 for (s = 0; s < s_max; s++) {
718 if (!(s_enable & (0x1 << s)))
719 /* skip disabled slice */
720 continue;
721
722 eu_disable = I915_READ(GEN9_EU_DISABLE(s));
723 for (ss = 0; ss < ss_max; ss++) {
724 int eu_per_ss;
725
726 if (ss_disable & (0x1 << ss))
727 /* skip disabled subslice */
728 continue;
729
730 eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
731 eu_mask);
732
733 /*
734 * Record which subslice(s) have 7 EUs. We
735 * can tune the hash used to spread work among
736 * subslices if they are unbalanced.
737 */
738 if (eu_per_ss == 7)
739 info->subslice_7eu[s] |= 1 << ss;
740
741 info->eu_total += eu_per_ss;
742 }
743 }
744
745 /*
746 * SKL is expected to always have a uniform distribution
747 * of EU across subslices with the exception that any one
748 * EU in any one subslice may be fused off for die
749 * recovery. BXT is expected to be perfectly uniform in EU
750 * distribution.
751 */
752 info->eu_per_subslice = info->subslice_total ?
753 DIV_ROUND_UP(info->eu_total,
754 info->subslice_total) : 0;
755 /*
756 * SKL supports slice power gating on devices with more than
757 * one slice, and supports EU power gating on devices with
758 * more than one EU pair per subslice. BXT supports subslice
759 * power gating on devices with more than one subslice, and
760 * supports EU power gating on devices with more than one EU
761 * pair per subslice.
762 */
763 info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
764 (info->slice_total > 1));
765 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
766 info->has_eu_pg = (info->eu_per_subslice > 2);
767
768 if (IS_BROXTON(dev)) {
769#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss))
770 /*
771 * There is a HW issue in 2x6 fused down parts that requires
772 * Pooled EU to be enabled as a WA. The pool configuration
773 * changes depending upon which subslice is fused down. This
774 * doesn't affect if the device has all 3 subslices enabled.
775 */
776 /* WaEnablePooledEuFor2x6:bxt */
777 info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
778 (info->subslice_per_slice == 2 &&
779 INTEL_REVID(dev) < BXT_REVID_C0));
780
781 info->min_eu_in_pool = 0;
782 if (info->has_pooled_eu) {
783 if (IS_SS_DISABLED(ss_disable, 0) ||
784 IS_SS_DISABLED(ss_disable, 2))
785 info->min_eu_in_pool = 3;
786 else if (IS_SS_DISABLED(ss_disable, 1))
787 info->min_eu_in_pool = 6;
788 else
789 info->min_eu_in_pool = 9;
790 }
791#undef IS_SS_DISABLED
792 }
793}
794
795static void broadwell_sseu_info_init(struct drm_device *dev)
796{
797 struct drm_i915_private *dev_priv = dev->dev_private;
798 struct intel_device_info *info;
799 const int s_max = 3, ss_max = 3, eu_max = 8;
800 int s, ss;
801 u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
802
803 fuse2 = I915_READ(GEN8_FUSE2);
804 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
805 ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
806
807 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
808 eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
809 ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
810 (32 - GEN8_EU_DIS0_S1_SHIFT));
811 eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
812 ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
813 (32 - GEN8_EU_DIS1_S2_SHIFT));
814
815
816 info = (struct intel_device_info *)&dev_priv->info;
817 info->slice_total = hweight32(s_enable);
818
819 /*
820 * The subslice disable field is global, i.e. it applies
821 * to each of the enabled slices.
822 */
823 info->subslice_per_slice = ss_max - hweight32(ss_disable);
824 info->subslice_total = info->slice_total * info->subslice_per_slice;
825
826 /*
827 * Iterate through enabled slices and subslices to
828 * count the total enabled EU.
829 */
830 for (s = 0; s < s_max; s++) {
831 if (!(s_enable & (0x1 << s)))
832 /* skip disabled slice */
833 continue;
834
835 for (ss = 0; ss < ss_max; ss++) {
836 u32 n_disabled;
837
838 if (ss_disable & (0x1 << ss))
839 /* skip disabled subslice */
840 continue;
841
842 n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
843
844 /*
845 * Record which subslices have 7 EUs.
846 */
847 if (eu_max - n_disabled == 7)
848 info->subslice_7eu[s] |= 1 << ss;
849
850 info->eu_total += eu_max - n_disabled;
851 }
852 }
853
854 /*
855 * BDW is expected to always have a uniform distribution of EU across
856 * subslices with the exception that any one EU in any one subslice may
857 * be fused off for die recovery.
858 */
859 info->eu_per_subslice = info->subslice_total ?
860 DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
861
862 /*
863 * BDW supports slice power gating on devices with more than
864 * one slice.
865 */
866 info->has_slice_pg = (info->slice_total > 1);
867 info->has_subslice_pg = 0;
868 info->has_eu_pg = 0;
869}
870
871/*
872 * Determine various intel_device_info fields at runtime.
873 *
874 * Use it when either:
875 * - it's judged too laborious to fill n static structures with the limit
876 * when a simple if statement does the job,
877 * - run-time checks (eg read fuse/strap registers) are needed.
878 *
879 * This function needs to be called:
880 * - after the MMIO has been setup as we are reading registers,
881 * - after the PCH has been detected,
882 * - before the first usage of the fields it can tweak.
883 */
884static void intel_device_info_runtime_init(struct drm_device *dev)
885{
886 struct drm_i915_private *dev_priv = dev->dev_private;
887 struct intel_device_info *info;
888 enum pipe pipe;
889
890 info = (struct intel_device_info *)&dev_priv->info;
891
892 /*
893 * Skylake and Broxton currently don't expose the topmost plane as its
894 * use is exclusive with the legacy cursor and we only want to expose
895 * one of those, not both. Until we can safely expose the topmost plane
896 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
897 * we don't expose the topmost plane at all to prevent ABI breakage
898 * down the line.
899 */
900 if (IS_BROXTON(dev)) {
901 info->num_sprites[PIPE_A] = 2;
902 info->num_sprites[PIPE_B] = 2;
903 info->num_sprites[PIPE_C] = 1;
904 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
905 for_each_pipe(dev_priv, pipe)
906 info->num_sprites[pipe] = 2;
907 else
908 for_each_pipe(dev_priv, pipe)
909 info->num_sprites[pipe] = 1;
910
911 if (i915.disable_display) {
912 DRM_INFO("Display disabled (module parameter)\n");
913 info->num_pipes = 0;
914 } else if (info->num_pipes > 0 &&
915 (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
916 HAS_PCH_SPLIT(dev)) {
917 u32 fuse_strap = I915_READ(FUSE_STRAP);
918 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
919
920 /*
921 * SFUSE_STRAP is supposed to have a bit signalling the display
922 * is fused off. Unfortunately it seems that, at least in
923 * certain cases, fused off display means that PCH display
924 * reads don't land anywhere. In that case, we read 0s.
925 *
926 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
927 * should be set when taking over after the firmware.
928 */
929 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
930 sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
931 (dev_priv->pch_type == PCH_CPT &&
932 !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
933 DRM_INFO("Display fused off, disabling\n");
934 info->num_pipes = 0;
935 } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
936 DRM_INFO("PipeC fused off\n");
937 info->num_pipes -= 1;
938 }
939 } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
940 u32 dfsm = I915_READ(SKL_DFSM);
941 u8 disabled_mask = 0;
942 bool invalid;
943 int num_bits;
944
945 if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
946 disabled_mask |= BIT(PIPE_A);
947 if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
948 disabled_mask |= BIT(PIPE_B);
949 if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
950 disabled_mask |= BIT(PIPE_C);
951
952 num_bits = hweight8(disabled_mask);
953
954 switch (disabled_mask) {
955 case BIT(PIPE_A):
956 case BIT(PIPE_B):
957 case BIT(PIPE_A) | BIT(PIPE_B):
958 case BIT(PIPE_A) | BIT(PIPE_C):
959 invalid = true;
960 break;
961 default:
962 invalid = false;
963 }
964
965 if (num_bits > info->num_pipes || invalid)
966 DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
967 disabled_mask);
968 else
969 info->num_pipes -= num_bits;
970 }
971
972 /* Initialize slice/subslice/EU info */
973 if (IS_CHERRYVIEW(dev))
974 cherryview_sseu_info_init(dev);
975 else if (IS_BROADWELL(dev))
976 broadwell_sseu_info_init(dev);
977 else if (INTEL_INFO(dev)->gen >= 9)
978 gen9_sseu_info_init(dev);
979
980 info->has_snoop = !info->has_llc;
981
982 /* Snooping is broken on BXT A stepping. */
983 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
984 info->has_snoop = false;
985
986 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
987 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
988 DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
989 DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
990 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
991 DRM_DEBUG_DRIVER("Has Pooled EU: %s\n", HAS_POOLED_EU(dev) ? "y" : "n");
992 if (HAS_POOLED_EU(dev))
993 DRM_DEBUG_DRIVER("Min EU in pool: %u\n", info->min_eu_in_pool);
994 DRM_DEBUG_DRIVER("has slice power gating: %s\n",
995 info->has_slice_pg ? "y" : "n");
996 DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
997 info->has_subslice_pg ? "y" : "n");
998 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
999 info->has_eu_pg ? "y" : "n");
1000
1001 i915.enable_execlists =
1002 intel_sanitize_enable_execlists(dev_priv,
1003 i915.enable_execlists);
1004
1005 /*
1006 * i915.enable_ppgtt is read-only, so do an early pass to validate the
1007 * user's requested state against the hardware/driver capabilities. We
1008 * do this now so that we can print out any log messages once rather
1009 * than every time we check intel_enable_ppgtt().
1010 */
1011 i915.enable_ppgtt =
1012 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
1013 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
1014}
1015
1016static void intel_init_dpio(struct drm_i915_private *dev_priv)
1017{
1018 /*
1019 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
1020 * CHV x1 PHY (DP/HDMI D)
1021 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
1022 */
1023 if (IS_CHERRYVIEW(dev_priv)) {
1024 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
1025 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
1026 } else if (IS_VALLEYVIEW(dev_priv)) {
1027 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
1028 }
1029}
1030
1031static int i915_workqueues_init(struct drm_i915_private *dev_priv)
1032{
1033 /*
1034 * The i915 workqueue is primarily used for batched retirement of
1035 * requests (and thus managing bo) once the task has been completed
1036 * by the GPU. i915_gem_retire_requests() is called directly when we
1037 * need high-priority retirement, such as waiting for an explicit
1038 * bo.
1039 *
1040 * It is also used for periodic low-priority events, such as
1041 * idle-timers and recording error state.
1042 *
1043 * All tasks on the workqueue are expected to acquire the dev mutex
1044 * so there is no point in running more than one instance of the
1045 * workqueue at any time. Use an ordered one.
1046 */
1047 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
1048 if (dev_priv->wq == NULL)
1049 goto out_err;
1050
1051 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
1052 if (dev_priv->hotplug.dp_wq == NULL)
1053 goto out_free_wq;
1054
1055 dev_priv->gpu_error.hangcheck_wq =
1056 alloc_ordered_workqueue("i915-hangcheck", 0);
1057 if (dev_priv->gpu_error.hangcheck_wq == NULL)
1058 goto out_free_dp_wq;
1059
1060 return 0;
1061
1062out_free_dp_wq:
1063 destroy_workqueue(dev_priv->hotplug.dp_wq);
1064out_free_wq:
1065 destroy_workqueue(dev_priv->wq);
1066out_err:
1067 DRM_ERROR("Failed to allocate workqueues.\n");
1068
1069 return -ENOMEM;
1070}
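Because dev_priv->wq is allocated as an ordered workqueue, items queued to it can never run concurrently with each other. A minimal usage sketch with a hypothetical work item:

#include <linux/workqueue.h>

static void example_retire_work(struct work_struct *work)
{
	/* runs strictly after any previously queued item has completed */
}

static void example_queue(struct drm_i915_private *dev_priv)
{
	static DECLARE_WORK(retire, example_retire_work);

	queue_work(dev_priv->wq, &retire);
}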
1071
1072static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
1073{
1074 destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
1075 destroy_workqueue(dev_priv->hotplug.dp_wq);
1076 destroy_workqueue(dev_priv->wq);
1077}
1078
1079/**
1080 * i915_driver_init_early - setup state not requiring device access
1081 * @dev_priv: device private
1082 *
1083 * Initialize everything that is a "SW-only" state, that is state not
1084 * requiring accessing the device or exposing the driver via kernel internal
1085 * or userspace interfaces. Example steps belonging here: lock initialization,
1086 * system memory allocation, setting up device specific attributes and
1087 * function hooks not requiring accessing the device.
1088 */
1089static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1090 struct drm_device *dev,
1091 struct intel_device_info *info)
1092{
1093 struct intel_device_info *device_info;
1094 int ret = 0;
1095
1096 if (i915_inject_load_failure())
1097 return -ENODEV;
1098
1099 /* Setup the write-once "constant" device info */
1100 device_info = (struct intel_device_info *)&dev_priv->info;
1101 memcpy(device_info, info, sizeof(dev_priv->info));
1102 device_info->device_id = dev->pdev->device;
1103
1104 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
1105 device_info->gen_mask = BIT(device_info->gen - 1);
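	/* Illustrative: gen == 9 yields gen_mask == BIT(8) == 0x100, so a
	 * generation check reduces to a single mask test instead of an
	 * integer comparison. */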
1106
1107 spin_lock_init(&dev_priv->irq_lock);
1108 spin_lock_init(&dev_priv->gpu_error.lock);
1109 mutex_init(&dev_priv->backlight_lock);
1110 spin_lock_init(&dev_priv->uncore.lock);
1111 spin_lock_init(&dev_priv->mm.object_stat_lock);
1112 spin_lock_init(&dev_priv->mmio_flip_lock);
1113 mutex_init(&dev_priv->sb_lock);
1114 mutex_init(&dev_priv->modeset_restore_lock);
1115 mutex_init(&dev_priv->av_mutex);
1116 mutex_init(&dev_priv->wm.wm_mutex);
1117 mutex_init(&dev_priv->pps_mutex);
1118
1119 ret = i915_workqueues_init(dev_priv);
1120 if (ret < 0)
1121 return ret;
1122
1123 ret = intel_gvt_init(dev_priv);
1124 if (ret < 0)
1125 goto err_workqueues;
1126
1127 /* This must be called before any calls to HAS_PCH_* */
1128 intel_detect_pch(dev);
1129
1130 intel_pm_setup(dev);
1131 intel_init_dpio(dev_priv);
1132 intel_power_domains_init(dev_priv);
1133 intel_irq_init(dev_priv);
1134 intel_init_display_hooks(dev_priv);
1135 intel_init_clock_gating_hooks(dev_priv);
1136 intel_init_audio_hooks(dev_priv);
1137 i915_gem_load_init(dev);
1138
1139 intel_display_crc_init(dev);
1140
1141 i915_dump_device_info(dev_priv);
1142
1143 /* Not all pre-production machines fall into this category, only the
1144 * very first ones. Almost everything should work, except for maybe
1145 * suspend/resume. And we don't implement workarounds that affect only
1146 * pre-production machines. */
1147 if (IS_HSW_EARLY_SDV(dev))
1148 DRM_INFO("This is an early pre-production Haswell machine. "
1149 "It may not be fully functional.\n");
1150
1151 return 0;
1152
1153err_workqueues:
1154 i915_workqueues_cleanup(dev_priv);
1155 return ret;
1156}
1157
1158/**
1159 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
1160 * @dev_priv: device private
1161 */
1162static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
1163{
1164 i915_gem_load_cleanup(dev_priv->dev);
1165 i915_workqueues_cleanup(dev_priv);
1166}
1167
1168static int i915_mmio_setup(struct drm_device *dev)
1169{
1170 struct drm_i915_private *dev_priv = to_i915(dev);
1171 int mmio_bar;
1172 int mmio_size;
1173
1174 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1175 /*
1176 * Before gen4, the registers and the GTT are behind different BARs.
1177 * However, from gen4 onwards, the registers and the GTT are shared
1178 * in the same BAR, so we want to restrict this ioremap from
1179 * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
1180 * the register BAR remains the same size for all the earlier
1181 * generations up to Ironlake.
1182 */
1183 if (INTEL_INFO(dev)->gen < 5)
1184 mmio_size = 512 * 1024;
1185 else
1186 mmio_size = 2 * 1024 * 1024;
1187 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1188 if (dev_priv->regs == NULL) {
1189 DRM_ERROR("failed to map registers\n");
1190
1191 return -EIO;
1192 }
1193
1194 /* Try to make sure MCHBAR is enabled before poking at it */
1195 intel_setup_mchbar(dev);
1196
1197 return 0;
1198}
1199
1200static void i915_mmio_cleanup(struct drm_device *dev)
1201{
1202 struct drm_i915_private *dev_priv = to_i915(dev);
1203
1204 intel_teardown_mchbar(dev);
1205 pci_iounmap(dev->pdev, dev_priv->regs);
1206}
1207
1208/**
1209 * i915_driver_init_mmio - setup device MMIO
1210 * @dev_priv: device private
1211 *
1212 * Setup minimal device state necessary for MMIO accesses later in the
1213 * initialization sequence. The setup here should avoid any other device-wide
1214 * side effects or exposing the driver via kernel internal or user space
1215 * interfaces.
1216 */
1217static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
1218{
1219 struct drm_device *dev = dev_priv->dev;
1220 int ret;
1221
1222 if (i915_inject_load_failure())
1223 return -ENODEV;
1224
1225 if (i915_get_bridge_dev(dev))
1226 return -EIO;
1227
1228 ret = i915_mmio_setup(dev);
1229 if (ret < 0)
1230 goto put_bridge;
1231
1232 intel_uncore_init(dev_priv);
1233
1234 return 0;
1235
1236put_bridge:
1237 pci_dev_put(dev_priv->bridge_dev);
1238
1239 return ret;
1240}
1241
1242/**
1243 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
1244 * @dev_priv: device private
1245 */
1246static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1247{
1248 struct drm_device *dev = dev_priv->dev;
1249
1250 intel_uncore_fini(dev_priv);
1251 i915_mmio_cleanup(dev);
1252 pci_dev_put(dev_priv->bridge_dev);
1253}
1254
1255/**
1256 * i915_driver_init_hw - setup state requiring device access
1257 * @dev_priv: device private
1258 *
1259 * Setup state that requires accessing the device, but doesn't require
1260 * exposing the driver via kernel internal or userspace interfaces.
1261 */
1262static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1263{
1264 struct drm_device *dev = dev_priv->dev;
1265 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1266 uint32_t aperture_size;
1267 int ret;
1268
1269 if (i915_inject_load_failure())
1270 return -ENODEV;
1271
1272 intel_device_info_runtime_init(dev);
1273
1274 ret = i915_ggtt_init_hw(dev);
1275 if (ret)
1276 return ret;
1277
1278 ret = i915_ggtt_enable_hw(dev);
1279 if (ret) {
1280 DRM_ERROR("failed to enable GGTT\n");
1281 goto out_ggtt;
1282 }
1283
1284 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1285 * otherwise the vga fbdev driver falls over. */
1286 ret = i915_kick_out_firmware_fb(dev_priv);
1287 if (ret) {
1288 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1289 goto out_ggtt;
1290 }
1291
1292 ret = i915_kick_out_vgacon(dev_priv);
1293 if (ret) {
1294 DRM_ERROR("failed to remove conflicting VGA console\n");
1295 goto out_ggtt;
1296 }
1297
1298 pci_set_master(dev->pdev);
1299
1300 /* overlay on gen2 is broken and can't address above 1G */
1301 if (IS_GEN2(dev)) {
1302 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1303 if (ret) {
1304 DRM_ERROR("failed to set DMA mask\n");
1305
1306 goto out_ggtt;
1307 }
1308 }
1309
1310
1311 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1312 * using 32bit addressing, overwriting memory if HWS is located
1313 * above 4GB.
1314 *
1315 * The documentation also mentions an issue with undefined
1316 * behaviour if any general state is accessed within a page above 4GB,
1317 * which also needs to be handled carefully.
1318 */
1319 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
1320 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1321
1322 if (ret) {
1323 DRM_ERROR("failed to set DMA mask\n");
1324
1325 goto out_ggtt;
1326 }
1327 }
1328
1329 aperture_size = ggtt->mappable_end;
1330
1331 ggtt->mappable =
1332 io_mapping_create_wc(ggtt->mappable_base,
1333 aperture_size);
1334 if (!ggtt->mappable) {
1335 ret = -EIO;
1336 goto out_ggtt;
1337 }
1338
1339 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
1340 aperture_size);
1341
1342 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1343 PM_QOS_DEFAULT_VALUE);
1344
1345 intel_uncore_sanitize(dev_priv);
1346
1347 intel_opregion_setup(dev_priv);
1348
1349 i915_gem_load_init_fences(dev_priv);
1350
1351 /* On the 945G/GM, the chipset reports the MSI capability on the
1352 * integrated graphics even though the support isn't actually there
1353 * according to the published specs. It doesn't appear to function
1354 * correctly in testing on 945G.
1355 * This may be a side effect of MSI having been made available for PEG
1356 * and the registers being closely associated.
1357 *
1358 * According to chipset errata, on the 965GM, MSI interrupts may
1359 * be lost or delayed, but we use them anyway to avoid
1360 * stuck interrupts on some machines.
1361 */
1362 if (!IS_I945G(dev) && !IS_I945GM(dev)) {
1363 if (pci_enable_msi(dev->pdev) < 0)
1364 DRM_DEBUG_DRIVER("can't enable MSI");
1365 }
1366
1367 return 0;
1368
1369out_ggtt:
1370 i915_ggtt_cleanup_hw(dev);
1371
1372 return ret;
1373}
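
For reference, the DMA_BIT_MASK() values used above come from <linux/dma-mapping.h> and expand to inclusive address masks, so DMA_BIT_MASK(30) confines coherent allocations to the first 1 GiB (the gen2 overlay limit) and DMA_BIT_MASK(32) to the first 4 GiB (the 965GM HWS limit):

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
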
1374
1375/**
1376 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1377 * @dev_priv: device private
1378 */
1379static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1380{
1381 struct drm_device *dev = dev_priv->dev;
1382 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1383
1384 if (dev->pdev->msi_enabled)
1385 pci_disable_msi(dev->pdev);
1386
1387 pm_qos_remove_request(&dev_priv->pm_qos);
1388 arch_phys_wc_del(ggtt->mtrr);
1389 io_mapping_free(ggtt->mappable);
1390 i915_ggtt_cleanup_hw(dev);
1391}
1392
1393/**
1394 * i915_driver_register - register the driver with the rest of the system
1395 * @dev_priv: device private
1396 *
1397 * Perform any steps necessary to make the driver available via kernel
1398 * internal or userspace interfaces.
1399 */
1400static void i915_driver_register(struct drm_i915_private *dev_priv)
1401{
1402 struct drm_device *dev = dev_priv->dev;
1403
1404 i915_gem_shrinker_init(dev_priv);
1405 /*
1406 * Notify a valid surface after modesetting,
1407 * when running inside a VM.
1408 */
1409 if (intel_vgpu_active(dev_priv))
1410 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1411
1412 i915_setup_sysfs(dev);
1413
1414 if (INTEL_INFO(dev_priv)->num_pipes) {
1415 /* Must be done after probing outputs */
1416 intel_opregion_register(dev_priv);
1417 acpi_video_register();
1418 }
1419
1420 if (IS_GEN5(dev_priv))
1421 intel_gpu_ips_init(dev_priv);
1422
1423 i915_audio_component_init(dev_priv);
1424}
1425
1426/**
1427 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1428 * @dev_priv: device private
1429 */
1430static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1431{
1432 i915_audio_component_cleanup(dev_priv);
1433 intel_gpu_ips_teardown();
1434 acpi_video_unregister();
1435 intel_opregion_unregister(dev_priv);
1436 i915_teardown_sysfs(dev_priv->dev);
1437 i915_gem_shrinker_cleanup(dev_priv);
1438}
1439
1440/**
1441 * i915_driver_load - setup chip and create an initial config
1442 * @dev: DRM device
1443 * @flags: startup flags
1444 *
1445 * The driver load routine has to do several things:
1446 * - drive output discovery via intel_modeset_init()
1447 * - initialize the memory manager
1448 * - allocate initial config memory
1449 * - setup the DRM framebuffer with the allocated memory
1450 */
1451int i915_driver_load(struct drm_device *dev, unsigned long flags)
1452{
1453 struct drm_i915_private *dev_priv;
1454 int ret = 0;
1455
1456 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1457 if (dev_priv == NULL)
1458 return -ENOMEM;
1459
1460 dev->dev_private = dev_priv;
1461 /* Must be set before calling __i915_printk */
1462 dev_priv->dev = dev;
1463
1464 ret = i915_driver_init_early(dev_priv, dev,
1465 (struct intel_device_info *)flags);
1466
1467 if (ret < 0)
1468 goto out_free_priv;
1469
1470 intel_runtime_pm_get(dev_priv);
1471
1472 ret = i915_driver_init_mmio(dev_priv);
1473 if (ret < 0)
1474 goto out_runtime_pm_put;
1475
1476 ret = i915_driver_init_hw(dev_priv);
1477 if (ret < 0)
1478 goto out_cleanup_mmio;
1479
1480 /*
1481 * TODO: move the vblank init and parts of modeset init steps into one
1482 * of the i915_driver_init_/i915_driver_register functions according
1483 * to the role/effect of the given init step.
1484 */
1485 if (INTEL_INFO(dev)->num_pipes) {
1486 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1487 if (ret)
1488 goto out_cleanup_hw;
1489 }
1490
1491 ret = i915_load_modeset_init(dev);
1492 if (ret < 0)
1493 goto out_cleanup_vblank;
1494
1495 i915_driver_register(dev_priv);
1496
1497 intel_runtime_pm_enable(dev_priv);
1498
1499 intel_runtime_pm_put(dev_priv);
1500
1501 return 0;
1502
1503out_cleanup_vblank:
1504 drm_vblank_cleanup(dev);
1505out_cleanup_hw:
1506 i915_driver_cleanup_hw(dev_priv);
1507out_cleanup_mmio:
1508 i915_driver_cleanup_mmio(dev_priv);
1509out_runtime_pm_put:
1510 intel_runtime_pm_put(dev_priv);
1511 i915_driver_cleanup_early(dev_priv);
1512out_free_priv:
1513 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
1514
1515 kfree(dev_priv);
1516
1517 return ret;
1518}
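
The error path above is the kernel's usual goto-based "onion" unwind: each init step gains a label that tears down everything initialized before it, in reverse order, so a failure at any depth unwinds exactly as far as needed. A minimal sketch of the shape (names hypothetical):

	static int example_load(void)
	{
		int ret;

		ret = init_a();
		if (ret)
			return ret;

		ret = init_b();
		if (ret)
			goto out_a;

		ret = init_c();
		if (ret)
			goto out_b;

		return 0;

	out_b:
		cleanup_b();
	out_a:
		cleanup_a();
		return ret;
	}
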
1519
1520int i915_driver_unload(struct drm_device *dev)
1521{
1522 struct drm_i915_private *dev_priv = dev->dev_private;
1523 int ret;
1524
1525 intel_fbdev_fini(dev);
1526
1527 intel_gvt_cleanup(dev_priv);
1528
1529 ret = i915_gem_suspend(dev);
1530 if (ret) {
1531 DRM_ERROR("failed to idle hardware: %d\n", ret);
1532 return ret;
1533 }
1534
1535 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1536
1537 i915_driver_unregister(dev_priv);
1538
1539 drm_vblank_cleanup(dev);
1540
1541 intel_modeset_cleanup(dev);
1542
1543 /*
1544 * free the memory space allocated for the child device
1545 * config parsed from VBT
1546 */
1547 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1548 kfree(dev_priv->vbt.child_dev);
1549 dev_priv->vbt.child_dev = NULL;
1550 dev_priv->vbt.child_dev_num = 0;
1551 }
1552 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1553 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1554 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1555 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1556
1557 vga_switcheroo_unregister_client(dev->pdev);
1558 vga_client_register(dev->pdev, NULL, NULL, NULL);
1559
1560 intel_csr_ucode_fini(dev_priv);
1561
1562 /* Free error state after interrupts are fully disabled. */
1563 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1564 i915_destroy_error_state(dev);
1565
1566 /* Flush any outstanding unpin_work. */
1567 flush_workqueue(dev_priv->wq);
1568
1569 intel_guc_fini(dev);
1570 i915_gem_fini(dev);
1571 intel_fbc_cleanup_cfb(dev_priv);
1572
1573 intel_power_domains_fini(dev_priv);
1574
1575 i915_driver_cleanup_hw(dev_priv);
1576 i915_driver_cleanup_mmio(dev_priv);
1577
1578 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1579
1580 i915_driver_cleanup_early(dev_priv);
1581 kfree(dev_priv);
1582
1583 return 0;
1584}
1585
1586int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1587{
1588 int ret;
1589
1590 ret = i915_gem_open(dev, file);
1591 if (ret)
1592 return ret;
1593
1594 return 0;
1595}
1596
1597/**
1598 * i915_driver_lastclose - clean up after all DRM clients have exited
1599 * @dev: DRM device
1600 *
1601 * Take care of cleaning up after all DRM clients have exited. In the
1602 * mode setting case, we want to restore the kernel's initial mode (just
1603 * in case the last client left us in a bad state).
1604 *
1605 * Additionally, in the non-mode setting case, we'll tear down the GTT
1606 * and DMA structures, since the kernel won't be using them, and clean
1607 * up any GEM state.
1608 */
1609void i915_driver_lastclose(struct drm_device *dev)
1610{
1611 intel_fbdev_restore_mode(dev);
1612 vga_switcheroo_process_delayed_switch();
1613}
1614
1615void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1616{
1617 mutex_lock(&dev->struct_mutex);
1618 i915_gem_context_close(dev, file);
1619 i915_gem_release(dev, file);
1620 mutex_unlock(&dev->struct_mutex);
1621}
1622
1623void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1624{
1625 struct drm_i915_file_private *file_priv = file->driver_priv;
1626
1627 kfree(file_priv);
1628}
1629
1630static int
1631i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
1632 struct drm_file *file)
1633{
1634 return -ENODEV;
1635}
1636
1637const struct drm_ioctl_desc i915_ioctls[] = {
1638 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1639 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
1640 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
1641 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
1642 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
1643 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1644 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1645 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1646 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1647 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1648 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1649 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
1650 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1651 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1652 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
1653 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
1654 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1655 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1656 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1657 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
1658 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1659 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1660 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1661 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
1662 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1663 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1664 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1665 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1666 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
1667 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
1668 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
1669 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1670 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
1671 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
1672 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1673 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
1674 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
1675 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1676 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1677 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1678 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
1679 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
1680 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1681 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1682 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1683 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1684 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1685 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1686 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
1687 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1688 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1689 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1690};
1691
1692int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
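
The DRM core, not the driver, dispatches into this table: i915_ioctls and i915_max_ioctl are plugged into struct drm_driver, and drm_ioctl() offsets driver-private ioctl numbers by DRM_COMMAND_BASE and bounds-checks them before calling the handler. A simplified sketch of that lookup (not the literal drm_ioctl() code):

	unsigned int nr = DRM_IOCTL_NR(cmd);
	const struct drm_ioctl_desc *ioctl = NULL;

	if (nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END &&
	    nr < DRM_COMMAND_BASE + i915_max_ioctl)
		ioctl = &i915_ioctls[nr - DRM_COMMAND_BASE];
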
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3eb47fbcea73..b9a811750ca8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,403 +27,92 @@
 27 *
 28 */
 29
-30#include <linux/device.h>
 31#include <linux/acpi.h>
+31#include <linux/device.h>
+32#include <linux/oom.h>
+33#include <linux/module.h>
+34#include <linux/pci.h>
+35#include <linux/pm.h>
+36#include <linux/pm_runtime.h>
+37#include <linux/pnp.h>
+38#include <linux/slab.h>
+39#include <linux/vgaarb.h>
+40#include <linux/vga_switcheroo.h>
+41#include <linux/vt.h>
+42#include <acpi/video.h>
+43
 32#include <drm/drmP.h>
+45#include <drm/drm_crtc_helper.h>
 33#include <drm/i915_drm.h>
+47
 34#include "i915_drv.h"
 35#include "i915_trace.h"
+50#include "i915_vgpu.h"
 36#include "intel_drv.h"
 37
-38#include <linux/console.h>
-39#include <linux/module.h>
-40#include <linux/pm_runtime.h>
-41#include <linux/vga_switcheroo.h>
-42#include <drm/drm_crtc_helper.h>
-43
 44static struct drm_driver driver;
 45
+55static unsigned int i915_load_fail_count;
-46#define GEN_DEFAULT_PIPEOFFSETS \
47 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
48 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
49 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
50 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
51 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
52
53#define GEN_CHV_PIPEOFFSETS \
54 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
55 CHV_PIPE_C_OFFSET }, \
56 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
57 CHV_TRANSCODER_C_OFFSET, }, \
58 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
59 CHV_PALETTE_C_OFFSET }
60
61#define CURSOR_OFFSETS \
62 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
63
64#define IVB_CURSOR_OFFSETS \
65 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
66
67#define BDW_COLORS \
68 .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
69#define CHV_COLORS \
70 .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
71
72static const struct intel_device_info intel_i830_info = {
73 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
74 .has_overlay = 1, .overlay_needs_physical = 1,
75 .ring_mask = RENDER_RING,
76 GEN_DEFAULT_PIPEOFFSETS,
77 CURSOR_OFFSETS,
78};
79
80static const struct intel_device_info intel_845g_info = {
81 .gen = 2, .num_pipes = 1,
82 .has_overlay = 1, .overlay_needs_physical = 1,
83 .ring_mask = RENDER_RING,
84 GEN_DEFAULT_PIPEOFFSETS,
85 CURSOR_OFFSETS,
86};
87
88static const struct intel_device_info intel_i85x_info = {
89 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
90 .cursor_needs_physical = 1,
91 .has_overlay = 1, .overlay_needs_physical = 1,
92 .has_fbc = 1,
93 .ring_mask = RENDER_RING,
94 GEN_DEFAULT_PIPEOFFSETS,
95 CURSOR_OFFSETS,
96};
97
98static const struct intel_device_info intel_i865g_info = {
99 .gen = 2, .num_pipes = 1,
100 .has_overlay = 1, .overlay_needs_physical = 1,
101 .ring_mask = RENDER_RING,
102 GEN_DEFAULT_PIPEOFFSETS,
103 CURSOR_OFFSETS,
104};
105
106static const struct intel_device_info intel_i915g_info = {
107 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
108 .has_overlay = 1, .overlay_needs_physical = 1,
109 .ring_mask = RENDER_RING,
110 GEN_DEFAULT_PIPEOFFSETS,
111 CURSOR_OFFSETS,
112};
113static const struct intel_device_info intel_i915gm_info = {
114 .gen = 3, .is_mobile = 1, .num_pipes = 2,
115 .cursor_needs_physical = 1,
116 .has_overlay = 1, .overlay_needs_physical = 1,
117 .supports_tv = 1,
118 .has_fbc = 1,
119 .ring_mask = RENDER_RING,
120 GEN_DEFAULT_PIPEOFFSETS,
121 CURSOR_OFFSETS,
122};
123static const struct intel_device_info intel_i945g_info = {
124 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
125 .has_overlay = 1, .overlay_needs_physical = 1,
126 .ring_mask = RENDER_RING,
127 GEN_DEFAULT_PIPEOFFSETS,
128 CURSOR_OFFSETS,
129};
130static const struct intel_device_info intel_i945gm_info = {
131 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
132 .has_hotplug = 1, .cursor_needs_physical = 1,
133 .has_overlay = 1, .overlay_needs_physical = 1,
134 .supports_tv = 1,
135 .has_fbc = 1,
136 .ring_mask = RENDER_RING,
137 GEN_DEFAULT_PIPEOFFSETS,
138 CURSOR_OFFSETS,
139};
140
141static const struct intel_device_info intel_i965g_info = {
142 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
143 .has_hotplug = 1,
144 .has_overlay = 1,
145 .ring_mask = RENDER_RING,
146 GEN_DEFAULT_PIPEOFFSETS,
147 CURSOR_OFFSETS,
148};
149
150static const struct intel_device_info intel_i965gm_info = {
151 .gen = 4, .is_crestline = 1, .num_pipes = 2,
152 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
153 .has_overlay = 1,
154 .supports_tv = 1,
155 .ring_mask = RENDER_RING,
156 GEN_DEFAULT_PIPEOFFSETS,
157 CURSOR_OFFSETS,
158};
159
160static const struct intel_device_info intel_g33_info = {
161 .gen = 3, .is_g33 = 1, .num_pipes = 2,
162 .need_gfx_hws = 1, .has_hotplug = 1,
163 .has_overlay = 1,
164 .ring_mask = RENDER_RING,
165 GEN_DEFAULT_PIPEOFFSETS,
166 CURSOR_OFFSETS,
167};
168
169static const struct intel_device_info intel_g45_info = {
170 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
171 .has_pipe_cxsr = 1, .has_hotplug = 1,
172 .ring_mask = RENDER_RING | BSD_RING,
173 GEN_DEFAULT_PIPEOFFSETS,
174 CURSOR_OFFSETS,
175};
176
177static const struct intel_device_info intel_gm45_info = {
178 .gen = 4, .is_g4x = 1, .num_pipes = 2,
179 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
180 .has_pipe_cxsr = 1, .has_hotplug = 1,
181 .supports_tv = 1,
182 .ring_mask = RENDER_RING | BSD_RING,
183 GEN_DEFAULT_PIPEOFFSETS,
184 CURSOR_OFFSETS,
185};
186
187static const struct intel_device_info intel_pineview_info = {
188 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
189 .need_gfx_hws = 1, .has_hotplug = 1,
190 .has_overlay = 1,
191 GEN_DEFAULT_PIPEOFFSETS,
192 CURSOR_OFFSETS,
193};
194
195static const struct intel_device_info intel_ironlake_d_info = {
196 .gen = 5, .num_pipes = 2,
197 .need_gfx_hws = 1, .has_hotplug = 1,
198 .ring_mask = RENDER_RING | BSD_RING,
199 GEN_DEFAULT_PIPEOFFSETS,
200 CURSOR_OFFSETS,
201};
202
203static const struct intel_device_info intel_ironlake_m_info = {
204 .gen = 5, .is_mobile = 1, .num_pipes = 2,
205 .need_gfx_hws = 1, .has_hotplug = 1,
206 .has_fbc = 1,
207 .ring_mask = RENDER_RING | BSD_RING,
208 GEN_DEFAULT_PIPEOFFSETS,
209 CURSOR_OFFSETS,
210};
211
212static const struct intel_device_info intel_sandybridge_d_info = {
213 .gen = 6, .num_pipes = 2,
214 .need_gfx_hws = 1, .has_hotplug = 1,
215 .has_fbc = 1,
216 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
217 .has_llc = 1,
218 GEN_DEFAULT_PIPEOFFSETS,
219 CURSOR_OFFSETS,
220};
221
222static const struct intel_device_info intel_sandybridge_m_info = {
223 .gen = 6, .is_mobile = 1, .num_pipes = 2,
224 .need_gfx_hws = 1, .has_hotplug = 1,
225 .has_fbc = 1,
226 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
227 .has_llc = 1,
228 GEN_DEFAULT_PIPEOFFSETS,
229 CURSOR_OFFSETS,
230};
231
232#define GEN7_FEATURES \
233 .gen = 7, .num_pipes = 3, \
234 .need_gfx_hws = 1, .has_hotplug = 1, \
235 .has_fbc = 1, \
236 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
237 .has_llc = 1, \
238 GEN_DEFAULT_PIPEOFFSETS, \
239 IVB_CURSOR_OFFSETS
240
241static const struct intel_device_info intel_ivybridge_d_info = {
242 GEN7_FEATURES,
243 .is_ivybridge = 1,
244};
245
246static const struct intel_device_info intel_ivybridge_m_info = {
247 GEN7_FEATURES,
248 .is_ivybridge = 1,
249 .is_mobile = 1,
250};
251
252static const struct intel_device_info intel_ivybridge_q_info = {
253 GEN7_FEATURES,
254 .is_ivybridge = 1,
255 .num_pipes = 0, /* legal, last one wins */
256};
257
258#define VLV_FEATURES \
259 .gen = 7, .num_pipes = 2, \
260 .need_gfx_hws = 1, .has_hotplug = 1, \
261 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
262 .display_mmio_offset = VLV_DISPLAY_BASE, \
263 GEN_DEFAULT_PIPEOFFSETS, \
264 CURSOR_OFFSETS
265
266static const struct intel_device_info intel_valleyview_m_info = {
267 VLV_FEATURES,
268 .is_valleyview = 1,
269 .is_mobile = 1,
270};
271
272static const struct intel_device_info intel_valleyview_d_info = {
273 VLV_FEATURES,
274 .is_valleyview = 1,
275};
276
277#define HSW_FEATURES \
278 GEN7_FEATURES, \
279 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
280 .has_ddi = 1, \
281 .has_fpga_dbg = 1
-282
-283static const struct intel_device_info intel_haswell_d_info = {
-284	HSW_FEATURES,
-285	.is_haswell = 1,
-286};
-287
-288static const struct intel_device_info intel_haswell_m_info = {
-289	HSW_FEATURES,
-290	.is_haswell = 1,
-291	.is_mobile = 1,
-292};
-293
-294#define BDW_FEATURES \
-295	HSW_FEATURES, \
-296	BDW_COLORS
-297
-298static const struct intel_device_info intel_broadwell_d_info = {
-299	BDW_FEATURES,
-300	.gen = 8,
-301	.is_broadwell = 1,
-302};
-303
-304static const struct intel_device_info intel_broadwell_m_info = {
-305	BDW_FEATURES,
-306	.gen = 8, .is_mobile = 1,
-307	.is_broadwell = 1,
-308};
-309
-310static const struct intel_device_info intel_broadwell_gt3d_info = {
-311	BDW_FEATURES,
-312	.gen = 8,
-313	.is_broadwell = 1,
-314	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-315};
-316
-317static const struct intel_device_info intel_broadwell_gt3m_info = {
-318	BDW_FEATURES,
-319	.gen = 8, .is_mobile = 1,
-320	.is_broadwell = 1,
-321	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-322};
-323
-324static const struct intel_device_info intel_cherryview_info = {
-325	.gen = 8, .num_pipes = 3,
-326	.need_gfx_hws = 1, .has_hotplug = 1,
-327	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-328	.is_cherryview = 1,
-329	.display_mmio_offset = VLV_DISPLAY_BASE,
-330	GEN_CHV_PIPEOFFSETS,
-331	CURSOR_OFFSETS,
-332	CHV_COLORS,
-333};
-334
-335static const struct intel_device_info intel_skylake_info = {
-336	BDW_FEATURES,
-337	.is_skylake = 1,
-338	.gen = 9,
-339};
-340
-341static const struct intel_device_info intel_skylake_gt3_info = {
-342	BDW_FEATURES,
-343	.is_skylake = 1,
-344	.gen = 9,
-345	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-346};
-347
-348static const struct intel_device_info intel_broxton_info = {
-349	.is_preliminary = 1,
-350	.is_broxton = 1,
-351	.gen = 9,
-352	.need_gfx_hws = 1, .has_hotplug = 1,
-353	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
-354	.num_pipes = 3,
-355	.has_ddi = 1,
-356	.has_fpga_dbg = 1,
-357	.has_fbc = 1,
-358	.has_pooled_eu = 0,
-359	GEN_DEFAULT_PIPEOFFSETS,
-360	IVB_CURSOR_OFFSETS,
-361	BDW_COLORS,
-362};
-363
-364static const struct intel_device_info intel_kabylake_info = {
-365	BDW_FEATURES,
-366	.is_kabylake = 1,
-367	.gen = 9,
-368};
-369
-370static const struct intel_device_info intel_kabylake_gt3_info = {
-371	BDW_FEATURES,
-372	.is_kabylake = 1,
-373	.gen = 9,
-374	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
-375};
-376
-377/*
-378 * Make sure any device matches here are from most specific to most
-379 * general. For example, since the Quanta match is based on the subsystem
-380 * and subvendor IDs, we need it to come before the more general IVB
-381 * PCI ID matches, otherwise we'll use the wrong info struct above.
-382 */
-383static const struct pci_device_id pciidlist[] = {
-384	INTEL_I830_IDS(&intel_i830_info),
-385	INTEL_I845G_IDS(&intel_845g_info),
-386	INTEL_I85X_IDS(&intel_i85x_info),
-387	INTEL_I865G_IDS(&intel_i865g_info),
-388	INTEL_I915G_IDS(&intel_i915g_info),
-389	INTEL_I915GM_IDS(&intel_i915gm_info),
-390	INTEL_I945G_IDS(&intel_i945g_info),
-391	INTEL_I945GM_IDS(&intel_i945gm_info),
-392	INTEL_I965G_IDS(&intel_i965g_info),
-393	INTEL_G33_IDS(&intel_g33_info),
-394	INTEL_I965GM_IDS(&intel_i965gm_info),
-395	INTEL_GM45_IDS(&intel_gm45_info),
-396	INTEL_G45_IDS(&intel_g45_info),
-397	INTEL_PINEVIEW_IDS(&intel_pineview_info),
-398	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
-399	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
-400	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
-401	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
-402	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
-403	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
-404	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
-405	INTEL_HSW_D_IDS(&intel_haswell_d_info),
-406	INTEL_HSW_M_IDS(&intel_haswell_m_info),
-407	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
-408	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
-409	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
-410	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
-411	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
-412	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
-413	INTEL_CHV_IDS(&intel_cherryview_info),
-414	INTEL_SKL_GT1_IDS(&intel_skylake_info),
-415	INTEL_SKL_GT2_IDS(&intel_skylake_info),
-416	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
-417	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
-418	INTEL_BXT_IDS(&intel_broxton_info),
-419	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
-420	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
-421	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
-422	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
-423	{0, 0, 0}
-424};
-425
-426MODULE_DEVICE_TABLE(pci, pciidlist);
-427
+56
+57bool __i915_inject_load_failure(const char *func, int line)
+58{
+59	if (i915_load_fail_count >= i915.inject_load_failure)
+60		return false;
+61
+62	if (++i915_load_fail_count == i915.inject_load_failure) {
+63		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+64			 i915.inject_load_failure, func, line);
+65		return true;
+66	}
+67
+68	return false;
+69}
+70
+71#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+72#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+73		    "providing the dmesg log by booting with drm.debug=0xf"
+74
+75void
+76__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+77	      const char *fmt, ...)
+78{
+79	static bool shown_bug_once;
+80	struct device *dev = dev_priv->drm.dev;
+81	bool is_error = level[1] <= KERN_ERR[1];
+82	bool is_debug = level[1] == KERN_DEBUG[1];
+83	struct va_format vaf;
+84	va_list args;
+85
+86	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+87		return;
+88
+89	va_start(args, fmt);
+90
+91	vaf.fmt = fmt;
+92	vaf.va = &args;
+93
+94	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+95		   __builtin_return_address(0), &vaf);
+96
+97	if (is_error && !shown_bug_once) {
+98		dev_notice(dev, "%s", FDO_BUG_MSG);
+99		shown_bug_once = true;
+100	}
+101
+102	va_end(args);
+103}
+104
+105static bool i915_error_injected(struct drm_i915_private *dev_priv)
+106{
+107	return i915.inject_load_failure &&
+108	       i915_load_fail_count == i915.inject_load_failure;
+109}
+110
+111#define i915_load_error(dev_priv, fmt, ...) \
+112	__i915_printk(dev_priv, \
+113		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+114		      fmt, ##__VA_ARGS__)
+115
+116
 428static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
 429{
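
The i915_inject_load_failure() checks sprinkled through the init functions added below presumably reach __i915_inject_load_failure() through a wrapper macro along these lines (defined in i915_drv.h, which is not part of this hunk):

	#define i915_inject_load_failure() \
		__i915_inject_load_failure(__func__, __LINE__)

Booting with i915.inject_load_failure=N then makes the N-th checkpoint report a failure, exercising the corresponding error-unwind path.
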
@@ -453,9 +142,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
 453	return ret;
 454}
 455
-456void intel_detect_pch(struct drm_device *dev)
+145static void intel_detect_pch(struct drm_device *dev)
 457{
-458	struct drm_i915_private *dev_priv = dev->dev_private;
+147	struct drm_i915_private *dev_priv = to_i915(dev);
 459	struct pci_dev *pch = NULL;
 460
 461	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
@@ -515,6 +204,10 @@ void intel_detect_pch(struct drm_device *dev)
 515		DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
 516		WARN_ON(!IS_SKYLAKE(dev) &&
 517			!IS_KABYLAKE(dev));
+207	} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+208		dev_priv->pch_type = PCH_KBP;
+209		DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+210		WARN_ON(!IS_KABYLAKE(dev));
 518	} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
 519		   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 520		   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
@@ -556,9 +249,1163 @@ bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
 556	return true;
 557}
 558
252static int i915_getparam(struct drm_device *dev, void *data,
253 struct drm_file *file_priv)
254{
255 struct drm_i915_private *dev_priv = to_i915(dev);
256 drm_i915_getparam_t *param = data;
257 int value;
258
259 switch (param->param) {
260 case I915_PARAM_IRQ_ACTIVE:
261 case I915_PARAM_ALLOW_BATCHBUFFER:
262 case I915_PARAM_LAST_DISPATCH:
263 /* Reject all old ums/dri params. */
264 return -ENODEV;
265 case I915_PARAM_CHIPSET_ID:
266 value = dev->pdev->device;
267 break;
268 case I915_PARAM_REVISION:
269 value = dev->pdev->revision;
270 break;
271 case I915_PARAM_HAS_GEM:
272 value = 1;
273 break;
274 case I915_PARAM_NUM_FENCES_AVAIL:
275 value = dev_priv->num_fence_regs;
276 break;
277 case I915_PARAM_HAS_OVERLAY:
278 value = dev_priv->overlay ? 1 : 0;
279 break;
280 case I915_PARAM_HAS_PAGEFLIPPING:
281 value = 1;
282 break;
283 case I915_PARAM_HAS_EXECBUF2:
284 /* depends on GEM */
285 value = 1;
286 break;
287 case I915_PARAM_HAS_BSD:
288 value = intel_engine_initialized(&dev_priv->engine[VCS]);
289 break;
290 case I915_PARAM_HAS_BLT:
291 value = intel_engine_initialized(&dev_priv->engine[BCS]);
292 break;
293 case I915_PARAM_HAS_VEBOX:
294 value = intel_engine_initialized(&dev_priv->engine[VECS]);
295 break;
296 case I915_PARAM_HAS_BSD2:
297 value = intel_engine_initialized(&dev_priv->engine[VCS2]);
298 break;
299 case I915_PARAM_HAS_RELAXED_FENCING:
300 value = 1;
301 break;
302 case I915_PARAM_HAS_COHERENT_RINGS:
303 value = 1;
304 break;
305 case I915_PARAM_HAS_EXEC_CONSTANTS:
306 value = INTEL_INFO(dev)->gen >= 4;
307 break;
308 case I915_PARAM_HAS_RELAXED_DELTA:
309 value = 1;
310 break;
311 case I915_PARAM_HAS_GEN7_SOL_RESET:
312 value = 1;
313 break;
314 case I915_PARAM_HAS_LLC:
315 value = HAS_LLC(dev);
316 break;
317 case I915_PARAM_HAS_WT:
318 value = HAS_WT(dev);
319 break;
320 case I915_PARAM_HAS_ALIASING_PPGTT:
321 value = USES_PPGTT(dev);
322 break;
323 case I915_PARAM_HAS_WAIT_TIMEOUT:
324 value = 1;
325 break;
326 case I915_PARAM_HAS_SEMAPHORES:
327 value = i915_semaphore_is_enabled(dev_priv);
328 break;
329 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
330 value = 1;
331 break;
332 case I915_PARAM_HAS_SECURE_BATCHES:
333 value = capable(CAP_SYS_ADMIN);
334 break;
335 case I915_PARAM_HAS_PINNED_BATCHES:
336 value = 1;
337 break;
338 case I915_PARAM_HAS_EXEC_NO_RELOC:
339 value = 1;
340 break;
341 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
342 value = 1;
343 break;
344 case I915_PARAM_CMD_PARSER_VERSION:
345 value = i915_cmd_parser_get_version(dev_priv);
346 break;
347 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
348 value = 1;
349 break;
350 case I915_PARAM_MMAP_VERSION:
351 value = 1;
352 break;
353 case I915_PARAM_SUBSLICE_TOTAL:
354 value = INTEL_INFO(dev)->subslice_total;
355 if (!value)
356 return -ENODEV;
357 break;
358 case I915_PARAM_EU_TOTAL:
359 value = INTEL_INFO(dev)->eu_total;
360 if (!value)
361 return -ENODEV;
362 break;
363 case I915_PARAM_HAS_GPU_RESET:
364 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
365 break;
366 case I915_PARAM_HAS_RESOURCE_STREAMER:
367 value = HAS_RESOURCE_STREAMER(dev);
368 break;
369 case I915_PARAM_HAS_EXEC_SOFTPIN:
370 value = 1;
371 break;
372 case I915_PARAM_HAS_POOLED_EU:
373 value = HAS_POOLED_EU(dev);
374 break;
375 case I915_PARAM_MIN_EU_IN_POOL:
376 value = INTEL_INFO(dev)->min_eu_in_pool;
377 break;
378 default:
379 DRM_DEBUG("Unknown parameter %d\n", param->param);
380 return -EINVAL;
381 }
382
383 if (put_user(value, param->value))
384 return -EFAULT;
385
386 return 0;
387}
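
Userspace reaches this handler through DRM_IOCTL_I915_GETPARAM. A minimal, illustrative libdrm query (fd is assumed to be an open i915 device node):

	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("PCI device id: 0x%04x\n", value);
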
388
389static int i915_get_bridge_dev(struct drm_device *dev)
390{
391 struct drm_i915_private *dev_priv = to_i915(dev);
392
393 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
394 if (!dev_priv->bridge_dev) {
395 DRM_ERROR("bridge device not found\n");
396 return -1;
397 }
398 return 0;
399}
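
For reference, PCI_DEVFN(slot, func) packs a device/function pair as (slot << 3) | func, so the lookup above targets the host bridge at the canonical 0000:00:00.0 address:

	/* from <linux/pci.h> */
	#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
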
400
401/* Allocate space for the MCH regs if needed, return nonzero on error */
402static int
403intel_alloc_mchbar_resource(struct drm_device *dev)
404{
405 struct drm_i915_private *dev_priv = to_i915(dev);
406 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
407 u32 temp_lo, temp_hi = 0;
408 u64 mchbar_addr;
409 int ret;
410
411 if (INTEL_INFO(dev)->gen >= 4)
412 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
413 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
414 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
415
416 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
417#ifdef CONFIG_PNP
418 if (mchbar_addr &&
419 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
420 return 0;
421#endif
422
423 /* Get some space for it */
424 dev_priv->mch_res.name = "i915 MCHBAR";
425 dev_priv->mch_res.flags = IORESOURCE_MEM;
426 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
427 &dev_priv->mch_res,
428 MCHBAR_SIZE, MCHBAR_SIZE,
429 PCIBIOS_MIN_MEM,
430 0, pcibios_align_resource,
431 dev_priv->bridge_dev);
432 if (ret) {
433 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
434 dev_priv->mch_res.start = 0;
435 return ret;
436 }
437
438 if (INTEL_INFO(dev)->gen >= 4)
439 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
440 upper_32_bits(dev_priv->mch_res.start));
441
442 pci_write_config_dword(dev_priv->bridge_dev, reg,
443 lower_32_bits(dev_priv->mch_res.start));
444 return 0;
445}
446
447/* Setup MCHBAR if possible; mchbar_need_disable records whether we must disable it again on teardown */
448static void
449intel_setup_mchbar(struct drm_device *dev)
450{
451 struct drm_i915_private *dev_priv = to_i915(dev);
452 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
453 u32 temp;
454 bool enabled;
455
456 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
457 return;
458
459 dev_priv->mchbar_need_disable = false;
460
461 if (IS_I915G(dev) || IS_I915GM(dev)) {
462 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
463 enabled = !!(temp & DEVEN_MCHBAR_EN);
464 } else {
465 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
466 enabled = temp & 1;
467 }
468
469 /* If it's already enabled, don't have to do anything */
470 if (enabled)
471 return;
472
473 if (intel_alloc_mchbar_resource(dev))
474 return;
475
476 dev_priv->mchbar_need_disable = true;
477
478 /* Space is allocated or reserved, so enable it. */
479 if (IS_I915G(dev) || IS_I915GM(dev)) {
480 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
481 temp | DEVEN_MCHBAR_EN);
482 } else {
483 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
484 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
485 }
486}
487
488static void
489intel_teardown_mchbar(struct drm_device *dev)
490{
491 struct drm_i915_private *dev_priv = to_i915(dev);
492 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
493
494 if (dev_priv->mchbar_need_disable) {
495 if (IS_I915G(dev) || IS_I915GM(dev)) {
496 u32 deven_val;
497
498 pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
499 &deven_val);
500 deven_val &= ~DEVEN_MCHBAR_EN;
501 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
502 deven_val);
503 } else {
504 u32 mchbar_val;
505
506 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
507 &mchbar_val);
508 mchbar_val &= ~1;
509 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
510 mchbar_val);
511 }
512 }
513
514 if (dev_priv->mch_res.start)
515 release_resource(&dev_priv->mch_res);
516}
517
518/* true = enable decode, false = disable decode */
519static unsigned int i915_vga_set_decode(void *cookie, bool state)
520{
521 struct drm_device *dev = cookie;
522
523 intel_modeset_vga_set_state(dev, state);
524 if (state)
525 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
526 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
527 else
528 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
529}
530
531static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
532{
533 struct drm_device *dev = pci_get_drvdata(pdev);
534 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
535
536 if (state == VGA_SWITCHEROO_ON) {
537 pr_info("switched on\n");
538 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
539 /* i915 resume handler doesn't set to D0 */
540 pci_set_power_state(dev->pdev, PCI_D0);
541 i915_resume_switcheroo(dev);
542 dev->switch_power_state = DRM_SWITCH_POWER_ON;
543 } else {
544 pr_info("switched off\n");
545 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
546 i915_suspend_switcheroo(dev, pmm);
547 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
548 }
549}
550
551static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
552{
553 struct drm_device *dev = pci_get_drvdata(pdev);
554
555 /*
556 * FIXME: open_count is protected by drm_global_mutex but that would lead to
557 * locking inversion with the driver load path. And the access here is
558 * completely racy anyway. So don't bother with locking for now.
559 */
560 return dev->open_count == 0;
561}
562
563static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
564 .set_gpu_state = i915_switcheroo_set_state,
565 .reprobe = NULL,
566 .can_switch = i915_switcheroo_can_switch,
567};
568
569static void i915_gem_fini(struct drm_device *dev)
570{
571 struct drm_i915_private *dev_priv = to_i915(dev);
572
573 /*
574	 * Neither the BIOS, ourselves, nor any other kernel
575 * expects the system to be in execlists mode on startup,
576 * so we need to reset the GPU back to legacy mode. And the only
577 * known way to disable logical contexts is through a GPU reset.
578 *
579 * So in order to leave the system in a known default configuration,
580 * always reset the GPU upon unload. Afterwards we then clean up the
581 * GEM state tracking, flushing off the requests and leaving the
582 * system in a known idle state.
583 *
584	 * Note that it is of the utmost importance that the GPU is idle and
585 * all stray writes are flushed *before* we dismantle the backing
586 * storage for the pinned objects.
587 *
588	 * However, since we are uncertain that resetting the GPU on older
589 * machines is a good idea, we don't - just in case it leaves the
590 * machine in an unusable condition.
591 */
592 if (HAS_HW_CONTEXTS(dev)) {
593 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
594 WARN_ON(reset && reset != -ENODEV);
595 }
596
597 mutex_lock(&dev->struct_mutex);
598 i915_gem_reset(dev);
599 i915_gem_cleanup_engines(dev);
600 i915_gem_context_fini(dev);
601 mutex_unlock(&dev->struct_mutex);
602
603 WARN_ON(!list_empty(&to_i915(dev)->context_list));
604}
605
606static int i915_load_modeset_init(struct drm_device *dev)
607{
608 struct drm_i915_private *dev_priv = to_i915(dev);
609 int ret;
610
611 if (i915_inject_load_failure())
612 return -ENODEV;
613
614 ret = intel_bios_init(dev_priv);
615 if (ret)
616 DRM_INFO("failed to find VBIOS tables\n");
617
618	/* If we have more than one VGA card, then we need to arbitrate access
619 * to the common VGA resources.
620 *
621 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
622 * then we do not take part in VGA arbitration and the
623 * vga_client_register() fails with -ENODEV.
624 */
625 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
626 if (ret && ret != -ENODEV)
627 goto out;
628
629 intel_register_dsm_handler();
630
631 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
632 if (ret)
633 goto cleanup_vga_client;
634
635 /* must happen before intel_power_domains_init_hw() on VLV/CHV */
636 intel_update_rawclk(dev_priv);
637
638 intel_power_domains_init_hw(dev_priv, false);
639
640 intel_csr_ucode_init(dev_priv);
641
642 ret = intel_irq_install(dev_priv);
643 if (ret)
644 goto cleanup_csr;
645
646 intel_setup_gmbus(dev);
647
648 /* Important: The output setup functions called by modeset_init need
649 * working irqs for e.g. gmbus and dp aux transfers. */
650 intel_modeset_init(dev);
651
652 intel_guc_init(dev);
653
654 ret = i915_gem_init(dev);
655 if (ret)
656 goto cleanup_irq;
657
658 intel_modeset_gem_init(dev);
659
660 if (INTEL_INFO(dev)->num_pipes == 0)
661 return 0;
662
663 ret = intel_fbdev_init(dev);
664 if (ret)
665 goto cleanup_gem;
666
667 /* Only enable hotplug handling once the fbdev is fully set up. */
668 intel_hpd_init(dev_priv);
669
670 drm_kms_helper_poll_init(dev);
671
672 return 0;
673
674cleanup_gem:
675 i915_gem_fini(dev);
676cleanup_irq:
677 intel_guc_fini(dev);
678 drm_irq_uninstall(dev);
679 intel_teardown_gmbus(dev);
680cleanup_csr:
681 intel_csr_ucode_fini(dev_priv);
682 intel_power_domains_fini(dev_priv);
683 vga_switcheroo_unregister_client(dev->pdev);
684cleanup_vga_client:
685 vga_client_register(dev->pdev, NULL, NULL, NULL);
686out:
687 return ret;
688}
689
690#if IS_ENABLED(CONFIG_FB)
691static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
692{
693 struct apertures_struct *ap;
694 struct pci_dev *pdev = dev_priv->drm.pdev;
695 struct i915_ggtt *ggtt = &dev_priv->ggtt;
696 bool primary;
697 int ret;
698
699 ap = alloc_apertures(1);
700 if (!ap)
701 return -ENOMEM;
702
703 ap->ranges[0].base = ggtt->mappable_base;
704 ap->ranges[0].size = ggtt->mappable_end;
705
706 primary =
707 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
708
709 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
710
711 kfree(ap);
712
713 return ret;
714}
715#else
716static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
717{
718 return 0;
719}
720#endif
721
722#if !defined(CONFIG_VGA_CONSOLE)
723static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
724{
725 return 0;
726}
727#elif !defined(CONFIG_DUMMY_CONSOLE)
728static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
729{
730 return -ENODEV;
731}
732#else
733static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
734{
735 int ret = 0;
736
737 DRM_INFO("Replacing VGA console driver\n");
738
739 console_lock();
740 if (con_is_bound(&vga_con))
741 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
742 if (ret == 0) {
743 ret = do_unregister_con_driver(&vga_con);
744
745 /* Ignore "already unregistered". */
746 if (ret == -ENODEV)
747 ret = 0;
748 }
749 console_unlock();
750
751 return ret;
752}
753#endif
754
755static void intel_init_dpio(struct drm_i915_private *dev_priv)
756{
757 /*
758 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
759 * CHV x1 PHY (DP/HDMI D)
760 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
761 */
762 if (IS_CHERRYVIEW(dev_priv)) {
763 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
764 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
765 } else if (IS_VALLEYVIEW(dev_priv)) {
766 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
767 }
768}
769
770static int i915_workqueues_init(struct drm_i915_private *dev_priv)
771{
772 /*
773 * The i915 workqueue is primarily used for batched retirement of
774 * requests (and thus managing bo) once the task has been completed
775 * by the GPU. i915_gem_retire_requests() is called directly when we
776 * need high-priority retirement, such as waiting for an explicit
777 * bo.
778 *
779 * It is also used for periodic low-priority events, such as
780 * idle-timers and recording error state.
781 *
782 * All tasks on the workqueue are expected to acquire the dev mutex
783 * so there is no point in running more than one instance of the
784 * workqueue at any time. Use an ordered one.
785 */
786 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
787 if (dev_priv->wq == NULL)
788 goto out_err;
789
790 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
791 if (dev_priv->hotplug.dp_wq == NULL)
792 goto out_free_wq;
793
794 return 0;
795
796out_free_wq:
797 destroy_workqueue(dev_priv->wq);
798out_err:
799 DRM_ERROR("Failed to allocate workqueues.\n");
800
801 return -ENOMEM;
802}
803
804static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
805{
806 destroy_workqueue(dev_priv->hotplug.dp_wq);
807 destroy_workqueue(dev_priv->wq);
808}
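
Illustrative only: a client of the ordered queue created above would look roughly like this (names hypothetical). Because the workqueue is ordered, the worker body may assume no second instance runs concurrently, which is the property the comment above relies on:

	static struct work_struct example_work;

	static void example_worker(struct work_struct *work)
	{
		/* at most one work item executes at a time on dev_priv->wq */
	}

	/* at the queueing site: */
	INIT_WORK(&example_work, example_worker);
	queue_work(dev_priv->wq, &example_work);
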
809
810/**
811 * i915_driver_init_early - setup state not requiring device access
812 * @dev_priv: device private
813 *
814 * Initialize everything that is a "SW-only" state, that is state not
815 * requiring accessing the device or exposing the driver via kernel internal
816 * or userspace interfaces. Example steps belonging here: lock initialization,
817 * system memory allocation, setting up device specific attributes and
818 * function hooks not requiring accessing the device.
819 */
820static int i915_driver_init_early(struct drm_i915_private *dev_priv,
821 const struct pci_device_id *ent)
822{
823 const struct intel_device_info *match_info =
824 (struct intel_device_info *)ent->driver_data;
825 struct intel_device_info *device_info;
826 int ret = 0;
827
828 if (i915_inject_load_failure())
829 return -ENODEV;
830
831 /* Setup the write-once "constant" device info */
832 device_info = mkwrite_device_info(dev_priv);
833 memcpy(device_info, match_info, sizeof(*device_info));
834 device_info->device_id = dev_priv->drm.pdev->device;
835
836 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
837 device_info->gen_mask = BIT(device_info->gen - 1);
838
839 spin_lock_init(&dev_priv->irq_lock);
840 spin_lock_init(&dev_priv->gpu_error.lock);
841 mutex_init(&dev_priv->backlight_lock);
842 spin_lock_init(&dev_priv->uncore.lock);
843 spin_lock_init(&dev_priv->mm.object_stat_lock);
844 spin_lock_init(&dev_priv->mmio_flip_lock);
845 mutex_init(&dev_priv->sb_lock);
846 mutex_init(&dev_priv->modeset_restore_lock);
847 mutex_init(&dev_priv->av_mutex);
848 mutex_init(&dev_priv->wm.wm_mutex);
849 mutex_init(&dev_priv->pps_mutex);
850
851 ret = i915_workqueues_init(dev_priv);
852 if (ret < 0)
853 return ret;
854
855 ret = intel_gvt_init(dev_priv);
856 if (ret < 0)
857 goto err_workqueues;
858
859 /* This must be called before any calls to HAS_PCH_* */
860 intel_detect_pch(&dev_priv->drm);
861
862 intel_pm_setup(&dev_priv->drm);
863 intel_init_dpio(dev_priv);
864 intel_power_domains_init(dev_priv);
865 intel_irq_init(dev_priv);
866 intel_init_display_hooks(dev_priv);
867 intel_init_clock_gating_hooks(dev_priv);
868 intel_init_audio_hooks(dev_priv);
869 i915_gem_load_init(&dev_priv->drm);
870
871 intel_display_crc_init(&dev_priv->drm);
872
873 intel_device_info_dump(dev_priv);
874
875 /* Not all pre-production machines fall into this category, only the
876 * very first ones. Almost everything should work, except for maybe
877 * suspend/resume. And we don't implement workarounds that affect only
878 * pre-production machines. */
879 if (IS_HSW_EARLY_SDV(dev_priv))
880 DRM_INFO("This is an early pre-production Haswell machine. "
881 "It may not be fully functional.\n");
882
883 return 0;
884
885err_workqueues:
886 i915_workqueues_cleanup(dev_priv);
887 return ret;
888}
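
Storing gen_mask = BIT(gen - 1) lets a generation check compile down to a single AND against a constant instead of an integer comparison. The real IS_GEN*() macros live in i915_drv.h; a hypothetical sketch of the pattern they can now use:

	#define EXAMPLE_IS_GEN9(dev_priv) \
		(!!(INTEL_INFO(dev_priv)->gen_mask & BIT(8)))
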
889
890/**
891 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
892 * @dev_priv: device private
893 */
894static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
895{
896 i915_gem_load_cleanup(&dev_priv->drm);
897 i915_workqueues_cleanup(dev_priv);
898}
899
900static int i915_mmio_setup(struct drm_device *dev)
901{
902 struct drm_i915_private *dev_priv = to_i915(dev);
903 int mmio_bar;
904 int mmio_size;
905
906 mmio_bar = IS_GEN2(dev) ? 1 : 0;
907 /*
908 * Before gen4, the registers and the GTT are behind different BARs.
909 * However, from gen4 onwards, the registers and the GTT are shared
910 * in the same BAR, so we want to restrict this ioremap from
911	 * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
912 * the register BAR remains the same size for all the earlier
913 * generations up to Ironlake.
914 */
915 if (INTEL_INFO(dev)->gen < 5)
916 mmio_size = 512 * 1024;
917 else
918 mmio_size = 2 * 1024 * 1024;
919 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
920 if (dev_priv->regs == NULL) {
921 DRM_ERROR("failed to map registers\n");
922
923 return -EIO;
924 }
925
926 /* Try to make sure MCHBAR is enabled before poking at it */
927 intel_setup_mchbar(dev);
928
929 return 0;
930}
931
932static void i915_mmio_cleanup(struct drm_device *dev)
933{
934 struct drm_i915_private *dev_priv = to_i915(dev);
935
936 intel_teardown_mchbar(dev);
937 pci_iounmap(dev->pdev, dev_priv->regs);
938}
939
940/**
941 * i915_driver_init_mmio - setup device MMIO
942 * @dev_priv: device private
943 *
944 * Setup minimal device state necessary for MMIO accesses later in the
945 * initialization sequence. The setup here should avoid any other device-wide
946 * side effects or exposing the driver via kernel internal or user space
947 * interfaces.
948 */
949static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
950{
951 struct drm_device *dev = &dev_priv->drm;
952 int ret;
953
954 if (i915_inject_load_failure())
955 return -ENODEV;
956
957 if (i915_get_bridge_dev(dev))
958 return -EIO;
959
960 ret = i915_mmio_setup(dev);
961 if (ret < 0)
962 goto put_bridge;
963
964 intel_uncore_init(dev_priv);
965
966 return 0;
967
968put_bridge:
969 pci_dev_put(dev_priv->bridge_dev);
970
971 return ret;
972}
973
974/**
975 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
976 * @dev_priv: device private
977 */
978static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
979{
980 struct drm_device *dev = &dev_priv->drm;
981
982 intel_uncore_fini(dev_priv);
983 i915_mmio_cleanup(dev);
984 pci_dev_put(dev_priv->bridge_dev);
985}
986
987static void intel_sanitize_options(struct drm_i915_private *dev_priv)
988{
989 i915.enable_execlists =
990 intel_sanitize_enable_execlists(dev_priv,
991 i915.enable_execlists);
992
993 /*
994 * i915.enable_ppgtt is read-only, so do an early pass to validate the
995 * user's requested state against the hardware/driver capabilities. We
996 * do this now so that we can print out any log messages once rather
997 * than every time we check intel_enable_ppgtt().
998 */
999 i915.enable_ppgtt =
1000 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
1001 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
1002}
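
The sanitize helpers called above follow a clamp-and-log-once pattern: resolve "auto" (-1) to the hardware default, and refuse an explicit request the hardware cannot honour with a single message rather than warning on every later check. A hypothetical sketch of that shape (not the driver's actual helpers):

	static int example_sanitize_option(int requested, bool hw_supported)
	{
		if (requested < 0)	/* -1 means auto: follow the hardware */
			return hw_supported;

		if (requested && !hw_supported) {
			DRM_INFO("option requested but not supported, disabling\n");
			return 0;
		}

		return !!requested;
	}
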
1003
1004/**
1005 * i915_driver_init_hw - setup state requiring device access
1006 * @dev_priv: device private
1007 *
1008 * Setup state that requires accessing the device, but doesn't require
1009 * exposing the driver via kernel internal or userspace interfaces.
1010 */
1011static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1012{
1013 struct drm_device *dev = &dev_priv->drm;
1014 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1015 uint32_t aperture_size;
1016 int ret;
1017
1018 if (i915_inject_load_failure())
1019 return -ENODEV;
1020
1021 intel_device_info_runtime_init(dev_priv);
1022
1023 intel_sanitize_options(dev_priv);
1024
1025 ret = i915_ggtt_init_hw(dev);
1026 if (ret)
1027 return ret;
1028
1029 ret = i915_ggtt_enable_hw(dev);
1030 if (ret) {
1031 DRM_ERROR("failed to enable GGTT\n");
1032 goto out_ggtt;
1033 }
1034
1035 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1036 * otherwise the vga fbdev driver falls over. */
1037 ret = i915_kick_out_firmware_fb(dev_priv);
1038 if (ret) {
1039 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1040 goto out_ggtt;
1041 }
1042
1043 ret = i915_kick_out_vgacon(dev_priv);
1044 if (ret) {
1045 DRM_ERROR("failed to remove conflicting VGA console\n");
1046 goto out_ggtt;
1047 }
1048
1049 pci_set_master(dev->pdev);
1050
1051 /* overlay on gen2 is broken and can't address above 1G */
1052 if (IS_GEN2(dev)) {
1053 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1054 if (ret) {
1055 DRM_ERROR("failed to set DMA mask\n");
1056
1057 goto out_ggtt;
1058 }
1059 }
1060
1061
1062 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1063 * using 32bit addressing, overwriting memory if HWS is located
1064 * above 4GB.
1065 *
1066 * The documentation also mentions an issue with undefined
1067 * behaviour if any general state is accessed within a page above 4GB,
1068 * which also needs to be handled carefully.
1069 */
1070 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
1071 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1072
1073 if (ret) {
1074 DRM_ERROR("failed to set DMA mask\n");
1075
1076 goto out_ggtt;
1077 }
1078 }
1079
1080 aperture_size = ggtt->mappable_end;
1081
1082 ggtt->mappable =
1083 io_mapping_create_wc(ggtt->mappable_base,
1084 aperture_size);
1085 if (!ggtt->mappable) {
1086 ret = -EIO;
1087 goto out_ggtt;
1088 }
1089
1090 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
1091 aperture_size);
1092
1093 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1094 PM_QOS_DEFAULT_VALUE);
1095
1096 intel_uncore_sanitize(dev_priv);
1097
1098 intel_opregion_setup(dev_priv);
1099
1100 i915_gem_load_init_fences(dev_priv);
1101
1102 /* On the 945G/GM, the chipset reports the MSI capability on the
1103 * integrated graphics even though the support isn't actually there
1104 * according to the published specs. It doesn't appear to function
1105 * correctly in testing on 945G.
1106 * This may be a side effect of MSI having been made available for PEG
1107 * and the registers being closely associated.
1108 *
1109 * According to chipset errata, on the 965GM, MSI interrupts may
1110	 * be lost or delayed, but we use them anyway to avoid
1111 * stuck interrupts on some machines.
1112 */
1113 if (!IS_I945G(dev) && !IS_I945GM(dev)) {
1114 if (pci_enable_msi(dev->pdev) < 0)
1115 DRM_DEBUG_DRIVER("can't enable MSI");
1116 }
1117
1118 return 0;
1119
1120out_ggtt:
1121 i915_ggtt_cleanup_hw(dev);
1122
1123 return ret;
1124}
1125
1126/**
1127 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1128 * @dev_priv: device private
1129 */
1130static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1131{
1132 struct drm_device *dev = &dev_priv->drm;
1133 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1134
1135 if (dev->pdev->msi_enabled)
1136 pci_disable_msi(dev->pdev);
1137
1138 pm_qos_remove_request(&dev_priv->pm_qos);
1139 arch_phys_wc_del(ggtt->mtrr);
1140 io_mapping_free(ggtt->mappable);
1141 i915_ggtt_cleanup_hw(dev);
1142}
1143
1144/**
1145 * i915_driver_register - register the driver with the rest of the system
1146 * @dev_priv: device private
1147 *
1148 * Perform any steps necessary to make the driver available via kernel
1149 * internal or userspace interfaces.
1150 */
1151static void i915_driver_register(struct drm_i915_private *dev_priv)
1152{
1153 struct drm_device *dev = &dev_priv->drm;
1154
1155 i915_gem_shrinker_init(dev_priv);
1156
1157 /*
1158 * Notify a valid surface after modesetting,
1159 * when running inside a VM.
1160 */
1161 if (intel_vgpu_active(dev_priv))
1162 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1163
1164 /* Reveal our presence to userspace */
1165 if (drm_dev_register(dev, 0) == 0) {
1166 i915_debugfs_register(dev_priv);
1167 i915_setup_sysfs(dev);
1168 } else
1169 DRM_ERROR("Failed to register driver for userspace access!\n");
1170
1171 if (INTEL_INFO(dev_priv)->num_pipes) {
1172 /* Must be done after probing outputs */
1173 intel_opregion_register(dev_priv);
1174 acpi_video_register();
1175 }
1176
1177 if (IS_GEN5(dev_priv))
1178 intel_gpu_ips_init(dev_priv);
1179
1180 i915_audio_component_init(dev_priv);
1181
1182 /*
1183 * Some ports require correctly set-up hpd registers for detection to
1184 * work properly (otherwise they report a ghost "connected" status), e.g. VGA
1185 * on gm45. Hence we can only set up the initial fbdev config after hpd
1186 * irqs are fully enabled. We do it last so that the async config
1187 * cannot run before the connectors are registered.
1188 */
1189 intel_fbdev_initial_config_async(dev);
1190}
1191
1192/**
1193 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1194 * @dev_priv: device private
1195 */
1196static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1197{
1198 i915_audio_component_cleanup(dev_priv);
1199
1200 intel_gpu_ips_teardown();
1201 acpi_video_unregister();
1202 intel_opregion_unregister(dev_priv);
1203
1204 i915_teardown_sysfs(&dev_priv->drm);
1205 i915_debugfs_unregister(dev_priv);
1206 drm_dev_unregister(&dev_priv->drm);
1207
1208 i915_gem_shrinker_cleanup(dev_priv);
1209}
1210
1211/**
1212 * i915_driver_load - setup chip and create an initial config
1213 * @pdev: PCI device
1214 * @ent: matching PCI ID entry
1215 *
1216 * The driver load routine has to do several things:
1217 * - drive output discovery via intel_modeset_init()
1218 * - initialize the memory manager
1219 * - allocate initial config memory
1220 * - setup the DRM framebuffer with the allocated memory
1221 */
1222int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1223{
1224 struct drm_i915_private *dev_priv;
1225 int ret;
1226
1227 if (i915.nuclear_pageflip)
1228 driver.driver_features |= DRIVER_ATOMIC;
1229
1230 ret = -ENOMEM;
1231 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1232 if (dev_priv)
1233 ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
1234 if (ret) {
1235 dev_printk(KERN_ERR, &pdev->dev,
1236 "[" DRM_NAME ":%s] allocation failed\n", __func__);
1237 kfree(dev_priv);
1238 return ret;
1239 }
1240
1241 dev_priv->drm.pdev = pdev;
1242 dev_priv->drm.dev_private = dev_priv;
1243
1244 ret = pci_enable_device(pdev);
1245 if (ret)
1246 goto out_free_priv;
1247
1248 pci_set_drvdata(pdev, &dev_priv->drm);
1249
1250 ret = i915_driver_init_early(dev_priv, ent);
1251 if (ret < 0)
1252 goto out_pci_disable;
1253
1254 intel_runtime_pm_get(dev_priv);
1255
1256 ret = i915_driver_init_mmio(dev_priv);
1257 if (ret < 0)
1258 goto out_runtime_pm_put;
1259
1260 ret = i915_driver_init_hw(dev_priv);
1261 if (ret < 0)
1262 goto out_cleanup_mmio;
1263
1264 /*
1265 * TODO: move the vblank init and parts of modeset init steps into one
1266 * of the i915_driver_init_/i915_driver_register functions according
1267 * to the role/effect of the given init step.
1268 */
1269 if (INTEL_INFO(dev_priv)->num_pipes) {
1270 ret = drm_vblank_init(&dev_priv->drm,
1271 INTEL_INFO(dev_priv)->num_pipes);
1272 if (ret)
1273 goto out_cleanup_hw;
1274 }
1275
1276 ret = i915_load_modeset_init(&dev_priv->drm);
1277 if (ret < 0)
1278 goto out_cleanup_vblank;
1279
1280 i915_driver_register(dev_priv);
1281
1282 intel_runtime_pm_enable(dev_priv);
1283
1284 intel_runtime_pm_put(dev_priv);
1285
1286 return 0;
1287
1288out_cleanup_vblank:
1289 drm_vblank_cleanup(&dev_priv->drm);
1290out_cleanup_hw:
1291 i915_driver_cleanup_hw(dev_priv);
1292out_cleanup_mmio:
1293 i915_driver_cleanup_mmio(dev_priv);
1294out_runtime_pm_put:
1295 intel_runtime_pm_put(dev_priv);
1296 i915_driver_cleanup_early(dev_priv);
1297out_pci_disable:
1298 pci_disable_device(pdev);
1299out_free_priv:
1300 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
1301 drm_dev_unref(&dev_priv->drm);
1302 return ret;
1303}
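The load path above depends on struct drm_device now being embedded in struct drm_i915_private (see the i915_drv.h hunk below): the driver allocates the containing structure itself, initialises the embedded device with drm_dev_init(), and later recovers its private data with container_of() instead of chasing dev->dev_private. A minimal sketch of the pattern; demo_private, to_demo() and demo_alloc() are hypothetical illustrations, not i915 code:

	struct demo_private {
		struct drm_device drm;	/* embedded; set up by drm_dev_init() */
		void *driver_state;	/* hypothetical per-device state */
	};

	static inline struct demo_private *to_demo(struct drm_device *dev)
	{
		/* recover the containing structure from the embedded member */
		return container_of(dev, struct demo_private, drm);
	}

	static struct demo_private *demo_alloc(struct pci_dev *pdev,
					       struct drm_driver *drv)
	{
		struct demo_private *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

		if (priv && drm_dev_init(&priv->drm, drv, &pdev->dev)) {
			kfree(priv);	/* drm_dev_init() failed */
			priv = NULL;
		}
		return priv;	/* NULL on allocation or init failure */
	}

The error ladder in i915_driver_load() then unwinds in strict reverse order of the init steps, which is why each out_* label releases exactly what the preceding successful step acquired.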
1304
1305void i915_driver_unload(struct drm_device *dev)
1306{
1307 struct drm_i915_private *dev_priv = to_i915(dev);
1308
1309 intel_fbdev_fini(dev);
1310
1311 if (i915_gem_suspend(dev))
1312 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
1313
1314 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1315
1316 i915_driver_unregister(dev_priv);
1317
1318 drm_vblank_cleanup(dev);
1319
1320 intel_modeset_cleanup(dev);
1321
1322 /*
1323 * free the memory space allocated for the child device
1324 * config parsed from VBT
1325 */
1326 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1327 kfree(dev_priv->vbt.child_dev);
1328 dev_priv->vbt.child_dev = NULL;
1329 dev_priv->vbt.child_dev_num = 0;
1330 }
1331 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1332 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1333 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1334 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1335
1336 vga_switcheroo_unregister_client(dev->pdev);
1337 vga_client_register(dev->pdev, NULL, NULL, NULL);
1338
1339 intel_csr_ucode_fini(dev_priv);
1340
1341 /* Free error state after interrupts are fully disabled. */
1342 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1343 i915_destroy_error_state(dev);
1344
1345 /* Flush any outstanding unpin_work. */
1346 flush_workqueue(dev_priv->wq);
1347
1348 intel_guc_fini(dev);
1349 i915_gem_fini(dev);
1350 intel_fbc_cleanup_cfb(dev_priv);
1351
1352 intel_power_domains_fini(dev_priv);
1353
1354 i915_driver_cleanup_hw(dev_priv);
1355 i915_driver_cleanup_mmio(dev_priv);
1356
1357 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1358
1359 i915_driver_cleanup_early(dev_priv);
1360}
1361
1362static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1363{
1364 int ret;
1365
1366 ret = i915_gem_open(dev, file);
1367 if (ret)
1368 return ret;
1369
1370 return 0;
1371}
1372
1373/**
1374 * i915_driver_lastclose - clean up after all DRM clients have exited
1375 * @dev: DRM device
1376 *
1377 * Take care of cleaning up after all DRM clients have exited. In the
1378 * mode setting case, we want to restore the kernel's initial mode (just
1379 * in case the last client left us in a bad state).
1380 *
1381 * Additionally, in the non-mode setting case, we'll tear down the GTT
1382 * and DMA structures, since the kernel won't be using them, and clean
1383 * up any GEM state.
1384 */
1385static void i915_driver_lastclose(struct drm_device *dev)
1386{
1387 intel_fbdev_restore_mode(dev);
1388 vga_switcheroo_process_delayed_switch();
1389}
1390
1391static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1392{
1393 mutex_lock(&dev->struct_mutex);
1394 i915_gem_context_close(dev, file);
1395 i915_gem_release(dev, file);
1396 mutex_unlock(&dev->struct_mutex);
1397}
1398
1399static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1400{
1401 struct drm_i915_file_private *file_priv = file->driver_priv;
1402
1403 kfree(file_priv);
1404}
1405
559static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 1406static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
560{ 1407{
561 struct drm_device *dev = dev_priv->dev; 1408 struct drm_device *dev = &dev_priv->drm;
562 struct intel_encoder *encoder; 1409 struct intel_encoder *encoder;
563 1410
564 drm_modeset_lock_all(dev); 1411 drm_modeset_lock_all(dev);
@@ -583,7 +1430,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
583 1430
584static int i915_drm_suspend(struct drm_device *dev) 1431static int i915_drm_suspend(struct drm_device *dev)
585{ 1432{
586 struct drm_i915_private *dev_priv = dev->dev_private; 1433 struct drm_i915_private *dev_priv = to_i915(dev);
587 pci_power_t opregion_target_state; 1434 pci_power_t opregion_target_state;
588 int error; 1435 int error;
589 1436
@@ -650,7 +1497,7 @@ out:
650 1497
651static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) 1498static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
652{ 1499{
653 struct drm_i915_private *dev_priv = drm_dev->dev_private; 1500 struct drm_i915_private *dev_priv = to_i915(drm_dev);
654 bool fw_csr; 1501 bool fw_csr;
655 int ret; 1502 int ret;
656 1503
@@ -712,7 +1559,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
712{ 1559{
713 int error; 1560 int error;
714 1561
715 if (!dev || !dev->dev_private) { 1562 if (!dev) {
716 DRM_ERROR("dev: %p\n", dev); 1563 DRM_ERROR("dev: %p\n", dev);
717 DRM_ERROR("DRM not initialized, aborting suspend.\n"); 1564 DRM_ERROR("DRM not initialized, aborting suspend.\n");
718 return -ENODEV; 1565 return -ENODEV;
@@ -734,7 +1581,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
734 1581
735static int i915_drm_resume(struct drm_device *dev) 1582static int i915_drm_resume(struct drm_device *dev)
736{ 1583{
737 struct drm_i915_private *dev_priv = dev->dev_private; 1584 struct drm_i915_private *dev_priv = to_i915(dev);
738 int ret; 1585 int ret;
739 1586
740 disable_rpm_wakeref_asserts(dev_priv); 1587 disable_rpm_wakeref_asserts(dev_priv);
@@ -768,7 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
768 mutex_lock(&dev->struct_mutex); 1615 mutex_lock(&dev->struct_mutex);
769 if (i915_gem_init_hw(dev)) { 1616 if (i915_gem_init_hw(dev)) {
770 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); 1617 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
771 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 1618 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
772 } 1619 }
773 mutex_unlock(&dev->struct_mutex); 1620 mutex_unlock(&dev->struct_mutex);
774 1621
@@ -814,7 +1661,7 @@ static int i915_drm_resume(struct drm_device *dev)
814 1661
815static int i915_drm_resume_early(struct drm_device *dev) 1662static int i915_drm_resume_early(struct drm_device *dev)
816{ 1663{
817 struct drm_i915_private *dev_priv = dev->dev_private; 1664 struct drm_i915_private *dev_priv = to_i915(dev);
818 int ret; 1665 int ret;
819 1666
820 /* 1667 /*
@@ -926,7 +1773,7 @@ int i915_resume_switcheroo(struct drm_device *dev)
926 */ 1773 */
927int i915_reset(struct drm_i915_private *dev_priv) 1774int i915_reset(struct drm_i915_private *dev_priv)
928{ 1775{
929 struct drm_device *dev = dev_priv->dev; 1776 struct drm_device *dev = &dev_priv->drm;
930 struct i915_gpu_error *error = &dev_priv->gpu_error; 1777 struct i915_gpu_error *error = &dev_priv->gpu_error;
931 unsigned reset_counter; 1778 unsigned reset_counter;
932 int ret; 1779 int ret;
@@ -945,24 +1792,11 @@ int i915_reset(struct drm_i915_private *dev_priv)
945 goto error; 1792 goto error;
946 } 1793 }
947 1794
1795 pr_notice("drm/i915: Resetting chip after gpu hang\n");
1796
948 i915_gem_reset(dev); 1797 i915_gem_reset(dev);
949 1798
950 ret = intel_gpu_reset(dev_priv, ALL_ENGINES); 1799 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
951
952 /* Also reset the gpu hangman. */
953 if (error->stop_rings != 0) {
954 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
955 error->stop_rings = 0;
956 if (ret == -ENODEV) {
957 DRM_INFO("Reset not implemented, but ignoring "
958 "error for simulated gpu hangs\n");
959 ret = 0;
960 }
961 }
962
963 if (i915_stop_ring_allow_warn(dev_priv))
964 pr_notice("drm/i915: Resetting chip after gpu hang\n");
965
966 if (ret) { 1800 if (ret) {
967 if (ret != -ENODEV) 1801 if (ret != -ENODEV)
968 DRM_ERROR("Failed to reset chip: %i\n", ret); 1802 DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1012,45 +1846,12 @@ error:
1012 return ret; 1846 return ret;
1013} 1847}
1014 1848
1015static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1016{
1017 struct intel_device_info *intel_info =
1018 (struct intel_device_info *) ent->driver_data;
1019
1020 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
1021 DRM_INFO("This hardware requires preliminary hardware support.\n"
1022 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
1023 return -ENODEV;
1024 }
1025
1026 /* Only bind to function 0 of the device. Early generations
1027 * used function 1 as a placeholder for multi-head. This causes
1028 * us confusion instead, especially on the systems where both
1029 * functions have the same PCI-ID!
1030 */
1031 if (PCI_FUNC(pdev->devfn))
1032 return -ENODEV;
1033
1034 if (vga_switcheroo_client_probe_defer(pdev))
1035 return -EPROBE_DEFER;
1036
1037 return drm_get_pci_dev(pdev, ent, &driver);
1038}
1039
1040static void
1041i915_pci_remove(struct pci_dev *pdev)
1042{
1043 struct drm_device *dev = pci_get_drvdata(pdev);
1044
1045 drm_put_dev(dev);
1046}
1047
1048static int i915_pm_suspend(struct device *dev) 1849static int i915_pm_suspend(struct device *dev)
1049{ 1850{
1050 struct pci_dev *pdev = to_pci_dev(dev); 1851 struct pci_dev *pdev = to_pci_dev(dev);
1051 struct drm_device *drm_dev = pci_get_drvdata(pdev); 1852 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1052 1853
1053 if (!drm_dev || !drm_dev->dev_private) { 1854 if (!drm_dev) {
1054 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 1855 dev_err(dev, "DRM not initialized, aborting suspend.\n");
1055 return -ENODEV; 1856 return -ENODEV;
1056 } 1857 }
@@ -1063,7 +1864,7 @@ static int i915_pm_suspend(struct device *dev)
1063 1864
1064static int i915_pm_suspend_late(struct device *dev) 1865static int i915_pm_suspend_late(struct device *dev)
1065{ 1866{
1066 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1867 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1067 1868
1068 /* 1869 /*
1069 * We have a suspend ordering issue with the snd-hda driver also 1870 * We have a suspend ordering issue with the snd-hda driver also
@@ -1082,7 +1883,7 @@ static int i915_pm_suspend_late(struct device *dev)
1082 1883
1083static int i915_pm_poweroff_late(struct device *dev) 1884static int i915_pm_poweroff_late(struct device *dev)
1084{ 1885{
1085 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1886 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1086 1887
1087 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1888 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1088 return 0; 1889 return 0;
@@ -1092,7 +1893,7 @@ static int i915_pm_poweroff_late(struct device *dev)
1092 1893
1093static int i915_pm_resume_early(struct device *dev) 1894static int i915_pm_resume_early(struct device *dev)
1094{ 1895{
1095 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1896 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1096 1897
1097 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1898 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1098 return 0; 1899 return 0;
@@ -1102,7 +1903,7 @@ static int i915_pm_resume_early(struct device *dev)
1102 1903
1103static int i915_pm_resume(struct device *dev) 1904static int i915_pm_resume(struct device *dev)
1104{ 1905{
1105 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1906 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1106 1907
1107 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1908 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1108 return 0; 1909 return 0;
@@ -1352,8 +2153,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1352 u32 val; 2153 u32 val;
1353 int err; 2154 int err;
1354 2155
1355#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1356
1357 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2156 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1358 val &= ~VLV_GFX_CLK_FORCE_ON_BIT; 2157 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1359 if (force_on) 2158 if (force_on)
@@ -1363,13 +2162,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1363 if (!force_on) 2162 if (!force_on)
1364 return 0; 2163 return 0;
1365 2164
1366 err = wait_for(COND, 20); 2165 err = intel_wait_for_register(dev_priv,
2166 VLV_GTLC_SURVIVABILITY_REG,
2167 VLV_GFX_CLK_STATUS_BIT,
2168 VLV_GFX_CLK_STATUS_BIT,
2169 20);
1367 if (err) 2170 if (err)
1368 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", 2171 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1369 I915_READ(VLV_GTLC_SURVIVABILITY_REG)); 2172 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1370 2173
1371 return err; 2174 return err;
1372#undef COND
1373} 2175}
1374 2176
1375static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) 2177static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
@@ -1384,13 +2186,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1384 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2186 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1385 POSTING_READ(VLV_GTLC_WAKE_CTRL); 2187 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1386 2188
1387#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ 2189 err = intel_wait_for_register(dev_priv,
1388 allow) 2190 VLV_GTLC_PW_STATUS,
1389 err = wait_for(COND, 1); 2191 VLV_GTLC_ALLOWWAKEACK,
2192 allow,
2193 1);
1390 if (err) 2194 if (err)
1391 DRM_ERROR("timeout disabling GT waking\n"); 2195 DRM_ERROR("timeout disabling GT waking\n");
2196
1392 return err; 2197 return err;
1393#undef COND
1394} 2198}
1395 2199
1396static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, 2200static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
@@ -1402,8 +2206,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1402 2206
1403 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; 2207 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1404 val = wait_for_on ? mask : 0; 2208 val = wait_for_on ? mask : 0;
1405#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) 2209 if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1406 if (COND)
1407 return 0; 2210 return 0;
1408 2211
1409 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", 2212 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
@@ -1414,13 +2217,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1414 * RC6 transitioning can be delayed up to 2 msec (see 2217 * RC6 transitioning can be delayed up to 2 msec (see
1415 * valleyview_enable_rps), use 3 msec for safety. 2218 * valleyview_enable_rps), use 3 msec for safety.
1416 */ 2219 */
1417 err = wait_for(COND, 3); 2220 err = intel_wait_for_register(dev_priv,
2221 VLV_GTLC_PW_STATUS, mask, val,
2222 3);
1418 if (err) 2223 if (err)
1419 DRM_ERROR("timeout waiting for GT wells to go %s\n", 2224 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1420 onoff(wait_for_on)); 2225 onoff(wait_for_on));
1421 2226
1422 return err; 2227 return err;
1423#undef COND
1424} 2228}
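The three hunks above drop the ad-hoc per-function COND macros in favour of the intel_wait_for_register() helper declared in the i915_drv.h hunk below. Judging by the call sites, the helper polls until the masked register value matches the expected value or the millisecond timeout expires; a rough model of those semantics, built from the same wait_for() macro the old code used (a sketch, not the real implementation):

	static int demo_wait_for_register(struct drm_i915_private *dev_priv,
					  i915_reg_t reg, u32 mask, u32 value,
					  unsigned long timeout_ms)
	{
		/* poll until (read & mask) == value, giving up after timeout_ms */
		return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
	}

Centralising the wait keeps timeout and error handling uniform across call sites instead of each one re-deriving its own COND macro.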
1425 2229
1426static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) 2230static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -1477,7 +2281,7 @@ err1:
1477static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 2281static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1478 bool rpm_resume) 2282 bool rpm_resume)
1479{ 2283{
1480 struct drm_device *dev = dev_priv->dev; 2284 struct drm_device *dev = &dev_priv->drm;
1481 int err; 2285 int err;
1482 int ret; 2286 int ret;
1483 2287
@@ -1513,7 +2317,7 @@ static int intel_runtime_suspend(struct device *device)
1513{ 2317{
1514 struct pci_dev *pdev = to_pci_dev(device); 2318 struct pci_dev *pdev = to_pci_dev(device);
1515 struct drm_device *dev = pci_get_drvdata(pdev); 2319 struct drm_device *dev = pci_get_drvdata(pdev);
1516 struct drm_i915_private *dev_priv = dev->dev_private; 2320 struct drm_i915_private *dev_priv = to_i915(dev);
1517 int ret; 2321 int ret;
1518 2322
1519 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) 2323 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
@@ -1551,11 +2355,8 @@ static int intel_runtime_suspend(struct device *device)
1551 i915_gem_release_all_mmaps(dev_priv); 2355 i915_gem_release_all_mmaps(dev_priv);
1552 mutex_unlock(&dev->struct_mutex); 2356 mutex_unlock(&dev->struct_mutex);
1553 2357
1554 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1555
1556 intel_guc_suspend(dev); 2358 intel_guc_suspend(dev);
1557 2359
1558 intel_suspend_gt_powersave(dev_priv);
1559 intel_runtime_pm_disable_interrupts(dev_priv); 2360 intel_runtime_pm_disable_interrupts(dev_priv);
1560 2361
1561 ret = 0; 2362 ret = 0;
@@ -1620,7 +2421,7 @@ static int intel_runtime_resume(struct device *device)
1620{ 2421{
1621 struct pci_dev *pdev = to_pci_dev(device); 2422 struct pci_dev *pdev = to_pci_dev(device);
1622 struct drm_device *dev = pci_get_drvdata(pdev); 2423 struct drm_device *dev = pci_get_drvdata(pdev);
1623 struct drm_i915_private *dev_priv = dev->dev_private; 2424 struct drm_i915_private *dev_priv = to_i915(dev);
1624 int ret = 0; 2425 int ret = 0;
1625 2426
1626 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 2427 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1670,8 +2471,6 @@ static int intel_runtime_resume(struct device *device)
1670 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2471 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1671 intel_hpd_init(dev_priv); 2472 intel_hpd_init(dev_priv);
1672 2473
1673 intel_enable_gt_powersave(dev_priv);
1674
1675 enable_rpm_wakeref_asserts(dev_priv); 2474 enable_rpm_wakeref_asserts(dev_priv);
1676 2475
1677 if (ret) 2476 if (ret)
@@ -1682,7 +2481,7 @@ static int intel_runtime_resume(struct device *device)
1682 return ret; 2481 return ret;
1683} 2482}
1684 2483
1685static const struct dev_pm_ops i915_pm_ops = { 2484const struct dev_pm_ops i915_pm_ops = {
1686 /* 2485 /*
1687 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 2486 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1688 * PMSG_RESUME] 2487 * PMSG_RESUME]
@@ -1741,6 +2540,68 @@ static const struct file_operations i915_driver_fops = {
1741 .llseek = noop_llseek, 2540 .llseek = noop_llseek,
1742}; 2541};
1743 2542
2543static int
2544i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
2545 struct drm_file *file)
2546{
2547 return -ENODEV;
2548}
2549
2550static const struct drm_ioctl_desc i915_ioctls[] = {
2551 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2552 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2553 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2554 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2555 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2556 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
2557 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
2558 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2559 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2560 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2561 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2562 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2563 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2564 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2565 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
2566 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2567 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2568 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2569 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
2570 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
2571 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2572 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2573 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2574 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
2575 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
2576 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2577 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2578 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2579 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
2580 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
2581 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
2582 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
2583 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
2584 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
2585 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
2586 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
2587 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
2588 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
2589 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
2590 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
2591 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2592 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2593 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
2594 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
2595 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2596 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
2597 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
2598 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
2599 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
2600 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
2601 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
2602 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
2603};
2604
1744static struct drm_driver driver = { 2605static struct drm_driver driver = {
1745 /* Don't use MTRRs here; the Xserver or userspace app should 2606 /* Don't use MTRRs here; the Xserver or userspace app should
1746 * deal with them for Intel hardware. 2607 * deal with them for Intel hardware.
@@ -1748,18 +2609,12 @@ static struct drm_driver driver = {
1748 .driver_features = 2609 .driver_features =
1749 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | 2610 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1750 DRIVER_RENDER | DRIVER_MODESET, 2611 DRIVER_RENDER | DRIVER_MODESET,
1751 .load = i915_driver_load,
1752 .unload = i915_driver_unload,
1753 .open = i915_driver_open, 2612 .open = i915_driver_open,
1754 .lastclose = i915_driver_lastclose, 2613 .lastclose = i915_driver_lastclose,
1755 .preclose = i915_driver_preclose, 2614 .preclose = i915_driver_preclose,
1756 .postclose = i915_driver_postclose, 2615 .postclose = i915_driver_postclose,
1757 .set_busid = drm_pci_set_busid, 2616 .set_busid = drm_pci_set_busid,
1758 2617
1759#if defined(CONFIG_DEBUG_FS)
1760 .debugfs_init = i915_debugfs_init,
1761 .debugfs_cleanup = i915_debugfs_cleanup,
1762#endif
1763 .gem_free_object = i915_gem_free_object, 2618 .gem_free_object = i915_gem_free_object,
1764 .gem_vm_ops = &i915_gem_vm_ops, 2619 .gem_vm_ops = &i915_gem_vm_ops,
1765 2620
@@ -1772,6 +2627,7 @@ static struct drm_driver driver = {
1772 .dumb_map_offset = i915_gem_mmap_gtt, 2627 .dumb_map_offset = i915_gem_mmap_gtt,
1773 .dumb_destroy = drm_gem_dumb_destroy, 2628 .dumb_destroy = drm_gem_dumb_destroy,
1774 .ioctls = i915_ioctls, 2629 .ioctls = i915_ioctls,
2630 .num_ioctls = ARRAY_SIZE(i915_ioctls),
1775 .fops = &i915_driver_fops, 2631 .fops = &i915_driver_fops,
1776 .name = DRIVER_NAME, 2632 .name = DRIVER_NAME,
1777 .desc = DRIVER_DESC, 2633 .desc = DRIVER_DESC,
@@ -1780,56 +2636,3 @@ static struct drm_driver driver = {
1780 .minor = DRIVER_MINOR, 2636 .minor = DRIVER_MINOR,
1781 .patchlevel = DRIVER_PATCHLEVEL, 2637 .patchlevel = DRIVER_PATCHLEVEL,
1782}; 2638};
1783
1784static struct pci_driver i915_pci_driver = {
1785 .name = DRIVER_NAME,
1786 .id_table = pciidlist,
1787 .probe = i915_pci_probe,
1788 .remove = i915_pci_remove,
1789 .driver.pm = &i915_pm_ops,
1790};
1791
1792static int __init i915_init(void)
1793{
1794 driver.num_ioctls = i915_max_ioctl;
1795
1796 /*
1797 * Enable KMS by default, unless explicitly overridden by
1798 * either the i915.modeset parameter or by the
1799 * vga_text_mode_force boot option.
1800 */
1801
1802 if (i915.modeset == 0)
1803 driver.driver_features &= ~DRIVER_MODESET;
1804
1805 if (vgacon_text_force() && i915.modeset == -1)
1806 driver.driver_features &= ~DRIVER_MODESET;
1807
1808 if (!(driver.driver_features & DRIVER_MODESET)) {
1809 /* Silently fail loading to not upset userspace. */
1810 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1811 return 0;
1812 }
1813
1814 if (i915.nuclear_pageflip)
1815 driver.driver_features |= DRIVER_ATOMIC;
1816
1817 return drm_pci_init(&driver, &i915_pci_driver);
1818}
1819
1820static void __exit i915_exit(void)
1821{
1822 if (!(driver.driver_features & DRIVER_MODESET))
1823 return; /* Never loaded a driver. */
1824
1825 drm_pci_exit(&driver, &i915_pci_driver);
1826}
1827
1828module_init(i915_init);
1829module_exit(i915_exit);
1830
1831MODULE_AUTHOR("Tungsten Graphics, Inc.");
1832MODULE_AUTHOR("Intel Corporation");
1833
1834MODULE_DESCRIPTION(DRIVER_DESC);
1835MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 24a86c64d22e..03e1bfaa5a41 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -69,7 +69,7 @@
69 69
70#define DRIVER_NAME "i915" 70#define DRIVER_NAME "i915"
71#define DRIVER_DESC "Intel Graphics" 71#define DRIVER_DESC "Intel Graphics"
72#define DRIVER_DATE "20160620" 72#define DRIVER_DATE "20160711"
73 73
74#undef WARN_ON 74#undef WARN_ON
75/* Many gcc seem to not see through this and fall over :( */ 75/* Many gcc seem to not see through this and fall over :( */
@@ -320,15 +320,16 @@ struct i915_hotplug {
320 for_each_if ((__ports_mask) & (1 << (__port))) 320 for_each_if ((__ports_mask) & (1 << (__port)))
321 321
322#define for_each_crtc(dev, crtc) \ 322#define for_each_crtc(dev, crtc) \
323 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 323 list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
324 324
325#define for_each_intel_plane(dev, intel_plane) \ 325#define for_each_intel_plane(dev, intel_plane) \
326 list_for_each_entry(intel_plane, \ 326 list_for_each_entry(intel_plane, \
327 &dev->mode_config.plane_list, \ 327 &(dev)->mode_config.plane_list, \
328 base.head) 328 base.head)
329 329
330#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \ 330#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
331 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \ 331 list_for_each_entry(intel_plane, \
332 &(dev)->mode_config.plane_list, \
332 base.head) \ 333 base.head) \
333 for_each_if ((plane_mask) & \ 334 for_each_if ((plane_mask) & \
334 (1 << drm_plane_index(&intel_plane->base))) 335 (1 << drm_plane_index(&intel_plane->base)))
@@ -339,11 +340,15 @@ struct i915_hotplug {
339 base.head) \ 340 base.head) \
340 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) 341 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
341 342
342#define for_each_intel_crtc(dev, intel_crtc) \ 343#define for_each_intel_crtc(dev, intel_crtc) \
343 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 344 list_for_each_entry(intel_crtc, \
345 &(dev)->mode_config.crtc_list, \
346 base.head)
344 347
345#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \ 348#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
346 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \ 349 list_for_each_entry(intel_crtc, \
350 &(dev)->mode_config.crtc_list, \
351 base.head) \
347 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base))) 352 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
348 353
349#define for_each_intel_encoder(dev, intel_encoder) \ 354#define for_each_intel_encoder(dev, intel_encoder) \
@@ -353,7 +358,7 @@ struct i915_hotplug {
353 358
354#define for_each_intel_connector(dev, intel_connector) \ 359#define for_each_intel_connector(dev, intel_connector) \
355 list_for_each_entry(intel_connector, \ 360 list_for_each_entry(intel_connector, \
356 &dev->mode_config.connector_list, \ 361 &(dev)->mode_config.connector_list, \
357 base.head) 362 base.head)
358 363
359#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 364#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
@@ -475,6 +480,7 @@ struct drm_i915_error_state {
475 struct timeval time; 480 struct timeval time;
476 481
477 char error_msg[128]; 482 char error_msg[128];
483 bool simulated;
478 int iommu; 484 int iommu;
479 u32 reset_count; 485 u32 reset_count;
480 u32 suspend_count; 486 u32 suspend_count;
@@ -506,6 +512,7 @@ struct drm_i915_error_state {
506 bool valid; 512 bool valid;
507 /* Software tracked state */ 513 /* Software tracked state */
508 bool waiting; 514 bool waiting;
515 int num_waiters;
509 int hangcheck_score; 516 int hangcheck_score;
510 enum intel_ring_hangcheck_action hangcheck_action; 517 enum intel_ring_hangcheck_action hangcheck_action;
511 int num_requests; 518 int num_requests;
@@ -551,6 +558,12 @@ struct drm_i915_error_state {
551 u32 tail; 558 u32 tail;
552 } *requests; 559 } *requests;
553 560
561 struct drm_i915_error_waiter {
562 char comm[TASK_COMM_LEN];
563 pid_t pid;
564 u32 seqno;
565 } *waiters;
566
554 struct { 567 struct {
555 u32 gfx_mode; 568 u32 gfx_mode;
556 union { 569 union {
@@ -868,9 +881,12 @@ struct i915_gem_context {
868 881
869 /* Unique identifier for this context, used by the hw for tracking */ 882 /* Unique identifier for this context, used by the hw for tracking */
870 unsigned long flags; 883 unsigned long flags;
884#define CONTEXT_NO_ZEROMAP BIT(0)
885#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
871 unsigned hw_id; 886 unsigned hw_id;
872 u32 user_handle; 887 u32 user_handle;
873#define CONTEXT_NO_ZEROMAP (1<<0) 888
889 u32 ggtt_alignment;
874 890
875 struct intel_context { 891 struct intel_context {
876 struct drm_i915_gem_object *state; 892 struct drm_i915_gem_object *state;
@@ -1011,6 +1027,7 @@ enum intel_pch {
1011 PCH_CPT, /* Cougarpoint PCH */ 1027 PCH_CPT, /* Cougarpoint PCH */
1012 PCH_LPT, /* Lynxpoint PCH */ 1028 PCH_LPT, /* Lynxpoint PCH */
1013 PCH_SPT, /* Sunrisepoint PCH */ 1029 PCH_SPT, /* Sunrisepoint PCH */
1030 PCH_KBP, /* Kaby Lake Point PCH */
1014 PCH_NOP, 1031 PCH_NOP,
1015}; 1032};
1016 1033
@@ -1305,37 +1322,11 @@ struct i915_gem_mm {
1305 struct list_head fence_list; 1322 struct list_head fence_list;
1306 1323
1307 /** 1324 /**
1308 * We leave the user IRQ off as much as possible,
1309 * but this means that requests will finish and never
1310 * be retired once the system goes idle. Set a timer to
1311 * fire periodically while the ring is running. When it
1312 * fires, go retire requests.
1313 */
1314 struct delayed_work retire_work;
1315
1316 /**
1317 * When we detect an idle GPU, we want to turn on
1318 * powersaving features. So once we see that there
1319 * are no more requests outstanding and no more
1320 * arrive within a small period of time, we fire
1321 * off the idle_work.
1322 */
1323 struct delayed_work idle_work;
1324
1325 /**
1326 * Are we in a non-interruptible section of code like 1325 * Are we in a non-interruptible section of code like
1327 * modesetting? 1326 * modesetting?
1328 */ 1327 */
1329 bool interruptible; 1328 bool interruptible;
1330 1329
1331 /**
1332 * Is the GPU currently considered idle, or busy executing userspace
1333 * requests? Whilst idle, we attempt to power down the hardware and
1334 * display clocks. In order to reduce the effect on performance, there
1335 * is a slight delay before we do so.
1336 */
1337 bool busy;
1338
1339 /* the indicator for dispatch video commands on two BSD rings */ 1330 /* the indicator for dispatch video commands on two BSD rings */
1340 unsigned int bsd_ring_dispatch_index; 1331 unsigned int bsd_ring_dispatch_index;
1341 1332
@@ -1372,7 +1363,6 @@ struct i915_gpu_error {
1372 /* Hang gpu twice in this window and your context gets banned */ 1363 /* Hang gpu twice in this window and your context gets banned */
1373#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) 1364#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1374 1365
1375 struct workqueue_struct *hangcheck_wq;
1376 struct delayed_work hangcheck_work; 1366 struct delayed_work hangcheck_work;
1377 1367
1378 /* For reset and error_state handling. */ 1368 /* For reset and error_state handling. */
@@ -1409,20 +1399,19 @@ struct i915_gpu_error {
1409#define I915_WEDGED (1 << 31) 1399#define I915_WEDGED (1 << 31)
1410 1400
1411 /** 1401 /**
1402 * Waitqueue to signal when a hang is detected. Used by waiters
1403 * to release the struct_mutex for the reset to proceed.
1404 */
1405 wait_queue_head_t wait_queue;
1406
1407 /**
1412 * Waitqueue to signal when the reset has completed. Used by clients 1408 * Waitqueue to signal when the reset has completed. Used by clients
1413 * that wait for dev_priv->mm.wedged to settle. 1409 * that wait for dev_priv->mm.wedged to settle.
1414 */ 1410 */
1415 wait_queue_head_t reset_queue; 1411 wait_queue_head_t reset_queue;
1416 1412
1417 /* Userspace knobs for gpu hang simulation;
1418 * combines both a ring mask, and extra flags
1419 */
1420 u32 stop_rings;
1421#define I915_STOP_RING_ALLOW_BAN (1 << 31)
1422#define I915_STOP_RING_ALLOW_WARN (1 << 30)
1423
1424 /* For missed irq/seqno simulation. */ 1413 /* For missed irq/seqno simulation. */
1425 unsigned int test_irq_rings; 1414 unsigned long test_irq_rings;
1426}; 1415};
1427 1416
1428enum modeset_restore { 1417enum modeset_restore {
@@ -1733,7 +1722,8 @@ struct intel_wm_config {
1733}; 1722};
1734 1723
1735struct drm_i915_private { 1724struct drm_i915_private {
1736 struct drm_device *dev; 1725 struct drm_device drm;
1726
1737 struct kmem_cache *objects; 1727 struct kmem_cache *objects;
1738 struct kmem_cache *vmas; 1728 struct kmem_cache *vmas;
1739 struct kmem_cache *requests; 1729 struct kmem_cache *requests;
@@ -2029,6 +2019,34 @@ struct drm_i915_private {
2029 int (*init_engines)(struct drm_device *dev); 2019 int (*init_engines)(struct drm_device *dev);
2030 void (*cleanup_engine)(struct intel_engine_cs *engine); 2020 void (*cleanup_engine)(struct intel_engine_cs *engine);
2031 void (*stop_engine)(struct intel_engine_cs *engine); 2021 void (*stop_engine)(struct intel_engine_cs *engine);
2022
2023 /**
2024 * Is the GPU currently considered idle, or busy executing
2025 * userspace requests? Whilst idle, we allow runtime power
2026 * management to power down the hardware and display clocks.
2027 * In order to reduce the effect on performance, there
2028 * is a slight delay before we do so.
2029 */
2030 unsigned int active_engines;
2031 bool awake;
2032
2033 /**
2034 * We leave the user IRQ off as much as possible,
2035 * but this means that requests will finish and never
2036 * be retired once the system goes idle. Set a timer to
2037 * fire periodically while the ring is running. When it
2038 * fires, go retire requests.
2039 */
2040 struct delayed_work retire_work;
2041
2042 /**
2043 * When we detect an idle GPU, we want to turn on
2044 * powersaving features. So once we see that there
2045 * are no more requests outstanding and no more
2046 * arrive within a small period of time, we fire
2047 * off the idle_work.
2048 */
2049 struct delayed_work idle_work;
2032 } gt; 2050 } gt;
2033 2051
2034 /* perform PHY state sanity checks? */ 2052 /* perform PHY state sanity checks? */
@@ -2044,7 +2062,7 @@ struct drm_i915_private {
2044 2062
2045static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2063static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2046{ 2064{
2047 return dev->dev_private; 2065 return container_of(dev, struct drm_i915_private, drm);
2048} 2066}
2049 2067
2050static inline struct drm_i915_private *dev_to_i915(struct device *dev) 2068static inline struct drm_i915_private *dev_to_i915(struct device *dev)
@@ -2215,6 +2233,7 @@ struct drm_i915_gem_object {
2215 2233
2216 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 2234 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
2217 2235
2236 unsigned int has_wc_mmap;
2218 unsigned int pin_display; 2237 unsigned int pin_display;
2219 2238
2220 struct sg_table *pages; 2239 struct sg_table *pages;
@@ -2267,6 +2286,12 @@ struct drm_i915_gem_object {
2267}; 2286};
2268#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2287#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
2269 2288
2289static inline bool
2290i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
2291{
2292 return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
2293}
2294
2270/* 2295/*
2271 * Optimised SGL iterator for GEM objects 2296 * Optimised SGL iterator for GEM objects
2272 */ 2297 */
@@ -2357,7 +2382,7 @@ struct drm_i915_gem_request {
2357 /** On which ring this request was generated */ 2382
2358 struct drm_i915_private *i915; 2383 struct drm_i915_private *i915;
2359 struct intel_engine_cs *engine; 2384 struct intel_engine_cs *engine;
2360 unsigned reset_counter; 2385 struct intel_signal_node signaling;
2361 2386
2362 /** GEM sequence number associated with the previous request, 2387 /** GEM sequence number associated with the previous request,
2363 * when the HWS breadcrumb is equal to this the GPU is processing 2388 * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2613,7 +2638,7 @@ struct drm_i915_cmd_table {
2613#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2638#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2614 2639
2615#define REVID_FOREVER 0xff 2640#define REVID_FOREVER 0xff
2616#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) 2641#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
2617 2642
2618#define GEN_FOREVER (0) 2643#define GEN_FOREVER (0)
2619/* 2644/*
@@ -2743,29 +2768,34 @@ struct drm_i915_cmd_table {
2743 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2768 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2744 * chips, etc.). 2769 * chips, etc.).
2745 */ 2770 */
2746#define IS_GEN2(dev) (INTEL_INFO(dev)->gen_mask & BIT(1)) 2771#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
2747#define IS_GEN3(dev) (INTEL_INFO(dev)->gen_mask & BIT(2)) 2772#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
2748#define IS_GEN4(dev) (INTEL_INFO(dev)->gen_mask & BIT(3)) 2773#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
2749#define IS_GEN5(dev) (INTEL_INFO(dev)->gen_mask & BIT(4)) 2774#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
2750#define IS_GEN6(dev) (INTEL_INFO(dev)->gen_mask & BIT(5)) 2775#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
2751#define IS_GEN7(dev) (INTEL_INFO(dev)->gen_mask & BIT(6)) 2776#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
2752#define IS_GEN8(dev) (INTEL_INFO(dev)->gen_mask & BIT(7)) 2777#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
2753#define IS_GEN9(dev) (INTEL_INFO(dev)->gen_mask & BIT(8)) 2778#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
2754 2779
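The added !! is not cosmetic: gen_mask & BIT(8) evaluates to 0x100, not 1, so storing the raw result in a one-bit field (or comparing it against another 0/1 predicate) silently truncates or mismatches. A hypothetical illustration; demo_flags and demo_set() are not i915 code:

	struct demo_flags {
		unsigned int is_gen9:1;	/* one-bit field */
	};

	static void demo_set(struct demo_flags *f, u16 gen_mask)
	{
		f->is_gen9 = gen_mask & BIT(8);		/* bug: 0x100 truncates to 0 */
		f->is_gen9 = !!(gen_mask & BIT(8));	/* correct: exactly 0 or 1 */
	}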
2755#define RENDER_RING (1<<RCS) 2780#define ENGINE_MASK(id) BIT(id)
2756#define BSD_RING (1<<VCS) 2781#define RENDER_RING ENGINE_MASK(RCS)
2757#define BLT_RING (1<<BCS) 2782#define BSD_RING ENGINE_MASK(VCS)
2758#define VEBOX_RING (1<<VECS) 2783#define BLT_RING ENGINE_MASK(BCS)
2759#define BSD2_RING (1<<VCS2) 2784#define VEBOX_RING ENGINE_MASK(VECS)
2760#define ALL_ENGINES (~0) 2785#define BSD2_RING ENGINE_MASK(VCS2)
2761 2786#define ALL_ENGINES (~0)
2762#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2787
2763#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2788#define HAS_ENGINE(dev_priv, id) \
2764#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2789 (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
2765#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2790
2791#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
2792#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
2793#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
2794#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
2795
2766#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2796#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
2767#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) 2797#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
2768#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED) 2798#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
2769#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2799#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
2770 HAS_EDRAM(dev)) 2800 HAS_EDRAM(dev))
2771#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2801#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@@ -2783,9 +2813,10 @@ struct drm_i915_cmd_table {
2783#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2813#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
2784 2814
2785/* WaRsDisableCoarsePowerGating:skl,bxt */ 2815/* WaRsDisableCoarsePowerGating:skl,bxt */
2786#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2816#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2787 IS_SKL_GT3(dev) || \ 2817 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
2788 IS_SKL_GT4(dev)) 2818 IS_SKL_GT3(dev_priv) || \
2819 IS_SKL_GT4(dev_priv))
2789 2820
2790/* 2821/*
2791 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2822 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2832,7 +2863,7 @@ struct drm_i915_cmd_table {
2832 * command submission once loaded. But these are logically independent 2863 * command submission once loaded. But these are logically independent
2833 * properties, so we have separate macros to test them. 2864 * properties, so we have separate macros to test them.
2834 */ 2865 */
2835#define HAS_GUC(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2866#define HAS_GUC(dev) (IS_GEN9(dev))
2836#define HAS_GUC_UCODE(dev) (HAS_GUC(dev)) 2867#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
2837#define HAS_GUC_SCHED(dev) (HAS_GUC(dev)) 2868#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
2838 2869
@@ -2853,11 +2884,13 @@ struct drm_i915_cmd_table {
2853#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2884#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
2854#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2885#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2855#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2886#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
2887#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
2856#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2888#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
2857#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 2889#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
2858#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2890#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
2859 2891
2860#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2892#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
2893#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
2861#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) 2894#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
2862#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2895#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
2863#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 2896#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
@@ -2879,8 +2912,14 @@ struct drm_i915_cmd_table {
2879 2912
2880#include "i915_trace.h" 2913#include "i915_trace.h"
2881 2914
2882extern const struct drm_ioctl_desc i915_ioctls[]; 2915static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2883extern int i915_max_ioctl; 2916{
2917#ifdef CONFIG_INTEL_IOMMU
2918 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
2919 return true;
2920#endif
2921 return false;
2922}
2884 2923
2885extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2924extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2886extern int i915_resume_switcheroo(struct drm_device *dev); 2925extern int i915_resume_switcheroo(struct drm_device *dev);
@@ -2888,7 +2927,7 @@ extern int i915_resume_switcheroo(struct drm_device *dev);
2888int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, 2927int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2889 int enable_ppgtt); 2928 int enable_ppgtt);
2890 2929
2891/* i915_dma.c */ 2930/* i915_drv.c */
2892void __printf(3, 4) 2931void __printf(3, 4)
2893__i915_printk(struct drm_i915_private *dev_priv, const char *level, 2932__i915_printk(struct drm_i915_private *dev_priv, const char *level,
2894 const char *fmt, ...); 2933 const char *fmt, ...);
@@ -2896,14 +2935,6 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
2896#define i915_report_error(dev_priv, fmt, ...) \ 2935#define i915_report_error(dev_priv, fmt, ...) \
2897 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2936 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2898 2937
2899extern int i915_driver_load(struct drm_device *, unsigned long flags);
2900extern int i915_driver_unload(struct drm_device *);
2901extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
2902extern void i915_driver_lastclose(struct drm_device * dev);
2903extern void i915_driver_preclose(struct drm_device *dev,
2904 struct drm_file *file);
2905extern void i915_driver_postclose(struct drm_device *dev,
2906 struct drm_file *file);
2907#ifdef CONFIG_COMPAT 2938#ifdef CONFIG_COMPAT
2908extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2939extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2909 unsigned long arg); 2940 unsigned long arg);
@@ -2928,7 +2959,23 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2928bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2959bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
2929 2960
2930/* i915_irq.c */ 2961/* i915_irq.c */
2931void i915_queue_hangcheck(struct drm_i915_private *dev_priv); 2962static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
2963{
2964 unsigned long delay;
2965
2966 if (unlikely(!i915.enable_hangcheck))
2967 return;
2968
2969 /* Don't continually defer the hangcheck, so that it is always run at
2970 * least once after work has been scheduled on any ring. Otherwise,
2971 * we will ignore a hung ring if a second ring is kept busy.
2972 */
2973
2974 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
2975 queue_delayed_work(system_long_wq,
2976 &dev_priv->gpu_error.hangcheck_work, delay);
2977}
2978
2932__printf(3, 4) 2979__printf(3, 4)
2933void i915_handle_error(struct drm_i915_private *dev_priv, 2980void i915_handle_error(struct drm_i915_private *dev_priv,
2934 u32 engine_mask, 2981 u32 engine_mask,
@@ -2963,6 +3010,17 @@ u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
2963 3010
2964void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 3011void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2965 3012
3013int intel_wait_for_register(struct drm_i915_private *dev_priv,
3014 i915_reg_t reg,
3015 const u32 mask,
3016 const u32 value,
3017 const unsigned long timeout_ms);
3018int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
3019 i915_reg_t reg,
3020 const u32 mask,
3021 const u32 value,
3022 const unsigned long timeout_ms);
3023
2966static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) 3024static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
2967{ 3025{
2968 return dev_priv->gvt.initialized; 3026 return dev_priv->gvt.initialized;
@@ -3027,7 +3085,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
3027 ibx_display_interrupt_update(dev_priv, bits, 0); 3085 ibx_display_interrupt_update(dev_priv, bits, 0);
3028} 3086}
3029 3087
3030
3031/* i915_gem.c */ 3088/* i915_gem.c */
3032int i915_gem_create_ioctl(struct drm_device *dev, void *data, 3089int i915_gem_create_ioctl(struct drm_device *dev, void *data,
3033 struct drm_file *file_priv); 3090 struct drm_file *file_priv);
@@ -3244,31 +3301,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
3244 return (int32_t)(seq1 - seq2) >= 0; 3301 return (int32_t)(seq1 - seq2) >= 0;
3245} 3302}
3246 3303
3247static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, 3304static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
3248 bool lazy_coherency)
3249{ 3305{
3250 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3306 return i915_seqno_passed(intel_engine_get_seqno(req->engine),
3251 req->engine->irq_seqno_barrier(req->engine);
3252 return i915_seqno_passed(req->engine->get_seqno(req->engine),
3253 req->previous_seqno); 3307 req->previous_seqno);
3254} 3308}
3255 3309
3256static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 3310static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
3257 bool lazy_coherency)
3258{ 3311{
3259 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3312 return i915_seqno_passed(intel_engine_get_seqno(req->engine),
3260 req->engine->irq_seqno_barrier(req->engine);
3261 return i915_seqno_passed(req->engine->get_seqno(req->engine),
3262 req->seqno); 3313 req->seqno);
3263} 3314}
3264 3315
3316bool __i915_spin_request(const struct drm_i915_gem_request *request,
3317 int state, unsigned long timeout_us);
3318static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
3319 int state, unsigned long timeout_us)
3320{
3321 return (i915_gem_request_started(request) &&
3322 __i915_spin_request(request, state, timeout_us));
3323}
3324
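i915_spin_request() supports the optimistic spin-then-sleep wait pattern: busy-wait briefly for a request that has already started executing, and only fall back to arming the user interrupt and sleeping if it does not complete in time. A sketch of a caller under that assumption; demo_sleep_on_request() is a hypothetical slow path, not an i915 function:

	static int demo_wait_request(struct drm_i915_gem_request *req)
	{
		/* fast path: spin up to ~5us for a nearly finished request */
		if (i915_spin_request(req, TASK_INTERRUPTIBLE, 5))
			return 0;

		/* slow path: enable the interrupt and sleep (hypothetical) */
		return demo_sleep_on_request(req);
	}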
3265int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno); 3325int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
3266int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 3326int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
3267 3327
3268struct drm_i915_gem_request * 3328struct drm_i915_gem_request *
3269i915_gem_find_active_request(struct intel_engine_cs *engine); 3329i915_gem_find_active_request(struct intel_engine_cs *engine);
3270 3330
3271bool i915_gem_retire_requests(struct drm_i915_private *dev_priv); 3331void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
3272void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); 3332void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
3273 3333
3274static inline u32 i915_reset_counter(struct i915_gpu_error *error) 3334static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3311,18 +3371,6 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
 	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
 }
 
-static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->gpu_error.stop_rings == 0 ||
-	       dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
-}
-
-static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->gpu_error.stop_rings == 0 ||
-	       dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
-}
-
 void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
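
The reset bookkeeping above packs everything into one counter: the top bit is the I915_WEDGED flag, and the low bits advance once when a reset begins and once more when it completes, which is why i915_reset_count() recovers the number of resets as ((counter & ~I915_WEDGED) + 1) / 2. A toy model of that arithmetic, assuming the odd-while-in-progress convention the helpers imply (WEDGED and the function names below are illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define WEDGED (1u << 31)	/* terminally-wedged flag in the top bit */

    /* Counter is bumped once as a reset starts (odd = in progress) and
     * once more as it finishes (even = idle again).
     */
    static uint32_t completed_resets(uint32_t counter)
    {
    	return ((counter & ~WEDGED) + 1) / 2;
    }

    static int reset_in_progress(uint32_t counter)
    {
    	return counter & 1;
    }

    int main(void)
    {
    	uint32_t c = 0;

    	c++;			/* reset #1 begins */
    	assert(reset_in_progress(c));
    	c++;			/* reset #1 completes */
    	assert(!reset_in_progress(c));
    	assert(completed_resets(c) == 1);
    	assert(completed_resets(c | WEDGED) == 1);	/* flag masked out */
    	return 0;
    }
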
@@ -3330,7 +3378,7 @@ int i915_gem_init_engines(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_engines(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
 			struct drm_i915_gem_object *batch_obj,
@@ -3484,7 +3532,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 {
 	struct i915_gem_context *ctx;
 
-	lockdep_assert_held(&file_priv->dev_priv->dev->struct_mutex);
+	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
 
 	ctx = idr_find(&file_priv->context_idr, id);
 	if (!ctx)
@@ -3500,7 +3548,7 @@ static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
 
 static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
 {
-	lockdep_assert_held(&ctx->i915->dev->struct_mutex);
+	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	kref_put(&ctx->ref, i915_gem_context_free);
 }
 
@@ -3576,7 +3624,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 	       obj->tiling_mode != I915_TILING_NONE;
@@ -3590,12 +3638,14 @@ int i915_verify_lists(struct drm_device *dev);
 #endif
 
 /* i915_debugfs.c */
-int i915_debugfs_init(struct drm_minor *minor);
-void i915_debugfs_cleanup(struct drm_minor *minor);
 #ifdef CONFIG_DEBUG_FS
+int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
 int i915_debugfs_connector_add(struct drm_connector *connector);
 void intel_display_crc_init(struct drm_device *dev);
 #else
+static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
+static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
 static inline int i915_debugfs_connector_add(struct drm_connector *connector)
 { return 0; }
 static inline void intel_display_crc_init(struct drm_device *dev) {}
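
The debugfs hunk follows the usual Kconfig-stub idiom: real declarations under #ifdef CONFIG_DEBUG_FS, static inline no-ops under #else, so callers compile unconditionally and never need an #ifdef of their own. A generic sketch of the pattern with a made-up CONFIG_FOO_STATS feature and an opaque device type:

    struct foo_device;	/* opaque device type for the sketch */

    #ifdef CONFIG_FOO_STATS
    int foo_stats_register(struct foo_device *foo);
    void foo_stats_unregister(struct foo_device *foo);
    #else
    static inline int foo_stats_register(struct foo_device *foo) { return 0; }
    static inline void foo_stats_unregister(struct foo_device *foo) { }
    #endif
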
@@ -3686,8 +3736,8 @@ extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
 extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
 #else
 static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
-static inline void intel_opregion_init(struct drm_i915_private *dev) { }
-static inline void intel_opregion_fini(struct drm_i915_private *dev) { }
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
 static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
 }
@@ -3716,11 +3766,22 @@ static inline void intel_register_dsm_handler(void) { return; }
 static inline void intel_unregister_dsm_handler(void) { return; }
 #endif /* CONFIG_ACPI */
 
+/* intel_device_info.c */
+static inline struct intel_device_info *
+mkwrite_device_info(struct drm_i915_private *dev_priv)
+{
+	return (struct intel_device_info *)&dev_priv->info;
+}
+
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
+void intel_device_info_dump(struct drm_i915_private *dev_priv);
+
 /* modesetting */
 extern void intel_modeset_init_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_connector_register(struct drm_connector *);
 extern void intel_connector_unregister(struct drm_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_display_resume(struct drm_device *dev);
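
mkwrite_device_info() exists so that device info the rest of the driver treats as read-only (the cast in the hunk suggests the member itself is const-qualified) can still be amended during runtime probing, with every write site greppable by one name. A small self-contained sketch of the same single-writable-path design; it sidesteps the const cast by keeping the member non-const and exposing only a const accessor (all names below are illustrative, not from the driver):

    #include <stdio.h>

    struct device_info {
    	int num_pipes;
    };

    struct device {
    	struct device_info info;
    };

    /* Everyone reads through a const view... */
    static const struct device_info *device_info(const struct device *dev)
    {
    	return &dev->info;
    }

    /* ...and the only writable path is one named helper, so every
     * mutation site shows up in a grep.
     */
    static struct device_info *mkwrite_info(struct device *dev)
    {
    	return &dev->info;
    }

    int main(void)
    {
    	struct device dev = { .info = { .num_pipes = 3 } };

    	/* runtime probing discovered a fused-off pipe */
    	mkwrite_info(&dev)->num_pipes = 2;
    	printf("pipes: %d\n", device_info(&dev)->num_pipes);
    	return 0;
    }
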
@@ -3731,7 +3792,6 @@ extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
-extern void intel_detect_pch(struct drm_device *dev);
 
 extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
@@ -3864,6 +3924,7 @@ __raw_write(64, q)
  */
 #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
 #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
 #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
 
 /* "Broadcast RGB" property */
@@ -3927,12 +3988,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 		schedule_timeout_uninterruptible(remaining_jiffies);
 	}
 }
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
-				      struct drm_i915_gem_request *req)
+static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
-	if (engine->trace_irq_req == NULL && engine->irq_get(engine))
-		i915_gem_request_assign(&engine->trace_irq_req, req);
+	struct intel_engine_cs *engine = req->engine;
+
+	/* Before we do the heavier coherent read of the seqno,
+	 * check the value (hopefully) in the CPU cacheline.
+	 */
+	if (i915_gem_request_completed(req))
+		return true;
+
+	/* Ensure our read of the seqno is coherent so that we
+	 * do not "miss an interrupt" (i.e. if this is the last
+	 * request and the seqno write from the GPU is not visible
+	 * by the time the interrupt fires, we will see that the
+	 * request is incomplete and go back to sleep awaiting
+	 * another interrupt that will never come.)
+	 *
+	 * Strictly, we only need to do this once after an interrupt,
+	 * but it is easier and safer to do it every time the waiter
+	 * is woken.
+	 */
+	if (engine->irq_seqno_barrier &&
+	    READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
+	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
+		struct task_struct *tsk;
+
+		/* The ordering of irq_posted versus applying the barrier
+		 * is crucial. The clearing of the current irq_posted must
+		 * be visible before we perform the barrier operation,
+		 * such that if a subsequent interrupt arrives, irq_posted
+		 * is reasserted and our task rewoken (which causes us to
+		 * do another __i915_request_irq_complete() immediately
+		 * and reapply the barrier). Conversely, if the clear
+		 * occurs after the barrier, then an interrupt that arrived
+		 * whilst we waited on the barrier would not trigger a
+		 * barrier on the next pass, and the read may not see the
+		 * seqno update.
+		 */
+		engine->irq_seqno_barrier(engine);
+
+		/* If we consume the irq, but we are no longer the bottom-half,
+		 * the real bottom-half may not have serialised their own
+		 * seqno check with the irq-barrier (i.e. may have inspected
+		 * the seqno before we believe it coherent since they see
+		 * irq_posted == false but we are still running).
+		 */
+		rcu_read_lock();
+		tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
+		if (tsk && tsk != current)
+			/* Note that if the bottom-half is changed as we
+			 * are sending the wake-up, the new bottom-half will
+			 * be woken by whomever made the change. We only have
+			 * to worry about when we steal the irq-posted for
+			 * ourself.
+			 */
+			wake_up_process(tsk);
+		rcu_read_unlock();
+
+		if (i915_gem_request_completed(req))
+			return true;
+	}
+
+	/* We need to check whether any gpu reset happened in between
+	 * the request being submitted and now. If a reset has occurred,
+	 * the seqno will have been advance past ours and our request
+	 * is complete. If we are in the process of handling a reset,
+	 * the request is effectively complete as the rendering will
+	 * be discarded, but we need to return in order to drop the
+	 * struct_mutex.
+	 */
+	if (i915_reset_in_progress(&req->i915->gpu_error))
+		return true;
+
+	return false;
 }
 
 #endif
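
The heart of the new __i915_request_irq_complete() is the relaxed cmpxchg on breadcrumbs.irq_posted: the interrupt handler posts a flag, and exactly one waiter wins the 1 -> 0 exchange and pays for the expensive seqno barrier on behalf of everyone, with the in-code comments spelling out why the clear must be ordered before the barrier. A user-space sketch of that claim/consume shape using C11 atomics (irq_posted, irq_handler and poll_once are illustrative stand-ins, not driver API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int irq_posted;

    static void irq_handler(void)
    {
    	atomic_store(&irq_posted, 1);	/* post the event, then wake waiters */
    }

    /* Exactly one of possibly many pollers wins the 1 -> 0 exchange and
     * performs the heavy coherency work on everyone's behalf.
     */
    static bool poll_once(void)
    {
    	int expected = 1;

    	if (atomic_compare_exchange_strong_explicit(&irq_posted, &expected, 0,
    						    memory_order_relaxed,
    						    memory_order_relaxed)) {
    		/* we consumed the event: do the barrier and re-read here */
    		return true;
    	}
    	return false;	/* someone else took it, or nothing was posted */
    }

    int main(void)
    {
    	irq_handler();
    	printf("first poll: %d\n", poll_once());	/* 1: consumed */
    	printf("second poll: %d\n", poll_once());	/* 0: already taken */
    	return 0;
    }
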
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 21d0dea57312..8f50919ba9b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -377,13 +377,13 @@ out:
 
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	kmem_cache_free(dev_priv->objects, obj);
 }
 
@@ -508,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 
 	*needs_clflush = 0;
 
-	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
 		return -EINVAL;
 
 	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -636,7 +636,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 		   struct drm_i915_gem_object *obj, uint64_t size,
 		   uint64_t data_offset, uint64_t data_ptr)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_mm_node node;
 	char __user *user_data;
@@ -760,7 +760,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int needs_clflush = 0;
 	struct sg_page_iter sg_iter;
 
-	if (!obj->base.filp)
+	if (!i915_gem_object_has_struct_page(obj))
 		return -ENODEV;
 
 	user_data = u64_to_user_ptr(args->data_ptr);
@@ -1250,7 +1250,7 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -1298,7 +1298,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (!obj->base.filp || cpu_write_needs_clflush(obj)) {
+	if (!i915_gem_object_has_struct_page(obj) ||
+	    cpu_write_needs_clflush(obj)) {
 		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
 		/* Note that the gtt paths might fail with non-page-backed user
 		 * pointers (e.g. gtt mappings when moving data between
@@ -1308,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (ret == -EFAULT) {
 		if (obj->phys_handle)
 			ret = i915_gem_phys_pwrite(obj, args, file);
-		else if (obj->base.filp)
+		else if (i915_gem_object_has_struct_page(obj))
 			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 		else
 			ret = -ENODEV;
@@ -1342,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
 	return 0;
 }
 
-static void fake_irq(unsigned long data)
-{
-	wake_up_process((struct task_struct *)data);
-}
-
-static bool missed_irq(struct drm_i915_private *dev_priv,
-		       struct intel_engine_cs *engine)
-{
-	return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
-}
-
 static unsigned long local_clock_us(unsigned *cpu)
 {
 	unsigned long t;
@@ -1385,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu)
 	return this_cpu != cpu;
 }
 
-static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
+bool __i915_spin_request(const struct drm_i915_gem_request *req,
+			 int state, unsigned long timeout_us)
 {
-	unsigned long timeout;
 	unsigned cpu;
 
 	/* When waiting for high frequency requests, e.g. during synchronous
@@ -1400,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
 	 * takes to sleep on a request, on the order of a microsecond.
 	 */
 
-	if (req->engine->irq_refcount)
-		return -EBUSY;
-
-	/* Only spin if we know the GPU is processing this request */
-	if (!i915_gem_request_started(req, true))
-		return -EAGAIN;
-
-	timeout = local_clock_us(&cpu) + 5;
-	while (!need_resched()) {
-		if (i915_gem_request_completed(req, true))
-			return 0;
+	timeout_us += local_clock_us(&cpu);
+	do {
+		if (i915_gem_request_completed(req))
+			return true;
 
 		if (signal_pending_state(state, current))
 			break;
 
-		if (busywait_stop(timeout, cpu))
+		if (busywait_stop(timeout_us, cpu))
 			break;
 
 		cpu_relax_lowlatency();
-	}
+	} while (!need_resched());
 
-	if (i915_gem_request_completed(req, false))
-		return 0;
-
-	return -EAGAIN;
+	return false;
 }
 
 /**
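
The rewritten spinner is a caller-budgeted poll: sample the clock once, then poll until the request completes, a signal is pending, the budget in microseconds is exhausted, or the scheduler wants the CPU back. A user-space sketch of the same bounded busy-wait shape, with clock_gettime() standing in for local_clock_us() and a caller-supplied condition (need_resched() and the signal check have no direct analogue here; spin_until and nonzero are illustrative names):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    static uint64_t now_us(void)
    {
    	struct timespec ts;

    	clock_gettime(CLOCK_MONOTONIC, &ts);
    	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    /* Bounded busy-wait: poll cond() for at most timeout_us microseconds.
     * Returns true if the condition became true within the budget.
     */
    static bool spin_until(bool (*cond)(void *), void *arg,
    		       unsigned long timeout_us)
    {
    	uint64_t deadline = now_us() + timeout_us;

    	do {
    		if (cond(arg))
    			return true;
    	} while (now_us() < deadline);

    	return false;
    }

    static bool nonzero(void *arg)
    {
    	return *(volatile int *)arg != 0;
    }

    int main(void)
    {
    	int flag = 1;

    	return spin_until(nonzero, &flag, 10) ? 0 : 1;
    }
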
@@ -1449,25 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			s64 *timeout,
 			struct intel_rps_client *rps)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-	struct drm_i915_private *dev_priv = req->i915;
-	const bool irq_test_in_progress =
-		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-	DEFINE_WAIT(wait);
-	unsigned long timeout_expire;
+	DEFINE_WAIT(reset);
+	struct intel_wait wait;
+	unsigned long timeout_remain;
 	s64 before = 0; /* Only to silence a compiler warning. */
-	int ret;
+	int ret = 0;
 
-	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
+	might_sleep();
 
 	if (list_empty(&req->list))
 		return 0;
 
-	if (i915_gem_request_completed(req, true))
+	if (i915_gem_request_completed(req))
 		return 0;
 
-	timeout_expire = 0;
+	timeout_remain = MAX_SCHEDULE_TIMEOUT;
 	if (timeout) {
 		if (WARN_ON(*timeout < 0))
 			return -EINVAL;
@@ -1475,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 		if (*timeout == 0)
 			return -ETIME;
 
-		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
 
 		/*
 		 * Record current time in case interrupted by signal, or wedged.
@@ -1483,75 +1460,85 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 		before = ktime_get_raw_ns();
 	}
 
-	if (INTEL_INFO(dev_priv)->gen >= 6)
-		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
-
 	trace_i915_gem_request_wait_begin(req);
 
-	/* Optimistic spin for the next jiffie before touching IRQs */
-	ret = __i915_spin_request(req, state);
-	if (ret == 0)
-		goto out;
-
-	if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
-		ret = -ENODEV;
-		goto out;
-	}
+	/* This client is about to stall waiting for the GPU. In many cases
+	 * this is undesirable and limits the throughput of the system, as
+	 * many clients cannot continue processing user input/output whilst
+	 * blocked. RPS autotuning may take tens of milliseconds to respond
+	 * to the GPU load and thus incurs additional latency for the client.
+	 * We can circumvent that by promoting the GPU frequency to maximum
+	 * before we wait. This makes the GPU throttle up much more quickly
+	 * (good for benchmarks and user experience, e.g. window animations),
+	 * but at a cost of spending more power processing the workload
+	 * (bad for battery). Not all clients even want their results
+	 * immediately and for them we should just let the GPU select its own
+	 * frequency to maximise efficiency. To prevent a single client from
+	 * forcing the clocks too high for the whole system, we only allow
+	 * each client to waitboost once in a busy period.
+	 */
+	if (INTEL_INFO(req->i915)->gen >= 6)
+		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
 
-	for (;;) {
-		struct timer_list timer;
+	/* Optimistic spin for the next ~jiffie before touching IRQs */
+	if (i915_spin_request(req, state, 5))
+		goto complete;
 
-		prepare_to_wait(&engine->irq_queue, &wait, state);
+	set_current_state(state);
+	add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-		/* We need to check whether any gpu reset happened in between
-		 * the request being submitted and now. If a reset has occurred,
-		 * the request is effectively complete (we either are in the
-		 * process of or have discarded the rendering and completely
-		 * reset the GPU. The results of the request are lost and we
-		 * are free to continue on with the original operation.
-		 */
-		if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
-			ret = 0;
-			break;
-		}
-
-		if (i915_gem_request_completed(req, false)) {
-			ret = 0;
-			break;
-		}
+	intel_wait_init(&wait, req->seqno);
+	if (intel_engine_add_wait(req->engine, &wait))
+		/* In order to check that we haven't missed the interrupt
+		 * as we enabled it, we need to kick ourselves to do a
+		 * coherent check on the seqno before we sleep.
+		 */
+		goto wakeup;
 
+	for (;;) {
 		if (signal_pending_state(state, current)) {
 			ret = -ERESTARTSYS;
 			break;
 		}
 
-		if (timeout && time_after_eq(jiffies, timeout_expire)) {
+		/* Ensure that even if the GPU hangs, we get woken up.
+		 *
+		 * However, note that if no one is waiting, we never notice
+		 * a gpu hang. Eventually, we will have to wait for a resource
+		 * held by the GPU and so trigger a hangcheck. In the most
+		 * pathological case, this will be upon memory starvation!
+		 */
+		i915_queue_hangcheck(req->i915);
+
+		timeout_remain = io_schedule_timeout(timeout_remain);
+		if (timeout_remain == 0) {
 			ret = -ETIME;
 			break;
 		}
 
-		timer.function = NULL;
-		if (timeout || missed_irq(dev_priv, engine)) {
-			unsigned long expire;
+		if (intel_wait_complete(&wait))
			break;
 
-			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-			expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
-			mod_timer(&timer, expire);
-		}
+		set_current_state(state);
 
-		io_schedule();
+wakeup:
+		/* Carefully check if the request is complete, giving time
+		 * for the seqno to be visible following the interrupt.
+		 * We also have to check in case we are kicked by the GPU
+		 * reset in order to drop the struct_mutex.
+		 */
+		if (__i915_request_irq_complete(req))
+			break;
 
-		if (timer.function) {
-			del_singleshot_timer_sync(&timer);
-			destroy_timer_on_stack(&timer);
-		}
+		/* Only spin if we know the GPU is processing this request */
+		if (i915_spin_request(req, state, 2))
+			break;
 	}
-	if (!irq_test_in_progress)
-		engine->irq_put(engine);
-
-	finish_wait(&engine->irq_queue, &wait);
+	remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-out:
+	intel_engine_remove_wait(req->engine, &wait);
+	__set_current_state(TASK_RUNNING);
+complete:
 	trace_i915_gem_request_wait_end(req);
 
 	if (timeout) {
@@ -1570,6 +1557,22 @@ out:
 		*timeout = 0;
 	}
 
+	if (rps && req->seqno == req->engine->last_submitted_seqno) {
+		/* The GPU is now idle and this client has stalled.
+		 * Since no other client has submitted a request in the
+		 * meantime, assume that this client is the only one
+		 * supplying work to the GPU but is unable to keep that
+		 * work supplied because it is waiting. Since the GPU is
+		 * then never kept fully busy, RPS autoclocking will
+		 * keep the clocks relatively low, causing further delays.
+		 * Compensate by giving the synchronous client credit for
+		 * a waitboost next time.
+		 */
+		spin_lock(&req->i915->rps.client_lock);
+		list_del_init(&rps->link);
+		spin_unlock(&req->i915->rps.client_lock);
+	}
+
 	return ret;
 }
 
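
Stripped of the driver specifics, the new __i915_wait_request() is the canonical check/sleep/re-check wait loop: test the condition cheaply first, optionally spin briefly, then block and re-test the condition on every wakeup. A condensed pthread sketch of that shape, with a condition variable standing in for the breadcrumb wait queue and the producer thread for the GPU interrupt (all names illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static bool done;

    static void *producer(void *arg)
    {
    	pthread_mutex_lock(&lock);
    	done = true;			/* "seqno write" */
    	pthread_cond_signal(&cond);	/* "user interrupt" */
    	pthread_mutex_unlock(&lock);
    	return NULL;
    }

    static void wait_for_done(void)
    {
    	pthread_mutex_lock(&lock);
    	while (!done)			/* re-check on every wakeup */
    		pthread_cond_wait(&cond, &lock);
    	pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
    	pthread_t thr;

    	pthread_create(&thr, NULL, producer, NULL);
    	wait_for_done();
    	pthread_join(thr, NULL);
    	puts("request completed");
    	return 0;
    }
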
@@ -1648,7 +1651,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *tmp;
 
-	lockdep_assert_held(&engine->i915->dev->struct_mutex);
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	if (list_empty(&req->list))
 		return;
@@ -1677,14 +1680,14 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
 	interruptible = dev_priv->mm.interruptible;
 
-	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
 
 	ret = __i915_wait_request(req, interruptible, NULL, NULL);
 	if (ret)
 		return ret;
 
 	/* If the GPU hung, we want to keep the requests to find the guilty. */
-	if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+	if (!i915_reset_in_progress(&dev_priv->gpu_error))
 		__i915_gem_request_retire__upto(req);
 
 	return 0;
@@ -1745,7 +1748,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 	else if (obj->last_write_req == req)
 		i915_gem_object_retire__write(obj);
 
-	if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+	if (!i915_reset_in_progress(&req->i915->gpu_error))
 		__i915_gem_request_retire__upto(req);
 }
 
@@ -1758,7 +1761,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 			bool readonly)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
 	int ret, i, n = 0;
 
@@ -1809,6 +1812,13 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
 	return &fpriv->rps;
 }
 
+static enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
+{
+	return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
+	       ORIGIN_GTT : ORIGIN_CPU;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -1865,9 +1875,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
 	if (write_domain != 0)
-		intel_fb_obj_invalidate(obj,
-					write_domain == I915_GEM_DOMAIN_GTT ?
-					ORIGIN_GTT : ORIGIN_CPU);
+		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
 unref:
 	drm_gem_object_unreference(&obj->base);
@@ -1974,6 +1982,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		else
 			addr = -ENOMEM;
 		up_write(&mm->mmap_sem);
+
+		/* This may race, but that's ok, it only gets set */
+		WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
 	}
 	drm_gem_object_unreference_unlocked(obj);
 	if (IS_ERR((void *)addr))
@@ -2262,7 +2273,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	int ret;
 
 	dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -2478,7 +2489,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	int page_count, i;
 	struct address_space *mapping;
 	struct sg_table *st;
@@ -2609,7 +2620,7 @@ err_pages:
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	const struct drm_i915_gem_object_ops *ops = obj->ops;
 	int ret;
 
@@ -2773,6 +2784,13 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 	}
 	i915_gem_retire_requests(dev_priv);
 
+	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+	if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
+		while (intel_kick_waiters(dev_priv) ||
+		       intel_kick_signalers(dev_priv))
+			yield();
+	}
+
 	/* Finally reset hw state */
 	for_each_engine(engine, dev_priv)
 		intel_ring_init_seqno(engine, seqno);
@@ -2782,7 +2800,7 @@ i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
 	if (seqno == 0)
@@ -2822,6 +2840,26 @@ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 	return 0;
 }
 
+static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->i915;
+
+	dev_priv->gt.active_engines |= intel_engine_flag(engine);
+	if (dev_priv->gt.awake)
+		return;
+
+	intel_runtime_pm_get_noresume(dev_priv);
+	dev_priv->gt.awake = true;
+
+	i915_update_gfx_val(dev_priv);
+	if (INTEL_GEN(dev_priv) >= 6)
+		gen6_rps_busy(dev_priv);
+
+	queue_delayed_work(dev_priv->wq,
+			   &dev_priv->gt.retire_work,
+			   round_jiffies_up_relative(HZ));
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean the the
  * request is not being tracked for completion but the work itself is
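
i915_gem_mark_busy() keeps a per-engine bitmask of outstanding work, and only the idle -> busy transition performs the expensive one-time wakeup (runtime-PM reference, RPS, retire worker). A toy model of that bookkeeping, with printfs standing in for the PM and worker calls (gt_state and the engine bits are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct gt_state {
    	unsigned int active_engines;	/* bitmask, one bit per engine */
    	bool awake;
    };

    static void mark_busy(struct gt_state *gt, unsigned int engine_bit)
    {
    	gt->active_engines |= engine_bit;
    	if (gt->awake)
    		return;		/* already powered up and polling */

    	gt->awake = true;
    	printf("wakeup: grab runtime-pm ref, start retire worker\n");
    }

    static void retire(struct gt_state *gt, unsigned int engine_bit)
    {
    	gt->active_engines &= ~engine_bit;
    	if (!gt->active_engines)
    		printf("all engines idle: schedule idle worker\n");
    }

    int main(void)
    {
    	struct gt_state gt = { 0, false };

    	mark_busy(&gt, 1 << 0);	/* render engine submits */
    	mark_busy(&gt, 1 << 1);	/* blitter submits, no second wakeup */
    	retire(&gt, 1 << 0);
    	retire(&gt, 1 << 1);
    	return 0;
    }
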
@@ -2832,7 +2870,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 			bool flush_caches)
 {
 	struct intel_engine_cs *engine;
-	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
 	u32 reserved_tail;
@@ -2842,7 +2879,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		return;
 
 	engine = request->engine;
-	dev_priv = request->i915;
 	ringbuf = request->ringbuf;
 
 	/*
@@ -2908,14 +2944,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	}
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
-
-	i915_queue_hangcheck(engine->i915);
-
-	queue_delayed_work(dev_priv->wq,
-			   &dev_priv->mm.retire_work,
-			   round_jiffies_up_relative(HZ));
-	intel_mark_busy(dev_priv);
-
 	/* Sanity check that the reserved size was large enough. */
 	ret = intel_ring_get_tail(ringbuf) - request_start;
 	if (ret < 0)
@@ -2924,46 +2952,34 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		  "Not enough space reserved (%d bytes) "
 		  "for adding the request (%d bytes)\n",
 		  reserved_tail, ret);
+
+	i915_gem_mark_busy(engine);
 }
 
-static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
-				   const struct i915_gem_context *ctx)
+static bool i915_context_is_banned(const struct i915_gem_context *ctx)
 {
 	unsigned long elapsed;
 
-	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
-
 	if (ctx->hang_stats.banned)
 		return true;
 
+	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
 	if (ctx->hang_stats.ban_period_seconds &&
 	    elapsed <= ctx->hang_stats.ban_period_seconds) {
-		if (!i915_gem_context_is_default(ctx)) {
-			DRM_DEBUG("context hanging too fast, banning!\n");
-			return true;
-		} else if (i915_stop_ring_allow_ban(dev_priv)) {
-			if (i915_stop_ring_allow_warn(dev_priv))
-				DRM_ERROR("gpu hanging too fast, banning!\n");
-			return true;
-		}
+		DRM_DEBUG("context hanging too fast, banning!\n");
+		return true;
 	}
 
 	return false;
 }
 
-static void i915_set_reset_status(struct drm_i915_private *dev_priv,
-				  struct i915_gem_context *ctx,
+static void i915_set_reset_status(struct i915_gem_context *ctx,
 				  const bool guilty)
 {
-	struct i915_ctx_hang_stats *hs;
-
-	if (WARN_ON(!ctx))
-		return;
-
-	hs = &ctx->hang_stats;
+	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
 
 	if (guilty) {
-		hs->banned = i915_context_is_banned(dev_priv, ctx);
+		hs->banned = i915_context_is_banned(ctx);
 		hs->batch_active++;
 		hs->guilty_ts = get_seconds();
 	} else {
@@ -3012,7 +3028,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	kref_init(&req->ref);
 	req->i915 = dev_priv;
 	req->engine = engine;
-	req->reset_counter = reset_counter;
 	req->ctx = ctx;
 	i915_gem_context_reference(req->ctx);
 
@@ -3072,8 +3087,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 
+	/* We are called by the error capture and reset at a random
+	 * point in time. In particular, note that neither is crucially
+	 * ordered with an interrupt. After a hang, the GPU is dead and we
+	 * assume that no more writes can happen (we waited long enough for
+	 * all writes that were in transaction to be flushed) - adding an
+	 * extra delay for a recent interrupt is pointless. Hence, we do
+	 * not need an engine->irq_seqno_barrier() before the seqno reads.
+	 */
 	list_for_each_entry(request, &engine->request_list, list) {
-		if (i915_gem_request_completed(request, false))
+		if (i915_gem_request_completed(request))
 			continue;
 
 		return request;
@@ -3082,27 +3105,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
-					 struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request;
 	bool ring_hung;
 
 	request = i915_gem_find_active_request(engine);
-
 	if (request == NULL)
 		return;
 
 	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
-	i915_set_reset_status(dev_priv, request->ctx, ring_hung);
-
+	i915_set_reset_status(request->ctx, ring_hung);
 	list_for_each_entry_continue(request, &engine->request_list, list)
-		i915_set_reset_status(dev_priv, request->ctx, false);
+		i915_set_reset_status(request->ctx, false);
 }
 
-static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
-					  struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *buffer;
 
@@ -3163,7 +3182,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 
 void i915_gem_reset(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_engine_cs *engine;
 
 	/*
@@ -3172,10 +3191,10 @@ void i915_gem_reset(struct drm_device *dev)
 	 * their reference to the objects, the inspection must be done first.
 	 */
 	for_each_engine(engine, dev_priv)
-		i915_gem_reset_engine_status(dev_priv, engine);
+		i915_gem_reset_engine_status(engine);
 
 	for_each_engine(engine, dev_priv)
-		i915_gem_reset_engine_cleanup(dev_priv, engine);
+		i915_gem_reset_engine_cleanup(engine);
 
 	i915_gem_context_reset(dev);
 
@@ -3205,7 +3224,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!i915_gem_request_completed(request, true))
+		if (!i915_gem_request_completed(request))
 			break;
 
 		i915_gem_request_retire(request);
@@ -3228,55 +3247,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 		i915_gem_object_retire__read(obj, engine->id);
 	}
 
-	if (unlikely(engine->trace_irq_req &&
-		     i915_gem_request_completed(engine->trace_irq_req, true))) {
-		engine->irq_put(engine);
-		i915_gem_request_assign(&engine->trace_irq_req, NULL);
-	}
-
 	WARN_ON(i915_verify_lists(engine->dev));
 }
 
-bool
-i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
-	bool idle = true;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (dev_priv->gt.active_engines == 0)
+		return;
+
+	GEM_BUG_ON(!dev_priv->gt.awake);
 
 	for_each_engine(engine, dev_priv) {
 		i915_gem_retire_requests_ring(engine);
-		idle &= list_empty(&engine->request_list);
-		if (i915.enable_execlists) {
-			spin_lock_bh(&engine->execlist_lock);
-			idle &= list_empty(&engine->execlist_queue);
-			spin_unlock_bh(&engine->execlist_lock);
-		}
+		if (list_empty(&engine->request_list))
+			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
 	}
 
-	if (idle)
-		mod_delayed_work(dev_priv->wq,
-				 &dev_priv->mm.idle_work,
-				 msecs_to_jiffies(100));
-
-	return idle;
+	if (dev_priv->gt.active_engines == 0)
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->gt.idle_work,
+				   msecs_to_jiffies(100));
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), mm.retire_work.work);
-	struct drm_device *dev = dev_priv->dev;
-	bool idle;
+		container_of(work, typeof(*dev_priv), gt.retire_work.work);
+	struct drm_device *dev = &dev_priv->drm;
 
 	/* Come back later if the device is busy... */
-	idle = false;
 	if (mutex_trylock(&dev->struct_mutex)) {
-		idle = i915_gem_retire_requests(dev_priv);
+		i915_gem_retire_requests(dev_priv);
 		mutex_unlock(&dev->struct_mutex);
 	}
-	if (!idle)
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+
+	/* Keep the retire handler running until we are finally idle.
+	 * We do not need to do this test under locking as in the worst-case
+	 * we queue the retire worker once too often.
+	 */
+	if (READ_ONCE(dev_priv->gt.awake))
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->gt.retire_work,
 				   round_jiffies_up_relative(HZ));
 }
 
@@ -3284,25 +3300,55 @@ static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), mm.idle_work.work);
-	struct drm_device *dev = dev_priv->dev;
+		container_of(work, typeof(*dev_priv), gt.idle_work.work);
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_engine_cs *engine;
+	unsigned int stuck_engines;
+	bool rearm_hangcheck;
+
+	if (!READ_ONCE(dev_priv->gt.awake))
+		return;
+
+	if (READ_ONCE(dev_priv->gt.active_engines))
+		return;
+
+	rearm_hangcheck =
+		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		/* Currently busy, come back later */
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->gt.idle_work,
+				 msecs_to_jiffies(50));
+		goto out_rearm;
+	}
+
+	if (dev_priv->gt.active_engines)
+		goto out_unlock;
 
 	for_each_engine(engine, dev_priv)
-		if (!list_empty(&engine->request_list))
-			return;
+		i915_gem_batch_pool_fini(&engine->batch_pool);
 
-	/* we probably should sync with hangcheck here, using cancel_work_sync.
-	 * Also locking seems to be fubar here, engine->request_list is protected
-	 * by dev->struct_mutex. */
+	GEM_BUG_ON(!dev_priv->gt.awake);
+	dev_priv->gt.awake = false;
+	rearm_hangcheck = false;
 
-	intel_mark_idle(dev_priv);
+	stuck_engines = intel_kick_waiters(dev_priv);
+	if (unlikely(stuck_engines)) {
+		DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
+		dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
+	}
 
-	if (mutex_trylock(&dev->struct_mutex)) {
-		for_each_engine(engine, dev_priv)
-			i915_gem_batch_pool_fini(&engine->batch_pool);
+	if (INTEL_GEN(dev_priv) >= 6)
+		gen6_rps_idle(dev_priv);
+	intel_runtime_pm_put(dev_priv);
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
 
-		mutex_unlock(&dev->struct_mutex);
+out_rearm:
+	if (rearm_hangcheck) {
+		GEM_BUG_ON(!dev_priv->gt.awake);
+		i915_queue_hangcheck(dev_priv);
 	}
 }
 
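
The idle worker above is deliberately racy-but-safe: cheap unlocked reads filter out the common busy case, the mutex is only trylocked (requeueing itself if contended), and the busy check is repeated under the lock before committing, with hangcheck cancelled up front and re-armed if the transition aborts. A compact sketch of that check/trylock/re-check shape, with pthreads and C11 atomics standing in for struct_mutex and the driver state (all names illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_bool awake = true;
    static atomic_uint active;	/* engines with outstanding work */

    static bool try_go_idle(void)
    {
    	if (!atomic_load(&awake))
    		return false;		/* nothing to do */
    	if (atomic_load(&active))
    		return false;		/* still busy, come back later */

    	if (pthread_mutex_trylock(&big_lock) != 0)
    		return false;		/* contended: requeue ourselves */

    	if (atomic_load(&active)) {	/* re-check under the lock */
    		pthread_mutex_unlock(&big_lock);
    		return false;
    	}

    	atomic_store(&awake, false);	/* commit: drop PM reference etc. */
    	pthread_mutex_unlock(&big_lock);
    	return true;
    }

    int main(void)
    {
    	printf("went idle: %d\n", try_go_idle());
    	return 0;
    }
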
@@ -3327,7 +3373,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3327 if (req == NULL) 3373 if (req == NULL)
3328 continue; 3374 continue;
3329 3375
3330 if (i915_gem_request_completed(req, true)) 3376 if (i915_gem_request_completed(req))
3331 i915_gem_object_retire__read(obj, i); 3377 i915_gem_object_retire__read(obj, i);
3332 } 3378 }
3333 3379
@@ -3435,7 +3481,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3435 if (to == from) 3481 if (to == from)
3436 return 0; 3482 return 0;
3437 3483
3438 if (i915_gem_request_completed(from_req, true)) 3484 if (i915_gem_request_completed(from_req))
3439 return 0; 3485 return 0;
3440 3486
3441 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) { 3487 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
@@ -3586,7 +3632,7 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
3586static int __i915_vma_unbind(struct i915_vma *vma, bool wait) 3632static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3587{ 3633{
3588 struct drm_i915_gem_object *obj = vma->obj; 3634 struct drm_i915_gem_object *obj = vma->obj;
3589 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3635 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3590 int ret; 3636 int ret;
3591 3637
3592 if (list_empty(&vma->obj_link)) 3638 if (list_empty(&vma->obj_link))
@@ -3662,26 +3708,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3662 return __i915_vma_unbind(vma, false); 3708 return __i915_vma_unbind(vma, false);
3663} 3709}
3664 3710
3665int i915_gpu_idle(struct drm_device *dev) 3711int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
3666{ 3712{
3667 struct drm_i915_private *dev_priv = dev->dev_private;
3668 struct intel_engine_cs *engine; 3713 struct intel_engine_cs *engine;
3669 int ret; 3714 int ret;
3670 3715
3671 /* Flush everything onto the inactive list. */ 3716 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3672 for_each_engine(engine, dev_priv) {
3673 if (!i915.enable_execlists) {
3674 struct drm_i915_gem_request *req;
3675 3717
3676 req = i915_gem_request_alloc(engine, NULL); 3718 for_each_engine(engine, dev_priv) {
3677 if (IS_ERR(req)) 3719 if (engine->last_context == NULL)
3678 return PTR_ERR(req); 3720 continue;
3679
3680 ret = i915_switch_context(req);
3681 i915_add_request_no_flush(req);
3682 if (ret)
3683 return ret;
3684 }
3685 3721
3686 ret = intel_engine_idle(engine); 3722 ret = intel_engine_idle(engine);
3687 if (ret) 3723 if (ret)
@@ -4214,7 +4250,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
4214int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 4250int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
4215 struct drm_file *file) 4251 struct drm_file *file)
4216{ 4252{
4217 struct drm_i915_private *dev_priv = dev->dev_private; 4253 struct drm_i915_private *dev_priv = to_i915(dev);
4218 struct drm_i915_gem_caching *args = data; 4254 struct drm_i915_gem_caching *args = data;
4219 struct drm_i915_gem_object *obj; 4255 struct drm_i915_gem_object *obj;
4220 enum i915_cache_level level; 4256 enum i915_cache_level level;
@@ -4408,7 +4444,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4408static int 4444static int
4409i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 4445i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4410{ 4446{
4411 struct drm_i915_private *dev_priv = dev->dev_private; 4447 struct drm_i915_private *dev_priv = to_i915(dev);
4412 struct drm_i915_file_private *file_priv = file->driver_priv; 4448 struct drm_i915_file_private *file_priv = file->driver_priv;
4413 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 4449 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4414 struct drm_i915_gem_request *request, *target = NULL; 4450 struct drm_i915_gem_request *request, *target = NULL;
@@ -4444,9 +4480,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4444 return 0; 4480 return 0;
4445 4481
4446 ret = __i915_wait_request(target, true, NULL, NULL); 4482 ret = __i915_wait_request(target, true, NULL, NULL);
4447 if (ret == 0)
4448 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4449
4450 i915_gem_request_unreference(target); 4483 i915_gem_request_unreference(target);
4451 4484
4452 return ret; 4485 return ret;
@@ -4505,7 +4538,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4505 uint32_t alignment, 4538 uint32_t alignment,
4506 uint64_t flags) 4539 uint64_t flags)
4507{ 4540{
4508 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4541 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4509 struct i915_vma *vma; 4542 struct i915_vma *vma;
4510 unsigned bound; 4543 unsigned bound;
4511 int ret; 4544 int ret;
@@ -4669,7 +4702,7 @@ int
4669i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4702i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4670 struct drm_file *file_priv) 4703 struct drm_file *file_priv)
4671{ 4704{
4672 struct drm_i915_private *dev_priv = dev->dev_private; 4705 struct drm_i915_private *dev_priv = to_i915(dev);
4673 struct drm_i915_gem_madvise *args = data; 4706 struct drm_i915_gem_madvise *args = data;
4674 struct drm_i915_gem_object *obj; 4707 struct drm_i915_gem_object *obj;
4675 int ret; 4708 int ret;
@@ -4739,7 +4772,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4739 obj->fence_reg = I915_FENCE_REG_NONE; 4772 obj->fence_reg = I915_FENCE_REG_NONE;
4740 obj->madv = I915_MADV_WILLNEED; 4773 obj->madv = I915_MADV_WILLNEED;
4741 4774
4742 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); 4775 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4743} 4776}
4744 4777
4745static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4778static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4834,7 +4867,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4834{ 4867{
4835 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4868 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4836 struct drm_device *dev = obj->base.dev; 4869 struct drm_device *dev = obj->base.dev;
4837 struct drm_i915_private *dev_priv = dev->dev_private; 4870 struct drm_i915_private *dev_priv = to_i915(dev);
4838 struct i915_vma *vma, *next; 4871 struct i915_vma *vma, *next;
4839 4872
4840 intel_runtime_pm_get(dev_priv); 4873 intel_runtime_pm_get(dev_priv);
@@ -4938,7 +4971,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
4938static void 4971static void
4939i915_gem_stop_engines(struct drm_device *dev) 4972i915_gem_stop_engines(struct drm_device *dev)
4940{ 4973{
4941 struct drm_i915_private *dev_priv = dev->dev_private; 4974 struct drm_i915_private *dev_priv = to_i915(dev);
4942 struct intel_engine_cs *engine; 4975 struct intel_engine_cs *engine;
4943 4976
4944 for_each_engine(engine, dev_priv) 4977 for_each_engine(engine, dev_priv)
@@ -4948,11 +4981,11 @@ i915_gem_stop_engines(struct drm_device *dev)
4948int 4981int
4949i915_gem_suspend(struct drm_device *dev) 4982i915_gem_suspend(struct drm_device *dev)
4950{ 4983{
4951 struct drm_i915_private *dev_priv = dev->dev_private; 4984 struct drm_i915_private *dev_priv = to_i915(dev);
4952 int ret = 0; 4985 int ret = 0;
4953 4986
4954 mutex_lock(&dev->struct_mutex); 4987 mutex_lock(&dev->struct_mutex);
4955 ret = i915_gpu_idle(dev); 4988 ret = i915_gem_wait_for_idle(dev_priv);
4956 if (ret) 4989 if (ret)
4957 goto err; 4990 goto err;
4958 4991
@@ -4963,13 +4996,13 @@ i915_gem_suspend(struct drm_device *dev)
4963 mutex_unlock(&dev->struct_mutex); 4996 mutex_unlock(&dev->struct_mutex);
4964 4997
4965 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4998 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4966 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 4999 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4967 flush_delayed_work(&dev_priv->mm.idle_work); 5000 flush_delayed_work(&dev_priv->gt.idle_work);
4968 5001
4969 /* Assert that we successfully flushed all the work and 5002 /* Assert that we successfully flushed all the work and
4970 * reset the GPU back to its idle, low power state. 5003 * reset the GPU back to its idle, low power state.
4971 */ 5004 */
4972 WARN_ON(dev_priv->mm.busy); 5005 WARN_ON(dev_priv->gt.awake);
4973 5006
4974 return 0; 5007 return 0;
4975 5008
@@ -4980,7 +5013,7 @@ err:
4980 5013
4981void i915_gem_init_swizzling(struct drm_device *dev) 5014void i915_gem_init_swizzling(struct drm_device *dev)
4982{ 5015{
4983 struct drm_i915_private *dev_priv = dev->dev_private; 5016 struct drm_i915_private *dev_priv = to_i915(dev);
4984 5017
4985 if (INTEL_INFO(dev)->gen < 5 || 5018 if (INTEL_INFO(dev)->gen < 5 ||
4986 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 5019 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -5005,7 +5038,7 @@ void i915_gem_init_swizzling(struct drm_device *dev)
5005 5038
5006static void init_unused_ring(struct drm_device *dev, u32 base) 5039static void init_unused_ring(struct drm_device *dev, u32 base)
5007{ 5040{
5008 struct drm_i915_private *dev_priv = dev->dev_private; 5041 struct drm_i915_private *dev_priv = to_i915(dev);
5009 5042
5010 I915_WRITE(RING_CTL(base), 0); 5043 I915_WRITE(RING_CTL(base), 0);
5011 I915_WRITE(RING_HEAD(base), 0); 5044 I915_WRITE(RING_HEAD(base), 0);
@@ -5032,7 +5065,7 @@ static void init_unused_rings(struct drm_device *dev)
5032 5065
5033int i915_gem_init_engines(struct drm_device *dev) 5066int i915_gem_init_engines(struct drm_device *dev)
5034{ 5067{
5035 struct drm_i915_private *dev_priv = dev->dev_private; 5068 struct drm_i915_private *dev_priv = to_i915(dev);
5036 int ret; 5069 int ret;
5037 5070
5038 ret = intel_init_render_ring_buffer(dev); 5071 ret = intel_init_render_ring_buffer(dev);
@@ -5080,7 +5113,7 @@ cleanup_render_ring:
5080int 5113int
5081i915_gem_init_hw(struct drm_device *dev) 5114i915_gem_init_hw(struct drm_device *dev)
5082{ 5115{
5083 struct drm_i915_private *dev_priv = dev->dev_private; 5116 struct drm_i915_private *dev_priv = to_i915(dev);
5084 struct intel_engine_cs *engine; 5117 struct intel_engine_cs *engine;
5085 int ret; 5118 int ret;
5086 5119
@@ -5138,12 +5171,6 @@ i915_gem_init_hw(struct drm_device *dev)
5138 if (ret) 5171 if (ret)
5139 goto out; 5172 goto out;
5140 5173
5141 /*
5142 * Increment the next seqno by 0x100 so we have a visible break
5143 * on re-initialisation
5144 */
5145 ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
5146
5147out: 5174out:
5148 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5175 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5149 return ret; 5176 return ret;
@@ -5151,7 +5178,7 @@ out:
5151 5178
5152int i915_gem_init(struct drm_device *dev) 5179int i915_gem_init(struct drm_device *dev)
5153{ 5180{
5154 struct drm_i915_private *dev_priv = dev->dev_private; 5181 struct drm_i915_private *dev_priv = to_i915(dev);
5155 int ret; 5182 int ret;
5156 5183
5157 mutex_lock(&dev->struct_mutex); 5184 mutex_lock(&dev->struct_mutex);
@@ -5208,7 +5235,7 @@ out_unlock:
5208void 5235void
5209i915_gem_cleanup_engines(struct drm_device *dev) 5236i915_gem_cleanup_engines(struct drm_device *dev)
5210{ 5237{
5211 struct drm_i915_private *dev_priv = dev->dev_private; 5238 struct drm_i915_private *dev_priv = to_i915(dev);
5212 struct intel_engine_cs *engine; 5239 struct intel_engine_cs *engine;
5213 5240
5214 for_each_engine(engine, dev_priv) 5241 for_each_engine(engine, dev_priv)
@@ -5225,7 +5252,7 @@ init_engine_lists(struct intel_engine_cs *engine)
5225void 5252void
5226i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 5253i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5227{ 5254{
5228 struct drm_device *dev = dev_priv->dev; 5255 struct drm_device *dev = &dev_priv->drm;
5229 5256
5230 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5257 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5231 !IS_CHERRYVIEW(dev_priv)) 5258 !IS_CHERRYVIEW(dev_priv))
@@ -5249,7 +5276,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5249void 5276void
5250i915_gem_load_init(struct drm_device *dev) 5277i915_gem_load_init(struct drm_device *dev)
5251{ 5278{
5252 struct drm_i915_private *dev_priv = dev->dev_private; 5279 struct drm_i915_private *dev_priv = to_i915(dev);
5253 int i; 5280 int i;
5254 5281
5255 dev_priv->objects = 5282 dev_priv->objects =
@@ -5277,22 +5304,15 @@ i915_gem_load_init(struct drm_device *dev)
5277 init_engine_lists(&dev_priv->engine[i]); 5304 init_engine_lists(&dev_priv->engine[i]);
5278 for (i = 0; i < I915_MAX_NUM_FENCES; i++) 5305 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5279 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 5306 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5280 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 5307 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5281 i915_gem_retire_work_handler); 5308 i915_gem_retire_work_handler);
5282 INIT_DELAYED_WORK(&dev_priv->mm.idle_work, 5309 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5283 i915_gem_idle_work_handler); 5310 i915_gem_idle_work_handler);
5311 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5284 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 5312 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5285 5313
5286 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 5314 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5287 5315
5288 /*
5289 * Set initial sequence number for requests.
5290 * Using this number allows the wraparound to happen early,
5291 * catching any obvious problems.
5292 */
5293 dev_priv->next_seqno = ((u32)~0 - 0x1100);
5294 dev_priv->last_seqno = ((u32)~0 - 0x1101);
5295
5296 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 5316 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5297 5317
5298 init_waitqueue_head(&dev_priv->pending_flip_queue); 5318 init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5378,7 +5398,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5378 return -ENOMEM; 5398 return -ENOMEM;
5379 5399
5380 file->driver_priv = file_priv; 5400 file->driver_priv = file_priv;
5381 file_priv->dev_priv = dev->dev_private; 5401 file_priv->dev_priv = to_i915(dev);
5382 file_priv->file = file; 5402 file_priv->file = file;
5383 INIT_LIST_HEAD(&file_priv->rps.link); 5403 INIT_LIST_HEAD(&file_priv->rps.link);
5384 5404
@@ -5424,7 +5444,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
5424u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, 5444u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5425 struct i915_address_space *vm) 5445 struct i915_address_space *vm)
5426{ 5446{
5427 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5447 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5428 struct i915_vma *vma; 5448 struct i915_vma *vma;
5429 5449
5430 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5450 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
@@ -5528,7 +5548,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
5528 struct page *page; 5548 struct page *page;
5529 5549
5530 /* Only default objects have per-page dirty tracking */ 5550 /* Only default objects have per-page dirty tracking */
5531 if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) 5551 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
5532 return NULL; 5552 return NULL;
5533 5553
5534 page = i915_gem_object_get_page(obj, n); 5554 page = i915_gem_object_get_page(obj, n);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 30d9b4fd30f3..3c97f0e7a003 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -154,7 +154,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
154 struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); 154 struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
155 int i; 155 int i;
156 156
157 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 157 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
158 trace_i915_context_free(ctx); 158 trace_i915_context_free(ctx);
159 159
160 /* 160 /*
@@ -250,7 +250,7 @@ static struct i915_gem_context *
250__create_hw_context(struct drm_device *dev, 250__create_hw_context(struct drm_device *dev,
251 struct drm_i915_file_private *file_priv) 251 struct drm_i915_file_private *file_priv)
252{ 252{
253 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = to_i915(dev);
254 struct i915_gem_context *ctx; 254 struct i915_gem_context *ctx;
255 int ret; 255 int ret;
256 256
@@ -268,6 +268,8 @@ __create_hw_context(struct drm_device *dev,
268 list_add_tail(&ctx->link, &dev_priv->context_list); 268 list_add_tail(&ctx->link, &dev_priv->context_list);
269 ctx->i915 = dev_priv; 269 ctx->i915 = dev_priv;
270 270
271 ctx->ggtt_alignment = get_context_alignment(dev_priv);
272
271 if (dev_priv->hw_context_size) { 273 if (dev_priv->hw_context_size) {
272 struct drm_i915_gem_object *obj = 274 struct drm_i915_gem_object *obj =
273 i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); 275 i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -394,7 +396,7 @@ static void i915_gem_context_unpin(struct i915_gem_context *ctx,
394 396
395void i915_gem_context_reset(struct drm_device *dev) 397void i915_gem_context_reset(struct drm_device *dev)
396{ 398{
397 struct drm_i915_private *dev_priv = dev->dev_private; 399 struct drm_i915_private *dev_priv = to_i915(dev);
398 400
399 lockdep_assert_held(&dev->struct_mutex); 401 lockdep_assert_held(&dev->struct_mutex);
400 402
@@ -410,7 +412,7 @@ void i915_gem_context_reset(struct drm_device *dev)
410 412
411int i915_gem_context_init(struct drm_device *dev) 413int i915_gem_context_init(struct drm_device *dev)
412{ 414{
413 struct drm_i915_private *dev_priv = dev->dev_private; 415 struct drm_i915_private *dev_priv = to_i915(dev);
414 struct i915_gem_context *ctx; 416 struct i915_gem_context *ctx;
415 417
416 /* Init should only be called once per module load. Eventually the 418 /* Init should only be called once per module load. Eventually the
@@ -451,26 +453,6 @@ int i915_gem_context_init(struct drm_device *dev)
451 return PTR_ERR(ctx); 453 return PTR_ERR(ctx);
452 } 454 }
453 455
454 if (!i915.enable_execlists && ctx->engine[RCS].state) {
455 int ret;
456
457 /* We may need to do things with the shrinker which
458 * require us to immediately switch back to the default
459 * context. This can cause a problem as pinning the
460 * default context also requires GTT space which may not
461 * be available. To avoid this we always pin the default
462 * context.
463 */
464 ret = i915_gem_obj_ggtt_pin(ctx->engine[RCS].state,
465 get_context_alignment(dev_priv), 0);
466 if (ret) {
467 DRM_ERROR("Failed to pin default global context (error %d)\n",
468 ret);
469 i915_gem_context_unreference(ctx);
470 return ret;
471 }
472 }
473
474 dev_priv->kernel_context = ctx; 456 dev_priv->kernel_context = ctx;
475 457
476 DRM_DEBUG_DRIVER("%s context support initialized\n", 458 DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -483,33 +465,45 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
483{ 465{
484 struct intel_engine_cs *engine; 466 struct intel_engine_cs *engine;
485 467
486 lockdep_assert_held(&dev_priv->dev->struct_mutex); 468 lockdep_assert_held(&dev_priv->drm.struct_mutex);
487 469
488 for_each_engine(engine, dev_priv) { 470 for_each_engine(engine, dev_priv) {
489 if (engine->last_context) { 471 if (engine->last_context) {
490 i915_gem_context_unpin(engine->last_context, engine); 472 i915_gem_context_unpin(engine->last_context, engine);
491 engine->last_context = NULL; 473 engine->last_context = NULL;
492 } 474 }
493
494 /* Force the GPU state to be reinitialised on enabling */
495 dev_priv->kernel_context->engine[engine->id].initialised =
496 engine->init_context == NULL;
497 } 475 }
498 476
499 /* Force the GPU state to be reinitialised on enabling */ 477 /* Force the GPU state to be restored on enabling */
500 dev_priv->kernel_context->remap_slice = ALL_L3_SLICES(dev_priv); 478 if (!i915.enable_execlists) {
479 struct i915_gem_context *ctx;
480
481 list_for_each_entry(ctx, &dev_priv->context_list, link) {
482 if (!i915_gem_context_is_default(ctx))
483 continue;
484
485 for_each_engine(engine, dev_priv)
486 ctx->engine[engine->id].initialised = false;
487
488 ctx->remap_slice = ALL_L3_SLICES(dev_priv);
489 }
490
491 for_each_engine(engine, dev_priv) {
492 struct intel_context *kce =
493 &dev_priv->kernel_context->engine[engine->id];
494
495 kce->initialised = true;
496 }
497 }
501} 498}
502 499
503void i915_gem_context_fini(struct drm_device *dev) 500void i915_gem_context_fini(struct drm_device *dev)
504{ 501{
505 struct drm_i915_private *dev_priv = dev->dev_private; 502 struct drm_i915_private *dev_priv = to_i915(dev);
506 struct i915_gem_context *dctx = dev_priv->kernel_context; 503 struct i915_gem_context *dctx = dev_priv->kernel_context;
507 504
508 lockdep_assert_held(&dev->struct_mutex); 505 lockdep_assert_held(&dev->struct_mutex);
509 506
510 if (!i915.enable_execlists && dctx->engine[RCS].state)
511 i915_gem_object_ggtt_unpin(dctx->engine[RCS].state);
512
513 i915_gem_context_unreference(dctx); 507 i915_gem_context_unreference(dctx);
514 dev_priv->kernel_context = NULL; 508 dev_priv->kernel_context = NULL;
515 509
@@ -759,7 +753,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
759 753
760 /* Trying to pin first makes error handling easier. */ 754 /* Trying to pin first makes error handling easier. */
761 ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state, 755 ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
762 get_context_alignment(engine->i915), 756 to->ggtt_alignment,
763 0); 757 0);
764 if (ret) 758 if (ret)
765 return ret; 759 return ret;
@@ -901,7 +895,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
901 struct intel_engine_cs *engine = req->engine; 895 struct intel_engine_cs *engine = req->engine;
902 896
903 WARN_ON(i915.enable_execlists); 897 WARN_ON(i915.enable_execlists);
904 lockdep_assert_held(&req->i915->dev->struct_mutex); 898 lockdep_assert_held(&req->i915->drm.struct_mutex);
905 899
906 if (!req->ctx->engine[engine->id].state) { 900 if (!req->ctx->engine[engine->id].state) {
907 struct i915_gem_context *to = req->ctx; 901 struct i915_gem_context *to = req->ctx;
@@ -1032,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
1032 else 1026 else
1033 args->value = to_i915(dev)->ggtt.base.total; 1027 args->value = to_i915(dev)->ggtt.base.total;
1034 break; 1028 break;
1029 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1030 args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
1031 break;
1035 default: 1032 default:
1036 ret = -EINVAL; 1033 ret = -EINVAL;
1037 break; 1034 break;
@@ -1077,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
1077 ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0; 1074 ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
1078 } 1075 }
1079 break; 1076 break;
1077 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1078 if (args->size) {
1079 ret = -EINVAL;
1080 } else {
1081 if (args->value)
1082 ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
1083 else
1084 ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
1085 }
1086 break;
1080 default: 1087 default:
1081 ret = -EINVAL; 1088 ret = -EINVAL;
1082 break; 1089 break;
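The new I915_CONTEXT_PARAM_NO_ERROR_CAPTURE parameter gives userspace a per-context opt-out from GPU error-state capture; note that a non-zero size is rejected. A hedged userspace sketch using the existing context-param ioctl (only the parameter is new in this series; the fd/ctx_id plumbing is assumed):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: mark a context so its state is skipped by
 * error capture. Returns 0 on success, -1 with errno on failure.
 */
static int context_disable_error_capture(int fd, __u32 ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.size = 0,	/* any non-zero size yields -EINVAL */
		.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE,
		.value = 1,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}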
@@ -1089,7 +1096,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
1089int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, 1096int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1090 void *data, struct drm_file *file) 1097 void *data, struct drm_file *file)
1091{ 1098{
1092 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = to_i915(dev);
1093 struct drm_i915_reset_stats *args = data; 1100 struct drm_i915_reset_stats *args = data;
1094 struct i915_ctx_hang_stats *hs; 1101 struct i915_ctx_hang_stats *hs;
1095 struct i915_gem_context *ctx; 1102 struct i915_gem_context *ctx;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b144c3f5c650..3c1280ec7ff6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,6 +33,37 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35 35
36static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
37{
38 struct intel_engine_cs *engine;
39
40 if (i915.enable_execlists)
41 return 0;
42
43 for_each_engine(engine, dev_priv) {
44 struct drm_i915_gem_request *req;
45 int ret;
46
47 if (engine->last_context == NULL)
48 continue;
49
50 if (engine->last_context == dev_priv->kernel_context)
51 continue;
52
53 req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
54 if (IS_ERR(req))
55 return PTR_ERR(req);
56
57 ret = i915_switch_context(req);
58 i915_add_request_no_flush(req);
59 if (ret)
60 return ret;
61 }
62
63 return 0;
64}
65
66
36static bool 67static bool
37mark_free(struct i915_vma *vma, struct list_head *unwind) 68mark_free(struct i915_vma *vma, struct list_head *unwind)
38{ 69{
@@ -150,11 +181,19 @@ none:
150 181
151 /* Only idle the GPU and repeat the search once */ 182 /* Only idle the GPU and repeat the search once */
152 if (pass++ == 0) { 183 if (pass++ == 0) {
153 ret = i915_gpu_idle(dev); 184 struct drm_i915_private *dev_priv = to_i915(dev);
185
186 if (i915_is_ggtt(vm)) {
187 ret = switch_to_pinned_context(dev_priv);
188 if (ret)
189 return ret;
190 }
191
192 ret = i915_gem_wait_for_idle(dev_priv);
154 if (ret) 193 if (ret)
155 return ret; 194 return ret;
156 195
157 i915_gem_retire_requests(to_i915(dev)); 196 i915_gem_retire_requests(dev_priv);
158 goto search_again; 197 goto search_again;
159 } 198 }
160 199
@@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
261 trace_i915_gem_evict_vm(vm); 300 trace_i915_gem_evict_vm(vm);
262 301
263 if (do_idle) { 302 if (do_idle) {
264 ret = i915_gpu_idle(vm->dev); 303 struct drm_i915_private *dev_priv = to_i915(vm->dev);
304
305 if (i915_is_ggtt(vm)) {
306 ret = switch_to_pinned_context(dev_priv);
307 if (ret)
308 return ret;
309 }
310
311 ret = i915_gem_wait_for_idle(dev_priv);
265 if (ret) 312 if (ret)
266 return ret; 313 return ret;
267 314
268 i915_gem_retire_requests(to_i915(vm->dev)); 315 i915_gem_retire_requests(dev_priv);
269 316
270 WARN_ON(!list_empty(&vm->active_list)); 317 WARN_ON(!list_empty(&vm->active_list));
271 } 318 }
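The two eviction hunks above share a rationale: in legacy ringbuffer mode an engine keeps its last_context pinned in the GGTT, so merely waiting for idle can never release that context's space. Eviction therefore routes every engine through the always-pinned kernel context first, and only for the global GTT, since per-process address spaces hold no pinned contexts. A condensed sketch of the resulting flow, error handling trimmed:

	if (i915_is_ggtt(vm)) {
		/* unpin lingering user contexts via the kernel context */
		ret = switch_to_pinned_context(dev_priv);
		if (ret)
			return ret;
	}

	ret = i915_gem_wait_for_idle(dev_priv);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);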
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7941f1fe9cd2..1978633e7549 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1142,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1142 struct drm_i915_gem_request *req) 1142 struct drm_i915_gem_request *req)
1143{ 1143{
1144 struct intel_engine_cs *engine = req->engine; 1144 struct intel_engine_cs *engine = req->engine;
1145 struct drm_i915_private *dev_priv = dev->dev_private; 1145 struct drm_i915_private *dev_priv = to_i915(dev);
1146 int ret, i; 1146 int ret, i;
1147 1147
1148 if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) { 1148 if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
@@ -1225,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1225{ 1225{
1226 struct drm_device *dev = params->dev; 1226 struct drm_device *dev = params->dev;
1227 struct intel_engine_cs *engine = params->engine; 1227 struct intel_engine_cs *engine = params->engine;
1228 struct drm_i915_private *dev_priv = dev->dev_private; 1228 struct drm_i915_private *dev_priv = to_i915(dev);
1229 u64 exec_start, exec_len; 1229 u64 exec_start, exec_len;
1230 int instp_mode; 1230 int instp_mode;
1231 u32 instp_mask; 1231 u32 instp_mask;
@@ -1328,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
1328 /* Check whether the file_priv has already selected one ring. */ 1328 /* Check whether the file_priv has already selected one ring. */
1329 if ((int)file_priv->bsd_ring < 0) { 1329 if ((int)file_priv->bsd_ring < 0) {
1330 /* If not, use the ping-pong mechanism to select one. */ 1330 /* If not, use the ping-pong mechanism to select one. */
1331 mutex_lock(&dev_priv->dev->struct_mutex); 1331 mutex_lock(&dev_priv->drm.struct_mutex);
1332 file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index; 1332 file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
1333 dev_priv->mm.bsd_ring_dispatch_index ^= 1; 1333 dev_priv->mm.bsd_ring_dispatch_index ^= 1;
1334 mutex_unlock(&dev_priv->dev->struct_mutex); 1334 mutex_unlock(&dev_priv->drm.struct_mutex);
1335 } 1335 }
1336 1336
1337 return file_priv->bsd_ring; 1337 return file_priv->bsd_ring;
@@ -1477,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1477 dispatch_flags |= I915_DISPATCH_RS; 1477 dispatch_flags |= I915_DISPATCH_RS;
1478 } 1478 }
1479 1479
1480 /* Take a local wakeref for preparing to dispatch the execbuf as
1481 * we expect to access the hardware fairly frequently in the
1482 * process. Upon first dispatch, we acquire another prolonged
1483 * wakeref that we hold until the GPU has been idle for at least
1484 * 100ms.
1485 */
1480 intel_runtime_pm_get(dev_priv); 1486 intel_runtime_pm_get(dev_priv);
1481 1487
1482 ret = i915_mutex_lock_interruptible(dev); 1488 ret = i915_mutex_lock_interruptible(dev);
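The comment above distinguishes a short-lived wakeref covering execbuf setup from a prolonged one held while the GPU is busy. The helpers for the latter live outside this hunk; a hedged sketch of the assumed shape, built on the gt.awake flag and gt.retire_work introduced elsewhere in this series:

/* Hedged sketch, not code from this diff: the first dispatch pins
 * runtime PM without forcing a resume (the device is already awake
 * here), and the idle worker drops the reference once the GPU has
 * been idle for at least 100ms.
 */
static void mark_busy(struct drm_i915_private *dev_priv)
{
	if (dev_priv->gt.awake)
		return;

	intel_runtime_pm_get_noresume(dev_priv);
	dev_priv->gt.awake = true;

	queue_delayed_work(dev_priv->wq, &dev_priv->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}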
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 2b6bdc267fb5..251d7a95af89 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -58,7 +58,7 @@
58static void i965_write_fence_reg(struct drm_device *dev, int reg, 58static void i965_write_fence_reg(struct drm_device *dev, int reg,
59 struct drm_i915_gem_object *obj) 59 struct drm_i915_gem_object *obj)
60{ 60{
61 struct drm_i915_private *dev_priv = dev->dev_private; 61 struct drm_i915_private *dev_priv = to_i915(dev);
62 i915_reg_t fence_reg_lo, fence_reg_hi; 62 i915_reg_t fence_reg_lo, fence_reg_hi;
63 int fence_pitch_shift; 63 int fence_pitch_shift;
64 64
@@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
117static void i915_write_fence_reg(struct drm_device *dev, int reg, 117static void i915_write_fence_reg(struct drm_device *dev, int reg,
118 struct drm_i915_gem_object *obj) 118 struct drm_i915_gem_object *obj)
119{ 119{
120 struct drm_i915_private *dev_priv = dev->dev_private; 120 struct drm_i915_private *dev_priv = to_i915(dev);
121 u32 val; 121 u32 val;
122 122
123 if (obj) { 123 if (obj) {
@@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
156static void i830_write_fence_reg(struct drm_device *dev, int reg, 156static void i830_write_fence_reg(struct drm_device *dev, int reg,
157 struct drm_i915_gem_object *obj) 157 struct drm_i915_gem_object *obj)
158{ 158{
159 struct drm_i915_private *dev_priv = dev->dev_private; 159 struct drm_i915_private *dev_priv = to_i915(dev);
160 uint32_t val; 160 uint32_t val;
161 161
162 if (obj) { 162 if (obj) {
@@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
193static void i915_gem_write_fence(struct drm_device *dev, int reg, 193static void i915_gem_write_fence(struct drm_device *dev, int reg,
194 struct drm_i915_gem_object *obj) 194 struct drm_i915_gem_object *obj)
195{ 195{
196 struct drm_i915_private *dev_priv = dev->dev_private; 196 struct drm_i915_private *dev_priv = to_i915(dev);
197 197
198 /* Ensure that all CPU reads are completed before installing a fence 198 /* Ensure that all CPU reads are completed before installing a fence
199 * and all writes before removing the fence. 199 * and all writes before removing the fence.
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
229 struct drm_i915_fence_reg *fence, 229 struct drm_i915_fence_reg *fence,
230 bool enable) 230 bool enable)
231{ 231{
232 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 232 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
233 int reg = fence_number(dev_priv, fence); 233 int reg = fence_number(dev_priv, fence);
234 234
235 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); 235 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
286int 286int
287i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 287i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
288{ 288{
289 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 289 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
290 struct drm_i915_fence_reg *fence; 290 struct drm_i915_fence_reg *fence;
291 int ret; 291 int ret;
292 292
@@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
311static struct drm_i915_fence_reg * 311static struct drm_i915_fence_reg *
312i915_find_fence_reg(struct drm_device *dev) 312i915_find_fence_reg(struct drm_device *dev)
313{ 313{
314 struct drm_i915_private *dev_priv = dev->dev_private; 314 struct drm_i915_private *dev_priv = to_i915(dev);
315 struct drm_i915_fence_reg *reg, *avail; 315 struct drm_i915_fence_reg *reg, *avail;
316 int i; 316 int i;
317 317
@@ -367,7 +367,7 @@ int
367i915_gem_object_get_fence(struct drm_i915_gem_object *obj) 367i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
368{ 368{
369 struct drm_device *dev = obj->base.dev; 369 struct drm_device *dev = obj->base.dev;
370 struct drm_i915_private *dev_priv = dev->dev_private; 370 struct drm_i915_private *dev_priv = to_i915(dev);
371 bool enable = obj->tiling_mode != I915_TILING_NONE; 371 bool enable = obj->tiling_mode != I915_TILING_NONE;
372 struct drm_i915_fence_reg *reg; 372 struct drm_i915_fence_reg *reg;
373 int ret; 373 int ret;
@@ -433,7 +433,7 @@ bool
433i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) 433i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
434{ 434{
435 if (obj->fence_reg != I915_FENCE_REG_NONE) { 435 if (obj->fence_reg != I915_FENCE_REG_NONE) {
436 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 436 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
437 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); 437 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
438 438
439 WARN_ON(!ggtt_vma || 439 WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
457i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) 457i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
458{ 458{
459 if (obj->fence_reg != I915_FENCE_REG_NONE) { 459 if (obj->fence_reg != I915_FENCE_REG_NONE) {
460 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 460 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
461 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); 461 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
462 dev_priv->fence_regs[obj->fence_reg].pin_count--; 462 dev_priv->fence_regs[obj->fence_reg].pin_count--;
463 } 463 }
@@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
472 */ 472 */
473void i915_gem_restore_fences(struct drm_device *dev) 473void i915_gem_restore_fences(struct drm_device *dev)
474{ 474{
475 struct drm_i915_private *dev_priv = dev->dev_private; 475 struct drm_i915_private *dev_priv = to_i915(dev);
476 int i; 476 int i;
477 477
478 for (i = 0; i < dev_priv->num_fence_regs; i++) { 478 for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
549void 549void
550i915_gem_detect_bit_6_swizzle(struct drm_device *dev) 550i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
551{ 551{
552 struct drm_i915_private *dev_priv = dev->dev_private; 552 struct drm_i915_private *dev_priv = to_i915(dev);
553 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 553 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
554 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 554 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
555 555
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5890017b9832..10f1e32767e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -153,7 +153,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
153#endif 153#endif
154 154
155 /* Early VLV doesn't have this */ 155 /* Early VLV doesn't have this */
156 if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) { 156 if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
157 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); 157 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
158 return 0; 158 return 0;
159 } 159 }
@@ -1570,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1570 struct i915_page_table *unused; 1570 struct i915_page_table *unused;
1571 gen6_pte_t scratch_pte; 1571 gen6_pte_t scratch_pte;
1572 uint32_t pd_entry; 1572 uint32_t pd_entry;
1573 uint32_t pte, pde, temp; 1573 uint32_t pte, pde;
1574 uint32_t start = ppgtt->base.start, length = ppgtt->base.total; 1574 uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
1575 1575
1576 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), 1576 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1577 I915_CACHE_LLC, true, 0); 1577 I915_CACHE_LLC, true, 0);
1578 1578
1579 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) { 1579 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
1580 u32 expected; 1580 u32 expected;
1581 gen6_pte_t *pt_vaddr; 1581 gen6_pte_t *pt_vaddr;
1582 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); 1582 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
@@ -1640,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1640{ 1640{
1641 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1641 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1642 struct i915_page_table *pt; 1642 struct i915_page_table *pt;
1643 uint32_t pde, temp; 1643 uint32_t pde;
1644 1644
1645 gen6_for_each_pde(pt, pd, start, length, temp, pde) 1645 gen6_for_each_pde(pt, pd, start, length, pde)
1646 gen6_write_pde(pd, pde, pt); 1646 gen6_write_pde(pd, pde, pt);
1647 1647
1648 /* Make sure write is complete before other code can use this page 1648 /* Make sure write is complete before other code can use this page
@@ -1683,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1683 return 0; 1683 return 0;
1684} 1684}
1685 1685
1686static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
1687 struct drm_i915_gem_request *req)
1688{
1689 struct intel_engine_cs *engine = req->engine;
1690 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1691
1692 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1693 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1694 return 0;
1695}
1696
1697static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 1686static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1698 struct drm_i915_gem_request *req) 1687 struct drm_i915_gem_request *req)
1699{ 1688{
@@ -1731,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1731 struct drm_i915_gem_request *req) 1720 struct drm_i915_gem_request *req)
1732{ 1721{
1733 struct intel_engine_cs *engine = req->engine; 1722 struct intel_engine_cs *engine = req->engine;
1734 struct drm_device *dev = ppgtt->base.dev; 1723 struct drm_i915_private *dev_priv = req->i915;
1735 struct drm_i915_private *dev_priv = dev->dev_private;
1736
1737 1724
1738 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); 1725 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1739 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); 1726 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1740
1741 POSTING_READ(RING_PP_DIR_DCLV(engine));
1742
1743 return 0; 1727 return 0;
1744} 1728}
1745 1729
1746static void gen8_ppgtt_enable(struct drm_device *dev) 1730static void gen8_ppgtt_enable(struct drm_device *dev)
1747{ 1731{
1748 struct drm_i915_private *dev_priv = dev->dev_private; 1732 struct drm_i915_private *dev_priv = to_i915(dev);
1749 struct intel_engine_cs *engine; 1733 struct intel_engine_cs *engine;
1750 1734
1751 for_each_engine(engine, dev_priv) { 1735 for_each_engine(engine, dev_priv) {
@@ -1757,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
1757 1741
1758static void gen7_ppgtt_enable(struct drm_device *dev) 1742static void gen7_ppgtt_enable(struct drm_device *dev)
1759{ 1743{
1760 struct drm_i915_private *dev_priv = dev->dev_private; 1744 struct drm_i915_private *dev_priv = to_i915(dev);
1761 struct intel_engine_cs *engine; 1745 struct intel_engine_cs *engine;
1762 uint32_t ecochk, ecobits; 1746 uint32_t ecochk, ecobits;
1763 1747
@@ -1782,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
1782 1766
1783static void gen6_ppgtt_enable(struct drm_device *dev) 1767static void gen6_ppgtt_enable(struct drm_device *dev)
1784{ 1768{
1785 struct drm_i915_private *dev_priv = dev->dev_private; 1769 struct drm_i915_private *dev_priv = to_i915(dev);
1786 uint32_t ecochk, gab_ctl, ecobits; 1770 uint32_t ecochk, gab_ctl, ecobits;
1787 1771
1788 ecobits = I915_READ(GAC_ECO_BITS); 1772 ecobits = I915_READ(GAC_ECO_BITS);
@@ -1875,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1875 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1859 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1876 struct i915_page_table *pt; 1860 struct i915_page_table *pt;
1877 uint32_t start, length, start_save, length_save; 1861 uint32_t start, length, start_save, length_save;
1878 uint32_t pde, temp; 1862 uint32_t pde;
1879 int ret; 1863 int ret;
1880 1864
1881 if (WARN_ON(start_in + length_in > ppgtt->base.total)) 1865 if (WARN_ON(start_in + length_in > ppgtt->base.total))
@@ -1891,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1891 * need allocation. The second stage marks use ptes within the page 1875 * need allocation. The second stage marks use ptes within the page
1892 * tables. 1876 * tables.
1893 */ 1877 */
1894 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { 1878 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
1895 if (pt != vm->scratch_pt) { 1879 if (pt != vm->scratch_pt) {
1896 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); 1880 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1897 continue; 1881 continue;
@@ -1916,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1916 start = start_save; 1900 start = start_save;
1917 length = length_save; 1901 length = length_save;
1918 1902
1919 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { 1903 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
1920 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); 1904 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1921 1905
1922 bitmap_zero(tmp_bitmap, GEN6_PTES); 1906 bitmap_zero(tmp_bitmap, GEN6_PTES);
@@ -1985,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm)
1985static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1969static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1986{ 1970{
1987 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1971 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1972 struct i915_page_directory *pd = &ppgtt->pd;
1973 struct drm_device *dev = vm->dev;
1988 struct i915_page_table *pt; 1974 struct i915_page_table *pt;
1989 uint32_t pde; 1975 uint32_t pde;
1990 1976
1991 drm_mm_remove_node(&ppgtt->node); 1977 drm_mm_remove_node(&ppgtt->node);
1992 1978
1993 gen6_for_all_pdes(pt, ppgtt, pde) { 1979 gen6_for_all_pdes(pt, pd, pde)
1994 if (pt != vm->scratch_pt) 1980 if (pt != vm->scratch_pt)
1995 free_pt(ppgtt->base.dev, pt); 1981 free_pt(dev, pt);
1996 }
1997 1982
1998 gen6_free_scratch(vm); 1983 gen6_free_scratch(vm);
1999} 1984}
@@ -2059,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2059 uint64_t start, uint64_t length) 2044 uint64_t start, uint64_t length)
2060{ 2045{
2061 struct i915_page_table *unused; 2046 struct i915_page_table *unused;
2062 uint32_t pde, temp; 2047 uint32_t pde;
2063 2048
2064 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) 2049 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
2065 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; 2050 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2066} 2051}
2067 2052
@@ -2073,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2073 int ret; 2058 int ret;
2074 2059
2075 ppgtt->base.pte_encode = ggtt->base.pte_encode; 2060 ppgtt->base.pte_encode = ggtt->base.pte_encode;
2076 if (IS_GEN6(dev)) { 2061 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
2077 ppgtt->switch_mm = gen6_mm_switch; 2062 ppgtt->switch_mm = gen6_mm_switch;
2078 } else if (IS_HASWELL(dev)) { 2063 else if (IS_HASWELL(dev))
2079 ppgtt->switch_mm = hsw_mm_switch; 2064 ppgtt->switch_mm = hsw_mm_switch;
2080 } else if (IS_GEN7(dev)) { 2065 else if (IS_GEN7(dev))
2081 ppgtt->switch_mm = gen7_mm_switch; 2066 ppgtt->switch_mm = gen7_mm_switch;
2082 } else 2067 else
2083 BUG(); 2068 BUG();
2084 2069
2085 if (intel_vgpu_active(dev_priv))
2086 ppgtt->switch_mm = vgpu_mm_switch;
2087
2088 ret = gen6_ppgtt_alloc(ppgtt); 2070 ret = gen6_ppgtt_alloc(ppgtt);
2089 if (ret) 2071 if (ret)
2090 return ret; 2072 return ret;
@@ -2133,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
2133 struct drm_i915_private *dev_priv) 2115 struct drm_i915_private *dev_priv)
2134{ 2116{
2135 drm_mm_init(&vm->mm, vm->start, vm->total); 2117 drm_mm_init(&vm->mm, vm->start, vm->total);
2136 vm->dev = dev_priv->dev; 2118 vm->dev = &dev_priv->drm;
2137 INIT_LIST_HEAD(&vm->active_list); 2119 INIT_LIST_HEAD(&vm->active_list);
2138 INIT_LIST_HEAD(&vm->inactive_list); 2120 INIT_LIST_HEAD(&vm->inactive_list);
2139 list_add_tail(&vm->global_link, &dev_priv->vm_list); 2121 list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -2141,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
2141 2123
2142static void gtt_write_workarounds(struct drm_device *dev) 2124static void gtt_write_workarounds(struct drm_device *dev)
2143{ 2125{
2144 struct drm_i915_private *dev_priv = dev->dev_private; 2126 struct drm_i915_private *dev_priv = to_i915(dev);
2145 2127
2146 /* This function is for gtt related workarounds. This function is 2128 /* This function is for gtt related workarounds. This function is
2147 * called on driver load and after a GPU reset, so you can place 2129 * called on driver load and after a GPU reset, so you can place
@@ -2160,7 +2142,7 @@ static void gtt_write_workarounds(struct drm_device *dev)
2160 2142
2161static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2143static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2162{ 2144{
2163 struct drm_i915_private *dev_priv = dev->dev_private; 2145 struct drm_i915_private *dev_priv = to_i915(dev);
2164 int ret = 0; 2146 int ret = 0;
2165 2147
2166 ret = __hw_ppgtt_init(dev, ppgtt); 2148 ret = __hw_ppgtt_init(dev, ppgtt);
@@ -2261,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
2261 2243
2262 if (unlikely(ggtt->do_idle_maps)) { 2244 if (unlikely(ggtt->do_idle_maps)) {
2263 dev_priv->mm.interruptible = false; 2245 dev_priv->mm.interruptible = false;
2264 if (i915_gpu_idle(dev_priv->dev)) { 2246 if (i915_gem_wait_for_idle(dev_priv)) {
2265 DRM_ERROR("Couldn't idle GPU\n"); 2247 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2266 /* Wait a bit, in hopes it avoids the hang */ 2248 /* Wait a bit, in hopes it avoids the hang */
2267 udelay(10); 2249 udelay(10);
2268 } 2250 }
@@ -2610,7 +2592,7 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2610 uint64_t start, 2592 uint64_t start,
2611 enum i915_cache_level cache_level, u32 unused) 2593 enum i915_cache_level cache_level, u32 unused)
2612{ 2594{
2613 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2595 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2614 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 2596 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2615 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2597 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2616 int rpm_atomic_seq; 2598 int rpm_atomic_seq;
@@ -2628,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
2628 uint64_t length, 2610 uint64_t length,
2629 bool unused) 2611 bool unused)
2630{ 2612{
2631 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2613 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2632 unsigned first_entry = start >> PAGE_SHIFT; 2614 unsigned first_entry = start >> PAGE_SHIFT;
2633 unsigned num_entries = length >> PAGE_SHIFT; 2615 unsigned num_entries = length >> PAGE_SHIFT;
2634 int rpm_atomic_seq; 2616 int rpm_atomic_seq;
@@ -2709,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2709static void ggtt_unbind_vma(struct i915_vma *vma) 2691static void ggtt_unbind_vma(struct i915_vma *vma)
2710{ 2692{
2711 struct drm_device *dev = vma->vm->dev; 2693 struct drm_device *dev = vma->vm->dev;
2712 struct drm_i915_private *dev_priv = dev->dev_private; 2694 struct drm_i915_private *dev_priv = to_i915(dev);
2713 struct drm_i915_gem_object *obj = vma->obj; 2695 struct drm_i915_gem_object *obj = vma->obj;
2714 const uint64_t size = min_t(uint64_t, 2696 const uint64_t size = min_t(uint64_t,
2715 obj->base.size, 2697 obj->base.size,
@@ -2735,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
2735void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) 2717void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
2736{ 2718{
2737 struct drm_device *dev = obj->base.dev; 2719 struct drm_device *dev = obj->base.dev;
2738 struct drm_i915_private *dev_priv = dev->dev_private; 2720 struct drm_i915_private *dev_priv = to_i915(dev);
2739 bool interruptible; 2721 bool interruptible;
2740 2722
2741 interruptible = do_idling(dev_priv); 2723 interruptible = do_idling(dev_priv);
@@ -3137,7 +3119,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3137 ggtt->base.unbind_vma = ggtt_unbind_vma; 3119 ggtt->base.unbind_vma = ggtt_unbind_vma;
3138 ggtt->base.insert_page = gen8_ggtt_insert_page; 3120 ggtt->base.insert_page = gen8_ggtt_insert_page;
3139 ggtt->base.clear_range = nop_clear_range; 3121 ggtt->base.clear_range = nop_clear_range;
3140 if (!USES_FULL_PPGTT(dev_priv)) 3122 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3141 ggtt->base.clear_range = gen8_ggtt_clear_range; 3123 ggtt->base.clear_range = gen8_ggtt_clear_range;
3142 3124
3143 ggtt->base.insert_entries = gen8_ggtt_insert_entries; 3125 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
@@ -3197,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
3197 struct drm_i915_private *dev_priv = to_i915(dev); 3179 struct drm_i915_private *dev_priv = to_i915(dev);
3198 int ret; 3180 int ret;
3199 3181
3200 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); 3182 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3201 if (!ret) { 3183 if (!ret) {
3202 DRM_ERROR("failed to set up gmch\n"); 3184 DRM_ERROR("failed to set up gmch\n");
3203 return -EIO; 3185 return -EIO;
@@ -3206,7 +3188,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
3206 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, 3188 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
3207 &ggtt->mappable_base, &ggtt->mappable_end); 3189 &ggtt->mappable_base, &ggtt->mappable_end);
3208 3190
3209 ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); 3191 ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
3210 ggtt->base.insert_page = i915_ggtt_insert_page; 3192 ggtt->base.insert_page = i915_ggtt_insert_page;
3211 ggtt->base.insert_entries = i915_ggtt_insert_entries; 3193 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3212 ggtt->base.clear_range = i915_ggtt_clear_range; 3194 ggtt->base.clear_range = i915_ggtt_clear_range;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 163b564fb87d..aa5f31d1c2ed 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -390,27 +390,27 @@ struct i915_hw_ppgtt {
390 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); 390 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
391}; 391};
392 392
393/* For each pde iterates over every pde between from start until start + length. 393/*
394 * If start, and start+length are not perfectly divisible, the macro will round 394 * gen6_for_each_pde() iterates over every pde from start until start+length.
395 * down, and up as needed. The macro modifies pde, start, and length. Dev is 395 * If start and start+length are not perfectly divisible, the macro will round
396 * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0, 396 * down and up as needed. Start=0 and length=2G effectively iterates over
397 * and length = 2G effectively iterates over every PDE in the system. 397 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
398 * 398 * so each of the other parameters should preferably be a simple variable, or
399 * XXX: temp is not actually needed, but it saves doing the ALIGN operation. 399 * at most an lvalue with no side-effects!
400 */ 400 */
401#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ 401#define gen6_for_each_pde(pt, pd, start, length, iter) \
402 for (iter = gen6_pde_index(start); \ 402 for (iter = gen6_pde_index(start); \
403 length > 0 && iter < I915_PDES ? \ 403 length > 0 && iter < I915_PDES && \
404 (pt = (pd)->page_table[iter]), 1 : 0; \ 404 (pt = (pd)->page_table[iter], true); \
405 iter++, \ 405 ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
406 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ 406 temp = min(temp - start, length); \
407 temp = min_t(unsigned, temp, length), \ 407 start += temp, length -= temp; }), ++iter)
408 start += temp, length -= temp) 408
409 409#define gen6_for_all_pdes(pt, pd, iter) \
410#define gen6_for_all_pdes(pt, ppgtt, iter) \ 410 for (iter = 0; \
411 for (iter = 0; \ 411 iter < I915_PDES && \
412 pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \ 412 (pt = (pd)->page_table[iter], true); \
413 iter++) 413 ++iter)
414 414
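With the aligned stride now computed inside a statement expression, callers of gen6_for_each_pde() drop the old 'temp' scratch argument. A minimal usage sketch, assuming a gen6 i915_hw_ppgtt and the helpers visible in the i915_gem_gtt.c hunks above:

	struct i915_page_table *pt;
	uint32_t pde;

	/* walk only the PDEs covering [start, start + length) */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt == ppgtt->base.scratch_pt)
			continue;	/* no page table allocated here yet */

		gen6_write_pde(&ppgtt->pd, pde, pt);
	}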
415static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) 415static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
416{ 416{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index b7c1b5fb61ea..f75bbd67a13a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -58,7 +58,7 @@ static int render_state_init(struct render_state *so,
58 if (so->rodata->batch_items * 4 > 4096) 58 if (so->rodata->batch_items * 4 > 4096)
59 return -EINVAL; 59 return -EINVAL;
60 60
61 so->obj = i915_gem_object_create(dev_priv->dev, 4096); 61 so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
62 if (IS_ERR(so->obj)) 62 if (IS_ERR(so->obj))
63 return PTR_ERR(so->obj); 63 return PTR_ERR(so->obj);
64 64
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 538c30499848..067632ad2f29 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -257,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
257{ 257{
258 struct drm_i915_private *dev_priv = 258 struct drm_i915_private *dev_priv =
259 container_of(shrinker, struct drm_i915_private, mm.shrinker); 259 container_of(shrinker, struct drm_i915_private, mm.shrinker);
260 struct drm_device *dev = dev_priv->dev; 260 struct drm_device *dev = &dev_priv->drm;
261 struct drm_i915_gem_object *obj; 261 struct drm_i915_gem_object *obj;
262 unsigned long count; 262 unsigned long count;
263 bool unlock; 263 bool unlock;
@@ -265,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
265 if (!i915_gem_shrinker_lock(dev, &unlock)) 265 if (!i915_gem_shrinker_lock(dev, &unlock))
266 return 0; 266 return 0;
267 267
268 i915_gem_retire_requests(dev_priv);
269
268 count = 0; 270 count = 0;
269 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) 271 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
270 if (can_release_pages(obj)) 272 if (can_release_pages(obj))
@@ -286,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
286{ 288{
287 struct drm_i915_private *dev_priv = 289 struct drm_i915_private *dev_priv =
288 container_of(shrinker, struct drm_i915_private, mm.shrinker); 290 container_of(shrinker, struct drm_i915_private, mm.shrinker);
289 struct drm_device *dev = dev_priv->dev; 291 struct drm_device *dev = &dev_priv->drm;
290 unsigned long freed; 292 unsigned long freed;
291 bool unlock; 293 bool unlock;
292 294
@@ -321,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
321{ 323{
322 unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1; 324 unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
323 325
324 while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) { 326 while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
325 schedule_timeout_killable(1); 327 schedule_timeout_killable(1);
326 if (fatal_signal_pending(current)) 328 if (fatal_signal_pending(current))
327 return false; 329 return false;
@@ -342,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
342{ 344{
343 dev_priv->mm.interruptible = slu->was_interruptible; 345 dev_priv->mm.interruptible = slu->was_interruptible;
344 if (slu->unlock) 346 if (slu->unlock)
345 mutex_unlock(&dev_priv->dev->struct_mutex); 347 mutex_unlock(&dev_priv->drm.struct_mutex);
346} 348}
347 349
348static int 350static int
@@ -408,7 +410,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
408 return NOTIFY_DONE; 410 return NOTIFY_DONE;
409 411
410 /* Force everything onto the inactive lists */ 412 /* Force everything onto the inactive lists */
411 ret = i915_gpu_idle(dev_priv->dev); 413 ret = i915_gem_wait_for_idle(dev_priv);
412 if (ret) 414 if (ret)
413 goto out; 415 goto out;
414 416
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index e9cd82290408..66be299a1486 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
270 270
271void i915_gem_cleanup_stolen(struct drm_device *dev) 271void i915_gem_cleanup_stolen(struct drm_device *dev)
272{ 272{
273 struct drm_i915_private *dev_priv = dev->dev_private; 273 struct drm_i915_private *dev_priv = to_i915(dev);
274 274
275 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 275 if (!drm_mm_initialized(&dev_priv->mm.stolen))
276 return; 276 return;
@@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
550static void 550static void
551i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 551i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
552{ 552{
553 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 553 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
554 554
555 if (obj->stolen) { 555 if (obj->stolen) {
556 i915_gem_stolen_remove_node(dev_priv, obj->stolen); 556 i915_gem_stolen_remove_node(dev_priv, obj->stolen);
@@ -601,7 +601,7 @@ cleanup:
601struct drm_i915_gem_object * 601struct drm_i915_gem_object *
602i915_gem_object_create_stolen(struct drm_device *dev, u32 size) 602i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
603{ 603{
604 struct drm_i915_private *dev_priv = dev->dev_private; 604 struct drm_i915_private *dev_priv = to_i915(dev);
605 struct drm_i915_gem_object *obj; 605 struct drm_i915_gem_object *obj;
606 struct drm_mm_node *stolen; 606 struct drm_mm_node *stolen;
607 int ret; 607 int ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index a6eb5c47a49c..8030199731db 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
162 struct drm_file *file) 162 struct drm_file *file)
163{ 163{
164 struct drm_i915_gem_set_tiling *args = data; 164 struct drm_i915_gem_set_tiling *args = data;
165 struct drm_i915_private *dev_priv = dev->dev_private; 165 struct drm_i915_private *dev_priv = to_i915(dev);
166 struct drm_i915_gem_object *obj; 166 struct drm_i915_gem_object *obj;
167 int ret = 0; 167 int ret = 0;
168 168
@@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
294 struct drm_file *file) 294 struct drm_file *file)
295{ 295{
296 struct drm_i915_gem_get_tiling *args = data; 296 struct drm_i915_gem_get_tiling *args = data;
297 struct drm_i915_private *dev_priv = dev->dev_private; 297 struct drm_i915_private *dev_priv = to_i915(dev);
298 struct drm_i915_gem_object *obj; 298 struct drm_i915_gem_object *obj;
299 299
300 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); 300 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 34ff2459ceea..9d73d2216adc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
332 const struct i915_error_state_file_priv *error_priv) 332 const struct i915_error_state_file_priv *error_priv)
333{ 333{
334 struct drm_device *dev = error_priv->dev; 334 struct drm_device *dev = error_priv->dev;
335 struct drm_i915_private *dev_priv = dev->dev_private; 335 struct drm_i915_private *dev_priv = to_i915(dev);
336 struct drm_i915_error_state *error = error_priv->error; 336 struct drm_i915_error_state *error = error_priv->error;
337 struct drm_i915_error_object *obj; 337 struct drm_i915_error_object *obj;
338 int i, j, offset, elt; 338 int i, j, offset, elt;
@@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
463 } 463 }
464 } 464 }
465 465
466 if (error->ring[i].num_waiters) {
467 err_printf(m, "%s --- %d waiters\n",
468 dev_priv->engine[i].name,
469 error->ring[i].num_waiters);
470 for (j = 0; j < error->ring[i].num_waiters; j++) {
471 err_printf(m, " seqno 0x%08x for %s [%d]\n",
472 error->ring[i].waiters[j].seqno,
473 error->ring[i].waiters[j].comm,
474 error->ring[i].waiters[j].pid);
475 }
476 }
477
466 if ((obj = error->ring[i].ringbuffer)) { 478 if ((obj = error->ring[i].ringbuffer)) {
467 err_printf(m, "%s --- ringbuffer = 0x%08x\n", 479 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
468 dev_priv->engine[i].name, 480 dev_priv->engine[i].name,
@@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
488 hws_page[elt+1], 500 hws_page[elt+1],
489 hws_page[elt+2], 501 hws_page[elt+2],
490 hws_page[elt+3]); 502 hws_page[elt+3]);
491 offset += 16; 503 offset += 16;
492 } 504 }
493 } 505 }
494 506
@@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref)
605 i915_error_object_free(error->ring[i].ringbuffer); 617 i915_error_object_free(error->ring[i].ringbuffer);
606 i915_error_object_free(error->ring[i].hws_page); 618 i915_error_object_free(error->ring[i].hws_page);
607 i915_error_object_free(error->ring[i].ctx); 619 i915_error_object_free(error->ring[i].ctx);
608 kfree(error->ring[i].requests);
609 i915_error_object_free(error->ring[i].wa_ctx); 620 i915_error_object_free(error->ring[i].wa_ctx);
621 kfree(error->ring[i].requests);
622 kfree(error->ring[i].waiters);
610 } 623 }
611 624
612 i915_error_object_free(error->semaphore_obj); 625 i915_error_object_free(error->semaphore_obj);
@@ -892,6 +905,48 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
892 } 905 }
893} 906}
894 907
908static void engine_record_waiters(struct intel_engine_cs *engine,
909 struct drm_i915_error_ring *ering)
910{
911 struct intel_breadcrumbs *b = &engine->breadcrumbs;
912 struct drm_i915_error_waiter *waiter;
913 struct rb_node *rb;
914 int count;
915
916 ering->num_waiters = 0;
917 ering->waiters = NULL;
918
919 spin_lock(&b->lock);
920 count = 0;
921 for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
922 count++;
923 spin_unlock(&b->lock);
924
925 waiter = NULL;
926 if (count)
927 waiter = kmalloc_array(count,
928 sizeof(struct drm_i915_error_waiter),
929 GFP_ATOMIC);
930 if (!waiter)
931 return;
932
933 ering->waiters = waiter;
934
935 spin_lock(&b->lock);
936 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
937 struct intel_wait *w = container_of(rb, typeof(*w), node);
938
939 strcpy(waiter->comm, w->tsk->comm);
940 waiter->pid = w->tsk->pid;
941 waiter->seqno = w->seqno;
942 waiter++;
943
944 if (++ering->num_waiters == count)
945 break;
946 }
947 spin_unlock(&b->lock);
948}
949
895static void i915_record_ring_state(struct drm_i915_private *dev_priv, 950static void i915_record_ring_state(struct drm_i915_private *dev_priv,
896 struct drm_i915_error_state *error, 951 struct drm_i915_error_state *error,
897 struct intel_engine_cs *engine, 952 struct intel_engine_cs *engine,
@@ -926,10 +981,10 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
926 ering->instdone = I915_READ(GEN2_INSTDONE); 981 ering->instdone = I915_READ(GEN2_INSTDONE);
927 } 982 }
928 983
929 ering->waiting = waitqueue_active(&engine->irq_queue); 984 ering->waiting = intel_engine_has_waiter(engine);
930 ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); 985 ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
931 ering->acthd = intel_ring_get_active_head(engine); 986 ering->acthd = intel_ring_get_active_head(engine);
932 ering->seqno = engine->get_seqno(engine); 987 ering->seqno = intel_engine_get_seqno(engine);
933 ering->last_seqno = engine->last_submitted_seqno; 988 ering->last_seqno = engine->last_submitted_seqno;
934 ering->start = I915_READ_START(engine); 989 ering->start = I915_READ_START(engine);
935 ering->head = I915_READ_HEAD(engine); 990 ering->head = I915_READ_HEAD(engine);
@@ -1022,7 +1077,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
1022 1077
1023 for (i = 0; i < I915_NUM_ENGINES; i++) { 1078 for (i = 0; i < I915_NUM_ENGINES; i++) {
1024 struct intel_engine_cs *engine = &dev_priv->engine[i]; 1079 struct intel_engine_cs *engine = &dev_priv->engine[i];
1025 struct intel_ringbuffer *rbuf;
1026 1080
1027 error->ring[i].pid = -1; 1081 error->ring[i].pid = -1;
1028 1082
@@ -1032,14 +1086,15 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
1032 error->ring[i].valid = true; 1086 error->ring[i].valid = true;
1033 1087
1034 i915_record_ring_state(dev_priv, error, engine, &error->ring[i]); 1088 i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
1089 engine_record_waiters(engine, &error->ring[i]);
1035 1090
1036 request = i915_gem_find_active_request(engine); 1091 request = i915_gem_find_active_request(engine);
1037 if (request) { 1092 if (request) {
1038 struct i915_address_space *vm; 1093 struct i915_address_space *vm;
1094 struct intel_ringbuffer *rb;
1039 1095
1040 vm = request->ctx && request->ctx->ppgtt ? 1096 vm = request->ctx->ppgtt ?
1041 &request->ctx->ppgtt->base : 1097 &request->ctx->ppgtt->base : &ggtt->base;
1042 &ggtt->base;
1043 1098
1044 /* We need to copy these to an anonymous buffer 1099 /* We need to copy these to an anonymous buffer
1045 * as the simplest method to avoid being overwritten 1100 * as the simplest method to avoid being overwritten
@@ -1066,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
1066 } 1121 }
1067 rcu_read_unlock(); 1122 rcu_read_unlock();
1068 } 1123 }
1069 }
1070
1071 if (i915.enable_execlists) {
1072 /* TODO: This is only a small fix to keep basic error
1073 * capture working, but we need to add more information
1074 * for it to be useful (e.g. dump the context being
1075 * executed).
1076 */
1077 if (request)
1078 rbuf = request->ctx->engine[engine->id].ringbuf;
1079 else
1080 rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
1081 } else
1082 rbuf = engine->buffer;
1083 1124
1084 error->ring[i].cpu_ring_head = rbuf->head; 1125 error->simulated |=
1085 error->ring[i].cpu_ring_tail = rbuf->tail; 1126 request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
1086 1127
1087 error->ring[i].ringbuffer = 1128 rb = request->ringbuf;
1088 i915_error_ggtt_object_create(dev_priv, rbuf->obj); 1129 error->ring[i].cpu_ring_head = rb->head;
1130 error->ring[i].cpu_ring_tail = rb->tail;
1131 error->ring[i].ringbuffer =
1132 i915_error_ggtt_object_create(dev_priv,
1133 rb->obj);
1134 }
1089 1135
1090 error->ring[i].hws_page = 1136 error->ring[i].hws_page =
1091 i915_error_ggtt_object_create(dev_priv, 1137 i915_error_ggtt_object_create(dev_priv,
@@ -1230,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
1230static void i915_capture_reg_state(struct drm_i915_private *dev_priv, 1276static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1231 struct drm_i915_error_state *error) 1277 struct drm_i915_error_state *error)
1232{ 1278{
1233 struct drm_device *dev = dev_priv->dev; 1279 struct drm_device *dev = &dev_priv->drm;
1234 int i; 1280 int i;
1235 1281
1236 /* General organization 1282 /* General organization
@@ -1355,6 +1401,9 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
1355 struct drm_i915_error_state *error; 1401 struct drm_i915_error_state *error;
1356 unsigned long flags; 1402 unsigned long flags;
1357 1403
1404 if (READ_ONCE(dev_priv->gpu_error.first_error))
1405 return;
1406
1358 /* Account for pipe specific data like PIPE*STAT */ 1407 /* Account for pipe specific data like PIPE*STAT */
1359 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1408 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1360 if (!error) { 1409 if (!error) {
@@ -1378,12 +1427,14 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
1378 i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); 1427 i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
1379 DRM_INFO("%s\n", error->error_msg); 1428 DRM_INFO("%s\n", error->error_msg);
1380 1429
1381 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1430 if (!error->simulated) {
1382 if (dev_priv->gpu_error.first_error == NULL) { 1431 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1383 dev_priv->gpu_error.first_error = error; 1432 if (!dev_priv->gpu_error.first_error) {
1384 error = NULL; 1433 dev_priv->gpu_error.first_error = error;
1434 error = NULL;
1435 }
1436 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1385 } 1437 }
1386 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1387 1438
1388 if (error) { 1439 if (error) {
1389 i915_error_state_free(&error->ref); 1440 i915_error_state_free(&error->ref);
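
The READ_ONCE(first_error) early-out added above, together with the relocked
publication just below it, forms a double-checked pattern: an unlocked peek
skips the expensive capture when an error report already exists, and the
spinlocked recheck keeps the actual store race-free. A minimal userspace
analogue of that shape, assuming C11 atomics and a pthread mutex in place of
READ_ONCE and the spinlock (a sketch, not the kernel code):

/*
 * Userspace analogue of the shape, not the kernel code: a relaxed
 * unlocked peek (READ_ONCE in the kernel) skips the expensive capture,
 * and the locked recheck keeps the single publication race-free.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct error_slot {
	pthread_mutex_t lock;
	_Atomic(void *) first_error;
};

static void publish_error(struct error_slot *s, void *(*capture)(void))
{
	void *err;

	/* Racy fast path: good enough to avoid duplicate captures. */
	if (atomic_load_explicit(&s->first_error, memory_order_relaxed))
		return;

	err = capture();	/* the expensive part */
	if (!err)
		return;

	pthread_mutex_lock(&s->lock);
	if (!atomic_load_explicit(&s->first_error, memory_order_relaxed))
		atomic_store(&s->first_error, err);
	else
		free(err);	/* lost the race, like the free path above */
	pthread_mutex_unlock(&s->lock);
}
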
@@ -1395,7 +1446,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
1395 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); 1446 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1396 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1447 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1397 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 1448 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
1398 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index); 1449 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1450 dev_priv->drm.primary->index);
1399 warned = true; 1451 warned = true;
1400 } 1452 }
1401} 1453}
@@ -1403,7 +1455,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
1403void i915_error_state_get(struct drm_device *dev, 1455void i915_error_state_get(struct drm_device *dev,
1404 struct i915_error_state_file_priv *error_priv) 1456 struct i915_error_state_file_priv *error_priv)
1405{ 1457{
1406 struct drm_i915_private *dev_priv = dev->dev_private; 1458 struct drm_i915_private *dev_priv = to_i915(dev);
1407 1459
1408 spin_lock_irq(&dev_priv->gpu_error.lock); 1460 spin_lock_irq(&dev_priv->gpu_error.lock);
1409 error_priv->error = dev_priv->gpu_error.first_error; 1461 error_priv->error = dev_priv->gpu_error.first_error;
@@ -1421,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
1421 1473
1422void i915_destroy_error_state(struct drm_device *dev) 1474void i915_destroy_error_state(struct drm_device *dev)
1423{ 1475{
1424 struct drm_i915_private *dev_priv = dev->dev_private; 1476 struct drm_i915_private *dev_priv = to_i915(dev);
1425 struct drm_i915_error_state *error; 1477 struct drm_i915_error_state *error;
1426 1478
1427 spin_lock_irq(&dev_priv->gpu_error.lock); 1479 spin_lock_irq(&dev_priv->gpu_error.lock);
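
engine_record_waiters() above snapshots the breadcrumbs rb-tree in two
passes: count under the spinlock, drop it to allocate (with GFP_ATOMIC, as
capture may run in atomic context), then re-walk and copy, never writing past
the counted size even if waiters arrived in between. A minimal userspace
sketch of that pattern, assuming a singly linked list and a pthread mutex in
place of the rb-tree and spinlock (names here are illustrative):

/*
 * Userspace sketch (not the kernel code) of the two-pass capture in
 * engine_record_waiters(): count under the lock, drop it to allocate,
 * then re-walk and copy, capping at the counted size in case the list
 * grew in between.
 */
#include <pthread.h>
#include <stdlib.h>

struct waiter {
	int pid;
	unsigned int seqno;
	struct waiter *next;
};

struct breadcrumbs {
	pthread_mutex_t lock;
	struct waiter *head;
};

struct waiter_snapshot {
	int pid;
	unsigned int seqno;
};

static struct waiter_snapshot *
capture_waiters(struct breadcrumbs *b, int *num)
{
	struct waiter_snapshot *snap, *out;
	struct waiter *w;
	int count = 0;

	*num = 0;

	/* Pass 1: count while holding the lock. */
	pthread_mutex_lock(&b->lock);
	for (w = b->head; w; w = w->next)
		count++;
	pthread_mutex_unlock(&b->lock);

	if (!count)
		return NULL;

	/* Allocate outside the lock; the kernel uses GFP_ATOMIC here. */
	snap = calloc(count, sizeof(*snap));
	if (!snap)
		return NULL;

	/* Pass 2: copy, never writing past the first allocation. */
	pthread_mutex_lock(&b->lock);
	out = snap;
	for (w = b->head; w; w = w->next) {
		out->pid = w->pid;
		out->seqno = w->seqno;
		out++;
		if (++(*num) == count)
			break;
	}
	pthread_mutex_unlock(&b->lock);

	return snap;
}

Capping the copy at the first count keeps the allocation authoritative; a
snapshot that misses a late-arriving waiter is acceptable for a crash dump.
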
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 22a55ac4e51c..2112e029db6a 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
97 97
98 I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER); 98 I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
99 99
100 /* No HOST2GUC command should take longer than 10ms */ 100 /*
101 ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10); 101 * Fast commands should complete in less than 10us, so sample quickly
102 * up to that length of time, then switch to a slower sleep-wait loop.
103 * No HOST2GUC command should ever take longer than 10ms.
104 */
105 ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
106 if (ret)
107 ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
102 if (status != GUC2HOST_STATUS_SUCCESS) { 108 if (status != GUC2HOST_STATUS_SUCCESS) {
103 /* 109 /*
104 * Either the GuC explicitly returned an error (which 110 * Either the GuC explicitly returned an error (which
@@ -153,12 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
153 struct i915_guc_client *client) 159 struct i915_guc_client *client)
154{ 160{
155 struct drm_i915_private *dev_priv = guc_to_i915(guc); 161 struct drm_i915_private *dev_priv = guc_to_i915(guc);
156 struct drm_device *dev = dev_priv->dev;
157 u32 data[2]; 162 u32 data[2];
158 163
159 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; 164 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
160 /* WaRsDisableCoarsePowerGating:skl,bxt */ 165 /* WaRsDisableCoarsePowerGating:skl,bxt */
161 if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev)) 166 if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
162 data[1] = 0; 167 data[1] = 0;
163 else 168 else
164 /* bit 0 and 1 are for Render and Media domain separately */ 169 /* bit 0 and 1 are for Render and Media domain separately */
@@ -582,7 +587,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
582 */ 587 */
583int i915_guc_submit(struct drm_i915_gem_request *rq) 588int i915_guc_submit(struct drm_i915_gem_request *rq)
584{ 589{
585 unsigned int engine_id = rq->engine->guc_id; 590 unsigned int engine_id = rq->engine->id;
586 struct intel_guc *guc = &rq->i915->guc; 591 struct intel_guc *guc = &rq->i915->guc;
587 struct i915_guc_client *client = guc->execbuf_client; 592 struct i915_guc_client *client = guc->execbuf_client;
588 int b_ret; 593 int b_ret;
@@ -623,7 +628,7 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
623{ 628{
624 struct drm_i915_gem_object *obj; 629 struct drm_i915_gem_object *obj;
625 630
626 obj = i915_gem_object_create(dev_priv->dev, size); 631 obj = i915_gem_object_create(&dev_priv->drm, size);
627 if (IS_ERR(obj)) 632 if (IS_ERR(obj))
628 return NULL; 633 return NULL;
629 634
@@ -1034,7 +1039,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
1034 */ 1039 */
1035int intel_guc_suspend(struct drm_device *dev) 1040int intel_guc_suspend(struct drm_device *dev)
1036{ 1041{
1037 struct drm_i915_private *dev_priv = dev->dev_private; 1042 struct drm_i915_private *dev_priv = to_i915(dev);
1038 struct intel_guc *guc = &dev_priv->guc; 1043 struct intel_guc *guc = &dev_priv->guc;
1039 struct i915_gem_context *ctx; 1044 struct i915_gem_context *ctx;
1040 u32 data[3]; 1045 u32 data[3];
@@ -1060,7 +1065,7 @@ int intel_guc_suspend(struct drm_device *dev)
1060 */ 1065 */
1061int intel_guc_resume(struct drm_device *dev) 1066int intel_guc_resume(struct drm_device *dev)
1062{ 1067{
1063 struct drm_i915_private *dev_priv = dev->dev_private; 1068 struct drm_i915_private *dev_priv = to_i915(dev);
1064 struct intel_guc *guc = &dev_priv->guc; 1069 struct intel_guc *guc = &dev_priv->guc;
1065 struct i915_gem_context *ctx; 1070 struct i915_gem_context *ctx;
1066 u32 data[3]; 1071 u32 data[3];
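
The reworked host2guc_action() above waits in two stages: a tight poll for up
to 10us catches the common fast completion, then a sleeping poll bounds the
whole wait at 10ms. A userspace sketch of the same shape, with ready()
standing in for host2guc_action_response() (the kernel's wait_for_us() and
wait_for() helpers do more than this):

/*
 * Userspace sketch of the two-stage wait; timings mirror the comment
 * in the hunk above.
 */
#include <errno.h>
#include <stdbool.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static int wait_two_stage(bool (*ready)(void *), void *arg)
{
	long long start = now_ns();

	/* Stage 1: busy-poll, cheap for sub-10us completions. */
	while (now_ns() - start < 10 * 1000LL)
		if (ready(arg))
			return 0;

	/* Stage 2: sleep between samples until the 10ms budget is gone. */
	while (now_ns() - start < 10 * 1000 * 1000LL) {
		if (ready(arg))
			return 0;
		nanosleep(&(struct timespec){ .tv_nsec = 100 * 1000 }, NULL);
	}

	return ready(arg) ? 0 : -ETIMEDOUT;
}

Busy-waiting first keeps latency minimal in the common case, while the
sleeping fallback avoids burning a CPU on the rare slow command.
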
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4378a659d962..1c2aec392412 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
259 dev_priv->gt_irq_mask &= ~interrupt_mask; 259 dev_priv->gt_irq_mask &= ~interrupt_mask;
260 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 260 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
261 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 261 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
262 POSTING_READ(GTIMR);
263} 262}
264 263
265void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 264void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
266{ 265{
267 ilk_update_gt_irq(dev_priv, mask, mask); 266 ilk_update_gt_irq(dev_priv, mask, mask);
267 POSTING_READ_FW(GTIMR);
268} 268}
269 269
270void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 270void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -351,9 +351,8 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
351void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 351void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
352{ 352{
353 spin_lock_irq(&dev_priv->irq_lock); 353 spin_lock_irq(&dev_priv->irq_lock);
354 354 WARN_ON_ONCE(dev_priv->rps.pm_iir);
355 WARN_ON(dev_priv->rps.pm_iir); 355 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
356 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
357 dev_priv->rps.interrupts_enabled = true; 356 dev_priv->rps.interrupts_enabled = true;
358 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | 357 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
359 dev_priv->pm_rps_events); 358 dev_priv->pm_rps_events);
@@ -371,11 +370,6 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
371{ 370{
372 spin_lock_irq(&dev_priv->irq_lock); 371 spin_lock_irq(&dev_priv->irq_lock);
373 dev_priv->rps.interrupts_enabled = false; 372 dev_priv->rps.interrupts_enabled = false;
374 spin_unlock_irq(&dev_priv->irq_lock);
375
376 cancel_work_sync(&dev_priv->rps.work);
377
378 spin_lock_irq(&dev_priv->irq_lock);
379 373
380 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 374 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
381 375
@@ -384,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
384 ~dev_priv->pm_rps_events); 378 ~dev_priv->pm_rps_events);
385 379
386 spin_unlock_irq(&dev_priv->irq_lock); 380 spin_unlock_irq(&dev_priv->irq_lock);
381 synchronize_irq(dev_priv->drm.irq);
387 382
388 synchronize_irq(dev_priv->dev->irq); 383 /* Now that we will not be generating any more work, flush any
 384 * outstanding tasks. As we are called on the RPS idle path,
385 * we will reset the GPU to minimum frequencies, so the current
386 * state of the worker can be discarded.
387 */
388 cancel_work_sync(&dev_priv->rps.work);
389 gen6_reset_rps_interrupts(dev_priv);
389} 390}
390 391
391/** 392/**
@@ -565,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
565 u32 enable_mask; 566 u32 enable_mask;
566 567
567 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 568 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
568 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 569 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
569 status_mask); 570 status_mask);
570 else 571 else
571 enable_mask = status_mask << 16; 572 enable_mask = status_mask << 16;
@@ -579,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
579 u32 enable_mask; 580 u32 enable_mask;
580 581
581 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 582 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
582 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 583 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
583 status_mask); 584 status_mask);
584 else 585 else
585 enable_mask = status_mask << 16; 586 enable_mask = status_mask << 16;
@@ -666,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
666 */ 667 */
667static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 668static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
668{ 669{
669 struct drm_i915_private *dev_priv = dev->dev_private; 670 struct drm_i915_private *dev_priv = to_i915(dev);
670 i915_reg_t high_frame, low_frame; 671 i915_reg_t high_frame, low_frame;
671 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 672 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
672 struct intel_crtc *intel_crtc = 673 struct intel_crtc *intel_crtc =
@@ -713,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
713 714
714static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 715static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
715{ 716{
716 struct drm_i915_private *dev_priv = dev->dev_private; 717 struct drm_i915_private *dev_priv = to_i915(dev);
717 718
718 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 719 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
719} 720}
@@ -722,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
722static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 723static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
723{ 724{
724 struct drm_device *dev = crtc->base.dev; 725 struct drm_device *dev = crtc->base.dev;
725 struct drm_i915_private *dev_priv = dev->dev_private; 726 struct drm_i915_private *dev_priv = to_i915(dev);
726 const struct drm_display_mode *mode = &crtc->base.hwmode; 727 const struct drm_display_mode *mode = &crtc->base.hwmode;
727 enum pipe pipe = crtc->pipe; 728 enum pipe pipe = crtc->pipe;
728 int position, vtotal; 729 int position, vtotal;
@@ -774,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
774 ktime_t *stime, ktime_t *etime, 775 ktime_t *stime, ktime_t *etime,
775 const struct drm_display_mode *mode) 776 const struct drm_display_mode *mode)
776{ 777{
777 struct drm_i915_private *dev_priv = dev->dev_private; 778 struct drm_i915_private *dev_priv = to_i915(dev);
778 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 779 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
779 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 780 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
780 int position; 781 int position;
@@ -895,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
895 896
896int intel_get_crtc_scanline(struct intel_crtc *crtc) 897int intel_get_crtc_scanline(struct intel_crtc *crtc)
897{ 898{
898 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 899 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
899 unsigned long irqflags; 900 unsigned long irqflags;
900 int position; 901 int position;
901 902
@@ -976,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
976 977
977static void notify_ring(struct intel_engine_cs *engine) 978static void notify_ring(struct intel_engine_cs *engine)
978{ 979{
979 if (!intel_engine_initialized(engine)) 980 smp_store_mb(engine->breadcrumbs.irq_posted, true);
980 return; 981 if (intel_engine_wakeup(engine)) {
981 982 trace_i915_gem_request_notify(engine);
982 trace_i915_gem_request_notify(engine); 983 engine->breadcrumbs.irq_wakeups++;
983 engine->user_interrupts++; 984 }
984
985 wake_up_all(&engine->irq_queue);
986} 985}
987 986
988static void vlv_c0_read(struct drm_i915_private *dev_priv, 987static void vlv_c0_read(struct drm_i915_private *dev_priv,
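
notify_ring() above replaces the unconditional wake_up_all() with a flagged
handshake: the interrupt publishes irq_posted with a full barrier
(smp_store_mb) before attempting the wakeup, so a waiter that races with the
interrupt can still observe the flag. A sketch of that handshake, with C11
seq_cst atomics standing in for the kernel barriers (an analogue, not the
kernel code):

#include <stdatomic.h>
#include <stdbool.h>

struct irq_flag {
	atomic_bool irq_posted;
};

static void irq_handler(struct irq_flag *b)
{
	/* Publish completion before any wakeup attempt. */
	atomic_store(&b->irq_posted, true);
	/* ... wake the first waiter here, as intel_engine_wakeup() does ... */
}

static bool waiter_take_irq(struct irq_flag *b)
{
	/*
	 * The waiter consumes the flag instead of relying on having been
	 * woken, closing the lost-wakeup window.
	 */
	return atomic_exchange(&b->irq_posted, false);
}
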
@@ -1063,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
1063 struct intel_engine_cs *engine; 1062 struct intel_engine_cs *engine;
1064 1063
1065 for_each_engine(engine, dev_priv) 1064 for_each_engine(engine, dev_priv)
1066 if (engine->irq_refcount) 1065 if (intel_engine_has_waiter(engine))
1067 return true; 1066 return true;
1068 1067
1069 return false; 1068 return false;
@@ -1084,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
1084 return; 1083 return;
1085 } 1084 }
1086 1085
1087 /*
1088 * The RPS work is synced during runtime suspend, we don't require a
1089 * wakeref. TODO: instead of disabling the asserts make sure that we
1090 * always hold an RPM reference while the work is running.
1091 */
1092 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1093
1094 pm_iir = dev_priv->rps.pm_iir; 1086 pm_iir = dev_priv->rps.pm_iir;
1095 dev_priv->rps.pm_iir = 0; 1087 dev_priv->rps.pm_iir = 0;
1096 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1088 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1103,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
1103 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1095 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1104 1096
1105 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1097 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1106 goto out; 1098 return;
1107 1099
1108 mutex_lock(&dev_priv->rps.hw_lock); 1100 mutex_lock(&dev_priv->rps.hw_lock);
1109 1101
@@ -1158,8 +1150,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
1158 intel_set_rps(dev_priv, new_delay); 1150 intel_set_rps(dev_priv, new_delay);
1159 1151
1160 mutex_unlock(&dev_priv->rps.hw_lock); 1152 mutex_unlock(&dev_priv->rps.hw_lock);
1161out:
1162 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1163} 1153}
1164 1154
1165 1155
@@ -1185,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1185 * In order to prevent a get/put style interface, acquire struct mutex 1175 * In order to prevent a get/put style interface, acquire struct mutex
1186 * any time we access those registers. 1176 * any time we access those registers.
1187 */ 1177 */
1188 mutex_lock(&dev_priv->dev->struct_mutex); 1178 mutex_lock(&dev_priv->drm.struct_mutex);
1189 1179
1190 /* If we've screwed up tracking, just let the interrupt fire again */ 1180 /* If we've screwed up tracking, just let the interrupt fire again */
1191 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1181 if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1221,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1221 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1211 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1222 parity_event[5] = NULL; 1212 parity_event[5] = NULL;
1223 1213
1224 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1214 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1225 KOBJ_CHANGE, parity_event); 1215 KOBJ_CHANGE, parity_event);
1226 1216
1227 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1217 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1241,7 +1231,7 @@ out:
1241 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1231 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1242 spin_unlock_irq(&dev_priv->irq_lock); 1232 spin_unlock_irq(&dev_priv->irq_lock);
1243 1233
1244 mutex_unlock(&dev_priv->dev->struct_mutex); 1234 mutex_unlock(&dev_priv->drm.struct_mutex);
1245} 1235}
1246 1236
1247static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1237static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1267,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
1267static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1257static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1268 u32 gt_iir) 1258 u32 gt_iir)
1269{ 1259{
1270 if (gt_iir & 1260 if (gt_iir & GT_RENDER_USER_INTERRUPT)
1271 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1272 notify_ring(&dev_priv->engine[RCS]); 1261 notify_ring(&dev_priv->engine[RCS]);
1273 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1262 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1274 notify_ring(&dev_priv->engine[VCS]); 1263 notify_ring(&dev_priv->engine[VCS]);
@@ -1277,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1277static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1266static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1278 u32 gt_iir) 1267 u32 gt_iir)
1279{ 1268{
1280 1269 if (gt_iir & GT_RENDER_USER_INTERRUPT)
1281 if (gt_iir &
1282 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1283 notify_ring(&dev_priv->engine[RCS]); 1270 notify_ring(&dev_priv->engine[RCS]);
1284 if (gt_iir & GT_BSD_USER_INTERRUPT) 1271 if (gt_iir & GT_BSD_USER_INTERRUPT)
1285 notify_ring(&dev_priv->engine[VCS]); 1272 notify_ring(&dev_priv->engine[VCS]);
@@ -1526,7 +1513,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1526 1513
1527 entry = &pipe_crc->entries[head]; 1514 entry = &pipe_crc->entries[head];
1528 1515
1529 entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev, 1516 entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
1530 pipe); 1517 pipe);
1531 entry->crc[0] = crc0; 1518 entry->crc[0] = crc0;
1532 entry->crc[1] = crc1; 1519 entry->crc[1] = crc1;
@@ -1602,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1602 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1589 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1603 if (dev_priv->rps.interrupts_enabled) { 1590 if (dev_priv->rps.interrupts_enabled) {
1604 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1591 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1605 queue_work(dev_priv->wq, &dev_priv->rps.work); 1592 schedule_work(&dev_priv->rps.work);
1606 } 1593 }
1607 spin_unlock(&dev_priv->irq_lock); 1594 spin_unlock(&dev_priv->irq_lock);
1608 } 1595 }
@@ -1624,7 +1611,7 @@ static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1624{ 1611{
1625 bool ret; 1612 bool ret;
1626 1613
1627 ret = drm_handle_vblank(dev_priv->dev, pipe); 1614 ret = drm_handle_vblank(&dev_priv->drm, pipe);
1628 if (ret) 1615 if (ret)
1629 intel_finish_page_flip_mmio(dev_priv, pipe); 1616 intel_finish_page_flip_mmio(dev_priv, pipe);
1630 1617
@@ -1757,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1757static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1744static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1758{ 1745{
1759 struct drm_device *dev = arg; 1746 struct drm_device *dev = arg;
1760 struct drm_i915_private *dev_priv = dev->dev_private; 1747 struct drm_i915_private *dev_priv = to_i915(dev);
1761 irqreturn_t ret = IRQ_NONE; 1748 irqreturn_t ret = IRQ_NONE;
1762 1749
1763 if (!intel_irqs_enabled(dev_priv)) 1750 if (!intel_irqs_enabled(dev_priv))
@@ -1840,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1840static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1827static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1841{ 1828{
1842 struct drm_device *dev = arg; 1829 struct drm_device *dev = arg;
1843 struct drm_i915_private *dev_priv = dev->dev_private; 1830 struct drm_i915_private *dev_priv = to_i915(dev);
1844 irqreturn_t ret = IRQ_NONE; 1831 irqreturn_t ret = IRQ_NONE;
1845 1832
1846 if (!intel_irqs_enabled(dev_priv)) 1833 if (!intel_irqs_enabled(dev_priv))
@@ -2225,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2225static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2212static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2226{ 2213{
2227 struct drm_device *dev = arg; 2214 struct drm_device *dev = arg;
2228 struct drm_i915_private *dev_priv = dev->dev_private; 2215 struct drm_i915_private *dev_priv = to_i915(dev);
2229 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2216 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2230 irqreturn_t ret = IRQ_NONE; 2217 irqreturn_t ret = IRQ_NONE;
2231 2218
@@ -2438,7 +2425,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2438 I915_WRITE(SDEIIR, iir); 2425 I915_WRITE(SDEIIR, iir);
2439 ret = IRQ_HANDLED; 2426 ret = IRQ_HANDLED;
2440 2427
2441 if (HAS_PCH_SPT(dev_priv)) 2428 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2442 spt_irq_handler(dev_priv, iir); 2429 spt_irq_handler(dev_priv, iir);
2443 else 2430 else
2444 cpt_irq_handler(dev_priv, iir); 2431 cpt_irq_handler(dev_priv, iir);
@@ -2457,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2457static irqreturn_t gen8_irq_handler(int irq, void *arg) 2444static irqreturn_t gen8_irq_handler(int irq, void *arg)
2458{ 2445{
2459 struct drm_device *dev = arg; 2446 struct drm_device *dev = arg;
2460 struct drm_i915_private *dev_priv = dev->dev_private; 2447 struct drm_i915_private *dev_priv = to_i915(dev);
2461 u32 master_ctl; 2448 u32 master_ctl;
2462 u32 gt_iir[4] = {}; 2449 u32 gt_iir[4] = {};
2463 irqreturn_t ret; 2450 irqreturn_t ret;
@@ -2488,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2488 return ret; 2475 return ret;
2489} 2476}
2490 2477
2491static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2478static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2492 bool reset_completed)
2493{ 2479{
2494 struct intel_engine_cs *engine;
2495
2496 /* 2480 /*
2497 * Notify all waiters for GPU completion events that reset state has 2481 * Notify all waiters for GPU completion events that reset state has
2498 * been changed, and that they need to restart their wait after 2482 * been changed, and that they need to restart their wait after
@@ -2501,18 +2485,10 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2501 */ 2485 */
2502 2486
2503 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2487 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2504 for_each_engine(engine, dev_priv) 2488 wake_up_all(&dev_priv->gpu_error.wait_queue);
2505 wake_up_all(&engine->irq_queue);
2506 2489
2507 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2490 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2508 wake_up_all(&dev_priv->pending_flip_queue); 2491 wake_up_all(&dev_priv->pending_flip_queue);
2509
2510 /*
2511 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2512 * reset state is cleared.
2513 */
2514 if (reset_completed)
2515 wake_up_all(&dev_priv->gpu_error.reset_queue);
2516} 2492}
2517 2493
2518/** 2494/**
@@ -2524,7 +2500,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2524 */ 2500 */
2525static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2501static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2526{ 2502{
2527 struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj; 2503 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2528 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2504 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2529 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2505 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2530 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2506 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2577,7 +2553,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2577 * Note: The wake_up also serves as a memory barrier so that 2553 * Note: The wake_up also serves as a memory barrier so that
2578 * waiters see the update value of the reset counter atomic_t. 2554 * waiters see the update value of the reset counter atomic_t.
2579 */ 2555 */
2580 i915_error_wake_up(dev_priv, true); 2556 wake_up_all(&dev_priv->gpu_error.reset_queue);
2581 } 2557 }
2582} 2558}
2583 2559
@@ -2714,7 +2690,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
2714 * ensure that the waiters see the updated value of the reset 2690 * ensure that the waiters see the updated value of the reset
2715 * counter atomic_t. 2691 * counter atomic_t.
2716 */ 2692 */
2717 i915_error_wake_up(dev_priv, false); 2693 i915_error_wake_up(dev_priv);
2718 } 2694 }
2719 2695
2720 i915_reset_and_wakeup(dev_priv); 2696 i915_reset_and_wakeup(dev_priv);
@@ -2725,7 +2701,7 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
2725 */ 2701 */
2726static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2702static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2727{ 2703{
2728 struct drm_i915_private *dev_priv = dev->dev_private; 2704 struct drm_i915_private *dev_priv = to_i915(dev);
2729 unsigned long irqflags; 2705 unsigned long irqflags;
2730 2706
2731 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2707 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2742,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2742 2718
2743static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2719static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2744{ 2720{
2745 struct drm_i915_private *dev_priv = dev->dev_private; 2721 struct drm_i915_private *dev_priv = to_i915(dev);
2746 unsigned long irqflags; 2722 unsigned long irqflags;
2747 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2723 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2748 DE_PIPE_VBLANK(pipe); 2724 DE_PIPE_VBLANK(pipe);
@@ -2756,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2756 2732
2757static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2733static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2758{ 2734{
2759 struct drm_i915_private *dev_priv = dev->dev_private; 2735 struct drm_i915_private *dev_priv = to_i915(dev);
2760 unsigned long irqflags; 2736 unsigned long irqflags;
2761 2737
2762 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2769,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2769 2745
2770static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2746static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2771{ 2747{
2772 struct drm_i915_private *dev_priv = dev->dev_private; 2748 struct drm_i915_private *dev_priv = to_i915(dev);
2773 unsigned long irqflags; 2749 unsigned long irqflags;
2774 2750
2775 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2784,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2784 */ 2760 */
2785static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2761static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2786{ 2762{
2787 struct drm_i915_private *dev_priv = dev->dev_private; 2763 struct drm_i915_private *dev_priv = to_i915(dev);
2788 unsigned long irqflags; 2764 unsigned long irqflags;
2789 2765
2790 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2766 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2796,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2796 2772
2797static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2773static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2798{ 2774{
2799 struct drm_i915_private *dev_priv = dev->dev_private; 2775 struct drm_i915_private *dev_priv = to_i915(dev);
2800 unsigned long irqflags; 2776 unsigned long irqflags;
2801 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2777 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2802 DE_PIPE_VBLANK(pipe); 2778 DE_PIPE_VBLANK(pipe);
@@ -2808,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2808 2784
2809static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) 2785static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2810{ 2786{
2811 struct drm_i915_private *dev_priv = dev->dev_private; 2787 struct drm_i915_private *dev_priv = to_i915(dev);
2812 unsigned long irqflags; 2788 unsigned long irqflags;
2813 2789
2814 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2790 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2819,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2819 2795
2820static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2796static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2821{ 2797{
2822 struct drm_i915_private *dev_priv = dev->dev_private; 2798 struct drm_i915_private *dev_priv = to_i915(dev);
2823 unsigned long irqflags; 2799 unsigned long irqflags;
2824 2800
2825 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2801 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2835,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
2835} 2811}
2836 2812
2837static bool 2813static bool
2838ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr) 2814ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2839{ 2815{
2840 if (INTEL_GEN(dev_priv) >= 8) { 2816 if (INTEL_GEN(engine->i915) >= 8) {
2841 return (ipehr >> 23) == 0x1c; 2817 return (ipehr >> 23) == 0x1c;
2842 } else { 2818 } else {
2843 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2819 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2908,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2908 return NULL; 2884 return NULL;
2909 2885
2910 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2886 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2911 if (!ipehr_is_semaphore_wait(engine->i915, ipehr)) 2887 if (!ipehr_is_semaphore_wait(engine, ipehr))
2912 return NULL; 2888 return NULL;
2913 2889
2914 /* 2890 /*
@@ -2966,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
2966 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) 2942 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2967 return -1; 2943 return -1;
2968 2944
2969 if (i915_seqno_passed(signaller->get_seqno(signaller), seqno)) 2945 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
2970 return 1; 2946 return 1;
2971 2947
2972 /* cursory check for an unkickable deadlock */ 2948 /* cursory check for an unkickable deadlock */
@@ -3078,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3078 return HANGCHECK_HUNG; 3054 return HANGCHECK_HUNG;
3079} 3055}
3080 3056
3081static unsigned kick_waiters(struct intel_engine_cs *engine) 3057static unsigned long kick_waiters(struct intel_engine_cs *engine)
3082{ 3058{
3083 struct drm_i915_private *i915 = engine->i915; 3059 struct drm_i915_private *i915 = engine->i915;
3084 unsigned user_interrupts = READ_ONCE(engine->user_interrupts); 3060 unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
3085 3061
3086 if (engine->hangcheck.user_interrupts == user_interrupts && 3062 if (engine->hangcheck.user_interrupts == irq_count &&
3087 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { 3063 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
3088 if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine))) 3064 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
3089 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3065 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3090 engine->name); 3066 engine->name);
3091 else 3067
3092 DRM_INFO("Fake missed irq on %s\n", 3068 intel_engine_enable_fake_irq(engine);
3093 engine->name);
3094 wake_up_all(&engine->irq_queue);
3095 } 3069 }
3096 3070
3097 return user_interrupts; 3071 return irq_count;
3098} 3072}
3099/* 3073/*
3100 * This is called when the chip hasn't reported back with completed 3074 * This is called when the chip hasn't reported back with completed
@@ -3110,9 +3084,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3110 container_of(work, typeof(*dev_priv), 3084 container_of(work, typeof(*dev_priv),
3111 gpu_error.hangcheck_work.work); 3085 gpu_error.hangcheck_work.work);
3112 struct intel_engine_cs *engine; 3086 struct intel_engine_cs *engine;
3113 enum intel_engine_id id; 3087 unsigned int hung = 0, stuck = 0;
3114 int busy_count = 0, rings_hung = 0; 3088 int busy_count = 0;
3115 bool stuck[I915_NUM_ENGINES] = { 0 };
3116#define BUSY 1 3089#define BUSY 1
3117#define KICK 5 3090#define KICK 5
3118#define HUNG 20 3091#define HUNG 20
@@ -3121,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3121 if (!i915.enable_hangcheck) 3094 if (!i915.enable_hangcheck)
3122 return; 3095 return;
3123 3096
3124 /* 3097 if (!READ_ONCE(dev_priv->gt.awake))
3125 * The hangcheck work is synced during runtime suspend, we don't 3098 return;
3126 * require a wakeref. TODO: instead of disabling the asserts make
3127 * sure that we hold a reference when this work is running.
3128 */
3129 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3130 3099
3131 /* As enabling the GPU requires fairly extensive mmio access, 3100 /* As enabling the GPU requires fairly extensive mmio access,
3132 * periodically arm the mmio checker to see if we are triggering 3101 * periodically arm the mmio checker to see if we are triggering
@@ -3134,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3134 */ 3103 */
3135 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3104 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3136 3105
3137 for_each_engine_id(engine, dev_priv, id) { 3106 for_each_engine(engine, dev_priv) {
3107 bool busy = intel_engine_has_waiter(engine);
3138 u64 acthd; 3108 u64 acthd;
3139 u32 seqno; 3109 u32 seqno;
3140 unsigned user_interrupts; 3110 unsigned user_interrupts;
3141 bool busy = true;
3142 3111
3143 semaphore_clear_deadlocks(dev_priv); 3112 semaphore_clear_deadlocks(dev_priv);
3144 3113
@@ -3153,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3153 engine->irq_seqno_barrier(engine); 3122 engine->irq_seqno_barrier(engine);
3154 3123
3155 acthd = intel_ring_get_active_head(engine); 3124 acthd = intel_ring_get_active_head(engine);
3156 seqno = engine->get_seqno(engine); 3125 seqno = intel_engine_get_seqno(engine);
3157 3126
3158 /* Reset stuck interrupts between batch advances */ 3127 /* Reset stuck interrupts between batch advances */
3159 user_interrupts = 0; 3128 user_interrupts = 0;
@@ -3161,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3161 if (engine->hangcheck.seqno == seqno) { 3130 if (engine->hangcheck.seqno == seqno) {
3162 if (ring_idle(engine, seqno)) { 3131 if (ring_idle(engine, seqno)) {
3163 engine->hangcheck.action = HANGCHECK_IDLE; 3132 engine->hangcheck.action = HANGCHECK_IDLE;
3164 if (waitqueue_active(&engine->irq_queue)) { 3133 if (busy) {
3165 /* Safeguard against driver failure */ 3134 /* Safeguard against driver failure */
3166 user_interrupts = kick_waiters(engine); 3135 user_interrupts = kick_waiters(engine);
3167 engine->hangcheck.score += BUSY; 3136 engine->hangcheck.score += BUSY;
3168 } else 3137 }
3169 busy = false;
3170 } else { 3138 } else {
3171 /* We always increment the hangcheck score 3139 /* We always increment the hangcheck score
3172 * if the ring is busy and still processing 3140 * if the ring is busy and still processing
@@ -3198,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3198 break; 3166 break;
3199 case HANGCHECK_HUNG: 3167 case HANGCHECK_HUNG:
3200 engine->hangcheck.score += HUNG; 3168 engine->hangcheck.score += HUNG;
3201 stuck[id] = true;
3202 break; 3169 break;
3203 } 3170 }
3204 } 3171 }
3172
3173 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3174 hung |= intel_engine_flag(engine);
3175 if (engine->hangcheck.action != HANGCHECK_HUNG)
3176 stuck |= intel_engine_flag(engine);
3177 }
3205 } else { 3178 } else {
3206 engine->hangcheck.action = HANGCHECK_ACTIVE; 3179 engine->hangcheck.action = HANGCHECK_ACTIVE;
3207 3180
@@ -3226,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3226 busy_count += busy; 3199 busy_count += busy;
3227 } 3200 }
3228 3201
3229 for_each_engine_id(engine, dev_priv, id) { 3202 if (hung) {
3230 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3203 char msg[80];
3231 DRM_INFO("%s on %s\n", 3204 int len;
3232 stuck[id] ? "stuck" : "no progress",
3233 engine->name);
3234 rings_hung |= intel_engine_flag(engine);
3235 }
3236 }
3237 3205
3238 if (rings_hung) { 3206 /* If some rings hung but others were still busy, only
3239 i915_handle_error(dev_priv, rings_hung, "Engine(s) hung"); 3207 * blame the hanging rings in the synopsis.
3240 goto out; 3208 */
3209 if (stuck != hung)
3210 hung &= ~stuck;
3211 len = scnprintf(msg, sizeof(msg),
3212 "%s on ", stuck == hung ? "No progress" : "Hang");
3213 for_each_engine_masked(engine, dev_priv, hung)
3214 len += scnprintf(msg + len, sizeof(msg) - len,
3215 "%s, ", engine->name);
3216 msg[len-2] = '\0';
3217
3218 return i915_handle_error(dev_priv, hung, msg);
3241 } 3219 }
3242 3220
3221 /* Reset timer in case GPU hangs without another request being added */
3243 if (busy_count) 3222 if (busy_count)
3244 /* Reset timer case chip hangs without another request
3245 * being added */
3246 i915_queue_hangcheck(dev_priv); 3223 i915_queue_hangcheck(dev_priv);
3247
3248out:
3249 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3250}
3251
3252void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
3253{
3254 struct i915_gpu_error *e = &dev_priv->gpu_error;
3255
3256 if (!i915.enable_hangcheck)
3257 return;
3258
3259 /* Don't continually defer the hangcheck so that it is always run at
3260 * least once after work has been scheduled on any ring. Otherwise,
3261 * we will ignore a hung ring if a second ring is kept busy.
3262 */
3263
3264 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3265 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3266} 3224}
3267 3225
3268static void ibx_irq_reset(struct drm_device *dev) 3226static void ibx_irq_reset(struct drm_device *dev)
3269{ 3227{
3270 struct drm_i915_private *dev_priv = dev->dev_private; 3228 struct drm_i915_private *dev_priv = to_i915(dev);
3271 3229
3272 if (HAS_PCH_NOP(dev)) 3230 if (HAS_PCH_NOP(dev))
3273 return; 3231 return;
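
The hangcheck synopsis above is built by appending each hung engine's name
with scnprintf() and then overwriting the trailing ", ". A userspace sketch
of that string building, assuming the buffer is sized for all names (the
kernel relies on scnprintf() clamping instead) and that hung has at least one
bit set, as the if (hung) guard guarantees:

#include <assert.h>
#include <stdio.h>
#include <string.h>

static void build_hang_msg(char *msg, size_t size, unsigned int hung,
			   const char *names[], int nengines,
			   int stuck_eq_hung)
{
	size_t len;
	int i;

	len = (size_t)snprintf(msg, size, "%s on ",
			       stuck_eq_hung ? "No progress" : "Hang");
	assert(len < size);
	for (i = 0; i < nengines; i++) {
		if (!(hung & (1u << i)))
			continue;
		assert(len + strlen(names[i]) + 3 <= size);
		len += (size_t)snprintf(msg + len, size - len, "%s, ",
					names[i]);
	}
	msg[len - 2] = '\0';	/* chop the trailing ", " */
}

For example, build_hang_msg(msg, sizeof(msg), 0x3,
(const char *[]){ "rcs", "vcs" }, 2, 0) yields "Hang on rcs, vcs".
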
@@ -3288,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev)
3288 */ 3246 */
3289static void ibx_irq_pre_postinstall(struct drm_device *dev) 3247static void ibx_irq_pre_postinstall(struct drm_device *dev)
3290{ 3248{
3291 struct drm_i915_private *dev_priv = dev->dev_private; 3249 struct drm_i915_private *dev_priv = to_i915(dev);
3292 3250
3293 if (HAS_PCH_NOP(dev)) 3251 if (HAS_PCH_NOP(dev))
3294 return; 3252 return;
@@ -3300,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
3300 3258
3301static void gen5_gt_irq_reset(struct drm_device *dev) 3259static void gen5_gt_irq_reset(struct drm_device *dev)
3302{ 3260{
3303 struct drm_i915_private *dev_priv = dev->dev_private; 3261 struct drm_i915_private *dev_priv = to_i915(dev);
3304 3262
3305 GEN5_IRQ_RESET(GT); 3263 GEN5_IRQ_RESET(GT);
3306 if (INTEL_INFO(dev)->gen >= 6) 3264 if (INTEL_INFO(dev)->gen >= 6)
@@ -3360,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3360*/ 3318*/
3361static void ironlake_irq_reset(struct drm_device *dev) 3319static void ironlake_irq_reset(struct drm_device *dev)
3362{ 3320{
3363 struct drm_i915_private *dev_priv = dev->dev_private; 3321 struct drm_i915_private *dev_priv = to_i915(dev);
3364 3322
3365 I915_WRITE(HWSTAM, 0xffffffff); 3323 I915_WRITE(HWSTAM, 0xffffffff);
3366 3324
@@ -3375,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
3375 3333
3376static void valleyview_irq_preinstall(struct drm_device *dev) 3334static void valleyview_irq_preinstall(struct drm_device *dev)
3377{ 3335{
3378 struct drm_i915_private *dev_priv = dev->dev_private; 3336 struct drm_i915_private *dev_priv = to_i915(dev);
3379 3337
3380 I915_WRITE(VLV_MASTER_IER, 0); 3338 I915_WRITE(VLV_MASTER_IER, 0);
3381 POSTING_READ(VLV_MASTER_IER); 3339 POSTING_READ(VLV_MASTER_IER);
@@ -3398,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3398 3356
3399static void gen8_irq_reset(struct drm_device *dev) 3357static void gen8_irq_reset(struct drm_device *dev)
3400{ 3358{
3401 struct drm_i915_private *dev_priv = dev->dev_private; 3359 struct drm_i915_private *dev_priv = to_i915(dev);
3402 int pipe; 3360 int pipe;
3403 3361
3404 I915_WRITE(GEN8_MASTER_IRQ, 0); 3362 I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3444,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3444 spin_unlock_irq(&dev_priv->irq_lock); 3402 spin_unlock_irq(&dev_priv->irq_lock);
3445 3403
3446 /* make sure we're done processing display irqs */ 3404 /* make sure we're done processing display irqs */
3447 synchronize_irq(dev_priv->dev->irq); 3405 synchronize_irq(dev_priv->drm.irq);
3448} 3406}
3449 3407
3450static void cherryview_irq_preinstall(struct drm_device *dev) 3408static void cherryview_irq_preinstall(struct drm_device *dev)
3451{ 3409{
3452 struct drm_i915_private *dev_priv = dev->dev_private; 3410 struct drm_i915_private *dev_priv = to_i915(dev);
3453 3411
3454 I915_WRITE(GEN8_MASTER_IRQ, 0); 3412 I915_WRITE(GEN8_MASTER_IRQ, 0);
3455 POSTING_READ(GEN8_MASTER_IRQ); 3413 POSTING_READ(GEN8_MASTER_IRQ);
@@ -3470,7 +3428,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3470 struct intel_encoder *encoder; 3428 struct intel_encoder *encoder;
3471 u32 enabled_irqs = 0; 3429 u32 enabled_irqs = 0;
3472 3430
3473 for_each_intel_encoder(dev_priv->dev, encoder) 3431 for_each_intel_encoder(&dev_priv->drm, encoder)
3474 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3432 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3475 enabled_irqs |= hpd[encoder->hpd_pin]; 3433 enabled_irqs |= hpd[encoder->hpd_pin];
3476 3434
@@ -3601,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3601 3559
3602static void ibx_irq_postinstall(struct drm_device *dev) 3560static void ibx_irq_postinstall(struct drm_device *dev)
3603{ 3561{
3604 struct drm_i915_private *dev_priv = dev->dev_private; 3562 struct drm_i915_private *dev_priv = to_i915(dev);
3605 u32 mask; 3563 u32 mask;
3606 3564
3607 if (HAS_PCH_NOP(dev)) 3565 if (HAS_PCH_NOP(dev))
@@ -3618,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
3618 3576
3619static void gen5_gt_irq_postinstall(struct drm_device *dev) 3577static void gen5_gt_irq_postinstall(struct drm_device *dev)
3620{ 3578{
3621 struct drm_i915_private *dev_priv = dev->dev_private; 3579 struct drm_i915_private *dev_priv = to_i915(dev);
3622 u32 pm_irqs, gt_irqs; 3580 u32 pm_irqs, gt_irqs;
3623 3581
3624 pm_irqs = gt_irqs = 0; 3582 pm_irqs = gt_irqs = 0;
@@ -3632,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3632 3590
3633 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3591 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3634 if (IS_GEN5(dev)) { 3592 if (IS_GEN5(dev)) {
3635 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3593 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3636 ILK_BSD_USER_INTERRUPT;
3637 } else { 3594 } else {
3638 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3595 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3639 } 3596 }
@@ -3655,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3655 3612
3656static int ironlake_irq_postinstall(struct drm_device *dev) 3613static int ironlake_irq_postinstall(struct drm_device *dev)
3657{ 3614{
3658 struct drm_i915_private *dev_priv = dev->dev_private; 3615 struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 display_mask, extra_mask;

 	if (INTEL_INFO(dev)->gen >= 7) {
@@ -3734,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)

 static int valleyview_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	gen5_gt_irq_postinstall(dev);

@@ -3827,7 +3784,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)

 static int gen8_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (HAS_PCH_SPLIT(dev))
 		ibx_irq_pre_postinstall(dev);
@@ -3846,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)

 static int cherryview_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	gen8_gt_irq_postinstall(dev_priv);

@@ -3863,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev)

 static void gen8_irq_uninstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (!dev_priv)
 		return;
@@ -3873,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)

 static void valleyview_irq_uninstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (!dev_priv)
 		return;
@@ -3893,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)

 static void cherryview_irq_uninstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (!dev_priv)
 		return;
@@ -3913,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)

 static void ironlake_irq_uninstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	if (!dev_priv)
 		return;
@@ -3923,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)

 static void i8xx_irq_preinstall(struct drm_device * dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;

 	for_each_pipe(dev_priv, pipe)
@@ -3935,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)

 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	I915_WRITE16(EMR,
 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3998,7 +3955,7 @@ check_page_flip:
 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 iir, new_iir;
 	u32 pipe_stats[2];
 	int pipe;
@@ -4075,7 +4032,7 @@ out:

 static void i8xx_irq_uninstall(struct drm_device * dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;

 	for_each_pipe(dev_priv, pipe) {
@@ -4090,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)

 static void i915_irq_preinstall(struct drm_device * dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;

 	if (I915_HAS_HOTPLUG(dev)) {
@@ -4108,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev)

 static int i915_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 enable_mask;

 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4187,7 +4144,7 @@ check_page_flip:
 static irqreturn_t i915_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
 	u32 flip_mask =
 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4292,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)

 static void i915_irq_uninstall(struct drm_device * dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;

 	if (I915_HAS_HOTPLUG(dev)) {
@@ -4314,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev)

 static void i965_irq_preinstall(struct drm_device * dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;

 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4330,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev)

 static int i965_irq_postinstall(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 enable_mask;
 	u32 error_mask;

@@ -4414,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
 static irqreturn_t i965_irq_handler(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 iir, new_iir;
 	u32 pipe_stats[I915_MAX_PIPES];
 	int ret = IRQ_NONE, pipe;
@@ -4523,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)

 static void i965_irq_uninstall(struct drm_device * dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;

 	if (!dev_priv)
@@ -4553,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
  */
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;

 	intel_hpd_init_work(dev_priv);

@@ -4631,7 +4588,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->disable_vblank = gen8_disable_vblank;
 		if (IS_BROXTON(dev))
 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-		else if (HAS_PCH_SPT(dev))
+		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
 		else
 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
@@ -4687,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
 	 */
 	dev_priv->pm.irqs_enabled = true;

-	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
+	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
 }

 /**
@@ -4699,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
  */
 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
 {
-	drm_irq_uninstall(dev_priv->dev);
+	drm_irq_uninstall(&dev_priv->drm);
 	intel_hpd_cancel_work(dev_priv);
 	dev_priv->pm.irqs_enabled = false;
 }
@@ -4713,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 {
-	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
+	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
 	dev_priv->pm.irqs_enabled = false;
-	synchronize_irq(dev_priv->dev->irq);
+	synchronize_irq(dev_priv->drm.irq);
 }

 /**
@@ -4728,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
 {
 	dev_priv->pm.irqs_enabled = true;
-	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
-	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
+	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
+	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
 }
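
Reviewer note: nearly every hunk above is the same mechanical conversion. struct drm_i915_private now embeds its struct drm_device as dev_priv->drm rather than holding a backpointer, so dev->dev_private lookups become to_i915(dev) and dev_priv->dev becomes &dev_priv->drm. A minimal standalone sketch of the pattern, with simplified stand-in structs (the real to_i915() is a container_of() wrapper in i915_drv.h):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the real structures. */
struct drm_device { int irq; };

struct drm_i915_private {
	struct drm_device drm;	/* embedded, not a pointer any more */
	int gen;
};

/* Same shape as the driver's helper: recover the containing
 * drm_i915_private from an embedded drm_device (container_of()). */
static struct drm_i915_private *to_i915(struct drm_device *dev)
{
	return (struct drm_i915_private *)
		((char *)dev - offsetof(struct drm_i915_private, drm));
}

int main(void)
{
	struct drm_i915_private i915 = { .drm = { .irq = 42 }, .gen = 9 };
	struct drm_device *dev = &i915.drm;

	/* Round-trips in both directions without any ->dev_private hop. */
	printf("gen=%d irq=%d\n", to_i915(dev)->gen, dev->irq);
	return 0;
}

Presumably the win is one pointer dereference fewer on hot paths and no window during load where dev_private is still NULL.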
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7effe68d552c..8b13bfa47fba 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -224,6 +224,6 @@ module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600
 MODULE_PARM_DESC(enable_dpcd_backlight,
 	"Enable support for DPCD backlight control (default:false)");

-module_param_named(enable_gvt, i915.enable_gvt, bool, 0600);
+module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
 MODULE_PARM_DESC(enable_gvt,
 	"Enable support for Intel GVT-g graphics virtualization host support (default:false)");
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
new file mode 100644
index 000000000000..949c01686a66
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+
+#include "i915_drv.h"
+
+#define GEN_DEFAULT_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+
+#define GEN_CHV_PIPEOFFSETS \
+	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
+			  CHV_PIPE_C_OFFSET }, \
+	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
+			   CHV_TRANSCODER_C_OFFSET, }, \
+	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
+			     CHV_PALETTE_C_OFFSET }
+
+#define CURSOR_OFFSETS \
+	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+
+#define IVB_CURSOR_OFFSETS \
+	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+
+#define BDW_COLORS \
+	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
+static const struct intel_device_info intel_i830_info = {
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_845g_info = {
+	.gen = 2, .num_pipes = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i85x_info = {
+	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
+	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i865g_info = {
+	.gen = 2, .num_pipes = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i915g_info = {
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i915gm_info = {
+	.gen = 3, .is_mobile = 1, .num_pipes = 2,
+	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945g_info = {
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+static const struct intel_device_info intel_i945gm_info = {
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965g_info = {
+	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
+	.has_hotplug = 1,
+	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_i965gm_info = {
+	.gen = 4, .is_crestline = 1, .num_pipes = 2,
+	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g33_info = {
+	.gen = 3, .is_g33 = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_g45_info = {
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_gm45_info = {
+	.gen = 4, .is_g4x = 1, .num_pipes = 2,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
+	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.supports_tv = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_d_info = {
+	.gen = 5, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_ironlake_m_info = {
+	.gen = 5, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING | BSD_RING,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_d_info = {
+	.gen = 6, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+static const struct intel_device_info intel_sandybridge_m_info = {
+	.gen = 6, .is_mobile = 1, .num_pipes = 2,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+	.has_llc = 1,
+	GEN_DEFAULT_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+};
+
+#define GEN7_FEATURES \
+	.gen = 7, .num_pipes = 3, \
+	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.has_fbc = 1, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.has_llc = 1, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	IVB_CURSOR_OFFSETS
+
+static const struct intel_device_info intel_ivybridge_d_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_m_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.is_mobile = 1,
+};
+
+static const struct intel_device_info intel_ivybridge_q_info = {
+	GEN7_FEATURES,
+	.is_ivybridge = 1,
+	.num_pipes = 0, /* legal, last one wins */
+};
+
+#define VLV_FEATURES \
+	.gen = 7, .num_pipes = 2, \
+	.need_gfx_hws = 1, .has_hotplug = 1, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+	.display_mmio_offset = VLV_DISPLAY_BASE, \
+	GEN_DEFAULT_PIPEOFFSETS, \
+	CURSOR_OFFSETS
+
+static const struct intel_device_info intel_valleyview_m_info = {
+	VLV_FEATURES,
+	.is_valleyview = 1,
+	.is_mobile = 1,
+};
+
+static const struct intel_device_info intel_valleyview_d_info = {
+	VLV_FEATURES,
+	.is_valleyview = 1,
+};
+
+#define HSW_FEATURES \
+	GEN7_FEATURES, \
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+	.has_ddi = 1, \
+	.has_fpga_dbg = 1
+
+static const struct intel_device_info intel_haswell_d_info = {
+	HSW_FEATURES,
+	.is_haswell = 1,
+};
+
+static const struct intel_device_info intel_haswell_m_info = {
+	HSW_FEATURES,
+	.is_haswell = 1,
+	.is_mobile = 1,
+};
+
+#define BDW_FEATURES \
+	HSW_FEATURES, \
+	BDW_COLORS
+
+static const struct intel_device_info intel_broadwell_d_info = {
+	BDW_FEATURES,
+	.gen = 8,
+	.is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_m_info = {
+	BDW_FEATURES,
+	.gen = 8, .is_mobile = 1,
+	.is_broadwell = 1,
+};
+
+static const struct intel_device_info intel_broadwell_gt3d_info = {
+	BDW_FEATURES,
+	.gen = 8,
+	.is_broadwell = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broadwell_gt3m_info = {
+	BDW_FEATURES,
+	.gen = 8, .is_mobile = 1,
+	.is_broadwell = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_cherryview_info = {
+	.gen = 8, .num_pipes = 3,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.is_cherryview = 1,
+	.display_mmio_offset = VLV_DISPLAY_BASE,
+	GEN_CHV_PIPEOFFSETS,
+	CURSOR_OFFSETS,
+	CHV_COLORS,
+};
+
+static const struct intel_device_info intel_skylake_info = {
+	BDW_FEATURES,
+	.is_skylake = 1,
+	.gen = 9,
+};
+
+static const struct intel_device_info intel_skylake_gt3_info = {
+	BDW_FEATURES,
+	.is_skylake = 1,
+	.gen = 9,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+static const struct intel_device_info intel_broxton_info = {
+	.is_broxton = 1,
+	.gen = 9,
+	.need_gfx_hws = 1, .has_hotplug = 1,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+	.num_pipes = 3,
+	.has_ddi = 1,
+	.has_fpga_dbg = 1,
+	.has_fbc = 1,
+	.has_pooled_eu = 0,
+	GEN_DEFAULT_PIPEOFFSETS,
+	IVB_CURSOR_OFFSETS,
+	BDW_COLORS,
+};
+
+static const struct intel_device_info intel_kabylake_info = {
+	BDW_FEATURES,
+	.is_kabylake = 1,
+	.gen = 9,
+};
+
+static const struct intel_device_info intel_kabylake_gt3_info = {
+	BDW_FEATURES,
+	.is_kabylake = 1,
+	.gen = 9,
+	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+};
+
+/*
+ * Make sure any device matches here are from most specific to most
+ * general. For example, since the Quanta match is based on the subsystem
+ * and subvendor IDs, we need it to come before the more general IVB
+ * PCI ID matches, otherwise we'll use the wrong info struct above.
+ */
+static const struct pci_device_id pciidlist[] = {
+	INTEL_I830_IDS(&intel_i830_info),
+	INTEL_I845G_IDS(&intel_845g_info),
+	INTEL_I85X_IDS(&intel_i85x_info),
+	INTEL_I865G_IDS(&intel_i865g_info),
+	INTEL_I915G_IDS(&intel_i915g_info),
+	INTEL_I915GM_IDS(&intel_i915gm_info),
+	INTEL_I945G_IDS(&intel_i945g_info),
+	INTEL_I945GM_IDS(&intel_i945gm_info),
+	INTEL_I965G_IDS(&intel_i965g_info),
+	INTEL_G33_IDS(&intel_g33_info),
+	INTEL_I965GM_IDS(&intel_i965gm_info),
+	INTEL_GM45_IDS(&intel_gm45_info),
+	INTEL_G45_IDS(&intel_g45_info),
+	INTEL_PINEVIEW_IDS(&intel_pineview_info),
+	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
+	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
+	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
+	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
+	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
+	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
+	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
+	INTEL_HSW_D_IDS(&intel_haswell_d_info),
+	INTEL_HSW_M_IDS(&intel_haswell_m_info),
+	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
+	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
+	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
+	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
+	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
+	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
+	INTEL_CHV_IDS(&intel_cherryview_info),
+	INTEL_SKL_GT1_IDS(&intel_skylake_info),
+	INTEL_SKL_GT2_IDS(&intel_skylake_info),
+	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
+	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
+	INTEL_BXT_IDS(&intel_broxton_info),
+	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
+	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
+	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
+	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
+	{0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+extern int i915_driver_load(struct pci_dev *pdev,
+			    const struct pci_device_id *ent);
+
+static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct intel_device_info *intel_info =
+		(struct intel_device_info *) ent->driver_data;
+
+	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
+		DRM_INFO("This hardware requires preliminary hardware support.\n"
+			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+		return -ENODEV;
+	}
+
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. This causes
+	 * us confusion instead, especially on the systems where both
+	 * functions have the same PCI-ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
+	/*
+	 * apple-gmux is needed on dual GPU MacBook Pro
+	 * to probe the panel if we're the inactive GPU.
+	 */
+	if (vga_switcheroo_client_probe_defer(pdev))
+		return -EPROBE_DEFER;
+
+	return i915_driver_load(pdev, ent);
+}
+
+extern void i915_driver_unload(struct drm_device *dev);
+
+static void i915_pci_remove(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+
+	i915_driver_unload(dev);
+	drm_dev_unref(dev);
+}
+
+extern const struct dev_pm_ops i915_pm_ops;
+
+static struct pci_driver i915_pci_driver = {
+	.name = DRIVER_NAME,
+	.id_table = pciidlist,
+	.probe = i915_pci_probe,
+	.remove = i915_pci_remove,
+	.driver.pm = &i915_pm_ops,
+};
+
+static int __init i915_init(void)
+{
+	bool use_kms = true;
+
+	/*
+	 * Enable KMS by default, unless explicitly overridden by
+	 * either the i915.modeset parameter or by the
+	 * vga_text_mode_force boot option.
+	 */
+
+	if (i915.modeset == 0)
+		use_kms = false;
+
+	if (vgacon_text_force() && i915.modeset == -1)
+		use_kms = false;
+
+	if (!use_kms) {
+		/* Silently fail loading to not upset userspace. */
+		DRM_DEBUG_DRIVER("KMS disabled.\n");
+		return 0;
+	}
+
+	return pci_register_driver(&i915_pci_driver);
+}
+
+static void __exit i915_exit(void)
+{
+	if (!i915_pci_driver.driver.owner)
+		return;
+
+	pci_unregister_driver(&i915_pci_driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
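
Reviewer note: the ordering comment above pciidlist matters because PCI ID tables are matched first-hit; the kernel's pci_match_id() walks the table in order and returns the first matching entry, so the subsystem-specific Quanta entry must precede the generic Ivybridge IDs. A toy, userspace-only sketch of that first-match rule (table contents are invented, not real IDs):

#include <stdio.h>

#define ANY_ID 0xffffu

struct toy_id {
	unsigned int vendor, device, subdevice;
	const char *info;
};

/* Most specific first, exactly as the comment above demands. */
static const struct toy_id table[] = {
	{ 0x8086, 0x0166, 0x1234, "quanta-specific info" },
	{ 0x8086, 0x0166, ANY_ID, "generic ivb info" },
	{ 0, 0, 0, NULL },
};

static const char *first_match(unsigned int v, unsigned int d, unsigned int s)
{
	const struct toy_id *id;

	for (id = table; id->vendor; id++)
		if (id->vendor == v && id->device == d &&
		    (id->subdevice == ANY_ID || id->subdevice == s))
			return id->info;	/* first hit wins */
	return "no match";
}

int main(void)
{
	printf("%s\n", first_match(0x8086, 0x0166, 0x1234));
	printf("%s\n", first_match(0x8086, 0x0166, 0x9999));
	return 0;
}

Swap the two table rows and the Quanta device silently picks up the generic info, which is exactly the bug the comment warns about.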
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c6bfbf8d7cca..8bfde75789f6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7070,7 +7070,8 @@ enum {
 #define GEN6_RPDEUC			_MMIO(0xA084)
 #define GEN6_RPDEUCSW			_MMIO(0xA088)
 #define GEN6_RC_STATE			_MMIO(0xA094)
-#define   RC6_STATE			(1 << 18)
+#define   RC_SW_TARGET_STATE_SHIFT	16
+#define   RC_SW_TARGET_STATE_MASK	(7 << RC_SW_TARGET_STATE_SHIFT)
 #define GEN6_RC1_WAKE_RATE_LIMIT	_MMIO(0xA098)
 #define GEN6_RC6_WAKE_RATE_LIMIT	_MMIO(0xA09C)
 #define GEN6_RC6pp_WAKE_RATE_LIMIT	_MMIO(0xA0A0)
@@ -7085,12 +7086,16 @@ enum {
 #define GEN6_RC6pp_THRESHOLD		_MMIO(0xA0C0)
 #define GEN6_PMINTRMSK			_MMIO(0xA168)
 #define   GEN8_PMINTR_REDIRECT_TO_NON_DISP	(1<<31)
+#define GEN8_MISC_CTRL0			_MMIO(0xA180)
 #define VLV_PWRDWNUPCTL			_MMIO(0xA294)
 #define GEN9_MEDIA_PG_IDLE_HYSTERESIS	_MMIO(0xA0C4)
 #define GEN9_RENDER_PG_IDLE_HYSTERESIS	_MMIO(0xA0C8)
 #define GEN9_PG_ENABLE			_MMIO(0xA210)
 #define   GEN9_RENDER_PG_ENABLE		(1<<0)
 #define   GEN9_MEDIA_PG_ENABLE		(1<<1)
+#define GEN8_PUSHBUS_CONTROL		_MMIO(0xA248)
+#define GEN8_PUSHBUS_ENABLE		_MMIO(0xA250)
+#define GEN8_PUSHBUS_SHIFT		_MMIO(0xA25C)

 #define VLV_CHICKEN_3			_MMIO(VLV_DISPLAY_BASE + 0x7040C)
 #define   PIXEL_OVERLAP_CNT_MASK	(3 << 30)
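
Reviewer note: the single RC6_STATE flag gives way to a three-bit field at bits 18:16 of GEN6_RC_STATE, so readers switch from a bit test to the usual mask-then-shift idiom. A small standalone sketch with a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define RC_SW_TARGET_STATE_SHIFT	16
#define RC_SW_TARGET_STATE_MASK		(7 << RC_SW_TARGET_STATE_SHIFT)

int main(void)
{
	uint32_t rc_state = 0x00030000;	/* hypothetical GEN6_RC_STATE readout */
	unsigned int target = (rc_state & RC_SW_TARGET_STATE_MASK) >>
			      RC_SW_TARGET_STATE_SHIFT;

	printf("sw target state = %u\n", target);	/* prints 3 */
	return 0;
}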
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 34e061a9ef06..5cfe4c7716b4 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -31,7 +31,7 @@

 static void i915_save_display(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	/* Display arbitration control */
 	if (INTEL_INFO(dev)->gen <= 4)
@@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev)

 static void i915_restore_display(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 mask = 0xffffffff;

 	/* Display arbitration */
@@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev)

 int i915_save_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;

 	mutex_lock(&dev->struct_mutex);
@@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev)

 int i915_restore_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;

 	mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 02507bfc8def..d61829e54f93 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -38,7 +38,7 @@
 static u32 calc_residency(struct drm_device *dev,
 			  i915_reg_t reg)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u64 raw_time; /* 32b value may overflow during fixed point math */
 	u64 units = 128ULL, div = 100000ULL;
 	u32 ret;
@@ -166,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
 	struct device *dev = kobj_to_dev(kobj);
 	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(drm_dev);
 	int slice = (int)(uintptr_t)attr->private;
 	int ret;

@@ -202,7 +202,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 	struct device *dev = kobj_to_dev(kobj);
 	struct drm_minor *dminor = dev_to_drm_minor(dev);
 	struct drm_device *drm_dev = dminor->dev;
-	struct drm_i915_private *dev_priv = drm_dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(drm_dev);
 	struct i915_gem_context *ctx;
 	u32 *temp = NULL; /* Just here to make handling failures easy */
 	int slice = (int)(uintptr_t)attr->private;
@@ -227,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
 		}
 	}

-	ret = i915_gpu_idle(drm_dev);
-	if (ret) {
-		kfree(temp);
-		mutex_unlock(&drm_dev->struct_mutex);
-		return ret;
-	}
-
 	/* TODO: Ideally we really want a GPU reset here to make sure errors
 	 * aren't propagated. Since I cannot find a stable way to reset the GPU
 	 * at this point it is left as a TODO.
@@ -275,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;

 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -309,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;

 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -330,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);

 	return snprintf(buf, PAGE_SIZE,
 			"%d\n",
@@ -341,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;

 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -359,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;
 	ssize_t ret;

@@ -409,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;

 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -427,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;
 	ssize_t ret;

@@ -487,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 {
 	struct drm_minor *minor = dev_to_drm_minor(kdev);
 	struct drm_device *dev = minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val;

 	if (attr == &dev_attr_gt_RP0_freq_mhz)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6768db032f84..534154e05fbe 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
 	),

 	TP_fast_assign(
-		__entry->dev = i915->dev->primary->index;
+		__entry->dev = i915->drm.primary->index;
 		__entry->target = target;
 		__entry->flags = flags;
 	),
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 	),

 	TP_fast_assign(
-		__entry->dev = from->i915->dev->primary->index;
+		__entry->dev = from->i915->drm.primary->index;
 		__entry->sync_from = from->id;
 		__entry->sync_to = to_req->engine->id;
 		__entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,11 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
 	),

 	TP_fast_assign(
-		__entry->dev = req->i915->dev->primary->index;
+		__entry->dev = req->i915->drm.primary->index;
 		__entry->ring = req->engine->id;
 		__entry->seqno = req->seqno;
 		__entry->flags = flags;
-		i915_trace_irq_get(req->engine, req);
+		intel_engine_enable_signaling(req);
 	),

 	TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -509,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
 	),

 	TP_fast_assign(
-		__entry->dev = req->i915->dev->primary->index;
+		__entry->dev = req->i915->drm.primary->index;
 		__entry->ring = req->engine->id;
 		__entry->invalidate = invalidate;
 		__entry->flush = flush;
@@ -531,7 +531,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 	),

 	TP_fast_assign(
-		__entry->dev = req->i915->dev->primary->index;
+		__entry->dev = req->i915->drm.primary->index;
 		__entry->ring = req->engine->id;
 		__entry->seqno = req->seqno;
 	),
@@ -556,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify,
 	),

 	TP_fast_assign(
-		__entry->dev = engine->i915->dev->primary->index;
+		__entry->dev = engine->i915->drm.primary->index;
 		__entry->ring = engine->id;
-		__entry->seqno = engine->get_seqno(engine);
+		__entry->seqno = intel_engine_get_seqno(engine);
 	),

 	TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -593,11 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 	 * less desirable.
 	 */
 	TP_fast_assign(
-		__entry->dev = req->i915->dev->primary->index;
+		__entry->dev = req->i915->drm.primary->index;
 		__entry->ring = req->engine->id;
 		__entry->seqno = req->seqno;
 		__entry->blocking =
-			mutex_is_locked(&req->i915->dev->struct_mutex);
+			mutex_is_locked(&req->i915->drm.struct_mutex);
 	),

 	TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -746,7 +746,7 @@ DECLARE_EVENT_CLASS(i915_context,
 	TP_fast_assign(
 		__entry->ctx = ctx;
 		__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-		__entry->dev = ctx->i915->dev->primary->index;
+		__entry->dev = ctx->i915->drm.primary->index;
 	),

 	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -786,7 +786,7 @@ TRACE_EVENT(switch_mm,
 		__entry->ring = engine->id;
 		__entry->to = to;
 		__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
-		__entry->dev = engine->i915->dev->primary->index;
+		__entry->dev = engine->i915->drm.primary->index;
 	),

 	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index b9329c2a670a..6700a7be7f78 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
 {
 	if (((mode->clock == TMDS_297M) ||
 	     (mode->clock == TMDS_296M)) &&
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
 		return true;
 	else
 		return false;
@@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
 			       i915_reg_t reg_elda, uint32_t bits_elda,
 			       i915_reg_t reg_edid)
 {
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	uint8_t *eld = connector->eld;
 	uint32_t tmp;
 	int i;
@@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,

 static void g4x_audio_codec_disable(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	uint32_t eldv, tmp;

 	DRM_DEBUG_KMS("Disable audio codec\n");
@@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	uint8_t *eld = connector->eld;
 	uint32_t eldv;
 	uint32_t tmp;
@@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,

 static void hsw_audio_codec_disable(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	uint32_t tmp;
@@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	I915_WRITE(HSW_AUD_CFG(pipe), tmp);

@@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct i915_audio_component *acomp = dev_priv->audio_component;
@@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
 	tmp = I915_READ(HSW_AUD_CFG(pipe));
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	else
 		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,

 static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_digital_port *intel_dig_port =
 		enc_to_dig_port(&encoder->base);
@@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
 	tmp |= AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
 	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	I915_WRITE(aud_config, tmp);

@@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   const struct drm_display_mode *adjusted_mode)
 {
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_digital_port *intel_dig_port =
 		enc_to_dig_port(&encoder->base);
@@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
 	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
 	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
 	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		tmp |= AUD_CONFIG_N_VALUE_INDEX;
 	else
 		tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_audio_component *acomp = dev_priv->audio_component;
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	enum port port = intel_dig_port->port;
@@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)

 	/* ELD Conn_Type */
 	connector->eld[5] &= ~(3 << 2);
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+	if (intel_crtc_has_dp_encoder(crtc->config))
 		connector->eld[5] |= (1 << 2);

 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_audio_component *acomp = dev_priv->audio_component;
 	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
 	enum port port = intel_dig_port->port;
@@ -749,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
 	if (WARN_ON(acomp->ops || acomp->dev))
 		return -EEXIST;

-	drm_modeset_lock_all(dev_priv->dev);
+	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = &i915_audio_component_ops;
 	acomp->dev = i915_dev;
 	BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
 	for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
 		acomp->aud_sample_rate[i] = 0;
 	dev_priv->audio_component = acomp;
-	drm_modeset_unlock_all(dev_priv->dev);
+	drm_modeset_unlock_all(&dev_priv->drm);

 	return 0;
 }
@@ -767,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
 	struct i915_audio_component *acomp = data;
 	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);

-	drm_modeset_lock_all(dev_priv->dev);
+	drm_modeset_lock_all(&dev_priv->drm);
 	acomp->ops = NULL;
 	acomp->dev = NULL;
 	dev_priv->audio_component = NULL;
-	drm_modeset_unlock_all(dev_priv->dev);
+	drm_modeset_unlock_all(&dev_priv->drm);
 }

 static const struct component_ops i915_audio_component_bind_ops = {
@@ -799,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
 	int ret;

-	ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+	ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
 	if (ret < 0) {
 		DRM_ERROR("failed to add audio component (%d)\n", ret);
 		/* continue with reduced functionality */
@@ -821,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
 	if (!dev_priv->audio_component_registered)
 		return;

-	component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
+	component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
 	dev_priv->audio_component_registered = false;
 }
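
Reviewer note: the audio hunks swap encoder-walking checks (intel_pipe_has_type()) for state-based ones. intel_crtc_has_type() tests a bit in the crtc state's output-type mask, and intel_crtc_has_dp_encoder() appears to be the DP flavour of the same test. A reduced sketch of the idea, with illustrative enum values rather than the driver's real ones:

#include <stdbool.h>
#include <stdio.h>

/* Values are illustrative only. */
enum intel_output_type { INTEL_OUTPUT_HDMI = 6, INTEL_OUTPUT_DP = 7 };

struct intel_crtc_state {
	unsigned int output_types;	/* bitmask of enum intel_output_type */
};

static bool intel_crtc_has_type(const struct intel_crtc_state *state,
				enum intel_output_type type)
{
	return state->output_types & (1 << type);
}

int main(void)
{
	struct intel_crtc_state state = {
		.output_types = 1 << INTEL_OUTPUT_HDMI,
	};

	printf("hdmi? %d dp? %d\n",
	       intel_crtc_has_type(&state, INTEL_OUTPUT_HDMI),
	       intel_crtc_has_type(&state, INTEL_OUTPUT_DP));
	return 0;
}

Querying precomputed state avoids walking the encoder list for every register write, which matters in the codec enable/disable paths above.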
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index da5ed4a850b9..c6e69e4cfa83 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1426,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
 int
 intel_bios_init(struct drm_i915_private *dev_priv)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	const struct vbt_header *vbt = dev_priv->opregion.vbt;
 	const struct bdb_header *bdb;
 	u8 __iomem *bios = NULL;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
new file mode 100644
index 000000000000..d89b2c963618
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -0,0 +1,586 @@
1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include <linux/kthread.h>
26
27#include "i915_drv.h"
28
29static void intel_breadcrumbs_fake_irq(unsigned long data)
30{
31 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
32
33 /*
34 * The timer persists in case we cannot enable interrupts,
35 * or if we have previously seen seqno/interrupt incoherency
36 * ("missed interrupt" syndrome). Here the worker will wake up
37 * every jiffie in order to kick the oldest waiter to do the
38 * coherent seqno check.
39 */
40 rcu_read_lock();
41 if (intel_engine_wakeup(engine))
42 mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
43 rcu_read_unlock();
44}
45
46static void irq_enable(struct intel_engine_cs *engine)
47{
48 /* Enabling the IRQ may miss the generation of the interrupt, but
49 * we still need to force the barrier before reading the seqno,
50 * just in case.
51 */
52 engine->breadcrumbs.irq_posted = true;
53
54 spin_lock_irq(&engine->i915->irq_lock);
55 engine->irq_enable(engine);
56 spin_unlock_irq(&engine->i915->irq_lock);
57}
58
59static void irq_disable(struct intel_engine_cs *engine)
60{
61 spin_lock_irq(&engine->i915->irq_lock);
62 engine->irq_disable(engine);
63 spin_unlock_irq(&engine->i915->irq_lock);
64
65 engine->breadcrumbs.irq_posted = false;
66}
67
68static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
69{
70 struct intel_engine_cs *engine =
71 container_of(b, struct intel_engine_cs, breadcrumbs);
72 struct drm_i915_private *i915 = engine->i915;
73
74 assert_spin_locked(&b->lock);
75 if (b->rpm_wakelock)
76 return;
77
78 /* Since we are waiting on a request, the GPU should be busy
79 * and should have its own rpm reference. For completeness,
80 * record an rpm reference for ourselves to cover the
81 * interrupt we unmask.
82 */
83 intel_runtime_pm_get_noresume(i915);
84 b->rpm_wakelock = true;
85
86 /* No interrupts? Kick the waiter every jiffie! */
87 if (intel_irqs_enabled(i915)) {
88 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
89 irq_enable(engine);
90 b->irq_enabled = true;
91 }
92
93 if (!b->irq_enabled ||
94 test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
95 mod_timer(&b->fake_irq, jiffies + 1);
96}
97
98static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
99{
100 struct intel_engine_cs *engine =
101 container_of(b, struct intel_engine_cs, breadcrumbs);
102
103 assert_spin_locked(&b->lock);
104 if (!b->rpm_wakelock)
105 return;
106
107 if (b->irq_enabled) {
108 irq_disable(engine);
109 b->irq_enabled = false;
110 }
111
112 intel_runtime_pm_put(engine->i915);
113 b->rpm_wakelock = false;
114}
115
116static inline struct intel_wait *to_wait(struct rb_node *node)
117{
118 return container_of(node, struct intel_wait, node);
119}
120
121static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
122 struct intel_wait *wait)
123{
124 assert_spin_locked(&b->lock);
125
126 /* This request is completed, so remove it from the tree, mark it as
127 * complete, and *then* wake up the associated task.
128 */
129 rb_erase(&wait->node, &b->waiters);
130 RB_CLEAR_NODE(&wait->node);
131
132 wake_up_process(wait->tsk); /* implicit smp_wmb() */
133}
134
135static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
136 struct intel_wait *wait)
137{
138 struct intel_breadcrumbs *b = &engine->breadcrumbs;
139 struct rb_node **p, *parent, *completed;
140 bool first;
141 u32 seqno;
142
143 /* Insert the request into the retirement ordered list
144 * of waiters by walking the rbtree. If we are the oldest
145 * seqno in the tree (the first to be retired), then
146 * set ourselves as the bottom-half.
147 *
 148 * As we descend the tree, prune completed branches: since we hold the
 149 * spinlock, we know that the first_waiter must be delayed, and we can
 150 * reduce some of the sequential wake-up latency if we take action
 151 * ourselves and wake up the completed tasks in parallel. Also, by
152 * removing stale elements in the tree, we may be able to reduce the
153 * ping-pong between the old bottom-half and ourselves as first-waiter.
154 */
155 first = true;
156 parent = NULL;
157 completed = NULL;
158 seqno = intel_engine_get_seqno(engine);
159
160 /* If the request completed before we managed to grab the spinlock,
161 * return now before adding ourselves to the rbtree. We let the
162 * current bottom-half handle any pending wakeups and instead
163 * try and get out of the way quickly.
164 */
165 if (i915_seqno_passed(seqno, wait->seqno)) {
166 RB_CLEAR_NODE(&wait->node);
167 return first;
168 }
169
170 p = &b->waiters.rb_node;
171 while (*p) {
172 parent = *p;
173 if (wait->seqno == to_wait(parent)->seqno) {
174 /* We have multiple waiters on the same seqno, select
175 * the highest priority task (that with the smallest
176 * task->prio) to serve as the bottom-half for this
177 * group.
178 */
179 if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
180 p = &parent->rb_right;
181 first = false;
182 } else {
183 p = &parent->rb_left;
184 }
185 } else if (i915_seqno_passed(wait->seqno,
186 to_wait(parent)->seqno)) {
187 p = &parent->rb_right;
188 if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
189 completed = parent;
190 else
191 first = false;
192 } else {
193 p = &parent->rb_left;
194 }
195 }
196 rb_link_node(&wait->node, parent, p);
197 rb_insert_color(&wait->node, &b->waiters);
198 GEM_BUG_ON(!first && !b->irq_seqno_bh);
199
200 if (completed) {
201 struct rb_node *next = rb_next(completed);
202
203 GEM_BUG_ON(!next && !first);
204 if (next && next != &wait->node) {
205 GEM_BUG_ON(first);
206 b->first_wait = to_wait(next);
207 smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
208 /* As there is a delay between reading the current
209 * seqno, processing the completed tasks and selecting
210 * the next waiter, we may have missed the interrupt
 211 * and so need the next bottom-half to wake up.
212 *
213 * Also as we enable the IRQ, we may miss the
214 * interrupt for that seqno, so we have to wake up
215 * the next bottom-half in order to do a coherent check
216 * in case the seqno passed.
217 */
218 __intel_breadcrumbs_enable_irq(b);
219 if (READ_ONCE(b->irq_posted))
220 wake_up_process(to_wait(next)->tsk);
221 }
222
223 do {
224 struct intel_wait *crumb = to_wait(completed);
225 completed = rb_prev(completed);
226 __intel_breadcrumbs_finish(b, crumb);
227 } while (completed);
228 }
229
230 if (first) {
231 GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
232 b->first_wait = wait;
233 smp_store_mb(b->irq_seqno_bh, wait->tsk);
234 /* After assigning ourselves as the new bottom-half, we must
235 * perform a cursory check to prevent a missed interrupt.
236 * Either we miss the interrupt whilst programming the hardware,
237 * or if there was a previous waiter (for a later seqno) they
238 * may be woken instead of us (due to the inherent race
239 * in the unlocked read of b->irq_seqno_bh in the irq handler)
240 * and so we miss the wake up.
241 */
242 __intel_breadcrumbs_enable_irq(b);
243 }
244 GEM_BUG_ON(!b->irq_seqno_bh);
245 GEM_BUG_ON(!b->first_wait);
246 GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
247
248 return first;
249}
250
251bool intel_engine_add_wait(struct intel_engine_cs *engine,
252 struct intel_wait *wait)
253{
254 struct intel_breadcrumbs *b = &engine->breadcrumbs;
255 bool first;
256
257 spin_lock(&b->lock);
258 first = __intel_engine_add_wait(engine, wait);
259 spin_unlock(&b->lock);
260
261 return first;
262}
263
264void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
265{
266 mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
267}
268
269static inline bool chain_wakeup(struct rb_node *rb, int priority)
270{
271 return rb && to_wait(rb)->tsk->prio <= priority;
272}
273
274static inline int wakeup_priority(struct intel_breadcrumbs *b,
275 struct task_struct *tsk)
276{
277 if (tsk == b->signaler)
278 return INT_MIN;
279 else
280 return tsk->prio;
281}
282
283void intel_engine_remove_wait(struct intel_engine_cs *engine,
284 struct intel_wait *wait)
285{
286 struct intel_breadcrumbs *b = &engine->breadcrumbs;
287
288 /* Quick check to see if this waiter was already decoupled from
289 * the tree by the bottom-half to avoid contention on the spinlock
290 * by the herd.
291 */
292 if (RB_EMPTY_NODE(&wait->node))
293 return;
294
295 spin_lock(&b->lock);
296
297 if (RB_EMPTY_NODE(&wait->node))
298 goto out_unlock;
299
300 if (b->first_wait == wait) {
301 const int priority = wakeup_priority(b, wait->tsk);
302 struct rb_node *next;
303
304 GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
305
306 /* We are the current bottom-half. Find the next candidate,
307 * the first waiter in the queue on the remaining oldest
308 * request. As multiple seqnos may complete in the time it
309 * takes us to wake up and find the next waiter, we have to
310 * wake up that waiter for it to perform its own coherent
311 * completion check.
312 */
313 next = rb_next(&wait->node);
314 if (chain_wakeup(next, priority)) {
315 /* If the next waiter is already complete,
 316 * wake it up and continue on to the next waiter. So
 317 * if we have a small herd, they will wake up in parallel
318 * rather than sequentially, which should reduce
319 * the overall latency in waking all the completed
320 * clients.
321 *
322 * However, waking up a chain adds extra latency to
323 * the first_waiter. This is undesirable if that
324 * waiter is a high priority task.
325 */
326 u32 seqno = intel_engine_get_seqno(engine);
327
328 while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
329 struct rb_node *n = rb_next(next);
330
331 __intel_breadcrumbs_finish(b, to_wait(next));
332 next = n;
333 if (!chain_wakeup(next, priority))
334 break;
335 }
336 }
337
338 if (next) {
339 /* In our haste, we may have completed the first waiter
340 * before we enabled the interrupt. Do so now as we
341 * have a second waiter for a future seqno. Afterwards,
342 * we have to wake up that waiter in case we missed
343 * the interrupt, or if we have to handle an
344 * exception rather than a seqno completion.
345 */
346 b->first_wait = to_wait(next);
347 smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
348 if (b->first_wait->seqno != wait->seqno)
349 __intel_breadcrumbs_enable_irq(b);
350 wake_up_process(b->irq_seqno_bh);
351 } else {
352 b->first_wait = NULL;
353 WRITE_ONCE(b->irq_seqno_bh, NULL);
354 __intel_breadcrumbs_disable_irq(b);
355 }
356 } else {
357 GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
358 }
359
360 GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
361 rb_erase(&wait->node, &b->waiters);
362
363out_unlock:
364 GEM_BUG_ON(b->first_wait == wait);
365 GEM_BUG_ON(rb_first(&b->waiters) !=
366 (b->first_wait ? &b->first_wait->node : NULL));
367 GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
368 spin_unlock(&b->lock);
369}
370
371static bool signal_complete(struct drm_i915_gem_request *request)
372{
373 if (!request)
374 return false;
375
 376 /* If another process served as the bottom-half, it may have already
 377 * signalled that this wait is complete.
378 */
379 if (intel_wait_complete(&request->signaling.wait))
380 return true;
381
382 /* Carefully check if the request is complete, giving time for the
383 * seqno to be visible or if the GPU hung.
384 */
385 if (__i915_request_irq_complete(request))
386 return true;
387
388 return false;
389}
390
391static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
392{
393 return container_of(rb, struct drm_i915_gem_request, signaling.node);
394}
395
396static void signaler_set_rtpriority(void)
397{
398 struct sched_param param = { .sched_priority = 1 };
399
400 sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
401}
402
403static int intel_breadcrumbs_signaler(void *arg)
404{
405 struct intel_engine_cs *engine = arg;
406 struct intel_breadcrumbs *b = &engine->breadcrumbs;
407 struct drm_i915_gem_request *request;
408
409 /* Install ourselves with high priority to reduce signalling latency */
410 signaler_set_rtpriority();
411
412 do {
413 set_current_state(TASK_INTERRUPTIBLE);
414
415 /* We are either woken up by the interrupt bottom-half,
416 * or by a client adding a new signaller. In both cases,
417 * the GPU seqno may have advanced beyond our oldest signal.
418 * If it has, propagate the signal, remove the waiter and
419 * check again with the next oldest signal. Otherwise we
420 * need to wait for a new interrupt from the GPU or for
421 * a new client.
422 */
423 request = READ_ONCE(b->first_signal);
424 if (signal_complete(request)) {
425 /* Wake up all other completed waiters and select the
426 * next bottom-half for the next user interrupt.
427 */
428 intel_engine_remove_wait(engine,
429 &request->signaling.wait);
430
431 /* Find the next oldest signal. Note that as we have
432 * not been holding the lock, another client may
433 * have installed an even older signal than the one
434 * we just completed - so double check we are still
435 * the oldest before picking the next one.
436 */
437 spin_lock(&b->lock);
438 if (request == b->first_signal) {
439 struct rb_node *rb =
440 rb_next(&request->signaling.node);
441 b->first_signal = rb ? to_signaler(rb) : NULL;
442 }
443 rb_erase(&request->signaling.node, &b->signals);
444 spin_unlock(&b->lock);
445
446 i915_gem_request_unreference(request);
447 } else {
448 if (kthread_should_stop())
449 break;
450
451 schedule();
452 }
453 } while (1);
454 __set_current_state(TASK_RUNNING);
455
456 return 0;
457}
458
459void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
460{
461 struct intel_engine_cs *engine = request->engine;
462 struct intel_breadcrumbs *b = &engine->breadcrumbs;
463 struct rb_node *parent, **p;
464 bool first, wakeup;
465
466 if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
467 return;
468
469 spin_lock(&b->lock);
470 if (unlikely(request->signaling.wait.tsk)) {
471 wakeup = false;
472 goto unlock;
473 }
474
475 request->signaling.wait.tsk = b->signaler;
476 request->signaling.wait.seqno = request->seqno;
477 i915_gem_request_reference(request);
478
479 /* First add ourselves into the list of waiters, but register our
 480 * bottom-half as the signaller thread. As usual, only the oldest
 481 * waiter (not just a signaller) is tasked as the bottom-half, waking
 482 * up all completed waiters after the user interrupt.
483 *
484 * If we are the oldest waiter, enable the irq (after which we
485 * must double check that the seqno did not complete).
486 */
487 wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
488
489 /* Now insert ourselves into the retirement ordered list of signals
490 * on this engine. We track the oldest seqno as that will be the
491 * first signal to complete.
492 */
493 parent = NULL;
494 first = true;
495 p = &b->signals.rb_node;
496 while (*p) {
497 parent = *p;
498 if (i915_seqno_passed(request->seqno,
499 to_signaler(parent)->seqno)) {
500 p = &parent->rb_right;
501 first = false;
502 } else {
503 p = &parent->rb_left;
504 }
505 }
506 rb_link_node(&request->signaling.node, parent, p);
507 rb_insert_color(&request->signaling.node, &b->signals);
508 if (first)
509 smp_store_mb(b->first_signal, request);
510
511unlock:
512 spin_unlock(&b->lock);
513
514 if (wakeup)
515 wake_up_process(b->signaler);
516}
517
518int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
519{
520 struct intel_breadcrumbs *b = &engine->breadcrumbs;
521 struct task_struct *tsk;
522
523 spin_lock_init(&b->lock);
524 setup_timer(&b->fake_irq,
525 intel_breadcrumbs_fake_irq,
526 (unsigned long)engine);
527
528 /* Spawn a thread to provide a common bottom-half for all signals.
 529 * As this is an asynchronous interface, we cannot steal the current
 530 * task to handle the bottom-half of the user interrupt; therefore
531 * we create a thread to do the coherent seqno dance after the
532 * interrupt and then signal the waitqueue (via the dma-buf/fence).
533 */
534 tsk = kthread_run(intel_breadcrumbs_signaler, engine,
535 "i915/signal:%d", engine->id);
536 if (IS_ERR(tsk))
537 return PTR_ERR(tsk);
538
539 b->signaler = tsk;
540
541 return 0;
542}
543
544void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
545{
546 struct intel_breadcrumbs *b = &engine->breadcrumbs;
547
548 if (!IS_ERR_OR_NULL(b->signaler))
549 kthread_stop(b->signaler);
550
551 del_timer_sync(&b->fake_irq);
552}
553
554unsigned int intel_kick_waiters(struct drm_i915_private *i915)
555{
556 struct intel_engine_cs *engine;
557 unsigned int mask = 0;
558
559 /* To avoid the task_struct disappearing beneath us as we wake up
560 * the process, we must first inspect the task_struct->state under the
561 * RCU lock, i.e. as we call wake_up_process() we must be holding the
562 * rcu_read_lock().
563 */
564 rcu_read_lock();
565 for_each_engine(engine, i915)
566 if (unlikely(intel_engine_wakeup(engine)))
567 mask |= intel_engine_flag(engine);
568 rcu_read_unlock();
569
570 return mask;
571}
572
573unsigned int intel_kick_signalers(struct drm_i915_private *i915)
574{
575 struct intel_engine_cs *engine;
576 unsigned int mask = 0;
577
578 for_each_engine(engine, i915) {
579 if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
580 wake_up_process(engine->breadcrumbs.signaler);
581 mask |= intel_engine_flag(engine);
582 }
583 }
584
585 return mask;
586}
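
Throughout the new file above, the waiter rbtree is kept in retirement order by i915_seqno_passed(), which compares 32-bit seqnos modulo wraparound via a signed subtraction; __intel_engine_add_wait() uses the same predicate both to prune completed waiters and to pick the branch while walking the tree. A stand-alone sketch of just that comparison, assuming (as the driver does by construction) that two live seqnos are never more than 2^31 apart:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if seq1 is at or after seq2, tolerating u32 wraparound: the
 * signed difference is non-negative whenever seq1 is "ahead", even
 * when the counter has wrapped past zero in between.
 */
static bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* Wrapped case: seqno 2 was issued after 0xfffffffe. */
	printf("%d\n", i915_seqno_passed(2u, 0xfffffffeu));	/* 1 */
	printf("%d\n", i915_seqno_passed(0xfffffffeu, 2u));	/* 0 */
	return 0;
}
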
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 522f5a2de015..bc0fef3d3335 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
96{ 96{
97 struct drm_crtc *crtc = crtc_state->crtc; 97 struct drm_crtc *crtc = crtc_state->crtc;
98 struct drm_device *dev = crtc->dev; 98 struct drm_device *dev = crtc->dev;
99 struct drm_i915_private *dev_priv = dev->dev_private; 99 struct drm_i915_private *dev_priv = to_i915(dev);
100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
101 int i, pipe = intel_crtc->pipe; 101 int i, pipe = intel_crtc->pipe;
102 uint16_t coeffs[9] = { 0, }; 102 uint16_t coeffs[9] = { 0, };
@@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
207{ 207{
208 struct drm_crtc *crtc = state->crtc; 208 struct drm_crtc *crtc = state->crtc;
209 struct drm_device *dev = crtc->dev; 209 struct drm_device *dev = crtc->dev;
210 struct drm_i915_private *dev_priv = dev->dev_private; 210 struct drm_i915_private *dev_priv = to_i915(dev);
211 int pipe = to_intel_crtc(crtc)->pipe; 211 int pipe = to_intel_crtc(crtc)->pipe;
212 uint32_t mode; 212 uint32_t mode;
213 213
@@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
255void intel_color_set_csc(struct drm_crtc_state *crtc_state) 255void intel_color_set_csc(struct drm_crtc_state *crtc_state)
256{ 256{
257 struct drm_device *dev = crtc_state->crtc->dev; 257 struct drm_device *dev = crtc_state->crtc->dev;
258 struct drm_i915_private *dev_priv = dev->dev_private; 258 struct drm_i915_private *dev_priv = to_i915(dev);
259 259
260 if (dev_priv->display.load_csc_matrix) 260 if (dev_priv->display.load_csc_matrix)
261 dev_priv->display.load_csc_matrix(crtc_state); 261 dev_priv->display.load_csc_matrix(crtc_state);
@@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
266 struct drm_property_blob *blob) 266 struct drm_property_blob *blob)
267{ 267{
268 struct drm_device *dev = crtc->dev; 268 struct drm_device *dev = crtc->dev;
269 struct drm_i915_private *dev_priv = dev->dev_private; 269 struct drm_i915_private *dev_priv = to_i915(dev);
270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
271 enum pipe pipe = intel_crtc->pipe; 271 enum pipe pipe = intel_crtc->pipe;
272 int i; 272 int i;
273 273
274 if (HAS_GMCH_DISPLAY(dev)) { 274 if (HAS_GMCH_DISPLAY(dev)) {
275 if (intel_crtc->config->has_dsi_encoder) 275 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
276 assert_dsi_pll_enabled(dev_priv); 276 assert_dsi_pll_enabled(dev_priv);
277 else 277 else
278 assert_pll_enabled(dev_priv, pipe); 278 assert_pll_enabled(dev_priv, pipe);
@@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
313{ 313{
314 struct drm_crtc *crtc = crtc_state->crtc; 314 struct drm_crtc *crtc = crtc_state->crtc;
315 struct drm_device *dev = crtc->dev; 315 struct drm_device *dev = crtc->dev;
316 struct drm_i915_private *dev_priv = dev->dev_private; 316 struct drm_i915_private *dev_priv = to_i915(dev);
317 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 317 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
318 struct intel_crtc_state *intel_crtc_state = 318 struct intel_crtc_state *intel_crtc_state =
319 to_intel_crtc_state(crtc_state); 319 to_intel_crtc_state(crtc_state);
@@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
343{ 343{
344 struct drm_crtc *crtc = state->crtc; 344 struct drm_crtc *crtc = state->crtc;
345 struct drm_device *dev = crtc->dev; 345 struct drm_device *dev = crtc->dev;
346 struct drm_i915_private *dev_priv = dev->dev_private; 346 struct drm_i915_private *dev_priv = to_i915(dev);
347 struct intel_crtc_state *intel_state = to_intel_crtc_state(state); 347 struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
348 enum pipe pipe = to_intel_crtc(crtc)->pipe; 348 enum pipe pipe = to_intel_crtc(crtc)->pipe;
349 uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size; 349 uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
@@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
426{ 426{
427 struct drm_crtc *crtc = state->crtc; 427 struct drm_crtc *crtc = state->crtc;
428 struct drm_device *dev = crtc->dev; 428 struct drm_device *dev = crtc->dev;
429 struct drm_i915_private *dev_priv = dev->dev_private; 429 struct drm_i915_private *dev_priv = to_i915(dev);
430 enum pipe pipe = to_intel_crtc(crtc)->pipe; 430 enum pipe pipe = to_intel_crtc(crtc)->pipe;
431 struct drm_color_lut *lut; 431 struct drm_color_lut *lut;
432 uint32_t i, lut_size; 432 uint32_t i, lut_size;
@@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
485void intel_color_load_luts(struct drm_crtc_state *crtc_state) 485void intel_color_load_luts(struct drm_crtc_state *crtc_state)
486{ 486{
487 struct drm_device *dev = crtc_state->crtc->dev; 487 struct drm_device *dev = crtc_state->crtc->dev;
488 struct drm_i915_private *dev_priv = dev->dev_private; 488 struct drm_i915_private *dev_priv = to_i915(dev);
489 489
490 dev_priv->display.load_luts(crtc_state); 490 dev_priv->display.load_luts(crtc_state);
491} 491}
@@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
526void intel_color_init(struct drm_crtc *crtc) 526void intel_color_init(struct drm_crtc *crtc)
527{ 527{
528 struct drm_device *dev = crtc->dev; 528 struct drm_device *dev = crtc->dev;
529 struct drm_i915_private *dev_priv = dev->dev_private; 529 struct drm_i915_private *dev_priv = to_i915(dev);
530 530
531 drm_mode_crtc_set_gamma_size(crtc, 256); 531 drm_mode_crtc_set_gamma_size(crtc, 256);
532 532
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e115bcc6766f..5819d524d917 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
67 enum pipe *pipe) 67 enum pipe *pipe)
68{ 68{
69 struct drm_device *dev = encoder->base.dev; 69 struct drm_device *dev = encoder->base.dev;
70 struct drm_i915_private *dev_priv = dev->dev_private; 70 struct drm_i915_private *dev_priv = to_i915(dev);
71 struct intel_crt *crt = intel_encoder_to_crt(encoder); 71 struct intel_crt *crt = intel_encoder_to_crt(encoder);
72 enum intel_display_power_domain power_domain; 72 enum intel_display_power_domain power_domain;
73 u32 tmp; 73 u32 tmp;
@@ -98,7 +98,7 @@ out:
98 98
99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) 99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
100{ 100{
101 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 101 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
102 struct intel_crt *crt = intel_encoder_to_crt(encoder); 102 struct intel_crt *crt = intel_encoder_to_crt(encoder);
103 u32 tmp, flags = 0; 103 u32 tmp, flags = 0;
104 104
@@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
146static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 146static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
147{ 147{
148 struct drm_device *dev = encoder->base.dev; 148 struct drm_device *dev = encoder->base.dev;
149 struct drm_i915_private *dev_priv = dev->dev_private; 149 struct drm_i915_private *dev_priv = to_i915(dev);
150 struct intel_crt *crt = intel_encoder_to_crt(encoder); 150 struct intel_crt *crt = intel_encoder_to_crt(encoder);
151 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 151 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
152 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 152 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
281{ 281{
282 struct drm_device *dev = connector->dev; 282 struct drm_device *dev = connector->dev;
283 struct intel_crt *crt = intel_attached_crt(connector); 283 struct intel_crt *crt = intel_attached_crt(connector);
284 struct drm_i915_private *dev_priv = dev->dev_private; 284 struct drm_i915_private *dev_priv = to_i915(dev);
285 u32 adpa; 285 u32 adpa;
286 bool ret; 286 bool ret;
287 287
@@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
301 301
302 I915_WRITE(crt->adpa_reg, adpa); 302 I915_WRITE(crt->adpa_reg, adpa);
303 303
304 if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 304 if (intel_wait_for_register(dev_priv,
305 1000)) 305 crt->adpa_reg,
306 ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
307 1000))
306 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 308 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
307 309
308 if (turn_off_dac) { 310 if (turn_off_dac) {
@@ -326,7 +328,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
326{ 328{
327 struct drm_device *dev = connector->dev; 329 struct drm_device *dev = connector->dev;
328 struct intel_crt *crt = intel_attached_crt(connector); 330 struct intel_crt *crt = intel_attached_crt(connector);
329 struct drm_i915_private *dev_priv = dev->dev_private; 331 struct drm_i915_private *dev_priv = to_i915(dev);
330 u32 adpa; 332 u32 adpa;
331 bool ret; 333 bool ret;
332 u32 save_adpa; 334 u32 save_adpa;
@@ -338,8 +340,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
338 340
339 I915_WRITE(crt->adpa_reg, adpa); 341 I915_WRITE(crt->adpa_reg, adpa);
340 342
341 if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 343 if (intel_wait_for_register(dev_priv,
342 1000)) { 344 crt->adpa_reg,
345 ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
346 1000)) {
343 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 347 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
344 I915_WRITE(crt->adpa_reg, save_adpa); 348 I915_WRITE(crt->adpa_reg, save_adpa);
345 } 349 }
@@ -367,7 +371,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
367static bool intel_crt_detect_hotplug(struct drm_connector *connector) 371static bool intel_crt_detect_hotplug(struct drm_connector *connector)
368{ 372{
369 struct drm_device *dev = connector->dev; 373 struct drm_device *dev = connector->dev;
370 struct drm_i915_private *dev_priv = dev->dev_private; 374 struct drm_i915_private *dev_priv = to_i915(dev);
371 u32 stat; 375 u32 stat;
372 bool ret = false; 376 bool ret = false;
373 int i, tries = 0; 377 int i, tries = 0;
@@ -394,9 +398,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
394 CRT_HOTPLUG_FORCE_DETECT, 398 CRT_HOTPLUG_FORCE_DETECT,
395 CRT_HOTPLUG_FORCE_DETECT); 399 CRT_HOTPLUG_FORCE_DETECT);
396 /* wait for FORCE_DETECT to go off */ 400 /* wait for FORCE_DETECT to go off */
397 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 401 if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
398 CRT_HOTPLUG_FORCE_DETECT) == 0, 402 CRT_HOTPLUG_FORCE_DETECT, 0,
399 1000)) 403 1000))
400 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); 404 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
401 } 405 }
402 406
@@ -449,7 +453,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
449static bool intel_crt_detect_ddc(struct drm_connector *connector) 453static bool intel_crt_detect_ddc(struct drm_connector *connector)
450{ 454{
451 struct intel_crt *crt = intel_attached_crt(connector); 455 struct intel_crt *crt = intel_attached_crt(connector);
452 struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; 456 struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
453 struct edid *edid; 457 struct edid *edid;
454 struct i2c_adapter *i2c; 458 struct i2c_adapter *i2c;
455 459
@@ -485,7 +489,7 @@ static enum drm_connector_status
485intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) 489intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
486{ 490{
487 struct drm_device *dev = crt->base.base.dev; 491 struct drm_device *dev = crt->base.base.dev;
488 struct drm_i915_private *dev_priv = dev->dev_private; 492 struct drm_i915_private *dev_priv = to_i915(dev);
489 uint32_t save_bclrpat; 493 uint32_t save_bclrpat;
490 uint32_t save_vtotal; 494 uint32_t save_vtotal;
491 uint32_t vtotal, vactive; 495 uint32_t vtotal, vactive;
@@ -600,7 +604,7 @@ static enum drm_connector_status
600intel_crt_detect(struct drm_connector *connector, bool force) 604intel_crt_detect(struct drm_connector *connector, bool force)
601{ 605{
602 struct drm_device *dev = connector->dev; 606 struct drm_device *dev = connector->dev;
603 struct drm_i915_private *dev_priv = dev->dev_private; 607 struct drm_i915_private *dev_priv = to_i915(dev);
604 struct intel_crt *crt = intel_attached_crt(connector); 608 struct intel_crt *crt = intel_attached_crt(connector);
605 struct intel_encoder *intel_encoder = &crt->base; 609 struct intel_encoder *intel_encoder = &crt->base;
606 enum intel_display_power_domain power_domain; 610 enum intel_display_power_domain power_domain;
@@ -681,7 +685,7 @@ static void intel_crt_destroy(struct drm_connector *connector)
681static int intel_crt_get_modes(struct drm_connector *connector) 685static int intel_crt_get_modes(struct drm_connector *connector)
682{ 686{
683 struct drm_device *dev = connector->dev; 687 struct drm_device *dev = connector->dev;
684 struct drm_i915_private *dev_priv = dev->dev_private; 688 struct drm_i915_private *dev_priv = to_i915(dev);
685 struct intel_crt *crt = intel_attached_crt(connector); 689 struct intel_crt *crt = intel_attached_crt(connector);
686 struct intel_encoder *intel_encoder = &crt->base; 690 struct intel_encoder *intel_encoder = &crt->base;
687 enum intel_display_power_domain power_domain; 691 enum intel_display_power_domain power_domain;
@@ -716,7 +720,7 @@ static int intel_crt_set_property(struct drm_connector *connector,
716static void intel_crt_reset(struct drm_connector *connector) 720static void intel_crt_reset(struct drm_connector *connector)
717{ 721{
718 struct drm_device *dev = connector->dev; 722 struct drm_device *dev = connector->dev;
719 struct drm_i915_private *dev_priv = dev->dev_private; 723 struct drm_i915_private *dev_priv = to_i915(dev);
720 struct intel_crt *crt = intel_attached_crt(connector); 724 struct intel_crt *crt = intel_attached_crt(connector);
721 725
722 if (INTEL_INFO(dev)->gen >= 5) { 726 if (INTEL_INFO(dev)->gen >= 5) {
@@ -743,6 +747,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
743 .dpms = drm_atomic_helper_connector_dpms, 747 .dpms = drm_atomic_helper_connector_dpms,
744 .detect = intel_crt_detect, 748 .detect = intel_crt_detect,
745 .fill_modes = drm_helper_probe_single_connector_modes, 749 .fill_modes = drm_helper_probe_single_connector_modes,
750 .late_register = intel_connector_register,
746 .early_unregister = intel_connector_unregister, 751 .early_unregister = intel_connector_unregister,
747 .destroy = intel_crt_destroy, 752 .destroy = intel_crt_destroy,
748 .set_property = intel_crt_set_property, 753 .set_property = intel_crt_set_property,
@@ -791,7 +796,7 @@ void intel_crt_init(struct drm_device *dev)
791 struct drm_connector *connector; 796 struct drm_connector *connector;
792 struct intel_crt *crt; 797 struct intel_crt *crt;
793 struct intel_connector *intel_connector; 798 struct intel_connector *intel_connector;
794 struct drm_i915_private *dev_priv = dev->dev_private; 799 struct drm_i915_private *dev_priv = to_i915(dev);
795 i915_reg_t adpa_reg; 800 i915_reg_t adpa_reg;
796 u32 adpa; 801 u32 adpa;
797 802
@@ -879,8 +884,6 @@ void intel_crt_init(struct drm_device *dev)
879 884
880 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 885 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
881 886
882 drm_connector_register(connector);
883
884 if (!I915_HAS_HOTPLUG(dev)) 887 if (!I915_HAS_HOTPLUG(dev))
885 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; 888 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
886 889
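
The intel_crt.c hunks above replace open-coded wait_for((I915_READ(reg) & mask) == value, timeout) loops with the new intel_wait_for_register() helper. A rough userspace model of the helper's contract (poll until the masked register matches; 0 on success, -ETIMEDOUT otherwise), with the read callback and register offset purely illustrative; the real helper also busy-waits briefly before it starts sleeping:

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Stand-in for a register read; the driver uses I915_READ(). */
typedef uint32_t (*read_reg_t)(uint32_t reg);

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until (read(reg) & mask) == value or timeout_ms expires. */
static int wait_for_register(read_reg_t read, uint32_t reg,
			     uint32_t mask, uint32_t value,
			     unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	do {
		if ((read(reg) & mask) == value)
			return 0;
	} while (now_ms() < deadline);

	return -ETIMEDOUT;
}

static uint32_t fake_read(uint32_t reg)
{
	(void)reg;
	return 0;	/* pretend the trigger bit already cleared */
}

int main(void)
{
	/* Mirrors the hunk: wait up to 1000ms for FORCE_TRIGGER
	 * (bit 23 here, purely illustrative) to read back as zero.
	 */
	return wait_for_register(fake_read, 0x61110, 1u << 23, 0, 1000);
}
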
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 2b3b428d9cd2..c3b33a10c15c 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,15 +41,15 @@
41 * be moved to FW_FAILED. 41 * be moved to FW_FAILED.
42 */ 42 */
43 43
44#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin" 44#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
45MODULE_FIRMWARE(I915_CSR_KBL); 45MODULE_FIRMWARE(I915_CSR_KBL);
46#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) 46#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
47 47
48#define I915_CSR_SKL "i915/skl_dmc_ver1.bin" 48#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
49MODULE_FIRMWARE(I915_CSR_SKL); 49MODULE_FIRMWARE(I915_CSR_SKL);
50#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23) 50#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
51 51
52#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin" 52#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
53MODULE_FIRMWARE(I915_CSR_BXT); 53MODULE_FIRMWARE(I915_CSR_BXT);
54#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) 54#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
55 55
@@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
286 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; 286 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
287 uint32_t i; 287 uint32_t i;
288 uint32_t *dmc_payload; 288 uint32_t *dmc_payload;
289 uint32_t required_min_version; 289 uint32_t required_version;
290 290
291 if (!fw) 291 if (!fw)
292 return NULL; 292 return NULL;
@@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
303 csr->version = css_header->version; 303 csr->version = css_header->version;
304 304
305 if (IS_KABYLAKE(dev_priv)) { 305 if (IS_KABYLAKE(dev_priv)) {
306 required_min_version = KBL_CSR_VERSION_REQUIRED; 306 required_version = KBL_CSR_VERSION_REQUIRED;
307 } else if (IS_SKYLAKE(dev_priv)) { 307 } else if (IS_SKYLAKE(dev_priv)) {
308 required_min_version = SKL_CSR_VERSION_REQUIRED; 308 required_version = SKL_CSR_VERSION_REQUIRED;
309 } else if (IS_BROXTON(dev_priv)) { 309 } else if (IS_BROXTON(dev_priv)) {
310 required_min_version = BXT_CSR_VERSION_REQUIRED; 310 required_version = BXT_CSR_VERSION_REQUIRED;
311 } else { 311 } else {
312 MISSING_CASE(INTEL_REVID(dev_priv)); 312 MISSING_CASE(INTEL_REVID(dev_priv));
313 required_min_version = 0; 313 required_version = 0;
314 } 314 }
315 315
316 if (csr->version < required_min_version) { 316 if (csr->version != required_version) {
317 DRM_INFO("Refusing to load old DMC firmware v%u.%u," 317 DRM_INFO("Refusing to load DMC firmware v%u.%u,"
318 " please upgrade to v%u.%u or later" 318 " please use v%u.%u [" FIRMWARE_URL "].\n",
319 " [" FIRMWARE_URL "].\n",
320 CSR_VERSION_MAJOR(csr->version), 319 CSR_VERSION_MAJOR(csr->version),
321 CSR_VERSION_MINOR(csr->version), 320 CSR_VERSION_MINOR(csr->version),
322 CSR_VERSION_MAJOR(required_min_version), 321 CSR_VERSION_MAJOR(required_version),
323 CSR_VERSION_MINOR(required_min_version)); 322 CSR_VERSION_MINOR(required_version));
324 return NULL; 323 return NULL;
325 } 324 }
326 325
@@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
413 csr = &dev_priv->csr; 412 csr = &dev_priv->csr;
414 413
415 ret = request_firmware(&fw, dev_priv->csr.fw_path, 414 ret = request_firmware(&fw, dev_priv->csr.fw_path,
416 &dev_priv->dev->pdev->dev); 415 &dev_priv->drm.pdev->dev);
417 if (fw) 416 if (fw)
418 dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw); 417 dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
419 418
@@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
427 CSR_VERSION_MAJOR(csr->version), 426 CSR_VERSION_MAJOR(csr->version),
428 CSR_VERSION_MINOR(csr->version)); 427 CSR_VERSION_MINOR(csr->version));
429 } else { 428 } else {
430 dev_notice(dev_priv->dev->dev, 429 dev_notice(dev_priv->drm.dev,
431 "Failed to load DMC firmware" 430 "Failed to load DMC firmware"
432 " [" FIRMWARE_URL "]," 431 " [" FIRMWARE_URL "],"
433 " disabling runtime power management.\n"); 432 " disabling runtime power management.\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ad3b0ee5e55b..dd1d6fe12297 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
318 default: 318 default:
319 WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); 319 WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
320 /* fallthrough and treat as unknown */ 320 /* fallthrough and treat as unknown */
321 case INTEL_OUTPUT_DISPLAYPORT: 321 case INTEL_OUTPUT_DP:
322 case INTEL_OUTPUT_EDP: 322 case INTEL_OUTPUT_EDP:
323 case INTEL_OUTPUT_HDMI: 323 case INTEL_OUTPUT_HDMI:
324 case INTEL_OUTPUT_UNKNOWN: 324 case INTEL_OUTPUT_UNKNOWN:
@@ -482,7 +482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
482 ddi_translations = ddi_translations_edp; 482 ddi_translations = ddi_translations_edp;
483 size = n_edp_entries; 483 size = n_edp_entries;
484 break; 484 break;
485 case INTEL_OUTPUT_DISPLAYPORT: 485 case INTEL_OUTPUT_DP:
486 case INTEL_OUTPUT_HDMI: 486 case INTEL_OUTPUT_HDMI:
487 ddi_translations = ddi_translations_dp; 487 ddi_translations = ddi_translations_dp;
488 size = n_dp_entries; 488 size = n_dp_entries;
@@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
543void hsw_fdi_link_train(struct drm_crtc *crtc) 543void hsw_fdi_link_train(struct drm_crtc *crtc)
544{ 544{
545 struct drm_device *dev = crtc->dev; 545 struct drm_device *dev = crtc->dev;
546 struct drm_i915_private *dev_priv = dev->dev_private; 546 struct drm_i915_private *dev_priv = to_i915(dev);
547 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 547 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
548 struct intel_encoder *encoder; 548 struct intel_encoder *encoder;
549 u32 temp, i, rx_ctl_val; 549 u32 temp, i, rx_ctl_val;
@@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
834 if (pipe_config->has_pch_encoder) 834 if (pipe_config->has_pch_encoder)
835 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 835 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
836 &pipe_config->fdi_m_n); 836 &pipe_config->fdi_m_n);
837 else if (pipe_config->has_dp_encoder) 837 else if (intel_crtc_has_dp_encoder(pipe_config))
838 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 838 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
839 &pipe_config->dp_m_n); 839 &pipe_config->dp_m_n);
840 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) 840 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
@@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
851static void skl_ddi_clock_get(struct intel_encoder *encoder, 851static void skl_ddi_clock_get(struct intel_encoder *encoder,
852 struct intel_crtc_state *pipe_config) 852 struct intel_crtc_state *pipe_config)
853{ 853{
854 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 854 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
855 int link_clock = 0; 855 int link_clock = 0;
856 uint32_t dpll_ctl1, dpll; 856 uint32_t dpll_ctl1, dpll;
857 857
@@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
899static void hsw_ddi_clock_get(struct intel_encoder *encoder, 899static void hsw_ddi_clock_get(struct intel_encoder *encoder,
900 struct intel_crtc_state *pipe_config) 900 struct intel_crtc_state *pipe_config)
901{ 901{
902 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 902 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
903 int link_clock = 0; 903 int link_clock = 0;
904 u32 val, pll; 904 u32 val, pll;
905 905
@@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
971static void bxt_ddi_clock_get(struct intel_encoder *encoder, 971static void bxt_ddi_clock_get(struct intel_encoder *encoder,
972 struct intel_crtc_state *pipe_config) 972 struct intel_crtc_state *pipe_config)
973{ 973{
974 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 974 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
975 enum port port = intel_ddi_get_encoder_port(encoder); 975 enum port port = intel_ddi_get_encoder_port(encoder);
976 uint32_t dpll = port; 976 uint32_t dpll = port;
977 977
@@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
1061 1061
1062void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) 1062void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
1063{ 1063{
1064 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1064 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1066 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 1066 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1067 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1067 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1068 int type = intel_encoder->type; 1068 int type = intel_encoder->type;
1069 uint32_t temp; 1069 uint32_t temp;
1070 1070
1071 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { 1071 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
1072 WARN_ON(transcoder_is_dsi(cpu_transcoder)); 1072 WARN_ON(transcoder_is_dsi(cpu_transcoder));
1073 1073
1074 temp = TRANS_MSA_SYNC_CLK; 1074 temp = TRANS_MSA_SYNC_CLK;
@@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
1096{ 1096{
1097 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1097 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1098 struct drm_device *dev = crtc->dev; 1098 struct drm_device *dev = crtc->dev;
1099 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = to_i915(dev);
1100 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1100 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1101 uint32_t temp; 1101 uint32_t temp;
1102 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1102 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1113 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 1113 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1114 struct drm_encoder *encoder = &intel_encoder->base; 1114 struct drm_encoder *encoder = &intel_encoder->base;
1115 struct drm_device *dev = crtc->dev; 1115 struct drm_device *dev = crtc->dev;
1116 struct drm_i915_private *dev_priv = dev->dev_private; 1116 struct drm_i915_private *dev_priv = to_i915(dev);
1117 enum pipe pipe = intel_crtc->pipe; 1117 enum pipe pipe = intel_crtc->pipe;
1118 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1118 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1119 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1119 enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1182 temp |= TRANS_DDI_MODE_SELECT_FDI; 1182 temp |= TRANS_DDI_MODE_SELECT_FDI;
1183 temp |= (intel_crtc->config->fdi_lanes - 1) << 1; 1183 temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
1184 1184
1185 } else if (type == INTEL_OUTPUT_DISPLAYPORT || 1185 } else if (type == INTEL_OUTPUT_DP ||
1186 type == INTEL_OUTPUT_EDP) { 1186 type == INTEL_OUTPUT_EDP) {
1187 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1187 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1188 1188
@@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
1223bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) 1223bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1224{ 1224{
1225 struct drm_device *dev = intel_connector->base.dev; 1225 struct drm_device *dev = intel_connector->base.dev;
1226 struct drm_i915_private *dev_priv = dev->dev_private; 1226 struct drm_i915_private *dev_priv = to_i915(dev);
1227 struct intel_encoder *intel_encoder = intel_connector->encoder; 1227 struct intel_encoder *intel_encoder = intel_connector->encoder;
1228 int type = intel_connector->base.connector_type; 1228 int type = intel_connector->base.connector_type;
1229 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1229 enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1285 enum pipe *pipe) 1285 enum pipe *pipe)
1286{ 1286{
1287 struct drm_device *dev = encoder->base.dev; 1287 struct drm_device *dev = encoder->base.dev;
1288 struct drm_i915_private *dev_priv = dev->dev_private; 1288 struct drm_i915_private *dev_priv = to_i915(dev);
1289 enum port port = intel_ddi_get_encoder_port(encoder); 1289 enum port port = intel_ddi_get_encoder_port(encoder);
1290 enum intel_display_power_domain power_domain; 1290 enum intel_display_power_domain power_domain;
1291 u32 tmp; 1291 u32 tmp;
@@ -1359,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
1359{ 1359{
1360 struct drm_crtc *crtc = &intel_crtc->base; 1360 struct drm_crtc *crtc = &intel_crtc->base;
1361 struct drm_device *dev = crtc->dev; 1361 struct drm_device *dev = crtc->dev;
1362 struct drm_i915_private *dev_priv = dev->dev_private; 1362 struct drm_i915_private *dev_priv = to_i915(dev);
1363 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 1363 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1364 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1364 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1365 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1365 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -1371,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
1371 1371
1372void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) 1372void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
1373{ 1373{
1374 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1374 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
1375 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1375 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1376 1376
1377 if (cpu_transcoder != TRANSCODER_EDP) 1377 if (cpu_transcoder != TRANSCODER_EDP)
@@ -1392,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
1392 dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; 1392 dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
1393 hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; 1393 hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
1394 1394
1395 if (type == INTEL_OUTPUT_DISPLAYPORT) { 1395 if (type == INTEL_OUTPUT_DP) {
1396 if (dp_iboost) { 1396 if (dp_iboost) {
1397 iboost = dp_iboost; 1397 iboost = dp_iboost;
1398 } else { 1398 } else {
@@ -1450,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
1450 if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { 1450 if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
1451 n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); 1451 n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
1452 ddi_translations = bxt_ddi_translations_edp; 1452 ddi_translations = bxt_ddi_translations_edp;
1453 } else if (type == INTEL_OUTPUT_DISPLAYPORT 1453 } else if (type == INTEL_OUTPUT_DP
1454 || type == INTEL_OUTPUT_EDP) { 1454 || type == INTEL_OUTPUT_EDP) {
1455 n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); 1455 n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
1456 ddi_translations = bxt_ddi_translations_dp; 1456 ddi_translations = bxt_ddi_translations_dp;
@@ -1624,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1624 1624
1625 intel_ddi_clk_select(intel_encoder, crtc->config); 1625 intel_ddi_clk_select(intel_encoder, crtc->config);
1626 1626
1627 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1627 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
1628 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1628 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1629 1629
1630 intel_dp_set_link_params(intel_dp, crtc->config); 1630 intel_dp_set_link_params(intel_dp, crtc->config);
@@ -1648,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1648{ 1648{
1649 struct drm_encoder *encoder = &intel_encoder->base; 1649 struct drm_encoder *encoder = &intel_encoder->base;
1650 struct drm_device *dev = encoder->dev; 1650 struct drm_device *dev = encoder->dev;
1651 struct drm_i915_private *dev_priv = dev->dev_private; 1651 struct drm_i915_private *dev_priv = to_i915(dev);
1652 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1652 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1653 int type = intel_encoder->type; 1653 int type = intel_encoder->type;
1654 uint32_t val; 1654 uint32_t val;
@@ -1669,7 +1669,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1669 if (wait) 1669 if (wait)
1670 intel_wait_ddi_buf_idle(dev_priv, port); 1670 intel_wait_ddi_buf_idle(dev_priv, port);
1671 1671
1672 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1672 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
1673 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1673 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1674 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1674 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1675 intel_edp_panel_vdd_on(intel_dp); 1675 intel_edp_panel_vdd_on(intel_dp);
@@ -1695,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1695 struct drm_crtc *crtc = encoder->crtc; 1695 struct drm_crtc *crtc = encoder->crtc;
1696 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1696 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1697 struct drm_device *dev = encoder->dev; 1697 struct drm_device *dev = encoder->dev;
1698 struct drm_i915_private *dev_priv = dev->dev_private; 1698 struct drm_i915_private *dev_priv = to_i915(dev);
1699 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1699 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1700 int type = intel_encoder->type; 1700 int type = intel_encoder->type;
1701 1701
@@ -1734,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1734 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1734 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1735 int type = intel_encoder->type; 1735 int type = intel_encoder->type;
1736 struct drm_device *dev = encoder->dev; 1736 struct drm_device *dev = encoder->dev;
1737 struct drm_i915_private *dev_priv = dev->dev_private; 1737 struct drm_i915_private *dev_priv = to_i915(dev);
1738 1738
1739 if (intel_crtc->config->has_audio) { 1739 if (intel_crtc->config->has_audio) {
1740 intel_audio_codec_disable(intel_encoder); 1740 intel_audio_codec_disable(intel_encoder);
@@ -1808,7 +1808,10 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
1808static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, 1808static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
1809 enum dpio_phy phy) 1809 enum dpio_phy phy)
1810{ 1810{
1811 if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) 1811 if (intel_wait_for_register(dev_priv,
1812 BXT_PORT_REF_DW3(phy),
1813 GRC_DONE, GRC_DONE,
1814 10))
1812 DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); 1815 DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
1813} 1816}
1814 1817
@@ -2121,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
2121 2124
2122void intel_ddi_fdi_disable(struct drm_crtc *crtc) 2125void intel_ddi_fdi_disable(struct drm_crtc *crtc)
2123{ 2126{
2124 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 2127 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2125 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 2128 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
2126 uint32_t val; 2129 uint32_t val;
2127 2130
@@ -2154,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
2154void intel_ddi_get_config(struct intel_encoder *encoder, 2157void intel_ddi_get_config(struct intel_encoder *encoder,
2155 struct intel_crtc_state *pipe_config) 2158 struct intel_crtc_state *pipe_config)
2156{ 2159{
2157 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 2160 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2158 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 2161 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2159 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2162 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2160 struct intel_hdmi *intel_hdmi; 2163 struct intel_hdmi *intel_hdmi;
@@ -2208,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
2208 break; 2211 break;
2209 case TRANS_DDI_MODE_SELECT_DP_SST: 2212 case TRANS_DDI_MODE_SELECT_DP_SST:
2210 case TRANS_DDI_MODE_SELECT_DP_MST: 2213 case TRANS_DDI_MODE_SELECT_DP_MST:
2211 pipe_config->has_dp_encoder = true;
2212 pipe_config->lane_count = 2214 pipe_config->lane_count =
2213 ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; 2215 ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
2214 intel_dp_get_m_n(intel_crtc, pipe_config); 2216 intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2253,7 +2255,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
2253static bool intel_ddi_compute_config(struct intel_encoder *encoder, 2255static bool intel_ddi_compute_config(struct intel_encoder *encoder,
2254 struct intel_crtc_state *pipe_config) 2256 struct intel_crtc_state *pipe_config)
2255{ 2257{
2256 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 2258 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2257 int type = encoder->type; 2259 int type = encoder->type;
2258 int port = intel_ddi_get_encoder_port(encoder); 2260 int port = intel_ddi_get_encoder_port(encoder);
2259 int ret; 2261 int ret;
@@ -2319,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
2319 2321
2320void intel_ddi_init(struct drm_device *dev, enum port port) 2322void intel_ddi_init(struct drm_device *dev, enum port port)
2321{ 2323{
2322 struct drm_i915_private *dev_priv = dev->dev_private; 2324 struct drm_i915_private *dev_priv = to_i915(dev);
2323 struct intel_digital_port *intel_dig_port; 2325 struct intel_digital_port *intel_dig_port;
2324 struct intel_encoder *intel_encoder; 2326 struct intel_encoder *intel_encoder;
2325 struct drm_encoder *encoder; 2327 struct drm_encoder *encoder;
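
The INTEL_OUTPUT_DISPLAYPORT to INTEL_OUTPUT_DP rename in the hunks above goes together with a broader change visible in intel_color.c and intel_ddi.c: per-output booleans such as has_dp_encoder (deleted above) give way to an output_types bitmask on the crtc state, queried through small helpers such as intel_crtc_has_type() and intel_crtc_has_dp_encoder(). A sketch of that pattern, with illustrative enum values rather than the real intel_drv.h definitions:

#include <stdbool.h>

enum intel_output_type {
	INTEL_OUTPUT_DP = 7,	/* values illustrative only */
	INTEL_OUTPUT_EDP = 8,
	INTEL_OUTPUT_DSI = 9,
	INTEL_OUTPUT_DP_MST = 11,
};

struct intel_crtc_state {
	unsigned int output_types;	/* bitmask of intel_output_type */
};

static bool intel_crtc_has_type(const struct intel_crtc_state *state,
				enum intel_output_type type)
{
	return state->output_types & (1u << type);
}

/* A crtc may drive any DP flavour; fold SST, MST and eDP together. */
static bool intel_crtc_has_dp_encoder(const struct intel_crtc_state *state)
{
	return state->output_types & ((1u << INTEL_OUTPUT_DP) |
				      (1u << INTEL_OUTPUT_DP_MST) |
				      (1u << INTEL_OUTPUT_EDP));
}

int main(void)
{
	struct intel_crtc_state state = {
		.output_types = 1u << INTEL_OUTPUT_EDP,
	};

	/* eDP counts as a DP encoder but not as DSI. */
	return intel_crtc_has_dp_encoder(&state) &&
	       !intel_crtc_has_type(&state, INTEL_OUTPUT_DSI) ? 0 : 1;
}
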
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
new file mode 100644
index 000000000000..cba137f9ad3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -0,0 +1,388 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "i915_drv.h"
26
27void intel_device_info_dump(struct drm_i915_private *dev_priv)
28{
29 const struct intel_device_info *info = &dev_priv->info;
30
31#define PRINT_S(name) "%s"
32#define SEP_EMPTY
33#define PRINT_FLAG(name) info->name ? #name "," : ""
34#define SEP_COMMA ,
35 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
36 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
37 info->gen,
38 dev_priv->drm.pdev->device,
39 dev_priv->drm.pdev->revision,
40 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
41#undef PRINT_S
42#undef SEP_EMPTY
43#undef PRINT_FLAG
44#undef SEP_COMMA
45}
46
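The PRINT_S/PRINT_FLAG pair above emits one "%s" in the format string and one conditional argument per device-info flag. For two hypothetical flags the call expands roughly to:

	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" "%s" "%s",
			 info->gen,
			 dev_priv->drm.pdev->device,
			 dev_priv->drm.pdev->revision,
			 info->is_mobile ? "is_mobile," : "",
			 info->has_llc ? "has_llc," : "");

(is_mobile and has_llc stand in for whatever DEV_INFO_FOR_EACH_FLAG actually enumerates.)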
47static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
48{
49 struct intel_device_info *info = mkwrite_device_info(dev_priv);
50 u32 fuse, eu_dis;
51
52 fuse = I915_READ(CHV_FUSE_GT);
53
54 info->slice_total = 1;
55
56 if (!(fuse & CHV_FGT_DISABLE_SS0)) {
57 info->subslice_per_slice++;
58 eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
59 CHV_FGT_EU_DIS_SS0_R1_MASK);
60 info->eu_total += 8 - hweight32(eu_dis);
61 }
62
63 if (!(fuse & CHV_FGT_DISABLE_SS1)) {
64 info->subslice_per_slice++;
65 eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
66 CHV_FGT_EU_DIS_SS1_R1_MASK);
67 info->eu_total += 8 - hweight32(eu_dis);
68 }
69
70 info->subslice_total = info->subslice_per_slice;
71 /*
 72	 * CHV is expected to always have a uniform distribution of EUs
73 * across subslices.
74 */
75 info->eu_per_subslice = info->subslice_total ?
76 info->eu_total / info->subslice_total :
77 0;
78 /*
79 * CHV supports subslice power gating on devices with more than
80 * one subslice, and supports EU power gating on devices with
81 * more than one EU pair per subslice.
82 */
83 info->has_slice_pg = 0;
84 info->has_subslice_pg = (info->subslice_total > 1);
85 info->has_eu_pg = (info->eu_per_subslice > 2);
86}
87
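A worked example of the fuse math above, for a hypothetical CHV_FUSE_GT readout with SS1 disabled and two EUs of SS0 fused off:

	/* subslice_per_slice = 1 (only SS0 enabled)              */
	/* eu_total           = 8 - hweight32(eu_dis) = 8 - 2 = 6 */
	/* eu_per_subslice    = 6 / 1 = 6                         */
	/* has_subslice_pg    = (subslice_total > 1)  -> false    */
	/* has_eu_pg          = (eu_per_subslice > 2) -> true     */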
88static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
89{
90 struct intel_device_info *info = mkwrite_device_info(dev_priv);
91 int s_max = 3, ss_max = 4, eu_max = 8;
92 int s, ss;
93 u32 fuse2, s_enable, ss_disable, eu_disable;
94 u8 eu_mask = 0xff;
95
96 fuse2 = I915_READ(GEN8_FUSE2);
97 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
98 ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
99
100 info->slice_total = hweight32(s_enable);
101 /*
102 * The subslice disable field is global, i.e. it applies
103 * to each of the enabled slices.
104 */
105 info->subslice_per_slice = ss_max - hweight32(ss_disable);
106 info->subslice_total = info->slice_total * info->subslice_per_slice;
107
108 /*
109 * Iterate through enabled slices and subslices to
110 * count the total enabled EU.
111 */
112 for (s = 0; s < s_max; s++) {
113 if (!(s_enable & BIT(s)))
114 /* skip disabled slice */
115 continue;
116
117 eu_disable = I915_READ(GEN9_EU_DISABLE(s));
118 for (ss = 0; ss < ss_max; ss++) {
119 int eu_per_ss;
120
121 if (ss_disable & BIT(ss))
122 /* skip disabled subslice */
123 continue;
124
125 eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
126 eu_mask);
127
128 /*
 129		 * Record which subslice(s) have 7 EUs, so we
130 * can tune the hash used to spread work among
131 * subslices if they are unbalanced.
132 */
133 if (eu_per_ss == 7)
134 info->subslice_7eu[s] |= BIT(ss);
135
136 info->eu_total += eu_per_ss;
137 }
138 }
139
140 /*
141 * SKL is expected to always have a uniform distribution
 142	 * of EUs across subslices, with the exception that any one
143 * EU in any one subslice may be fused off for die
144 * recovery. BXT is expected to be perfectly uniform in EU
145 * distribution.
146 */
147 info->eu_per_subslice = info->subslice_total ?
148 DIV_ROUND_UP(info->eu_total,
149 info->subslice_total) : 0;
150 /*
151 * SKL supports slice power gating on devices with more than
152 * one slice, and supports EU power gating on devices with
153 * more than one EU pair per subslice. BXT supports subslice
154 * power gating on devices with more than one subslice, and
155 * supports EU power gating on devices with more than one EU
156 * pair per subslice.
157 */
158 info->has_slice_pg =
159 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
160 info->slice_total > 1;
161 info->has_subslice_pg =
162 IS_BROXTON(dev_priv) && info->subslice_total > 1;
163 info->has_eu_pg = info->eu_per_subslice > 2;
164
165 if (IS_BROXTON(dev_priv)) {
166#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss))
167 /*
 168		 * There is a HW issue in 2x6 fused-down parts that requires
 169		 * Pooled EU to be enabled as a workaround. The pool configuration
 170		 * changes depending upon which subslice is fused down; this
 171		 * doesn't apply if the device has all 3 subslices enabled.
172 */
173 /* WaEnablePooledEuFor2x6:bxt */
174 info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
175 (info->subslice_per_slice == 2 &&
176 INTEL_REVID(dev_priv) < BXT_REVID_C0));
177
178 info->min_eu_in_pool = 0;
179 if (info->has_pooled_eu) {
180 if (IS_SS_DISABLED(ss_disable, 0) ||
181 IS_SS_DISABLED(ss_disable, 2))
182 info->min_eu_in_pool = 3;
183 else if (IS_SS_DISABLED(ss_disable, 1))
184 info->min_eu_in_pool = 6;
185 else
186 info->min_eu_in_pool = 9;
187 }
188#undef IS_SS_DISABLED
189 }
190}
191
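The WaEnablePooledEuFor2x6 branch above derives the minimum pool size from which subslice is fused off; enumerating the assumed fuse states:

	/* ss_disable = BIT(0) or BIT(2): an edge subslice is gone -> min_eu_in_pool = 3 */
	/* ss_disable = BIT(1):          the middle subslice gone  -> min_eu_in_pool = 6 */
	/* ss_disable = 0:               all three subslices alive -> min_eu_in_pool = 9 */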
192static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
193{
194 struct intel_device_info *info = mkwrite_device_info(dev_priv);
195 const int s_max = 3, ss_max = 3, eu_max = 8;
196 int s, ss;
197 u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
198
199 fuse2 = I915_READ(GEN8_FUSE2);
200 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
201 ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
202
203 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
204 eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
205 ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
206 (32 - GEN8_EU_DIS0_S1_SHIFT));
207 eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
208 ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
209 (32 - GEN8_EU_DIS1_S2_SHIFT));
210
211 info->slice_total = hweight32(s_enable);
212
213 /*
214 * The subslice disable field is global, i.e. it applies
215 * to each of the enabled slices.
216 */
217 info->subslice_per_slice = ss_max - hweight32(ss_disable);
218 info->subslice_total = info->slice_total * info->subslice_per_slice;
219
220 /*
221 * Iterate through enabled slices and subslices to
222 * count the total enabled EU.
223 */
224 for (s = 0; s < s_max; s++) {
225 if (!(s_enable & (0x1 << s)))
226 /* skip disabled slice */
227 continue;
228
229 for (ss = 0; ss < ss_max; ss++) {
230 u32 n_disabled;
231
232 if (ss_disable & (0x1 << ss))
233 /* skip disabled subslice */
234 continue;
235
236 n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
237
238 /*
239 * Record which subslices have 7 EUs.
240 */
241 if (eu_max - n_disabled == 7)
242 info->subslice_7eu[s] |= 1 << ss;
243
244 info->eu_total += eu_max - n_disabled;
245 }
246 }
247
248 /*
 249	 * BDW is expected to always have a uniform distribution of EUs across
250 * subslices with the exception that any one EU in any one subslice may
251 * be fused off for die recovery.
252 */
253 info->eu_per_subslice = info->subslice_total ?
254 DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
255
256 /*
257 * BDW supports slice power gating on devices with more than
258 * one slice.
259 */
260 info->has_slice_pg = (info->slice_total > 1);
261 info->has_subslice_pg = 0;
262 info->has_eu_pg = 0;
263}
264
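The eu_disable[] setup above reassembles each slice's EU-disable field where it straddles two 32-bit fuse registers. A generic sketch of the stitching (names hypothetical):

static inline u32 stitch_fuse_field(u32 lo_reg, u32 hi_reg,
				    unsigned int lo_shift, u32 hi_mask)
{
	/* the low register holds the field's bottom bits starting at lo_shift;
	 * the remainder continues from bit 0 of the high register */
	return (lo_reg >> lo_shift) | ((hi_reg & hi_mask) << (32 - lo_shift));
}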
265/*
266 * Determine various intel_device_info fields at runtime.
267 *
268 * Use it when either:
 269 * - it's too laborious to fill n static structures with the limit
 270 *   when a simple if statement does the job, or
 271 * - run-time checks (e.g. reading fuse/strap registers) are needed.
 272 *
 273 * This function needs to be called:
 274 * - after the MMIO has been set up, as we are reading registers,
 275 * - after the PCH has been detected,
 276 * - before the first use of the fields it can tweak.
277 */
278void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
279{
280 struct intel_device_info *info = mkwrite_device_info(dev_priv);
281 enum pipe pipe;
282
283 /*
 284	 * Skylake and Broxton currently don't expose the topmost plane, as its
 285	 * use is mutually exclusive with the legacy cursor and we only want to expose
286 * one of those, not both. Until we can safely expose the topmost plane
287 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
288 * we don't expose the topmost plane at all to prevent ABI breakage
289 * down the line.
290 */
291 if (IS_BROXTON(dev_priv)) {
292 info->num_sprites[PIPE_A] = 2;
293 info->num_sprites[PIPE_B] = 2;
294 info->num_sprites[PIPE_C] = 1;
295 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
296 for_each_pipe(dev_priv, pipe)
297 info->num_sprites[pipe] = 2;
298 else
299 for_each_pipe(dev_priv, pipe)
300 info->num_sprites[pipe] = 1;
301
302 if (i915.disable_display) {
303 DRM_INFO("Display disabled (module parameter)\n");
304 info->num_pipes = 0;
305 } else if (info->num_pipes > 0 &&
306 (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
307 HAS_PCH_SPLIT(dev_priv)) {
308 u32 fuse_strap = I915_READ(FUSE_STRAP);
309 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
310
311 /*
312 * SFUSE_STRAP is supposed to have a bit signalling the display
313 * is fused off. Unfortunately it seems that, at least in
 314		 * certain cases, a fused-off display means that PCH display
315 * reads don't land anywhere. In that case, we read 0s.
316 *
317 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
318 * should be set when taking over after the firmware.
319 */
320 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
321 sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
322 (dev_priv->pch_type == PCH_CPT &&
323 !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
324 DRM_INFO("Display fused off, disabling\n");
325 info->num_pipes = 0;
326 } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
327 DRM_INFO("PipeC fused off\n");
328 info->num_pipes -= 1;
329 }
330 } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
331 u32 dfsm = I915_READ(SKL_DFSM);
332 u8 disabled_mask = 0;
333 bool invalid;
334 int num_bits;
335
336 if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
337 disabled_mask |= BIT(PIPE_A);
338 if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
339 disabled_mask |= BIT(PIPE_B);
340 if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
341 disabled_mask |= BIT(PIPE_C);
342
343 num_bits = hweight8(disabled_mask);
344
345 switch (disabled_mask) {
346 case BIT(PIPE_A):
347 case BIT(PIPE_B):
348 case BIT(PIPE_A) | BIT(PIPE_B):
349 case BIT(PIPE_A) | BIT(PIPE_C):
350 invalid = true;
351 break;
352 default:
353 invalid = false;
354 }
355
356 if (num_bits > info->num_pipes || invalid)
357 DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
358 disabled_mask);
359 else
360 info->num_pipes -= num_bits;
361 }
362
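	/* Illustration, inferred from the switch above: pipes may only be
	 * fused from the top down, so the enabled set must stay contiguous
	 * from PIPE_A. disabled_mask == BIT(PIPE_C) or BIT(PIPE_B)|BIT(PIPE_C)
	 * is accepted; BIT(PIPE_A) alone would leave a hole and is rejected. */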
363 /* Initialize slice/subslice/EU info */
364 if (IS_CHERRYVIEW(dev_priv))
365 cherryview_sseu_info_init(dev_priv);
366 else if (IS_BROADWELL(dev_priv))
367 broadwell_sseu_info_init(dev_priv);
368 else if (INTEL_INFO(dev_priv)->gen >= 9)
369 gen9_sseu_info_init(dev_priv);
370
371 info->has_snoop = !info->has_llc;
372
373 /* Snooping is broken on BXT A stepping. */
374 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
375 info->has_snoop = false;
376
377 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
378 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
379 DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
380 DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
381 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
382 DRM_DEBUG_DRIVER("has slice power gating: %s\n",
383 info->has_slice_pg ? "y" : "n");
384 DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
385 info->has_subslice_pg ? "y" : "n");
386 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
387 info->has_eu_pg ? "y" : "n");
388}
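Taken together, the new file's entry points are intended to run during driver load, after MMIO setup and PCH detection. A hedged sketch of the call order (the surrounding function is illustrative, not the driver's actual probe path):

static void example_device_info_init(struct drm_i915_private *dev_priv)
{
	/* MMIO must be mapped and the PCH detected before this point,
	 * since runtime_init reads fuse and strap registers */
	intel_device_info_runtime_init(dev_priv);

	/* log gen, PCI id, revision and the per-device feature flags */
	intel_device_info_dump(dev_priv);
}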
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0b2cd669ac05..be3b2cab2640 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -530,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state)
530 return drm_atomic_crtc_needs_modeset(state); 530 return drm_atomic_crtc_needs_modeset(state);
531} 531}
532 532
533/**
534 * Returns whether any output on the specified pipe is of the specified type
535 */
536bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
537{
538 struct drm_device *dev = crtc->base.dev;
539 struct intel_encoder *encoder;
540
541 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
542 if (encoder->type == type)
543 return true;
544
545 return false;
546}
547
548/**
549 * Returns whether any output on the specified pipe will have the specified
550 * type after a staged modeset is complete, i.e., the same as
551 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
552 * encoder->crtc.
553 */
554static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
555 int type)
556{
557 struct drm_atomic_state *state = crtc_state->base.state;
558 struct drm_connector *connector;
559 struct drm_connector_state *connector_state;
560 struct intel_encoder *encoder;
561 int i, num_connectors = 0;
562
563 for_each_connector_in_state(state, connector, connector_state, i) {
564 if (connector_state->crtc != crtc_state->base.crtc)
565 continue;
566
567 num_connectors++;
568
569 encoder = to_intel_encoder(connector_state->best_encoder);
570 if (encoder->type == type)
571 return true;
572 }
573
574 WARN_ON(num_connectors == 0);
575
576 return false;
577}
578
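The two helpers removed above are superseded by intel_crtc_has_type(), which tests crtc state instead of walking encoders at run time. A hedged sketch of its likely shape (the output_types bitmask field is an assumption of this sketch):

static inline bool crtc_has_type_sketch(const struct intel_crtc_state *crtc_state,
					enum intel_output_type type)
{
	/* one bit per output type, recorded when the state was computed */
	return crtc_state->output_types & (1 << type);
}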
579/* 533/*
580 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 534 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
581 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 535 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -688,7 +642,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
688{ 642{
689 struct drm_device *dev = crtc_state->base.crtc->dev; 643 struct drm_device *dev = crtc_state->base.crtc->dev;
690 644
691 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 645 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
692 /* 646 /*
693 * For LVDS just rely on its current settings for dual-channel. 647 * For LVDS just rely on its current settings for dual-channel.
694 * We haven't figured out how to reliably set up different 648 * We haven't figured out how to reliably set up different
@@ -1080,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1080 1034
1081static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 1035static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1082{ 1036{
1083 struct drm_i915_private *dev_priv = dev->dev_private; 1037 struct drm_i915_private *dev_priv = to_i915(dev);
1084 i915_reg_t reg = PIPEDSL(pipe); 1038 i915_reg_t reg = PIPEDSL(pipe);
1085 u32 line1, line2; 1039 u32 line1, line2;
1086 u32 line_mask; 1040 u32 line_mask;
@@ -1116,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1116static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1070static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1117{ 1071{
1118 struct drm_device *dev = crtc->base.dev; 1072 struct drm_device *dev = crtc->base.dev;
1119 struct drm_i915_private *dev_priv = dev->dev_private; 1073 struct drm_i915_private *dev_priv = to_i915(dev);
1120 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1074 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1121 enum pipe pipe = crtc->pipe; 1075 enum pipe pipe = crtc->pipe;
1122 1076
@@ -1124,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1124 i915_reg_t reg = PIPECONF(cpu_transcoder); 1078 i915_reg_t reg = PIPECONF(cpu_transcoder);
1125 1079
1126 /* Wait for the Pipe State to go off */ 1080 /* Wait for the Pipe State to go off */
1127 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1081 if (intel_wait_for_register(dev_priv,
1128 100)) 1082 reg, I965_PIPECONF_ACTIVE, 0,
1083 100))
1129 WARN(1, "pipe_off wait timed out\n"); 1084 WARN(1, "pipe_off wait timed out\n");
1130 } else { 1085 } else {
1131 /* Wait for the display line to settle */ 1086 /* Wait for the display line to settle */
@@ -1234,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1234void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1189void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1235 enum pipe pipe) 1190 enum pipe pipe)
1236{ 1191{
1237 struct drm_device *dev = dev_priv->dev; 1192 struct drm_device *dev = &dev_priv->drm;
1238 i915_reg_t pp_reg; 1193 i915_reg_t pp_reg;
1239 u32 val; 1194 u32 val;
1240 enum pipe panel_pipe = PIPE_A; 1195 enum pipe panel_pipe = PIPE_A;
@@ -1276,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1276static void assert_cursor(struct drm_i915_private *dev_priv, 1231static void assert_cursor(struct drm_i915_private *dev_priv,
1277 enum pipe pipe, bool state) 1232 enum pipe pipe, bool state)
1278{ 1233{
1279 struct drm_device *dev = dev_priv->dev; 1234 struct drm_device *dev = &dev_priv->drm;
1280 bool cur_state; 1235 bool cur_state;
1281 1236
1282 if (IS_845G(dev) || IS_I865G(dev)) 1237 if (IS_845G(dev) || IS_I865G(dev))
@@ -1338,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
1338static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1293static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1339 enum pipe pipe) 1294 enum pipe pipe)
1340{ 1295{
1341 struct drm_device *dev = dev_priv->dev; 1296 struct drm_device *dev = &dev_priv->drm;
1342 int i; 1297 int i;
1343 1298
1344 /* Primary planes are fixed to pipes on gen4+ */ 1299 /* Primary planes are fixed to pipes on gen4+ */
@@ -1364,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1364static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1319static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1365 enum pipe pipe) 1320 enum pipe pipe)
1366{ 1321{
1367 struct drm_device *dev = dev_priv->dev; 1322 struct drm_device *dev = &dev_priv->drm;
1368 int sprite; 1323 int sprite;
1369 1324
1370 if (INTEL_INFO(dev)->gen >= 9) { 1325 if (INTEL_INFO(dev)->gen >= 9) {
@@ -1544,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
1544 POSTING_READ(DPLL(pipe)); 1499 POSTING_READ(DPLL(pipe));
1545 udelay(150); 1500 udelay(150);
1546 1501
1547 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1502 if (intel_wait_for_register(dev_priv,
1503 DPLL(pipe),
1504 DPLL_LOCK_VLV,
1505 DPLL_LOCK_VLV,
1506 1))
1548 DRM_ERROR("DPLL %d failed to lock\n", pipe); 1507 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1549} 1508}
1550 1509
@@ -1593,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
1593 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1552 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1594 1553
1595 /* Check PLL is locked */ 1554 /* Check PLL is locked */
1596 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1555 if (intel_wait_for_register(dev_priv,
1556 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1557 1))
1597 DRM_ERROR("PLL %d failed to lock\n", pipe); 1558 DRM_ERROR("PLL %d failed to lock\n", pipe);
1598} 1559}
1599 1560
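Several hunks in this file replace open-coded wait_for() polling with intel_wait_for_register(dev_priv, reg, mask, value, timeout). A minimal sketch of the assumed semantics (poll until the masked read matches, with a millisecond timeout), not the exact i915 implementation:

static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if ((I915_READ(reg) & mask) == value)
			return 0;
		if (time_after(jiffies, deadline))
			break;
		usleep_range(10, 50);
	}

	/* one last read in case we slept past the deadline */
	return (I915_READ(reg) & mask) == value ? 0 : -ETIMEDOUT;
}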
@@ -1639,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
1639 struct intel_crtc *crtc; 1600 struct intel_crtc *crtc;
1640 int count = 0; 1601 int count = 0;
1641 1602
1642 for_each_intel_crtc(dev, crtc) 1603 for_each_intel_crtc(dev, crtc) {
1643 count += crtc->base.state->active && 1604 count += crtc->base.state->active &&
1644 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); 1605 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1606 }
1645 1607
1646 return count; 1608 return count;
1647} 1609}
@@ -1649,7 +1611,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
1649static void i9xx_enable_pll(struct intel_crtc *crtc) 1611static void i9xx_enable_pll(struct intel_crtc *crtc)
1650{ 1612{
1651 struct drm_device *dev = crtc->base.dev; 1613 struct drm_device *dev = crtc->base.dev;
1652 struct drm_i915_private *dev_priv = dev->dev_private; 1614 struct drm_i915_private *dev_priv = to_i915(dev);
1653 i915_reg_t reg = DPLL(crtc->pipe); 1615 i915_reg_t reg = DPLL(crtc->pipe);
1654 u32 dpll = crtc->config->dpll_hw_state.dpll; 1616 u32 dpll = crtc->config->dpll_hw_state.dpll;
1655 1617
@@ -1721,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1721static void i9xx_disable_pll(struct intel_crtc *crtc) 1683static void i9xx_disable_pll(struct intel_crtc *crtc)
1722{ 1684{
1723 struct drm_device *dev = crtc->base.dev; 1685 struct drm_device *dev = crtc->base.dev;
1724 struct drm_i915_private *dev_priv = dev->dev_private; 1686 struct drm_i915_private *dev_priv = to_i915(dev);
1725 enum pipe pipe = crtc->pipe; 1687 enum pipe pipe = crtc->pipe;
1726 1688
1727 /* Disable DVO 2x clock on both PLLs if necessary */ 1689 /* Disable DVO 2x clock on both PLLs if necessary */
1728 if (IS_I830(dev) && 1690 if (IS_I830(dev) &&
1729 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && 1691 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
1730 !intel_num_dvo_pipes(dev)) { 1692 !intel_num_dvo_pipes(dev)) {
1731 I915_WRITE(DPLL(PIPE_B), 1693 I915_WRITE(DPLL(PIPE_B),
1732 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1694 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1813,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1813 BUG(); 1775 BUG();
1814 } 1776 }
1815 1777
1816 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000)) 1778 if (intel_wait_for_register(dev_priv,
1779 dpll_reg, port_mask, expected_mask,
1780 1000))
1817 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", 1781 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1818 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); 1782 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1819} 1783}
@@ -1821,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1821static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1785static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1822 enum pipe pipe) 1786 enum pipe pipe)
1823{ 1787{
1824 struct drm_device *dev = dev_priv->dev; 1788 struct drm_device *dev = &dev_priv->drm;
1825 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1789 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1826 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1827 i915_reg_t reg; 1791 i915_reg_t reg;
@@ -1854,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1854 * here for both 8bpc and 12bpc. 1818 * here for both 8bpc and 12bpc.
1855 */ 1819 */
1856 val &= ~PIPECONF_BPC_MASK; 1820 val &= ~PIPECONF_BPC_MASK;
1857 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI)) 1821 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1858 val |= PIPECONF_8BPC; 1822 val |= PIPECONF_8BPC;
1859 else 1823 else
1860 val |= pipeconf_val & PIPECONF_BPC_MASK; 1824 val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1863,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1863 val &= ~TRANS_INTERLACE_MASK; 1827 val &= ~TRANS_INTERLACE_MASK;
1864 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1828 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1865 if (HAS_PCH_IBX(dev_priv) && 1829 if (HAS_PCH_IBX(dev_priv) &&
1866 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 1830 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1867 val |= TRANS_LEGACY_INTERLACED_ILK; 1831 val |= TRANS_LEGACY_INTERLACED_ILK;
1868 else 1832 else
1869 val |= TRANS_INTERLACED; 1833 val |= TRANS_INTERLACED;
@@ -1871,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1871 val |= TRANS_PROGRESSIVE; 1835 val |= TRANS_PROGRESSIVE;
1872 1836
1873 I915_WRITE(reg, val | TRANS_ENABLE); 1837 I915_WRITE(reg, val | TRANS_ENABLE);
1874 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1838 if (intel_wait_for_register(dev_priv,
1839 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1840 100))
1875 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1841 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1876} 1842}
1877 1843
@@ -1899,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1899 val |= TRANS_PROGRESSIVE; 1865 val |= TRANS_PROGRESSIVE;
1900 1866
1901 I915_WRITE(LPT_TRANSCONF, val); 1867 I915_WRITE(LPT_TRANSCONF, val);
1902 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) 1868 if (intel_wait_for_register(dev_priv,
1869 LPT_TRANSCONF,
1870 TRANS_STATE_ENABLE,
1871 TRANS_STATE_ENABLE,
1872 100))
1903 DRM_ERROR("Failed to enable PCH transcoder\n"); 1873 DRM_ERROR("Failed to enable PCH transcoder\n");
1904} 1874}
1905 1875
1906static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1876static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1907 enum pipe pipe) 1877 enum pipe pipe)
1908{ 1878{
1909 struct drm_device *dev = dev_priv->dev; 1879 struct drm_device *dev = &dev_priv->drm;
1910 i915_reg_t reg; 1880 i915_reg_t reg;
1911 uint32_t val; 1881 uint32_t val;
1912 1882
@@ -1922,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1922 val &= ~TRANS_ENABLE; 1892 val &= ~TRANS_ENABLE;
1923 I915_WRITE(reg, val); 1893 I915_WRITE(reg, val);
1924 /* wait for PCH transcoder off, transcoder state */ 1894 /* wait for PCH transcoder off, transcoder state */
1925 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1895 if (intel_wait_for_register(dev_priv,
1896 reg, TRANS_STATE_ENABLE, 0,
1897 50))
1926 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1898 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1927 1899
1928 if (HAS_PCH_CPT(dev)) { 1900 if (HAS_PCH_CPT(dev)) {
@@ -1942,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1942 val &= ~TRANS_ENABLE; 1914 val &= ~TRANS_ENABLE;
1943 I915_WRITE(LPT_TRANSCONF, val); 1915 I915_WRITE(LPT_TRANSCONF, val);
1944 /* wait for PCH transcoder off, transcoder state */ 1916 /* wait for PCH transcoder off, transcoder state */
1945 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) 1917 if (intel_wait_for_register(dev_priv,
1918 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1919 50))
1946 DRM_ERROR("Failed to disable PCH transcoder\n"); 1920 DRM_ERROR("Failed to disable PCH transcoder\n");
1947 1921
1948 /* Workaround: clear timing override bit. */ 1922 /* Workaround: clear timing override bit. */
@@ -1961,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1961static void intel_enable_pipe(struct intel_crtc *crtc) 1935static void intel_enable_pipe(struct intel_crtc *crtc)
1962{ 1936{
1963 struct drm_device *dev = crtc->base.dev; 1937 struct drm_device *dev = crtc->base.dev;
1964 struct drm_i915_private *dev_priv = dev->dev_private; 1938 struct drm_i915_private *dev_priv = to_i915(dev);
1965 enum pipe pipe = crtc->pipe; 1939 enum pipe pipe = crtc->pipe;
1966 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1940 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1967 enum pipe pch_transcoder; 1941 enum pipe pch_transcoder;
@@ -1985,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1985 * need the check. 1959 * need the check.
1986 */ 1960 */
1987 if (HAS_GMCH_DISPLAY(dev_priv)) 1961 if (HAS_GMCH_DISPLAY(dev_priv))
1988 if (crtc->config->has_dsi_encoder) 1962 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
1989 assert_dsi_pll_enabled(dev_priv); 1963 assert_dsi_pll_enabled(dev_priv);
1990 else 1964 else
1991 assert_pll_enabled(dev_priv, pipe); 1965 assert_pll_enabled(dev_priv, pipe);
@@ -2034,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2034 */ 2008 */
2035static void intel_disable_pipe(struct intel_crtc *crtc) 2009static void intel_disable_pipe(struct intel_crtc *crtc)
2036{ 2010{
2037 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 2011 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2038 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2012 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2039 enum pipe pipe = crtc->pipe; 2013 enum pipe pipe = crtc->pipe;
2040 i915_reg_t reg; 2014 i915_reg_t reg;
@@ -2072,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
2072 intel_wait_for_pipe_off(crtc); 2046 intel_wait_for_pipe_off(crtc);
2073} 2047}
2074 2048
2075static bool need_vtd_wa(struct drm_device *dev)
2076{
2077#ifdef CONFIG_INTEL_IOMMU
2078 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2079 return true;
2080#endif
2081 return false;
2082}
2083
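The removed need_vtd_wa() presumably moves behind intel_scanout_needs_vtd_wa(dev_priv), used in the alignment hunk below; a sketch preserving the same logic, rebased onto dev_priv:

static inline bool scanout_needs_vtd_wa_sketch(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	/* same gen >= 6 plus active-IOMMU check as the removed helper */
	if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}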
2084static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 2049static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2085{ 2050{
2086 return IS_GEN2(dev_priv) ? 2048 : 4096; 2051 return IS_GEN2(dev_priv) ? 2048 : 4096;
@@ -2245,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2245 unsigned int rotation) 2210 unsigned int rotation)
2246{ 2211{
2247 struct drm_device *dev = fb->dev; 2212 struct drm_device *dev = fb->dev;
2248 struct drm_i915_private *dev_priv = dev->dev_private; 2213 struct drm_i915_private *dev_priv = to_i915(dev);
2249 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2214 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2250 struct i915_ggtt_view view; 2215 struct i915_ggtt_view view;
2251 u32 alignment; 2216 u32 alignment;
@@ -2262,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2262 * we should always have valid PTE following the scanout preventing 2227 * we should always have valid PTE following the scanout preventing
2263 * the VT-d warning. 2228 * the VT-d warning.
2264 */ 2229 */
2265 if (need_vtd_wa(dev) && alignment < 256 * 1024) 2230 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2266 alignment = 256 * 1024; 2231 alignment = 256 * 1024;
2267 2232
2268 /* 2233 /*
@@ -2547,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2547 struct intel_initial_plane_config *plane_config) 2512 struct intel_initial_plane_config *plane_config)
2548{ 2513{
2549 struct drm_device *dev = intel_crtc->base.dev; 2514 struct drm_device *dev = intel_crtc->base.dev;
2550 struct drm_i915_private *dev_priv = dev->dev_private; 2515 struct drm_i915_private *dev_priv = to_i915(dev);
2551 struct drm_crtc *c; 2516 struct drm_crtc *c;
2552 struct intel_crtc *i; 2517 struct intel_crtc *i;
2553 struct drm_i915_gem_object *obj; 2518 struct drm_i915_gem_object *obj;
@@ -2643,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
2643 const struct intel_plane_state *plane_state) 2608 const struct intel_plane_state *plane_state)
2644{ 2609{
2645 struct drm_device *dev = primary->dev; 2610 struct drm_device *dev = primary->dev;
2646 struct drm_i915_private *dev_priv = dev->dev_private; 2611 struct drm_i915_private *dev_priv = to_i915(dev);
2647 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2612 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2648 struct drm_framebuffer *fb = plane_state->base.fb; 2613 struct drm_framebuffer *fb = plane_state->base.fb;
2649 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2614 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2756,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
2756 struct drm_crtc *crtc) 2721 struct drm_crtc *crtc)
2757{ 2722{
2758 struct drm_device *dev = crtc->dev; 2723 struct drm_device *dev = crtc->dev;
2759 struct drm_i915_private *dev_priv = dev->dev_private; 2724 struct drm_i915_private *dev_priv = to_i915(dev);
2760 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2725 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2761 int plane = intel_crtc->plane; 2726 int plane = intel_crtc->plane;
2762 2727
@@ -2773,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
2773 const struct intel_plane_state *plane_state) 2738 const struct intel_plane_state *plane_state)
2774{ 2739{
2775 struct drm_device *dev = primary->dev; 2740 struct drm_device *dev = primary->dev;
2776 struct drm_i915_private *dev_priv = dev->dev_private; 2741 struct drm_i915_private *dev_priv = to_i915(dev);
2777 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2742 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2778 struct drm_framebuffer *fb = plane_state->base.fb; 2743 struct drm_framebuffer *fb = plane_state->base.fb;
2779 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2744 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2901,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2901static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 2866static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2902{ 2867{
2903 struct drm_device *dev = intel_crtc->base.dev; 2868 struct drm_device *dev = intel_crtc->base.dev;
2904 struct drm_i915_private *dev_priv = dev->dev_private; 2869 struct drm_i915_private *dev_priv = to_i915(dev);
2905 2870
2906 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 2871 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2907 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 2872 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
@@ -3011,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
3011 const struct intel_plane_state *plane_state) 2976 const struct intel_plane_state *plane_state)
3012{ 2977{
3013 struct drm_device *dev = plane->dev; 2978 struct drm_device *dev = plane->dev;
3014 struct drm_i915_private *dev_priv = dev->dev_private; 2979 struct drm_i915_private *dev_priv = to_i915(dev);
3015 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2980 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3016 struct drm_framebuffer *fb = plane_state->base.fb; 2981 struct drm_framebuffer *fb = plane_state->base.fb;
3017 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2982 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -3095,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
3095 struct drm_crtc *crtc) 3060 struct drm_crtc *crtc)
3096{ 3061{
3097 struct drm_device *dev = crtc->dev; 3062 struct drm_device *dev = crtc->dev;
3098 struct drm_i915_private *dev_priv = dev->dev_private; 3063 struct drm_i915_private *dev_priv = to_i915(dev);
3099 int pipe = to_intel_crtc(crtc)->pipe; 3064 int pipe = to_intel_crtc(crtc)->pipe;
3100 3065
3101 I915_WRITE(PLANE_CTL(pipe, 0), 0); 3066 I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -3118,7 +3083,7 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3118{ 3083{
3119 struct intel_crtc *crtc; 3084 struct intel_crtc *crtc;
3120 3085
3121 for_each_intel_crtc(dev_priv->dev, crtc) 3086 for_each_intel_crtc(&dev_priv->drm, crtc)
3122 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3087 intel_finish_page_flip_cs(dev_priv, crtc->pipe);
3123} 3088}
3124 3089
@@ -3152,12 +3117,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
3152 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 3117 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3153 return; 3118 return;
3154 3119
3155 drm_modeset_lock_all(dev_priv->dev); 3120 drm_modeset_lock_all(&dev_priv->drm);
3156 /* 3121 /*
3157 * Disabling the crtcs gracefully seems nicer. Also the 3122 * Disabling the crtcs gracefully seems nicer. Also the
3158 * g33 docs say we should at least disable all the planes. 3123 * g33 docs say we should at least disable all the planes.
3159 */ 3124 */
3160 intel_display_suspend(dev_priv->dev); 3125 intel_display_suspend(&dev_priv->drm);
3161} 3126}
3162 3127
3163void intel_finish_reset(struct drm_i915_private *dev_priv) 3128void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -3184,7 +3149,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3184 * FIXME: Atomic will make this obsolete since we won't schedule 3149 * FIXME: Atomic will make this obsolete since we won't schedule
3185 * CS-based flips (which might get lost in gpu resets) any more. 3150 * CS-based flips (which might get lost in gpu resets) any more.
3186 */ 3151 */
3187 intel_update_primary_planes(dev_priv->dev); 3152 intel_update_primary_planes(&dev_priv->drm);
3188 return; 3153 return;
3189 } 3154 }
3190 3155
@@ -3195,18 +3160,18 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3195 intel_runtime_pm_disable_interrupts(dev_priv); 3160 intel_runtime_pm_disable_interrupts(dev_priv);
3196 intel_runtime_pm_enable_interrupts(dev_priv); 3161 intel_runtime_pm_enable_interrupts(dev_priv);
3197 3162
3198 intel_modeset_init_hw(dev_priv->dev); 3163 intel_modeset_init_hw(&dev_priv->drm);
3199 3164
3200 spin_lock_irq(&dev_priv->irq_lock); 3165 spin_lock_irq(&dev_priv->irq_lock);
3201 if (dev_priv->display.hpd_irq_setup) 3166 if (dev_priv->display.hpd_irq_setup)
3202 dev_priv->display.hpd_irq_setup(dev_priv); 3167 dev_priv->display.hpd_irq_setup(dev_priv);
3203 spin_unlock_irq(&dev_priv->irq_lock); 3168 spin_unlock_irq(&dev_priv->irq_lock);
3204 3169
3205 intel_display_resume(dev_priv->dev); 3170 intel_display_resume(&dev_priv->drm);
3206 3171
3207 intel_hpd_init(dev_priv); 3172 intel_hpd_init(dev_priv);
3208 3173
3209 drm_modeset_unlock_all(dev_priv->dev); 3174 drm_modeset_unlock_all(&dev_priv->drm);
3210} 3175}
3211 3176
3212static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3177static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3231,7 +3196,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
3231 struct intel_crtc_state *old_crtc_state) 3196 struct intel_crtc_state *old_crtc_state)
3232{ 3197{
3233 struct drm_device *dev = crtc->base.dev; 3198 struct drm_device *dev = crtc->base.dev;
3234 struct drm_i915_private *dev_priv = dev->dev_private; 3199 struct drm_i915_private *dev_priv = to_i915(dev);
3235 struct intel_crtc_state *pipe_config = 3200 struct intel_crtc_state *pipe_config =
3236 to_intel_crtc_state(crtc->base.state); 3201 to_intel_crtc_state(crtc->base.state);
3237 3202
@@ -3272,7 +3237,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
3272static void intel_fdi_normal_train(struct drm_crtc *crtc) 3237static void intel_fdi_normal_train(struct drm_crtc *crtc)
3273{ 3238{
3274 struct drm_device *dev = crtc->dev; 3239 struct drm_device *dev = crtc->dev;
3275 struct drm_i915_private *dev_priv = dev->dev_private; 3240 struct drm_i915_private *dev_priv = to_i915(dev);
3276 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3277 int pipe = intel_crtc->pipe; 3242 int pipe = intel_crtc->pipe;
3278 i915_reg_t reg; 3243 i915_reg_t reg;
@@ -3315,7 +3280,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
3315static void ironlake_fdi_link_train(struct drm_crtc *crtc) 3280static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3316{ 3281{
3317 struct drm_device *dev = crtc->dev; 3282 struct drm_device *dev = crtc->dev;
3318 struct drm_i915_private *dev_priv = dev->dev_private; 3283 struct drm_i915_private *dev_priv = to_i915(dev);
3319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3284 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3320 int pipe = intel_crtc->pipe; 3285 int pipe = intel_crtc->pipe;
3321 i915_reg_t reg; 3286 i915_reg_t reg;
@@ -3416,7 +3381,7 @@ static const int snb_b_fdi_train_param[] = {
3416static void gen6_fdi_link_train(struct drm_crtc *crtc) 3381static void gen6_fdi_link_train(struct drm_crtc *crtc)
3417{ 3382{
3418 struct drm_device *dev = crtc->dev; 3383 struct drm_device *dev = crtc->dev;
3419 struct drm_i915_private *dev_priv = dev->dev_private; 3384 struct drm_i915_private *dev_priv = to_i915(dev);
3420 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3421 int pipe = intel_crtc->pipe; 3386 int pipe = intel_crtc->pipe;
3422 i915_reg_t reg; 3387 i915_reg_t reg;
@@ -3549,7 +3514,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
3549static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) 3514static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3550{ 3515{
3551 struct drm_device *dev = crtc->dev; 3516 struct drm_device *dev = crtc->dev;
3552 struct drm_i915_private *dev_priv = dev->dev_private; 3517 struct drm_i915_private *dev_priv = to_i915(dev);
3553 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3518 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3554 int pipe = intel_crtc->pipe; 3519 int pipe = intel_crtc->pipe;
3555 i915_reg_t reg; 3520 i915_reg_t reg;
@@ -3668,7 +3633,7 @@ train_done:
3668static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 3633static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3669{ 3634{
3670 struct drm_device *dev = intel_crtc->base.dev; 3635 struct drm_device *dev = intel_crtc->base.dev;
3671 struct drm_i915_private *dev_priv = dev->dev_private; 3636 struct drm_i915_private *dev_priv = to_i915(dev);
3672 int pipe = intel_crtc->pipe; 3637 int pipe = intel_crtc->pipe;
3673 i915_reg_t reg; 3638 i915_reg_t reg;
3674 u32 temp; 3639 u32 temp;
@@ -3705,7 +3670,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3705static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 3670static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3706{ 3671{
3707 struct drm_device *dev = intel_crtc->base.dev; 3672 struct drm_device *dev = intel_crtc->base.dev;
3708 struct drm_i915_private *dev_priv = dev->dev_private; 3673 struct drm_i915_private *dev_priv = to_i915(dev);
3709 int pipe = intel_crtc->pipe; 3674 int pipe = intel_crtc->pipe;
3710 i915_reg_t reg; 3675 i915_reg_t reg;
3711 u32 temp; 3676 u32 temp;
@@ -3735,7 +3700,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3735static void ironlake_fdi_disable(struct drm_crtc *crtc) 3700static void ironlake_fdi_disable(struct drm_crtc *crtc)
3736{ 3701{
3737 struct drm_device *dev = crtc->dev; 3702 struct drm_device *dev = crtc->dev;
3738 struct drm_i915_private *dev_priv = dev->dev_private; 3703 struct drm_i915_private *dev_priv = to_i915(dev);
3739 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3704 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3740 int pipe = intel_crtc->pipe; 3705 int pipe = intel_crtc->pipe;
3741 i915_reg_t reg; 3706 i915_reg_t reg;
@@ -3831,7 +3796,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
3831static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 3796static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3832{ 3797{
3833 struct drm_device *dev = crtc->dev; 3798 struct drm_device *dev = crtc->dev;
3834 struct drm_i915_private *dev_priv = dev->dev_private; 3799 struct drm_i915_private *dev_priv = to_i915(dev);
3835 long ret; 3800 long ret;
3836 3801
3837 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 3802 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3994,7 +3959,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3994 enum pipe pch_transcoder) 3959 enum pipe pch_transcoder)
3995{ 3960{
3996 struct drm_device *dev = crtc->base.dev; 3961 struct drm_device *dev = crtc->base.dev;
3997 struct drm_i915_private *dev_priv = dev->dev_private; 3962 struct drm_i915_private *dev_priv = to_i915(dev);
3998 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 3963 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
3999 3964
4000 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 3965 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
@@ -4016,7 +3981,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4016 3981
4017static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 3982static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4018{ 3983{
4019 struct drm_i915_private *dev_priv = dev->dev_private; 3984 struct drm_i915_private *dev_priv = to_i915(dev);
4020 uint32_t temp; 3985 uint32_t temp;
4021 3986
4022 temp = I915_READ(SOUTH_CHICKEN1); 3987 temp = I915_READ(SOUTH_CHICKEN1);
@@ -4066,7 +4031,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
4066 struct intel_encoder *encoder; 4031 struct intel_encoder *encoder;
4067 4032
4068 for_each_encoder_on_crtc(dev, crtc, encoder) { 4033 for_each_encoder_on_crtc(dev, crtc, encoder) {
4069 if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 4034 if (encoder->type == INTEL_OUTPUT_DP ||
4070 encoder->type == INTEL_OUTPUT_EDP) 4035 encoder->type == INTEL_OUTPUT_EDP)
4071 return enc_to_dig_port(&encoder->base)->port; 4036 return enc_to_dig_port(&encoder->base)->port;
4072 } 4037 }
@@ -4085,7 +4050,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
4085static void ironlake_pch_enable(struct drm_crtc *crtc) 4050static void ironlake_pch_enable(struct drm_crtc *crtc)
4086{ 4051{
4087 struct drm_device *dev = crtc->dev; 4052 struct drm_device *dev = crtc->dev;
4088 struct drm_i915_private *dev_priv = dev->dev_private; 4053 struct drm_i915_private *dev_priv = to_i915(dev);
4089 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4054 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4090 int pipe = intel_crtc->pipe; 4055 int pipe = intel_crtc->pipe;
4091 u32 temp; 4056 u32 temp;
@@ -4135,7 +4100,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4135 intel_fdi_normal_train(crtc); 4100 intel_fdi_normal_train(crtc);
4136 4101
4137 /* For PCH DP, enable TRANS_DP_CTL */ 4102 /* For PCH DP, enable TRANS_DP_CTL */
4138 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { 4103 if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
4139 const struct drm_display_mode *adjusted_mode = 4104 const struct drm_display_mode *adjusted_mode =
4140 &intel_crtc->config->base.adjusted_mode; 4105 &intel_crtc->config->base.adjusted_mode;
4141 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4106 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4175,7 +4140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4175static void lpt_pch_enable(struct drm_crtc *crtc) 4140static void lpt_pch_enable(struct drm_crtc *crtc)
4176{ 4141{
4177 struct drm_device *dev = crtc->dev; 4142 struct drm_device *dev = crtc->dev;
4178 struct drm_i915_private *dev_priv = dev->dev_private; 4143 struct drm_i915_private *dev_priv = to_i915(dev);
4179 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4144 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4180 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 4145 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4181 4146
@@ -4191,7 +4156,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
4191 4156
4192static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4157static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4193{ 4158{
4194 struct drm_i915_private *dev_priv = dev->dev_private; 4159 struct drm_i915_private *dev_priv = to_i915(dev);
4195 i915_reg_t dslreg = PIPEDSL(pipe); 4160 i915_reg_t dslreg = PIPEDSL(pipe);
4196 u32 temp; 4161 u32 temp;
4197 4162
@@ -4369,7 +4334,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
4369static void skylake_pfit_enable(struct intel_crtc *crtc) 4334static void skylake_pfit_enable(struct intel_crtc *crtc)
4370{ 4335{
4371 struct drm_device *dev = crtc->base.dev; 4336 struct drm_device *dev = crtc->base.dev;
4372 struct drm_i915_private *dev_priv = dev->dev_private; 4337 struct drm_i915_private *dev_priv = to_i915(dev);
4373 int pipe = crtc->pipe; 4338 int pipe = crtc->pipe;
4374 struct intel_crtc_scaler_state *scaler_state = 4339 struct intel_crtc_scaler_state *scaler_state =
4375 &crtc->config->scaler_state; 4340 &crtc->config->scaler_state;
@@ -4397,7 +4362,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
4397static void ironlake_pfit_enable(struct intel_crtc *crtc) 4362static void ironlake_pfit_enable(struct intel_crtc *crtc)
4398{ 4363{
4399 struct drm_device *dev = crtc->base.dev; 4364 struct drm_device *dev = crtc->base.dev;
4400 struct drm_i915_private *dev_priv = dev->dev_private; 4365 struct drm_i915_private *dev_priv = to_i915(dev);
4401 int pipe = crtc->pipe; 4366 int pipe = crtc->pipe;
4402 4367
4403 if (crtc->config->pch_pfit.enabled) { 4368 if (crtc->config->pch_pfit.enabled) {
@@ -4418,7 +4383,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
4418void hsw_enable_ips(struct intel_crtc *crtc) 4383void hsw_enable_ips(struct intel_crtc *crtc)
4419{ 4384{
4420 struct drm_device *dev = crtc->base.dev; 4385 struct drm_device *dev = crtc->base.dev;
4421 struct drm_i915_private *dev_priv = dev->dev_private; 4386 struct drm_i915_private *dev_priv = to_i915(dev);
4422 4387
4423 if (!crtc->config->ips_enabled) 4388 if (!crtc->config->ips_enabled)
4424 return; 4389 return;
@@ -4446,7 +4411,9 @@ void hsw_enable_ips(struct intel_crtc *crtc)
4446 * and don't wait for vblanks until the end of crtc_enable, then 4411 * and don't wait for vblanks until the end of crtc_enable, then
4447 * the HW state readout code will complain that the expected 4412 * the HW state readout code will complain that the expected
4448 * IPS_CTL value is not the one we read. */ 4413 * IPS_CTL value is not the one we read. */
4449 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) 4414 if (intel_wait_for_register(dev_priv,
4415 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
4416 50))
4450 DRM_ERROR("Timed out waiting for IPS enable\n"); 4417 DRM_ERROR("Timed out waiting for IPS enable\n");
4451 } 4418 }
4452} 4419}
@@ -4454,7 +4421,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
4454void hsw_disable_ips(struct intel_crtc *crtc) 4421void hsw_disable_ips(struct intel_crtc *crtc)
4455{ 4422{
4456 struct drm_device *dev = crtc->base.dev; 4423 struct drm_device *dev = crtc->base.dev;
4457 struct drm_i915_private *dev_priv = dev->dev_private; 4424 struct drm_i915_private *dev_priv = to_i915(dev);
4458 4425
4459 if (!crtc->config->ips_enabled) 4426 if (!crtc->config->ips_enabled)
4460 return; 4427 return;
@@ -4465,7 +4432,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
4465 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4432 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4466 mutex_unlock(&dev_priv->rps.hw_lock); 4433 mutex_unlock(&dev_priv->rps.hw_lock);
4467 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4434 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4468 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42)) 4435 if (intel_wait_for_register(dev_priv,
4436 IPS_CTL, IPS_ENABLE, 0,
4437 42))
4469 DRM_ERROR("Timed out waiting for IPS disable\n"); 4438 DRM_ERROR("Timed out waiting for IPS disable\n");
4470 } else { 4439 } else {
4471 I915_WRITE(IPS_CTL, 0); 4440 I915_WRITE(IPS_CTL, 0);
@@ -4480,7 +4449,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4480{ 4449{
4481 if (intel_crtc->overlay) { 4450 if (intel_crtc->overlay) {
4482 struct drm_device *dev = intel_crtc->base.dev; 4451 struct drm_device *dev = intel_crtc->base.dev;
4483 struct drm_i915_private *dev_priv = dev->dev_private; 4452 struct drm_i915_private *dev_priv = to_i915(dev);
4484 4453
4485 mutex_lock(&dev->struct_mutex); 4454 mutex_lock(&dev->struct_mutex);
4486 dev_priv->mm.interruptible = false; 4455 dev_priv->mm.interruptible = false;
@@ -4508,7 +4477,7 @@ static void
4508intel_post_enable_primary(struct drm_crtc *crtc) 4477intel_post_enable_primary(struct drm_crtc *crtc)
4509{ 4478{
4510 struct drm_device *dev = crtc->dev; 4479 struct drm_device *dev = crtc->dev;
4511 struct drm_i915_private *dev_priv = dev->dev_private; 4480 struct drm_i915_private *dev_priv = to_i915(dev);
4512 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4481 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4513 int pipe = intel_crtc->pipe; 4482 int pipe = intel_crtc->pipe;
4514 4483
@@ -4540,7 +4509,7 @@ static void
4540intel_pre_disable_primary(struct drm_crtc *crtc) 4509intel_pre_disable_primary(struct drm_crtc *crtc)
4541{ 4510{
4542 struct drm_device *dev = crtc->dev; 4511 struct drm_device *dev = crtc->dev;
4543 struct drm_i915_private *dev_priv = dev->dev_private; 4512 struct drm_i915_private *dev_priv = to_i915(dev);
4544 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4513 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4545 int pipe = intel_crtc->pipe; 4514 int pipe = intel_crtc->pipe;
4546 4515
@@ -4567,7 +4536,7 @@ static void
4567intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) 4536intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4568{ 4537{
4569 struct drm_device *dev = crtc->dev; 4538 struct drm_device *dev = crtc->dev;
4570 struct drm_i915_private *dev_priv = dev->dev_private; 4539 struct drm_i915_private *dev_priv = to_i915(dev);
4571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4540 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4572 int pipe = intel_crtc->pipe; 4541 int pipe = intel_crtc->pipe;
4573 4542
@@ -4626,7 +4595,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4626{ 4595{
4627 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 4596 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4628 struct drm_device *dev = crtc->base.dev; 4597 struct drm_device *dev = crtc->base.dev;
4629 struct drm_i915_private *dev_priv = dev->dev_private; 4598 struct drm_i915_private *dev_priv = to_i915(dev);
4630 struct intel_crtc_state *pipe_config = 4599 struct intel_crtc_state *pipe_config =
4631 to_intel_crtc_state(crtc->base.state); 4600 to_intel_crtc_state(crtc->base.state);
4632 struct drm_atomic_state *old_state = old_crtc_state->base.state; 4601 struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4729,7 +4698,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
4729static void ironlake_crtc_enable(struct drm_crtc *crtc) 4698static void ironlake_crtc_enable(struct drm_crtc *crtc)
4730{ 4699{
4731 struct drm_device *dev = crtc->dev; 4700 struct drm_device *dev = crtc->dev;
4732 struct drm_i915_private *dev_priv = dev->dev_private; 4701 struct drm_i915_private *dev_priv = to_i915(dev);
4733 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4734 struct intel_encoder *encoder; 4703 struct intel_encoder *encoder;
4735 int pipe = intel_crtc->pipe; 4704 int pipe = intel_crtc->pipe;
@@ -4757,7 +4726,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4757 if (intel_crtc->config->has_pch_encoder) 4726 if (intel_crtc->config->has_pch_encoder)
4758 intel_prepare_shared_dpll(intel_crtc); 4727 intel_prepare_shared_dpll(intel_crtc);
4759 4728
4760 if (intel_crtc->config->has_dp_encoder) 4729 if (intel_crtc_has_dp_encoder(intel_crtc->config))
4761 intel_dp_set_m_n(intel_crtc, M1_N1); 4730 intel_dp_set_m_n(intel_crtc, M1_N1);
4762 4731
4763 intel_set_pipe_timings(intel_crtc); 4732 intel_set_pipe_timings(intel_crtc);
@@ -4826,7 +4795,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
 static void haswell_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
@@ -4848,10 +4817,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	if (intel_crtc->config->shared_dpll)
 		intel_enable_shared_dpll(intel_crtc);
 
-	if (intel_crtc->config->has_dp_encoder)
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_set_pipe_timings(intel_crtc);
 
 	intel_set_pipe_src_size(intel_crtc);
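On Broxton the DSI transcoders are distinct from the pipe, so the per-state has_dsi_encoder flag is dropped in favour of asking the transcoder itself. A sketch, assuming the BXT-style TRANSCODER_DSI_A/C enum values:

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}

The same test recurs through the rest of haswell_crtc_enable() and in haswell_crtc_disable() below, where cpu_transcoder is visible in the quoted context.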
@@ -4867,7 +4836,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 				     &intel_crtc->config->fdi_m_n, NULL);
 	}
 
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		haswell_set_pipeconf(crtc);
 
 	haswell_set_pipemisc(crtc);
@@ -4889,7 +4858,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	if (intel_crtc->config->has_pch_encoder)
 		dev_priv->display.fdi_link_train(crtc);
 
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_enable_pipe_clock(intel_crtc);
 
 	if (INTEL_INFO(dev)->gen >= 9)
@@ -4904,7 +4873,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	intel_color_load_luts(&pipe_config->base);
 
 	intel_ddi_set_pipe_settings(crtc);
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_enable_transcoder_func(crtc);
 
 	if (dev_priv->display.initial_watermarks != NULL)
@@ -4913,7 +4882,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 		intel_update_watermarks(crtc);
 
 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_enable_pipe(intel_crtc);
 
 	if (intel_crtc->config->has_pch_encoder)
@@ -4950,7 +4919,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe = crtc->pipe;
 
 	/* To avoid upsetting the power well on haswell only disable the pfit if
@@ -4965,7 +4934,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
@@ -5028,7 +4997,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 static void haswell_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5046,13 +5015,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	assert_vblank_disabled(crtc);
 
 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_disable_pipe(intel_crtc);
 
 	if (intel_crtc->config->dp_encoder_is_mst)
 		intel_ddi_set_vc_payload_alloc(crtc, false);
 
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
 	if (INTEL_INFO(dev)->gen >= 9)
@@ -5060,7 +5029,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	else
 		ironlake_pfit_disable(intel_crtc, false);
 
-	if (!intel_crtc->config->has_dsi_encoder)
+	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_disable_pipe_clock(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5080,7 +5049,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc_state *pipe_config = crtc->config;
 
 	if (!pipe_config->gmch_pfit.control)
@@ -5150,7 +5119,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
 	case INTEL_OUTPUT_UNKNOWN:
 		/* Only DDI platforms should ever use this output type */
 		WARN_ON_ONCE(!HAS_DDI(dev));
-	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_HDMI:
 	case INTEL_OUTPUT_EDP:
 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
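INTEL_OUTPUT_DISPLAYPORT is renamed INTEL_OUTPUT_DP here and in the AUX variant below, matching the shorter names used by the new bitmask helpers. A sketch of the renamed member; the neighbouring values are assumptions for illustration, not the real enum layout:

enum intel_output_type {
	INTEL_OUTPUT_UNKNOWN,
	INTEL_OUTPUT_DP,	/* formerly INTEL_OUTPUT_DISPLAYPORT */
	INTEL_OUTPUT_EDP,
	INTEL_OUTPUT_HDMI,
};

The missing break before the case is the pre-existing deliberate fallthrough: an UNKNOWN (DDI) output is treated like a digital port.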
@@ -5184,7 +5153,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
 		 * run the DP detection too.
 		 */
 		WARN_ON_ONCE(!HAS_DDI(dev));
-	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_EDP:
 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
 		return port_to_aux_power_domain(intel_dig_port->port);
@@ -5232,7 +5201,7 @@ static unsigned long
 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
 			       struct intel_crtc_state *crtc_state)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum intel_display_power_domain domain;
 	unsigned long domains, new_domains, old_domains;
@@ -5277,7 +5246,7 @@ static int skl_calc_cdclk(int max_pixclk, int vco);
 
 static void intel_update_max_cdclk(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
@@ -5338,7 +5307,7 @@ static void intel_update_max_cdclk(struct drm_device *dev)
 
 static void intel_update_cdclk(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
 
@@ -5395,7 +5364,9 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
 	I915_WRITE(BXT_DE_PLL_ENABLE, 0);
 
 	/* Timeout 200us */
-	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+	if (intel_wait_for_register(dev_priv,
+				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
+				    1))
 		DRM_ERROR("timeout waiting for DE PLL unlock\n");
 
 	dev_priv->cdclk_pll.vco = 0;
@@ -5414,7 +5385,11 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
 	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
 
 	/* Timeout 200us */
-	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+	if (intel_wait_for_register(dev_priv,
+				    BXT_DE_PLL_ENABLE,
+				    BXT_DE_PLL_LOCK,
+				    BXT_DE_PLL_LOCK,
+				    1))
 		DRM_ERROR("timeout waiting for DE PLL lock\n");
 
 	dev_priv->cdclk_pll.vco = vco;
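Both DE PLL waits now go through intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which polls until (read & mask) == value. A self-contained sketch of that contract (the real helper reportedly also front-loads a short atomic spin before it starts sleeping; that is elided here):

/* Sketch only: returns 0 on success, -ETIMEDOUT on timeout. */
static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned int timeout_ms)
{
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

So the unlock wait above asks for BXT_DE_PLL_LOCK to read back as 0, and the lock wait for it to read back as BXT_DE_PLL_LOCK.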
@@ -5495,14 +5470,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
 		return;
 	}
 
-	intel_update_cdclk(dev_priv->dev);
+	intel_update_cdclk(&dev_priv->drm);
 }
 
 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
 {
 	u32 cdctl, expected;
 
-	intel_update_cdclk(dev_priv->dev);
+	intel_update_cdclk(&dev_priv->drm);
 
 	if (dev_priv->cdclk_pll.vco == 0 ||
 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
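dev_priv->dev disappears along with dev_private: with drm_device embedded, the i915-to-drm conversion is just taking the address of the member. A sketch of the layout that both directions assume:

/* Sketch: the embedding behind to_i915() and &dev_priv->drm. */
struct drm_i915_private {
	struct drm_device drm;	/* embedded, not a pointer */
	/* ... the rest of the driver state ... */
};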
@@ -5635,7 +5610,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
 	dev_priv->skl_preferred_vco_freq = vco;
 
 	if (changed)
-		intel_update_max_cdclk(dev_priv->dev);
+		intel_update_max_cdclk(&dev_priv->drm);
 }
 
 static void
@@ -5677,7 +5652,9 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
 
 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
 
-	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
+	if (intel_wait_for_register(dev_priv,
+				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+				    5))
 		DRM_ERROR("DPLL0 not locked\n");
 
 	dev_priv->cdclk_pll.vco = vco;
@@ -5690,7 +5667,9 @@ static void
 skl_dpll0_disable(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
-	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
+	if (intel_wait_for_register(dev_priv,
+				    LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+				    1))
 		DRM_ERROR("Couldn't disable DPLL0\n");
 
 	dev_priv->cdclk_pll.vco = 0;
@@ -5725,7 +5704,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
 
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 freq_select, pcu_ack;
 
 	WARN_ON((cdclk == 24000) != (vco == 0));
@@ -5823,7 +5802,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
 		goto sanitize;
 
-	intel_update_cdclk(dev_priv->dev);
+	intel_update_cdclk(&dev_priv->drm);
 	/* Is PLL enabled and locked ? */
 	if (dev_priv->cdclk_pll.vco == 0 ||
 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
@@ -5854,7 +5833,7 @@ sanitize:
 /* Adjust CDclk dividers to allow high res or save power if possible */
 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val, cmd;
 
 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5919,7 +5898,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 
 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val, cmd;
 
 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -6007,7 +5986,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
 				 struct drm_atomic_state *state)
 {
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	unsigned max_pixclk = 0, i;
@@ -6034,7 +6013,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int max_pixclk = intel_mode_max_pixclk(dev, state);
 	struct intel_atomic_state *intel_state =
 		to_intel_atomic_state(state);
@@ -6102,7 +6081,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = old_state->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_atomic_state *old_intel_state =
 		to_intel_atomic_state(old_state);
 	unsigned req_cdclk = old_intel_state->dev_cdclk;
@@ -6141,14 +6120,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	if (WARN_ON(intel_crtc->active))
 		return;
 
-	if (intel_crtc->config->has_dp_encoder)
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
 	intel_set_pipe_src_size(intel_crtc);
 
 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
-		struct drm_i915_private *dev_priv = dev->dev_private;
+		struct drm_i915_private *dev_priv = to_i915(dev);
 
 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
 		I915_WRITE(CHV_CANVAS(pipe), 0);
@@ -6193,7 +6172,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
@@ -6214,7 +6193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
 	i9xx_set_pll_dividers(intel_crtc);
 
-	if (intel_crtc->config->has_dp_encoder)
+	if (intel_crtc_has_dp_encoder(intel_crtc->config))
 		intel_dp_set_m_n(intel_crtc, M1_N1);
 
 	intel_set_pipe_timings(intel_crtc);
@@ -6250,7 +6229,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (!crtc->config->gmch_pfit.control)
 		return;
@@ -6265,7 +6244,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
 static void i9xx_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
@@ -6291,7 +6270,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 		if (encoder->post_disable)
 			encoder->post_disable(encoder);
 
-	if (!intel_crtc->config->has_dsi_encoder) {
+	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
 		if (IS_CHERRYVIEW(dev))
 			chv_disable_pll(dev_priv, pipe);
 		else if (IS_VALLEYVIEW(dev))
@@ -6609,7 +6588,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	pipe_config->ips_enabled = i915.enable_ips &&
 		hsw_crtc_supports_ips(crtc) &&
@@ -6629,7 +6608,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 				     struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	int clock_limit = dev_priv->max_dotclk_freq;
 
@@ -6660,7 +6639,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
 	 * - LVDS dual channel mode
 	 * - Double wide pipe
 	 */
-	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
+	if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
 		pipe_config->pipe_src_w &= ~1;
 
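From here on, intel_pipe_will_have_type() (which walked the atomic state being checked) and intel_pipe_has_type() (which walked the committed encoder list) both collapse into one state query, so the same predicate works at compute and commit time. A sketch, under the same output_types assumption as above:

static inline bool intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
				       enum intel_output_type type)
{
	return crtc_state->output_types & (1 << type);
}

The committed-state call sites simply pass crtc->config, as in vlv_prepare_pll() below.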
@@ -6779,7 +6758,7 @@ static int broxton_get_display_clock_speed(struct drm_device *dev)
 
 static int broadwell_get_display_clock_speed(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t lcpll = I915_READ(LCPLL_CTL);
 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -6799,7 +6778,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
 
 static int haswell_get_display_clock_speed(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t lcpll = I915_READ(LCPLL_CTL);
 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
@@ -6933,7 +6912,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
 
 static unsigned int intel_hpll_vco(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	static const unsigned int blb_vco[8] = {
 		[0] = 3200000,
 		[1] = 4000000,
@@ -7171,7 +7150,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
 	crtc_state->dpll_hw_state.fp0 = fp;
 
 	crtc->lowfreq_avail = false;
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 	    reduced_clock) {
 		crtc_state->dpll_hw_state.fp1 = fp2;
 		crtc->lowfreq_avail = true;
@@ -7213,7 +7192,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
 					 struct intel_link_m_n *m_n)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe = crtc->pipe;
 
 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -7227,7 +7206,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 					 struct intel_link_m_n *m2_n2)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe = crtc->pipe;
 	enum transcoder transcoder = crtc->config->cpu_transcoder;
 
@@ -7290,7 +7269,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
 	/* DPLL not used with DSI, but still need the rest set up */
-	if (!pipe_config->has_dsi_encoder)
+	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
 			DPLL_EXT_BUFFER_ENABLE_VLV;
 
@@ -7307,7 +7286,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
 	/* DPLL not used with DSI, but still need the rest set up */
-	if (!pipe_config->has_dsi_encoder)
+	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
 
 	pipe_config->dpll_hw_state.dpll_md =
@@ -7318,7 +7297,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
 	u32 mdiv;
 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -7377,15 +7356,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
 	/* Set HBR and RBR LPF coefficients */
 	if (pipe_config->port_clock == 162000 ||
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
+	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 				 0x009f0003);
 	else
 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
 				 0x00d0000f);
 
-	if (pipe_config->has_dp_encoder) {
+	if (intel_crtc_has_dp_encoder(pipe_config)) {
 		/* Use SSC source */
 		if (pipe == PIPE_A)
 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -7405,8 +7384,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
 
 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
+	if (intel_crtc_has_dp_encoder(crtc->config))
 		coreclk |= 0x01000000;
 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
@@ -7418,7 +7396,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
 			    const struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
 	u32 loopfilter, tribuf_calcntr;
@@ -7580,19 +7558,15 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 			      struct dpll *reduced_clock)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dpll;
-	bool is_sdvo;
 	struct dpll *clock = &crtc_state->dpll;
 
 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
 
-	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
-		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
-
 	dpll = DPLL_VGA_MODE_DIS;
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
 		dpll |= DPLLB_MODE_LVDS;
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -7602,10 +7576,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
 	}
 
-	if (is_sdvo)
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 		dpll |= DPLL_SDVO_HIGH_SPEED;
 
-	if (crtc_state->has_dp_encoder)
+	if (intel_crtc_has_dp_encoder(crtc_state))
 		dpll |= DPLL_SDVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
@@ -7635,7 +7610,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
 
 	if (crtc_state->sdvo_tv_clock)
 		dpll |= PLL_REF_INPUT_TVCLKINBC;
-	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 		 intel_panel_use_ssc(dev_priv))
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
@@ -7656,7 +7631,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 			      struct dpll *reduced_clock)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dpll;
 	struct dpll *clock = &crtc_state->dpll;
 
@@ -7664,7 +7639,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 
 	dpll = DPLL_VGA_MODE_DIS;
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	} else {
 		if (clock->p1 == 2)
@@ -7675,10 +7650,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 			dpll |= PLL_P2_DIVIDE_BY_4;
 	}
 
-	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
+	if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
 		dpll |= DPLL_DVO_2X_MODE;
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 	    intel_panel_use_ssc(dev_priv))
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
@@ -7691,7 +7666,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = intel_crtc->pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -7708,7 +7683,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 		crtc_vtotal -= 1;
 		crtc_vblank_end -= 1;
 
-		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
 		else
 			vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7753,7 +7728,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = intel_crtc->pipe;
 
 	/* pipesrc controls the size that is scaled from, which should
@@ -7768,7 +7743,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 	uint32_t tmp;
 
@@ -7803,7 +7778,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
 				    struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp;
 
 	tmp = I915_READ(PIPESRC(crtc->pipe));
@@ -7841,7 +7816,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 {
 	struct drm_device *dev = intel_crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t pipeconf;
 
 	pipeconf = 0;
@@ -7887,7 +7862,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
 		if (INTEL_INFO(dev)->gen < 4 ||
-		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
+		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
 		else
 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7906,21 +7881,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
 				   struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_limit *limit;
 	int refclk = 48000;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
 		}
 
 		limit = &intel_limits_i8xx_lvds;
-	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
 		limit = &intel_limits_i8xx_dvo;
 	} else {
 		limit = &intel_limits_i8xx_dac;
@@ -7942,14 +7917,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_limit *limit;
 	int refclk = 96000;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7959,10 +7934,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
 			limit = &intel_limits_g4x_dual_channel_lvds;
 		else
 			limit = &intel_limits_g4x_single_channel_lvds;
-	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
-		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
 		limit = &intel_limits_g4x_hdmi;
-	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
 		limit = &intel_limits_g4x_sdvo;
 	} else {
 		/* The option is for other outputs */
@@ -7985,14 +7960,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_limit *limit;
 	int refclk = 96000;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -8019,14 +7994,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
 				   struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_limit *limit;
 	int refclk = 96000;
 
 	memset(&crtc_state->dpll_hw_state, 0,
 	       sizeof(crtc_state->dpll_hw_state));
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			refclk = dev_priv->vbt.lvds_ssc_freq;
 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -8095,7 +8070,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 				 struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp;
 
 	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
@@ -8122,7 +8097,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
 			       struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe = pipe_config->cpu_transcoder;
 	struct dpll clock;
 	u32 mdiv;
@@ -8150,7 +8125,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 			      struct intel_initial_plane_config *plane_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val, base, offset;
 	int pipe = crtc->pipe, plane = crtc->plane;
 	int fourcc, pixel_format;
@@ -8218,7 +8193,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
 			       struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe = pipe_config->cpu_transcoder;
 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
 	struct dpll clock;
@@ -8252,7 +8227,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 				 struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 	uint32_t tmp;
 	bool ret;
@@ -8363,7 +8338,7 @@ out:
 
 static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 	int i;
 	u32 val, final;
@@ -8537,16 +8512,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
 	I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
 		DRM_ERROR("FDI mPHY reset assert timeout\n");
 
 	tmp = I915_READ(SOUTH_CHICKEN2);
 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
 	I915_WRITE(SOUTH_CHICKEN2, tmp);
 
-	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
 			FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
 }
@@ -8634,7 +8609,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
 				 bool with_fdi)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t reg, tmp;
 
 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -8673,7 +8648,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
 /* Sequence to disable CLKOUT_DP */
 static void lpt_disable_clkout_dp(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t reg, tmp;
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -8794,7 +8769,7 @@ void intel_init_pch_refclk(struct drm_device *dev)
 
 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	uint32_t val;
@@ -8836,7 +8811,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
 
 static void haswell_set_pipeconf(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
 	u32 val = 0;
@@ -8855,7 +8830,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
 
 static void haswell_set_pipemisc(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
@@ -8908,37 +8883,13 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 {
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_atomic_state *state = crtc_state->base.state;
-	struct drm_connector *connector;
-	struct drm_connector_state *connector_state;
-	struct intel_encoder *encoder;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dpll, fp, fp2;
-	int factor, i;
-	bool is_lvds = false, is_sdvo = false;
-
-	for_each_connector_in_state(state, connector, connector_state, i) {
-		if (connector_state->crtc != crtc_state->base.crtc)
-			continue;
-
-		encoder = to_intel_encoder(connector_state->best_encoder);
-
-		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
-		case INTEL_OUTPUT_SDVO:
-		case INTEL_OUTPUT_HDMI:
-			is_sdvo = true;
-			break;
-		default:
-			break;
-		}
-	}
+	int factor;
 
 	/* Enable autotuning of the PLL clock (if permissible) */
 	factor = 21;
-	if (is_lvds) {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if ((intel_panel_use_ssc(dev_priv) &&
 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
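The deleted connector walk above is exactly the work the atomic check phase is now assumed to do once when it fills output_types, so ironlake_compute_dpll() can rely on the cached mask. A sketch of that one-time fill, reusing the idioms of the removed loop (the output_types field itself is the assumption here):

/* Sketch: done once per modeset, not at every PLL computation. */
static void sketch_fill_output_types(struct drm_atomic_state *state,
				     struct intel_crtc_state *crtc_state)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	crtc_state->output_types = 0;
	for_each_connector_in_state(state, connector, connector_state, i) {
		struct intel_encoder *encoder;

		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		crtc_state->output_types |= 1 << encoder->type;
	}
}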
@@ -8962,7 +8913,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 
 	dpll = 0;
 
-	if (is_lvds)
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
 		dpll |= DPLLB_MODE_LVDS;
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -8970,9 +8921,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 	dpll |= (crtc_state->pixel_multiplier - 1)
 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
-	if (is_sdvo)
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
 		dpll |= DPLL_SDVO_HIGH_SPEED;
-	if (crtc_state->has_dp_encoder)
+
+	if (intel_crtc_has_dp_encoder(crtc_state))
 		dpll |= DPLL_SDVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
@@ -8995,7 +8948,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
 		break;
 	}
 
-	if (is_lvds && intel_panel_use_ssc(dev_priv))
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	    intel_panel_use_ssc(dev_priv))
 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
@@ -9011,7 +8965,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 				       struct intel_crtc_state *crtc_state)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct dpll reduced_clock;
 	bool has_reduced_clock = false;
 	struct intel_shared_dpll *pll;
@@ -9027,7 +8981,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 	if (!crtc_state->has_pch_encoder)
 		return 0;
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
 		if (intel_panel_use_ssc(dev_priv)) {
 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
 				      dev_priv->vbt.lvds_ssc_freq);
@@ -9066,7 +9020,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
 		return -EINVAL;
 	}
 
-	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
 	    has_reduced_clock)
 		crtc->lowfreq_avail = true;
 
@@ -9077,7 +9031,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
 					 struct intel_link_m_n *m_n)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
 
 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
@@ -9095,7 +9049,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
 					 struct intel_link_m_n *m2_n2)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = crtc->pipe;
 
 	if (INTEL_INFO(dev)->gen >= 5) {
@@ -9153,7 +9107,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
 				    struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
 	uint32_t ps_ctrl = 0;
 	int id = -1;
@@ -9184,7 +9138,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 				 struct intel_initial_plane_config *plane_config)
 {
 	struct drm_device *dev = crtc->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 val, base, offset, stride_mult, tiling;
9189 int pipe = crtc->pipe; 9143 int pipe = crtc->pipe;
9190 int fourcc, pixel_format; 9144 int fourcc, pixel_format;
@@ -9267,7 +9221,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9267 struct intel_crtc_state *pipe_config) 9221 struct intel_crtc_state *pipe_config)
9268{ 9222{
9269 struct drm_device *dev = crtc->base.dev; 9223 struct drm_device *dev = crtc->base.dev;
9270 struct drm_i915_private *dev_priv = dev->dev_private; 9224 struct drm_i915_private *dev_priv = to_i915(dev);
9271 uint32_t tmp; 9225 uint32_t tmp;
9272 9226
9273 tmp = I915_READ(PF_CTL(crtc->pipe)); 9227 tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9292,7 +9246,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9292 struct intel_initial_plane_config *plane_config) 9246 struct intel_initial_plane_config *plane_config)
9293{ 9247{
9294 struct drm_device *dev = crtc->base.dev; 9248 struct drm_device *dev = crtc->base.dev;
9295 struct drm_i915_private *dev_priv = dev->dev_private; 9249 struct drm_i915_private *dev_priv = to_i915(dev);
9296 u32 val, base, offset; 9250 u32 val, base, offset;
9297 int pipe = crtc->pipe; 9251 int pipe = crtc->pipe;
9298 int fourcc, pixel_format; 9252 int fourcc, pixel_format;
@@ -9360,7 +9314,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9360 struct intel_crtc_state *pipe_config) 9314 struct intel_crtc_state *pipe_config)
9361{ 9315{
9362 struct drm_device *dev = crtc->base.dev; 9316 struct drm_device *dev = crtc->base.dev;
9363 struct drm_i915_private *dev_priv = dev->dev_private; 9317 struct drm_i915_private *dev_priv = to_i915(dev);
9364 enum intel_display_power_domain power_domain; 9318 enum intel_display_power_domain power_domain;
9365 uint32_t tmp; 9319 uint32_t tmp;
9366 bool ret; 9320 bool ret;
@@ -9455,7 +9409,7 @@ out:
9455 9409
9456static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9410static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9457{ 9411{
9458 struct drm_device *dev = dev_priv->dev; 9412 struct drm_device *dev = &dev_priv->drm;
9459 struct intel_crtc *crtc; 9413 struct intel_crtc *crtc;
9460 9414
9461 for_each_intel_crtc(dev, crtc) 9415 for_each_intel_crtc(dev, crtc)
@@ -9489,7 +9443,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9489 9443
9490static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 9444static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9491{ 9445{
9492 struct drm_device *dev = dev_priv->dev; 9446 struct drm_device *dev = &dev_priv->drm;
9493 9447
9494 if (IS_HASWELL(dev)) 9448 if (IS_HASWELL(dev))
9495 return I915_READ(D_COMP_HSW); 9449 return I915_READ(D_COMP_HSW);
@@ -9499,7 +9453,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9499 9453
9500static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 9454static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9501{ 9455{
9502 struct drm_device *dev = dev_priv->dev; 9456 struct drm_device *dev = &dev_priv->drm;
9503 9457
9504 if (IS_HASWELL(dev)) { 9458 if (IS_HASWELL(dev)) {
9505 mutex_lock(&dev_priv->rps.hw_lock); 9459 mutex_lock(&dev_priv->rps.hw_lock);
@@ -9534,8 +9488,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9534 val |= LCPLL_CD_SOURCE_FCLK; 9488 val |= LCPLL_CD_SOURCE_FCLK;
9535 I915_WRITE(LCPLL_CTL, val); 9489 I915_WRITE(LCPLL_CTL, val);
9536 9490
9537 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & 9491 if (wait_for_us(I915_READ(LCPLL_CTL) &
9538 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 9492 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9539 DRM_ERROR("Switching to FCLK failed\n"); 9493 DRM_ERROR("Switching to FCLK failed\n");
9540 9494
9541 val = I915_READ(LCPLL_CTL); 9495 val = I915_READ(LCPLL_CTL);
@@ -9545,7 +9499,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9545 I915_WRITE(LCPLL_CTL, val); 9499 I915_WRITE(LCPLL_CTL, val);
9546 POSTING_READ(LCPLL_CTL); 9500 POSTING_READ(LCPLL_CTL);
9547 9501
9548 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) 9502 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9549 DRM_ERROR("LCPLL still locked\n"); 9503 DRM_ERROR("LCPLL still locked\n");
9550 9504
9551 val = hsw_read_dcomp(dev_priv); 9505 val = hsw_read_dcomp(dev_priv);
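
[Editor's note] The two LCPLL hunks swap open-coded wait_for()/wait_for_atomic_us() polling for intel_wait_for_register(). Its contract, as used here, is: poll until (I915_READ(reg) & mask) == value, giving up after the timeout in milliseconds. A sketch of that semantic only (the real helper in intel_uncore.c also handles forcewake and a fast pre-poll internally):

    /* Sketch of the call contract; not the real implementation. */
    static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
                                        i915_reg_t reg, u32 mask, u32 value,
                                        unsigned int timeout_ms)
    {
            return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
    }

So intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1) reads as "wait up to 1 ms for the lock bit to clear", matching the wait_for(... == 0, 1) it replaces.
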
@@ -9600,7 +9554,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9600 val &= ~LCPLL_PLL_DISABLE; 9554 val &= ~LCPLL_PLL_DISABLE;
9601 I915_WRITE(LCPLL_CTL, val); 9555 I915_WRITE(LCPLL_CTL, val);
9602 9556
9603 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) 9557 if (intel_wait_for_register(dev_priv,
9558 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9559 5))
9604 DRM_ERROR("LCPLL not locked yet\n"); 9560 DRM_ERROR("LCPLL not locked yet\n");
9605 9561
9606 if (val & LCPLL_CD_SOURCE_FCLK) { 9562 if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -9608,13 +9564,13 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9608 val &= ~LCPLL_CD_SOURCE_FCLK; 9564 val &= ~LCPLL_CD_SOURCE_FCLK;
9609 I915_WRITE(LCPLL_CTL, val); 9565 I915_WRITE(LCPLL_CTL, val);
9610 9566
9611 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & 9567 if (wait_for_us((I915_READ(LCPLL_CTL) &
9612 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 9568 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9613 DRM_ERROR("Switching back to LCPLL failed\n"); 9569 DRM_ERROR("Switching back to LCPLL failed\n");
9614 } 9570 }
9615 9571
9616 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 9572 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9617 intel_update_cdclk(dev_priv->dev); 9573 intel_update_cdclk(&dev_priv->drm);
9618} 9574}
9619 9575
9620/* 9576/*
@@ -9642,7 +9598,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9642 */ 9598 */
9643void hsw_enable_pc8(struct drm_i915_private *dev_priv) 9599void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9644{ 9600{
9645 struct drm_device *dev = dev_priv->dev; 9601 struct drm_device *dev = &dev_priv->drm;
9646 uint32_t val; 9602 uint32_t val;
9647 9603
9648 DRM_DEBUG_KMS("Enabling package C8+\n"); 9604 DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9659,7 +9615,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9659 9615
9660void hsw_disable_pc8(struct drm_i915_private *dev_priv) 9616void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9661{ 9617{
9662 struct drm_device *dev = dev_priv->dev; 9618 struct drm_device *dev = &dev_priv->drm;
9663 uint32_t val; 9619 uint32_t val;
9664 9620
9665 DRM_DEBUG_KMS("Disabling package C8+\n"); 9621 DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9688,7 +9644,7 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9688static int ilk_max_pixel_rate(struct drm_atomic_state *state) 9644static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9689{ 9645{
9690 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 9646 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9691 struct drm_i915_private *dev_priv = state->dev->dev_private; 9647 struct drm_i915_private *dev_priv = to_i915(state->dev);
9692 struct drm_crtc *crtc; 9648 struct drm_crtc *crtc;
9693 struct drm_crtc_state *cstate; 9649 struct drm_crtc_state *cstate;
9694 struct intel_crtc_state *crtc_state; 9650 struct intel_crtc_state *crtc_state;
@@ -9724,7 +9680,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9724 9680
9725static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) 9681static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9726{ 9682{
9727 struct drm_i915_private *dev_priv = dev->dev_private; 9683 struct drm_i915_private *dev_priv = to_i915(dev);
9728 uint32_t val, data; 9684 uint32_t val, data;
9729 int ret; 9685 int ret;
9730 9686
@@ -9893,10 +9849,7 @@ static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9893static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9849static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9894 struct intel_crtc_state *crtc_state) 9850 struct intel_crtc_state *crtc_state)
9895{ 9851{
9896 struct intel_encoder *intel_encoder = 9852 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
9897 intel_ddi_get_crtc_new_encoder(crtc_state);
9898
9899 if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9900 if (!intel_ddi_pll_select(crtc, crtc_state)) 9853 if (!intel_ddi_pll_select(crtc, crtc_state))
9901 return -EINVAL; 9854 return -EINVAL;
9902 } 9855 }
@@ -10006,7 +9959,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
10006 unsigned long *power_domain_mask) 9959 unsigned long *power_domain_mask)
10007{ 9960{
10008 struct drm_device *dev = crtc->base.dev; 9961 struct drm_device *dev = crtc->base.dev;
10009 struct drm_i915_private *dev_priv = dev->dev_private; 9962 struct drm_i915_private *dev_priv = to_i915(dev);
10010 enum intel_display_power_domain power_domain; 9963 enum intel_display_power_domain power_domain;
10011 u32 tmp; 9964 u32 tmp;
10012 9965
@@ -10057,14 +10010,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10057 unsigned long *power_domain_mask) 10010 unsigned long *power_domain_mask)
10058{ 10011{
10059 struct drm_device *dev = crtc->base.dev; 10012 struct drm_device *dev = crtc->base.dev;
10060 struct drm_i915_private *dev_priv = dev->dev_private; 10013 struct drm_i915_private *dev_priv = to_i915(dev);
10061 enum intel_display_power_domain power_domain; 10014 enum intel_display_power_domain power_domain;
10062 enum port port; 10015 enum port port;
10063 enum transcoder cpu_transcoder; 10016 enum transcoder cpu_transcoder;
10064 u32 tmp; 10017 u32 tmp;
10065 10018
10066 pipe_config->has_dsi_encoder = false;
10067
10068 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10019 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10069 if (port == PORT_A) 10020 if (port == PORT_A)
10070 cpu_transcoder = TRANSCODER_DSI_A; 10021 cpu_transcoder = TRANSCODER_DSI_A;
@@ -10096,18 +10047,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10096 continue; 10047 continue;
10097 10048
10098 pipe_config->cpu_transcoder = cpu_transcoder; 10049 pipe_config->cpu_transcoder = cpu_transcoder;
10099 pipe_config->has_dsi_encoder = true;
10100 break; 10050 break;
10101 } 10051 }
10102 10052
10103 return pipe_config->has_dsi_encoder; 10053 return transcoder_is_dsi(pipe_config->cpu_transcoder);
10104} 10054}
10105 10055
10106static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 10056static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10107 struct intel_crtc_state *pipe_config) 10057 struct intel_crtc_state *pipe_config)
10108{ 10058{
10109 struct drm_device *dev = crtc->base.dev; 10059 struct drm_device *dev = crtc->base.dev;
10110 struct drm_i915_private *dev_priv = dev->dev_private; 10060 struct drm_i915_private *dev_priv = to_i915(dev);
10111 struct intel_shared_dpll *pll; 10061 struct intel_shared_dpll *pll;
10112 enum port port; 10062 enum port port;
10113 uint32_t tmp; 10063 uint32_t tmp;
@@ -10150,7 +10100,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10150 struct intel_crtc_state *pipe_config) 10100 struct intel_crtc_state *pipe_config)
10151{ 10101{
10152 struct drm_device *dev = crtc->base.dev; 10102 struct drm_device *dev = crtc->base.dev;
10153 struct drm_i915_private *dev_priv = dev->dev_private; 10103 struct drm_i915_private *dev_priv = to_i915(dev);
10154 enum intel_display_power_domain power_domain; 10104 enum intel_display_power_domain power_domain;
10155 unsigned long power_domain_mask; 10105 unsigned long power_domain_mask;
10156 bool active; 10106 bool active;
@@ -10164,18 +10114,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10164 10114
10165 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); 10115 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10166 10116
10167 if (IS_BROXTON(dev_priv)) { 10117 if (IS_BROXTON(dev_priv) &&
10168 bxt_get_dsi_transcoder_state(crtc, pipe_config, 10118 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
10169 &power_domain_mask); 10119 WARN_ON(active);
10170 WARN_ON(active && pipe_config->has_dsi_encoder); 10120 active = true;
10171 if (pipe_config->has_dsi_encoder)
10172 active = true;
10173 } 10121 }
10174 10122
10175 if (!active) 10123 if (!active)
10176 goto out; 10124 goto out;
10177 10125
10178 if (!pipe_config->has_dsi_encoder) { 10126 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10179 haswell_get_ddi_port_state(crtc, pipe_config); 10127 haswell_get_ddi_port_state(crtc, pipe_config);
10180 intel_get_pipe_timings(crtc, pipe_config); 10128 intel_get_pipe_timings(crtc, pipe_config);
10181 } 10129 }
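
[Editor's note] With has_dsi_encoder gone, the BXT readout path signals DSI by leaving pipe_config->cpu_transcoder set to a DSI transcoder, and callers test that with transcoder_is_dsi(). A sketch of the predicate, assuming the same TRANSCODER_DSI_A/TRANSCODER_DSI_C values used in the port loop above (the real helper lives outside this hunk):

    static inline bool transcoder_is_dsi(enum transcoder transcoder)
    {
            return transcoder == TRANSCODER_DSI_A ||
                   transcoder == TRANSCODER_DSI_C;
    }
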
@@ -10226,7 +10174,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10226 const struct intel_plane_state *plane_state) 10174 const struct intel_plane_state *plane_state)
10227{ 10175{
10228 struct drm_device *dev = crtc->dev; 10176 struct drm_device *dev = crtc->dev;
10229 struct drm_i915_private *dev_priv = dev->dev_private; 10177 struct drm_i915_private *dev_priv = to_i915(dev);
10230 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10178 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10231 uint32_t cntl = 0, size = 0; 10179 uint32_t cntl = 0, size = 0;
10232 10180
@@ -10289,7 +10237,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10289 const struct intel_plane_state *plane_state) 10237 const struct intel_plane_state *plane_state)
10290{ 10238{
10291 struct drm_device *dev = crtc->dev; 10239 struct drm_device *dev = crtc->dev;
10292 struct drm_i915_private *dev_priv = dev->dev_private; 10240 struct drm_i915_private *dev_priv = to_i915(dev);
10293 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10294 int pipe = intel_crtc->pipe; 10242 int pipe = intel_crtc->pipe;
10295 uint32_t cntl = 0; 10243 uint32_t cntl = 0;
@@ -10337,7 +10285,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10337 const struct intel_plane_state *plane_state) 10285 const struct intel_plane_state *plane_state)
10338{ 10286{
10339 struct drm_device *dev = crtc->dev; 10287 struct drm_device *dev = crtc->dev;
10340 struct drm_i915_private *dev_priv = dev->dev_private; 10288 struct drm_i915_private *dev_priv = to_i915(dev);
10341 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10289 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10342 int pipe = intel_crtc->pipe; 10290 int pipe = intel_crtc->pipe;
10343 u32 base = intel_crtc->cursor_addr; 10291 u32 base = intel_crtc->cursor_addr;
@@ -10504,7 +10452,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
10504 struct drm_display_mode *mode) 10452 struct drm_display_mode *mode)
10505{ 10453{
10506#ifdef CONFIG_DRM_FBDEV_EMULATION 10454#ifdef CONFIG_DRM_FBDEV_EMULATION
10507 struct drm_i915_private *dev_priv = dev->dev_private; 10455 struct drm_i915_private *dev_priv = to_i915(dev);
10508 struct drm_i915_gem_object *obj; 10456 struct drm_i915_gem_object *obj;
10509 struct drm_framebuffer *fb; 10457 struct drm_framebuffer *fb;
10510 10458
@@ -10774,7 +10722,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
10774static int i9xx_pll_refclk(struct drm_device *dev, 10722static int i9xx_pll_refclk(struct drm_device *dev,
10775 const struct intel_crtc_state *pipe_config) 10723 const struct intel_crtc_state *pipe_config)
10776{ 10724{
10777 struct drm_i915_private *dev_priv = dev->dev_private; 10725 struct drm_i915_private *dev_priv = to_i915(dev);
10778 u32 dpll = pipe_config->dpll_hw_state.dpll; 10726 u32 dpll = pipe_config->dpll_hw_state.dpll;
10779 10727
10780 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 10728 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
@@ -10792,7 +10740,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10792 struct intel_crtc_state *pipe_config) 10740 struct intel_crtc_state *pipe_config)
10793{ 10741{
10794 struct drm_device *dev = crtc->base.dev; 10742 struct drm_device *dev = crtc->base.dev;
10795 struct drm_i915_private *dev_priv = dev->dev_private; 10743 struct drm_i915_private *dev_priv = to_i915(dev);
10796 int pipe = pipe_config->cpu_transcoder; 10744 int pipe = pipe_config->cpu_transcoder;
10797 u32 dpll = pipe_config->dpll_hw_state.dpll; 10745 u32 dpll = pipe_config->dpll_hw_state.dpll;
10798 u32 fp; 10746 u32 fp;
@@ -10918,7 +10866,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10918struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 10866struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10919 struct drm_crtc *crtc) 10867 struct drm_crtc *crtc)
10920{ 10868{
10921 struct drm_i915_private *dev_priv = dev->dev_private; 10869 struct drm_i915_private *dev_priv = to_i915(dev);
10922 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10870 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10923 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 10871 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10924 struct drm_display_mode *mode; 10872 struct drm_display_mode *mode;
@@ -10970,31 +10918,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10970 return mode; 10918 return mode;
10971} 10919}
10972 10920
10973void intel_mark_busy(struct drm_i915_private *dev_priv)
10974{
10975 if (dev_priv->mm.busy)
10976 return;
10977
10978 intel_runtime_pm_get(dev_priv);
10979 i915_update_gfx_val(dev_priv);
10980 if (INTEL_GEN(dev_priv) >= 6)
10981 gen6_rps_busy(dev_priv);
10982 dev_priv->mm.busy = true;
10983}
10984
10985void intel_mark_idle(struct drm_i915_private *dev_priv)
10986{
10987 if (!dev_priv->mm.busy)
10988 return;
10989
10990 dev_priv->mm.busy = false;
10991
10992 if (INTEL_GEN(dev_priv) >= 6)
10993 gen6_rps_idle(dev_priv);
10994
10995 intel_runtime_pm_put(dev_priv);
10996}
10997
10998static void intel_crtc_destroy(struct drm_crtc *crtc) 10921static void intel_crtc_destroy(struct drm_crtc *crtc)
10999{ 10922{
11000 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -11056,7 +10979,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc,
11056 struct intel_flip_work *work) 10979 struct intel_flip_work *work)
11057{ 10980{
11058 struct drm_device *dev = crtc->base.dev; 10981 struct drm_device *dev = crtc->base.dev;
11059 struct drm_i915_private *dev_priv = dev->dev_private; 10982 struct drm_i915_private *dev_priv = to_i915(dev);
11060 unsigned reset_counter; 10983 unsigned reset_counter;
11061 10984
11062 reset_counter = i915_reset_counter(&dev_priv->gpu_error); 10985 reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -11132,7 +11055,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
11132 11055
11133void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) 11056void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11134{ 11057{
11135 struct drm_device *dev = dev_priv->dev; 11058 struct drm_device *dev = &dev_priv->drm;
11136 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11059 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11137 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11060 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11138 struct intel_flip_work *work; 11061 struct intel_flip_work *work;
@@ -11159,7 +11082,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11159 11082
11160void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) 11083void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
11161{ 11084{
11162 struct drm_device *dev = dev_priv->dev; 11085 struct drm_device *dev = &dev_priv->drm;
11163 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11086 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11164 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11087 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11165 struct intel_flip_work *work; 11088 struct intel_flip_work *work;
@@ -11267,7 +11190,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11267 uint32_t flags) 11190 uint32_t flags)
11268{ 11191{
11269 struct intel_engine_cs *engine = req->engine; 11192 struct intel_engine_cs *engine = req->engine;
11270 struct drm_i915_private *dev_priv = dev->dev_private; 11193 struct drm_i915_private *dev_priv = to_i915(dev);
11271 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11272 uint32_t pf, pipesrc; 11195 uint32_t pf, pipesrc;
11273 int ret; 11196 int ret;
@@ -11305,7 +11228,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11305 uint32_t flags) 11228 uint32_t flags)
11306{ 11229{
11307 struct intel_engine_cs *engine = req->engine; 11230 struct intel_engine_cs *engine = req->engine;
11308 struct drm_i915_private *dev_priv = dev->dev_private; 11231 struct drm_i915_private *dev_priv = to_i915(dev);
11309 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11232 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11310 uint32_t pf, pipesrc; 11233 uint32_t pf, pipesrc;
11311 int ret; 11234 int ret;
@@ -11464,7 +11387,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11464 struct intel_flip_work *work) 11387 struct intel_flip_work *work)
11465{ 11388{
11466 struct drm_device *dev = intel_crtc->base.dev; 11389 struct drm_device *dev = intel_crtc->base.dev;
11467 struct drm_i915_private *dev_priv = dev->dev_private; 11390 struct drm_i915_private *dev_priv = to_i915(dev);
11468 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 11391 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11469 const enum pipe pipe = intel_crtc->pipe; 11392 const enum pipe pipe = intel_crtc->pipe;
11470 u32 ctl, stride, tile_height; 11393 u32 ctl, stride, tile_height;
@@ -11516,7 +11439,7 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11516 struct intel_flip_work *work) 11439 struct intel_flip_work *work)
11517{ 11440{
11518 struct drm_device *dev = intel_crtc->base.dev; 11441 struct drm_device *dev = intel_crtc->base.dev;
11519 struct drm_i915_private *dev_priv = dev->dev_private; 11442 struct drm_i915_private *dev_priv = to_i915(dev);
11520 struct intel_framebuffer *intel_fb = 11443 struct intel_framebuffer *intel_fb =
11521 to_intel_framebuffer(intel_crtc->base.primary->fb); 11444 to_intel_framebuffer(intel_crtc->base.primary->fb);
11522 struct drm_i915_gem_object *obj = intel_fb->obj; 11445 struct drm_i915_gem_object *obj = intel_fb->obj;
@@ -11593,7 +11516,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11593 vblank = intel_crtc_get_vblank_counter(intel_crtc); 11516 vblank = intel_crtc_get_vblank_counter(intel_crtc);
11594 if (work->flip_ready_vblank == 0) { 11517 if (work->flip_ready_vblank == 0) {
11595 if (work->flip_queued_req && 11518 if (work->flip_queued_req &&
11596 !i915_gem_request_completed(work->flip_queued_req, true)) 11519 !i915_gem_request_completed(work->flip_queued_req))
11597 return false; 11520 return false;
11598 11521
11599 work->flip_ready_vblank = vblank; 11522 work->flip_ready_vblank = vblank;
@@ -11618,7 +11541,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11618 11541
11619void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) 11542void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11620{ 11543{
11621 struct drm_device *dev = dev_priv->dev; 11544 struct drm_device *dev = &dev_priv->drm;
11622 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11545 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11546 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11624 struct intel_flip_work *work; 11547 struct intel_flip_work *work;
@@ -11646,14 +11569,13 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11646 spin_unlock(&dev->event_lock); 11569 spin_unlock(&dev->event_lock);
11647} 11570}
11648 11571
11649__maybe_unused
11650static int intel_crtc_page_flip(struct drm_crtc *crtc, 11572static int intel_crtc_page_flip(struct drm_crtc *crtc,
11651 struct drm_framebuffer *fb, 11573 struct drm_framebuffer *fb,
11652 struct drm_pending_vblank_event *event, 11574 struct drm_pending_vblank_event *event,
11653 uint32_t page_flip_flags) 11575 uint32_t page_flip_flags)
11654{ 11576{
11655 struct drm_device *dev = crtc->dev; 11577 struct drm_device *dev = crtc->dev;
11656 struct drm_i915_private *dev_priv = dev->dev_private; 11578 struct drm_i915_private *dev_priv = to_i915(dev);
11657 struct drm_framebuffer *old_fb = crtc->primary->fb; 11579 struct drm_framebuffer *old_fb = crtc->primary->fb;
11658 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11580 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11659 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11581 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -11949,8 +11871,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11949 struct drm_framebuffer *fb = plane_state->fb; 11871 struct drm_framebuffer *fb = plane_state->fb;
11950 int ret; 11872 int ret;
11951 11873
11952 if (crtc_state && INTEL_INFO(dev)->gen >= 9 && 11874 if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
11953 plane->type != DRM_PLANE_TYPE_CURSOR) {
11954 ret = skl_update_scaler_plane( 11875 ret = skl_update_scaler_plane(
11955 to_intel_crtc_state(crtc_state), 11876 to_intel_crtc_state(crtc_state),
11956 to_intel_plane_state(plane_state)); 11877 to_intel_plane_state(plane_state));
@@ -12067,31 +11988,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12067 return true; 11988 return true;
12068} 11989}
12069 11990
12070static bool check_encoder_cloning(struct drm_atomic_state *state,
12071 struct intel_crtc *crtc)
12072{
12073 struct intel_encoder *encoder;
12074 struct drm_connector *connector;
12075 struct drm_connector_state *connector_state;
12076 int i;
12077
12078 for_each_connector_in_state(state, connector, connector_state, i) {
12079 if (connector_state->crtc != &crtc->base)
12080 continue;
12081
12082 encoder = to_intel_encoder(connector_state->best_encoder);
12083 if (!check_single_encoder_cloning(state, crtc, encoder))
12084 return false;
12085 }
12086
12087 return true;
12088}
12089
12090static int intel_crtc_atomic_check(struct drm_crtc *crtc, 11991static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12091 struct drm_crtc_state *crtc_state) 11992 struct drm_crtc_state *crtc_state)
12092{ 11993{
12093 struct drm_device *dev = crtc->dev; 11994 struct drm_device *dev = crtc->dev;
12094 struct drm_i915_private *dev_priv = dev->dev_private; 11995 struct drm_i915_private *dev_priv = to_i915(dev);
12095 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11996 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12096 struct intel_crtc_state *pipe_config = 11997 struct intel_crtc_state *pipe_config =
12097 to_intel_crtc_state(crtc_state); 11998 to_intel_crtc_state(crtc_state);
@@ -12099,11 +12000,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12099 int ret; 12000 int ret;
12100 bool mode_changed = needs_modeset(crtc_state); 12001 bool mode_changed = needs_modeset(crtc_state);
12101 12002
12102 if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
12103 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12104 return -EINVAL;
12105 }
12106
12107 if (mode_changed && !crtc_state->active) 12003 if (mode_changed && !crtc_state->active)
12108 pipe_config->update_wm_post = true; 12004 pipe_config->update_wm_post = true;
12109 12005
@@ -12299,14 +12195,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12299 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 12195 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12300 pipe_config->fdi_m_n.tu); 12196 pipe_config->fdi_m_n.tu);
12301 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12197 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12302 pipe_config->has_dp_encoder, 12198 intel_crtc_has_dp_encoder(pipe_config),
12303 pipe_config->lane_count, 12199 pipe_config->lane_count,
12304 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, 12200 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12305 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, 12201 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12306 pipe_config->dp_m_n.tu); 12202 pipe_config->dp_m_n.tu);
12307 12203
12308 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", 12204 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12309 pipe_config->has_dp_encoder, 12205 intel_crtc_has_dp_encoder(pipe_config),
12310 pipe_config->lane_count, 12206 pipe_config->lane_count,
12311 pipe_config->dp_m2_n2.gmch_m, 12207 pipe_config->dp_m2_n2.gmch_m,
12312 pipe_config->dp_m2_n2.gmch_n, 12208 pipe_config->dp_m2_n2.gmch_n,
@@ -12439,7 +12335,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12439 case INTEL_OUTPUT_UNKNOWN: 12335 case INTEL_OUTPUT_UNKNOWN:
12440 if (WARN_ON(!HAS_DDI(dev))) 12336 if (WARN_ON(!HAS_DDI(dev)))
12441 break; 12337 break;
12442 case INTEL_OUTPUT_DISPLAYPORT: 12338 case INTEL_OUTPUT_DP:
12443 case INTEL_OUTPUT_HDMI: 12339 case INTEL_OUTPUT_HDMI:
12444 case INTEL_OUTPUT_EDP: 12340 case INTEL_OUTPUT_EDP:
12445 port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 12341 port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
@@ -12536,6 +12432,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
12536 &pipe_config->pipe_src_w, 12432 &pipe_config->pipe_src_w,
12537 &pipe_config->pipe_src_h); 12433 &pipe_config->pipe_src_h);
12538 12434
12435 for_each_connector_in_state(state, connector, connector_state, i) {
12436 if (connector_state->crtc != crtc)
12437 continue;
12438
12439 encoder = to_intel_encoder(connector_state->best_encoder);
12440
12441 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12442 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12443 goto fail;
12444 }
12445
12446 /*
12447 * Determine output_types before calling the .compute_config()
12448 * hooks so that the hooks can use this information safely.
12449 */
12450 pipe_config->output_types |= 1 << encoder->type;
12451 }
12452
12539encoder_retry: 12453encoder_retry:
12540 /* Ensure the port clock defaults are reset when retrying. */ 12454 /* Ensure the port clock defaults are reset when retrying. */
12541 pipe_config->port_clock = 0; 12455 pipe_config->port_clock = 0;
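
[Editor's note] This added loop is the heart of the series: output_types is derived once from the connector states, before any encoder ->compute_config() hook runs, and the cloning check formerly in intel_crtc_atomic_check() (removed above) rides along in the same walk. Downstream hooks then query the mask instead of re-walking the atomic state, as in the ironlake hunk earlier in this diff:

    /* Consumer example, lifted from the ironlake_compute_dpll() hunk above. */
    if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
            dpll |= DPLLB_MODE_LVDS;
    else
            dpll |= DPLLB_MODE_DAC_SERIAL;
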
@@ -12821,7 +12735,6 @@ intel_pipe_config_compare(struct drm_device *dev,
12821 PIPE_CONF_CHECK_I(fdi_lanes); 12735 PIPE_CONF_CHECK_I(fdi_lanes);
12822 PIPE_CONF_CHECK_M_N(fdi_m_n); 12736 PIPE_CONF_CHECK_M_N(fdi_m_n);
12823 12737
12824 PIPE_CONF_CHECK_I(has_dp_encoder);
12825 PIPE_CONF_CHECK_I(lane_count); 12738 PIPE_CONF_CHECK_I(lane_count);
12826 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 12739 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12827 12740
@@ -12833,7 +12746,7 @@ intel_pipe_config_compare(struct drm_device *dev,
12833 } else 12746 } else
12834 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12747 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12835 12748
12836 PIPE_CONF_CHECK_I(has_dsi_encoder); 12749 PIPE_CONF_CHECK_X(output_types);
12837 12750
12838 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12751 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12839 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12752 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
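
[Editor's note] output_types is compared with the hex flavour of the check macro since it is a bitmask. Assuming it mirrors the other PIPE_CONF_CHECK_* macros defined earlier in intel_pipe_config_compare(), it would look roughly like this sketch:

    /* Sketch under that assumption; INTEL_ERR_OR_DBG_KMS is the reporting
     * macro the PIPE_CONF_CHECK_* family in this function already uses. */
    #define PIPE_CONF_CHECK_X(name) \
            if (current_config->name != pipe_config->name) { \
                    INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
                                         "(expected 0x%08x, found 0x%08x)\n", \
                                         current_config->name, \
                                         pipe_config->name); \
                    ret = false; \
            }
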
@@ -12952,7 +12865,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
12952 struct drm_crtc_state *new_state) 12865 struct drm_crtc_state *new_state)
12953{ 12866{
12954 struct drm_device *dev = crtc->dev; 12867 struct drm_device *dev = crtc->dev;
12955 struct drm_i915_private *dev_priv = dev->dev_private; 12868 struct drm_i915_private *dev_priv = to_i915(dev);
12956 struct skl_ddb_allocation hw_ddb, *sw_ddb; 12869 struct skl_ddb_allocation hw_ddb, *sw_ddb;
12957 struct skl_ddb_entry *hw_entry, *sw_entry; 12870 struct skl_ddb_entry *hw_entry, *sw_entry;
12958 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12871 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -13058,7 +12971,7 @@ verify_crtc_state(struct drm_crtc *crtc,
13058 struct drm_crtc_state *new_crtc_state) 12971 struct drm_crtc_state *new_crtc_state)
13059{ 12972{
13060 struct drm_device *dev = crtc->dev; 12973 struct drm_device *dev = crtc->dev;
13061 struct drm_i915_private *dev_priv = dev->dev_private; 12974 struct drm_i915_private *dev_priv = to_i915(dev);
13062 struct intel_encoder *encoder; 12975 struct intel_encoder *encoder;
13063 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13064 struct intel_crtc_state *pipe_config, *sw_config; 12977 struct intel_crtc_state *pipe_config, *sw_config;
@@ -13101,8 +13014,10 @@ verify_crtc_state(struct drm_crtc *crtc,
13101 "Encoder connected to wrong pipe %c\n", 13014 "Encoder connected to wrong pipe %c\n",
13102 pipe_name(pipe)); 13015 pipe_name(pipe));
13103 13016
13104 if (active) 13017 if (active) {
13018 pipe_config->output_types |= 1 << encoder->type;
13105 encoder->get_config(encoder, pipe_config); 13019 encoder->get_config(encoder, pipe_config);
13020 }
13106 } 13021 }
13107 13022
13108 if (!new_crtc_state->active) 13023 if (!new_crtc_state->active)
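
[Editor's note] The verification path has to mimic the compute path: since ->get_config() hooks may now consult output_types, the caller seeds the active encoder's bit before invoking the hook. The hardware readout in intel_modeset_readout_hw_state() (hunk near the end of this file) applies the identical two-step:

    /* Same pattern as the readout hunk below: seed the bit, then let
     * the encoder hook refine the state. */
    crtc->config->output_types |= 1 << encoder->type;
    encoder->get_config(encoder, crtc->config);
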
@@ -13181,7 +13096,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13181 struct drm_crtc_state *old_crtc_state, 13096 struct drm_crtc_state *old_crtc_state,
13182 struct drm_crtc_state *new_crtc_state) 13097 struct drm_crtc_state *new_crtc_state)
13183{ 13098{
13184 struct drm_i915_private *dev_priv = dev->dev_private; 13099 struct drm_i915_private *dev_priv = to_i915(dev);
13185 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); 13100 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13186 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); 13101 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13187 13102
@@ -13220,7 +13135,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
13220static void 13135static void
13221verify_disabled_dpll_state(struct drm_device *dev) 13136verify_disabled_dpll_state(struct drm_device *dev)
13222{ 13137{
13223 struct drm_i915_private *dev_priv = dev->dev_private; 13138 struct drm_i915_private *dev_priv = to_i915(dev);
13224 int i; 13139 int i;
13225 13140
13226 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13141 for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13267,7 +13182,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
13267 13182
13268 crtc->scanline_offset = vtotal - 1; 13183 crtc->scanline_offset = vtotal - 1;
13269 } else if (HAS_DDI(dev) && 13184 } else if (HAS_DDI(dev) &&
13270 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 13185 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
13271 crtc->scanline_offset = 2; 13186 crtc->scanline_offset = 2;
13272 } else 13187 } else
13273 crtc->scanline_offset = 1; 13188 crtc->scanline_offset = 1;
@@ -13402,7 +13317,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13402static int intel_modeset_checks(struct drm_atomic_state *state) 13317static int intel_modeset_checks(struct drm_atomic_state *state)
13403{ 13318{
13404 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13319 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13405 struct drm_i915_private *dev_priv = state->dev->dev_private; 13320 struct drm_i915_private *dev_priv = to_i915(state->dev);
13406 struct drm_crtc *crtc; 13321 struct drm_crtc *crtc;
13407 struct drm_crtc_state *crtc_state; 13322 struct drm_crtc_state *crtc_state;
13408 int ret = 0, i; 13323 int ret = 0, i;
@@ -13568,7 +13483,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
13568 struct drm_atomic_state *state, 13483 struct drm_atomic_state *state,
13569 bool nonblock) 13484 bool nonblock)
13570{ 13485{
13571 struct drm_i915_private *dev_priv = dev->dev_private; 13486 struct drm_i915_private *dev_priv = to_i915(dev);
13572 struct drm_plane_state *plane_state; 13487 struct drm_plane_state *plane_state;
13573 struct drm_crtc_state *crtc_state; 13488 struct drm_crtc_state *crtc_state;
13574 struct drm_plane *plane; 13489 struct drm_plane *plane;
@@ -13697,7 +13612,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13697{ 13612{
13698 struct drm_device *dev = state->dev; 13613 struct drm_device *dev = state->dev;
13699 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13614 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13700 struct drm_i915_private *dev_priv = dev->dev_private; 13615 struct drm_i915_private *dev_priv = to_i915(dev);
13701 struct drm_crtc_state *old_crtc_state; 13616 struct drm_crtc_state *old_crtc_state;
13702 struct drm_crtc *crtc; 13617 struct drm_crtc *crtc;
13703 struct intel_crtc_state *intel_cstate; 13618 struct intel_crtc_state *intel_cstate;
@@ -13929,7 +13844,7 @@ static int intel_atomic_commit(struct drm_device *dev,
13929 bool nonblock) 13844 bool nonblock)
13930{ 13845{
13931 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13846 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13932 struct drm_i915_private *dev_priv = dev->dev_private; 13847 struct drm_i915_private *dev_priv = to_i915(dev);
13933 int ret = 0; 13848 int ret = 0;
13934 13849
13935 if (intel_state->modeset && nonblock) { 13850 if (intel_state->modeset && nonblock) {
@@ -14008,7 +13923,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
14008 .set_config = drm_atomic_helper_set_config, 13923 .set_config = drm_atomic_helper_set_config,
14009 .set_property = drm_atomic_helper_crtc_set_property, 13924 .set_property = drm_atomic_helper_crtc_set_property,
14010 .destroy = intel_crtc_destroy, 13925 .destroy = intel_crtc_destroy,
14011 .page_flip = drm_atomic_helper_page_flip, 13926 .page_flip = intel_crtc_page_flip,
14012 .atomic_duplicate_state = intel_crtc_duplicate_state, 13927 .atomic_duplicate_state = intel_crtc_duplicate_state,
14013 .atomic_destroy_state = intel_crtc_destroy_state, 13928 .atomic_destroy_state = intel_crtc_destroy_state,
14014}; 13929};
@@ -14136,15 +14051,11 @@ int
14136skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) 14051skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
14137{ 14052{
14138 int max_scale; 14053 int max_scale;
14139 struct drm_device *dev;
14140 struct drm_i915_private *dev_priv;
14141 int crtc_clock, cdclk; 14054 int crtc_clock, cdclk;
14142 14055
14143 if (!intel_crtc || !crtc_state->base.enable) 14056 if (!intel_crtc || !crtc_state->base.enable)
14144 return DRM_PLANE_HELPER_NO_SCALING; 14057 return DRM_PLANE_HELPER_NO_SCALING;
14145 14058
14146 dev = intel_crtc->base.dev;
14147 dev_priv = dev->dev_private;
14148 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 14059 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14149 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; 14060 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
14150 14061
@@ -14534,7 +14445,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
14534 14445
14535static void intel_crtc_init(struct drm_device *dev, int pipe) 14446static void intel_crtc_init(struct drm_device *dev, int pipe)
14536{ 14447{
14537 struct drm_i915_private *dev_priv = dev->dev_private; 14448 struct drm_i915_private *dev_priv = to_i915(dev);
14538 struct intel_crtc *intel_crtc; 14449 struct intel_crtc *intel_crtc;
14539 struct intel_crtc_state *crtc_state = NULL; 14450 struct intel_crtc_state *crtc_state = NULL;
14540 struct drm_plane *primary = NULL; 14451 struct drm_plane *primary = NULL;
@@ -14633,11 +14544,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14633 struct intel_crtc *crtc; 14544 struct intel_crtc *crtc;
14634 14545
14635 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 14546 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14636 14547 if (!drmmode_crtc)
14637 if (!drmmode_crtc) {
14638 DRM_ERROR("no such CRTC id\n");
14639 return -ENOENT; 14548 return -ENOENT;
14640 }
14641 14549
14642 crtc = to_intel_crtc(drmmode_crtc); 14550 crtc = to_intel_crtc(drmmode_crtc);
14643 pipe_from_crtc_id->pipe = crtc->pipe; 14551 pipe_from_crtc_id->pipe = crtc->pipe;
@@ -14664,7 +14572,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
14664 14572
14665static bool has_edp_a(struct drm_device *dev) 14573static bool has_edp_a(struct drm_device *dev)
14666{ 14574{
14667 struct drm_i915_private *dev_priv = dev->dev_private; 14575 struct drm_i915_private *dev_priv = to_i915(dev);
14668 14576
14669 if (!IS_MOBILE(dev)) 14577 if (!IS_MOBILE(dev))
14670 return false; 14578 return false;
@@ -14680,7 +14588,7 @@ static bool has_edp_a(struct drm_device *dev)
14680 14588
14681static bool intel_crt_present(struct drm_device *dev) 14589static bool intel_crt_present(struct drm_device *dev)
14682{ 14590{
14683 struct drm_i915_private *dev_priv = dev->dev_private; 14591 struct drm_i915_private *dev_priv = to_i915(dev);
14684 14592
14685 if (INTEL_INFO(dev)->gen >= 9) 14593 if (INTEL_INFO(dev)->gen >= 9)
14686 return false; 14594 return false;
@@ -14706,10 +14614,15 @@ static bool intel_crt_present(struct drm_device *dev)
14706 14614
14707static void intel_setup_outputs(struct drm_device *dev) 14615static void intel_setup_outputs(struct drm_device *dev)
14708{ 14616{
14709 struct drm_i915_private *dev_priv = dev->dev_private; 14617 struct drm_i915_private *dev_priv = to_i915(dev);
14710 struct intel_encoder *encoder; 14618 struct intel_encoder *encoder;
14711 bool dpd_is_edp = false; 14619 bool dpd_is_edp = false;
14712 14620
14621 /*
14622 * intel_edp_init_connector() depends on this completing first, to
14623 * prevent the registration of both eDP and LVDS and the incorrect
14624 * sharing of the PPS.
14625 */
14713 intel_lvds_init(dev); 14626 intel_lvds_init(dev);
14714 14627
14715 if (intel_crt_present(dev)) 14628 if (intel_crt_present(dev))
@@ -15354,7 +15267,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15354 */ 15267 */
15355static void quirk_pipea_force(struct drm_device *dev) 15268static void quirk_pipea_force(struct drm_device *dev)
15356{ 15269{
15357 struct drm_i915_private *dev_priv = dev->dev_private; 15270 struct drm_i915_private *dev_priv = to_i915(dev);
15358 15271
15359 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 15272 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15360 DRM_INFO("applying pipe a force quirk\n"); 15273 DRM_INFO("applying pipe a force quirk\n");
@@ -15362,7 +15275,7 @@ static void quirk_pipea_force(struct drm_device *dev)
15362 15275
15363static void quirk_pipeb_force(struct drm_device *dev) 15276static void quirk_pipeb_force(struct drm_device *dev)
15364{ 15277{
15365 struct drm_i915_private *dev_priv = dev->dev_private; 15278 struct drm_i915_private *dev_priv = to_i915(dev);
15366 15279
15367 dev_priv->quirks |= QUIRK_PIPEB_FORCE; 15280 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15368 DRM_INFO("applying pipe b force quirk\n"); 15281 DRM_INFO("applying pipe b force quirk\n");
@@ -15373,7 +15286,7 @@ static void quirk_pipeb_force(struct drm_device *dev)
15373 */ 15286 */
15374static void quirk_ssc_force_disable(struct drm_device *dev) 15287static void quirk_ssc_force_disable(struct drm_device *dev)
15375{ 15288{
15376 struct drm_i915_private *dev_priv = dev->dev_private; 15289 struct drm_i915_private *dev_priv = to_i915(dev);
15377 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 15290 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15378 DRM_INFO("applying lvds SSC disable quirk\n"); 15291 DRM_INFO("applying lvds SSC disable quirk\n");
15379} 15292}
@@ -15384,7 +15297,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
15384 */ 15297 */
15385static void quirk_invert_brightness(struct drm_device *dev) 15298static void quirk_invert_brightness(struct drm_device *dev)
15386{ 15299{
15387 struct drm_i915_private *dev_priv = dev->dev_private; 15300 struct drm_i915_private *dev_priv = to_i915(dev);
15388 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 15301 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15389 DRM_INFO("applying inverted panel brightness quirk\n"); 15302 DRM_INFO("applying inverted panel brightness quirk\n");
15390} 15303}
@@ -15392,7 +15305,7 @@ static void quirk_invert_brightness(struct drm_device *dev)
15392/* Some VBT's incorrectly indicate no backlight is present */ 15305/* Some VBT's incorrectly indicate no backlight is present */
15393static void quirk_backlight_present(struct drm_device *dev) 15306static void quirk_backlight_present(struct drm_device *dev)
15394{ 15307{
15395 struct drm_i915_private *dev_priv = dev->dev_private; 15308 struct drm_i915_private *dev_priv = to_i915(dev);
15396 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 15309 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15397 DRM_INFO("applying backlight present quirk\n"); 15310 DRM_INFO("applying backlight present quirk\n");
15398} 15311}
@@ -15518,7 +15431,7 @@ static void intel_init_quirks(struct drm_device *dev)
15518/* Disable the VGA plane that we never use */ 15431/* Disable the VGA plane that we never use */
15519static void i915_disable_vga(struct drm_device *dev) 15432static void i915_disable_vga(struct drm_device *dev)
15520{ 15433{
15521 struct drm_i915_private *dev_priv = dev->dev_private; 15434 struct drm_i915_private *dev_priv = to_i915(dev);
15522 u8 sr1; 15435 u8 sr1;
15523 i915_reg_t vga_reg = i915_vgacntrl_reg(dev); 15436 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15524 15437
@@ -15536,7 +15449,7 @@ static void i915_disable_vga(struct drm_device *dev)
15536 15449
15537void intel_modeset_init_hw(struct drm_device *dev) 15450void intel_modeset_init_hw(struct drm_device *dev)
15538{ 15451{
15539 struct drm_i915_private *dev_priv = dev->dev_private; 15452 struct drm_i915_private *dev_priv = to_i915(dev);
15540 15453
15541 intel_update_cdclk(dev); 15454 intel_update_cdclk(dev);
15542 15455
@@ -15784,7 +15697,7 @@ static bool
15784intel_check_plane_mapping(struct intel_crtc *crtc) 15697intel_check_plane_mapping(struct intel_crtc *crtc)
15785{ 15698{
15786 struct drm_device *dev = crtc->base.dev; 15699 struct drm_device *dev = crtc->base.dev;
15787 struct drm_i915_private *dev_priv = dev->dev_private; 15700 struct drm_i915_private *dev_priv = to_i915(dev);
15788 u32 val; 15701 u32 val;
15789 15702
15790 if (INTEL_INFO(dev)->num_pipes == 1) 15703 if (INTEL_INFO(dev)->num_pipes == 1)
@@ -15824,7 +15737,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15824static void intel_sanitize_crtc(struct intel_crtc *crtc) 15737static void intel_sanitize_crtc(struct intel_crtc *crtc)
15825{ 15738{
15826 struct drm_device *dev = crtc->base.dev; 15739 struct drm_device *dev = crtc->base.dev;
15827 struct drm_i915_private *dev_priv = dev->dev_private; 15740 struct drm_i915_private *dev_priv = to_i915(dev);
15828 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 15741 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
15829 15742
15830 /* Clear any frame start delays used for debugging left by the BIOS */ 15743 /* Clear any frame start delays used for debugging left by the BIOS */
@@ -15949,7 +15862,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15949 15862
15950void i915_redisable_vga_power_on(struct drm_device *dev) 15863void i915_redisable_vga_power_on(struct drm_device *dev)
15951{ 15864{
15952 struct drm_i915_private *dev_priv = dev->dev_private; 15865 struct drm_i915_private *dev_priv = to_i915(dev);
15953 i915_reg_t vga_reg = i915_vgacntrl_reg(dev); 15866 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15954 15867
15955 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15868 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
@@ -15960,7 +15873,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev)
15960 15873
15961void i915_redisable_vga(struct drm_device *dev) 15874void i915_redisable_vga(struct drm_device *dev)
15962{ 15875{
15963 struct drm_i915_private *dev_priv = dev->dev_private; 15876 struct drm_i915_private *dev_priv = to_i915(dev);
15964 15877
15965 /* This function can be called both from intel_modeset_setup_hw_state or 15878 /* This function can be called both from intel_modeset_setup_hw_state or
15966 * at a very early point in our resume sequence, where the power well 15879 * at a very early point in our resume sequence, where the power well
@@ -16000,7 +15913,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
16000 15913
16001static void intel_modeset_readout_hw_state(struct drm_device *dev) 15914static void intel_modeset_readout_hw_state(struct drm_device *dev)
16002{ 15915{
16003 struct drm_i915_private *dev_priv = dev->dev_private; 15916 struct drm_i915_private *dev_priv = to_i915(dev);
16004 enum pipe pipe; 15917 enum pipe pipe;
16005 struct intel_crtc *crtc; 15918 struct intel_crtc *crtc;
16006 struct intel_encoder *encoder; 15919 struct intel_encoder *encoder;
@@ -16069,6 +15982,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
16069 if (encoder->get_hw_state(encoder, &pipe)) { 15982 if (encoder->get_hw_state(encoder, &pipe)) {
16070 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 15983 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
16071 encoder->base.crtc = &crtc->base; 15984 encoder->base.crtc = &crtc->base;
15985 crtc->config->output_types |= 1 << encoder->type;
16072 encoder->get_config(encoder, crtc->config); 15986 encoder->get_config(encoder, crtc->config);
16073 } else { 15987 } else {
16074 encoder->base.crtc = NULL; 15988 encoder->base.crtc = NULL;
@@ -16153,7 +16067,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
16153static void 16067static void
16154intel_modeset_setup_hw_state(struct drm_device *dev) 16068intel_modeset_setup_hw_state(struct drm_device *dev)
16155{ 16069{
16156 struct drm_i915_private *dev_priv = dev->dev_private; 16070 struct drm_i915_private *dev_priv = to_i915(dev);
16157 enum pipe pipe; 16071 enum pipe pipe;
16158 struct intel_crtc *crtc; 16072 struct intel_crtc *crtc;
16159 struct intel_encoder *encoder; 16073 struct intel_encoder *encoder;
@@ -16309,8 +16223,21 @@ void intel_modeset_gem_init(struct drm_device *dev)
16309 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); 16223 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16310 } 16224 }
16311 } 16225 }
16226}
16227
16228int intel_connector_register(struct drm_connector *connector)
16229{
16230 struct intel_connector *intel_connector = to_intel_connector(connector);
16231 int ret;
16232
16233 ret = intel_backlight_device_register(intel_connector);
16234 if (ret)
16235 goto err;
16312 16236
16313 intel_backlight_register(dev); 16237 return 0;
16238
16239err:
16240 return ret;
16314} 16241}
16315 16242
16316void intel_connector_unregister(struct drm_connector *connector) 16243void intel_connector_unregister(struct drm_connector *connector)
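
[Editor's note] The new intel_connector_register() replaces the single driver-wide intel_backlight_register(dev) call with per-connector registration. How it gets wired up is not shown in this hunk; one assumption consistent with the change is that connectors reference it from their funcs table so it runs from drm_connector_register(), e.g.:

    /* Hypothetical hookup -- an assumption, not part of this diff. */
    static const struct drm_connector_funcs example_connector_funcs = {
            /* ... */
            .late_register = intel_connector_register,
            .early_unregister = intel_connector_unregister,
            /* ... */
    };
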
@@ -16323,7 +16250,7 @@ void intel_connector_unregister(struct drm_connector *connector)
16323 16250
16324void intel_modeset_cleanup(struct drm_device *dev) 16251void intel_modeset_cleanup(struct drm_device *dev)
16325{ 16252{
16326 struct drm_i915_private *dev_priv = dev->dev_private; 16253 struct drm_i915_private *dev_priv = to_i915(dev);
16327 16254
16328 intel_disable_gt_powersave(dev_priv); 16255 intel_disable_gt_powersave(dev_priv);
16329 16256
@@ -16347,8 +16274,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
16347 /* flush any delayed tasks or pending work */ 16274 /* flush any delayed tasks or pending work */
16348 flush_scheduled_work(); 16275 flush_scheduled_work();
16349 16276
16350 drm_connector_unregister_all(dev);
16351
16352 drm_mode_config_cleanup(dev); 16277 drm_mode_config_cleanup(dev);
16353 16278
16354 intel_cleanup_overlay(dev_priv); 16279 intel_cleanup_overlay(dev_priv);
@@ -16371,7 +16296,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
16371 */ 16296 */
16372int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 16297int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16373{ 16298{
16374 struct drm_i915_private *dev_priv = dev->dev_private; 16299 struct drm_i915_private *dev_priv = to_i915(dev);
16375 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 16300 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16376 u16 gmch_ctrl; 16301 u16 gmch_ctrl;
16377 16302
@@ -16527,7 +16452,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 				struct drm_device *dev,
 				struct intel_display_error_state *error)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int i;
 
 	if (!error)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index ffa43eca14d3..0c5ba3410a1e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -262,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *encoder = &intel_dig_port->base;
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 
 	/*
@@ -280,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *encoder = &intel_dig_port->base;
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 
 	mutex_unlock(&dev_priv->pps_mutex);
@@ -294,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe = intel_dp->pps_pipe;
 	bool pll_enabled, release_cl_override = false;
 	enum dpio_phy phy = DPIO_PHY(pipe);
@@ -368,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
 	enum pipe pipe;
@@ -426,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
 	return intel_dp->pps_pipe;
 }
 
+static int
+bxt_power_sequencer_idx(struct intel_dp *intel_dp)
+{
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
+
+	/* We should never land here with regular DP ports */
+	WARN_ON(!is_edp(intel_dp));
+
+	/*
+	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
+	 * mapping needs to be retrieved from VBT, for now just hard-code to
+	 * use instance #0 always.
+	 */
+	if (!intel_dp->pps_reset)
+		return 0;
+
+	intel_dp->pps_reset = false;
+
+	/*
+	 * Only the HW needs to be reprogrammed, the SW state is fixed and
+	 * has been setup during connector init.
+	 */
+	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
+
+	return 0;
+}
+
 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
 			       enum pipe pipe);
 
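Note: bxt_power_sequencer_idx() pairs with the pps_reset flag set in intel_power_sequencer_reset() further down: a reset only marks the PPS dirty, and the hardware is reprogrammed lazily on the next lookup. A condensed, runnable sketch of that handshake, with a simplified stand-in struct:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for the per-port PPS bookkeeping. */
	struct pps_state { bool pps_reset; };

	/* Reset path: only mark the PPS dirty, defer the register writes. */
	static void pps_mark_dirty(struct pps_state *pps)
	{
		pps->pps_reset = true;
	}

	/* Lookup path: reprogram at most once per reset, then clear the flag. */
	static bool pps_take_reprogram(struct pps_state *pps)
	{
		if (!pps->pps_reset)
			return false;
		pps->pps_reset = false;
		return true;
	}

	int main(void)
	{
		struct pps_state pps = { .pps_reset = true };

		printf("%d %d\n", pps_take_reprogram(&pps), pps_take_reprogram(&pps));
		pps_mark_dirty(&pps);
		printf("%d\n", pps_take_reprogram(&pps));
		return 0;
	}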
@@ -475,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
@@ -507,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
 	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 }
 
-void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_encoder *encoder;
 
-	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
+	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+		    !IS_BROXTON(dev)))
 		return;
 
 	/*
@@ -532,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
 			continue;
 
 		intel_dp = enc_to_intel_dp(&encoder->base);
-		intel_dp->pps_pipe = INVALID_PIPE;
+		if (IS_BROXTON(dev))
+			intel_dp->pps_reset = true;
+		else
+			intel_dp->pps_pipe = INVALID_PIPE;
+	}
+}
+
+struct pps_registers {
+	i915_reg_t pp_ctrl;
+	i915_reg_t pp_stat;
+	i915_reg_t pp_on;
+	i915_reg_t pp_off;
+	i915_reg_t pp_div;
+};
+
+static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
+				    struct intel_dp *intel_dp,
+				    struct pps_registers *regs)
+{
+	memset(regs, 0, sizeof(*regs));
+
+	if (IS_BROXTON(dev_priv)) {
+		int idx = bxt_power_sequencer_idx(intel_dp);
+
+		regs->pp_ctrl = BXT_PP_CONTROL(idx);
+		regs->pp_stat = BXT_PP_STATUS(idx);
+		regs->pp_on = BXT_PP_ON_DELAYS(idx);
+		regs->pp_off = BXT_PP_OFF_DELAYS(idx);
+	} else if (HAS_PCH_SPLIT(dev_priv)) {
+		regs->pp_ctrl = PCH_PP_CONTROL;
+		regs->pp_stat = PCH_PP_STATUS;
+		regs->pp_on = PCH_PP_ON_DELAYS;
+		regs->pp_off = PCH_PP_OFF_DELAYS;
+		regs->pp_div = PCH_PP_DIVISOR;
+	} else {
+		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+		regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
+		regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
+		regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
+		regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
+		regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
 	}
 }
 
 static i915_reg_t
 _pp_ctrl_reg(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct pps_registers regs;
 
-	if (IS_BROXTON(dev))
-		return BXT_PP_CONTROL(0);
-	else if (HAS_PCH_SPLIT(dev))
-		return PCH_PP_CONTROL;
-	else
-		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+				&regs);
+
+	return regs.pp_ctrl;
 }
 
 static i915_reg_t
 _pp_stat_reg(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp_to_dev(intel_dp);
+	struct pps_registers regs;
 
-	if (IS_BROXTON(dev))
-		return BXT_PP_STATUS(0);
-	else if (HAS_PCH_SPLIT(dev))
-		return PCH_PP_STATUS;
-	else
-		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
+				&regs);
+
+	return regs.pp_stat;
 }
 
 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
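Note: intel_pps_get_registers() replaces three separate BXT/PCH/VLV if-else ladders with a single lookup that fills a pps_registers bundle. A self-contained sketch of the same shape, using placeholder register offsets (the real values live in i915_reg.h):

	#include <stdio.h>
	#include <string.h>

	typedef unsigned int reg_t;

	/* Same shape as the diff's pps_registers; offsets are placeholders. */
	struct pps_registers { reg_t pp_ctrl, pp_stat, pp_on, pp_off, pp_div; };

	enum platform { PLAT_BXT, PLAT_PCH, PLAT_VLV };

	static void pps_get_registers(enum platform p, int idx,
				      struct pps_registers *regs)
	{
		memset(regs, 0, sizeof(*regs));	/* pp_div stays 0 on BXT */

		switch (p) {
		case PLAT_BXT:	/* two instances, selected by idx */
			regs->pp_ctrl = 0x1000 + idx * 0x100;
			regs->pp_on = regs->pp_ctrl + 8;
			break;
		case PLAT_PCH:
			regs->pp_ctrl = 0x2000;
			regs->pp_on = 0x2008;
			regs->pp_div = 0x2010;
			break;
		case PLAT_VLV:	/* per-pipe banks in the real driver */
			regs->pp_ctrl = 0x3000;
			regs->pp_on = 0x3008;
			regs->pp_div = 0x3010;
			break;
		}
	}

	int main(void)
	{
		struct pps_registers regs;

		pps_get_registers(PLAT_BXT, 1, &regs);
		printf("pp_ctrl %#x pp_div %#x\n", regs.pp_ctrl, regs.pp_div);
		return 0;
	}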
@@ -570,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
 						 edp_notifier);
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (!is_edp(intel_dp) || code != SYS_RESTART)
 		return 0;
@@ -601,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
 static bool edp_have_panel_power(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -615,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -630,7 +699,7 @@ static void
 intel_dp_check_edp(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (!is_edp(intel_dp))
 		return;
@@ -648,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
 	uint32_t status;
 	bool done;
@@ -658,7 +727,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
 		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
 					  msecs_to_jiffies_timeout(10));
 	else
-		done = wait_for_atomic(C, 10) == 0;
+		done = wait_for(C, 10) == 0;
 	if (!done)
 		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
 			  has_aux_irq);
@@ -781,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
 	uint32_t aux_clock_divider;
 	int i, ret, recv_bytes;
@@ -1180,35 +1249,18 @@ intel_dp_aux_fini(struct intel_dp *intel_dp)
 	kfree(intel_dp->aux.name);
 }
 
-static int
+static void
 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	enum port port = intel_dig_port->port;
-	int ret;
 
 	intel_aux_reg_init(intel_dp);
+	drm_dp_aux_init(&intel_dp->aux);
 
+	/* Failure to allocate our preferred name is not critical */
 	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
-	if (!intel_dp->aux.name)
-		return -ENOMEM;
-
-	intel_dp->aux.dev = connector->base.kdev;
 	intel_dp->aux.transfer = intel_dp_aux_transfer;
-
-	DRM_DEBUG_KMS("registering %s bus for %s\n",
-		      intel_dp->aux.name,
-		      connector->base.kdev->kobj.name);
-
-	ret = drm_dp_aux_register(&intel_dp->aux);
-	if (ret < 0) {
-		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
-			  intel_dp->aux.name, ret);
-		kfree(intel_dp->aux.name);
-		return ret;
-	}
-
-	return 0;
 }
 
 static int
@@ -1421,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1449,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
 		pipe_config->has_pch_encoder = true;
 
-	pipe_config->has_dp_encoder = true;
 	pipe_config->has_drrs = false;
 	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
 
@@ -1605,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 static void intel_dp_prepare(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -1693,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
 #define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
 #define IDLE_CYCLE_VALUE	(0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
 
+static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
+				   struct intel_dp *intel_dp);
+
 static void wait_panel_status(struct intel_dp *intel_dp,
 			      u32 mask,
 			      u32 value)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
+	intel_pps_verify_state(dev_priv, intel_dp);
+
 	pp_stat_reg = _pp_stat_reg(intel_dp);
 	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
@@ -1711,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
 			I915_READ(pp_stat_reg),
 			I915_READ(pp_ctrl_reg));
 
-	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
-		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
+	if (intel_wait_for_register(dev_priv,
+				    pp_stat_reg, mask, value,
+				    5000))
 		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
 			  I915_READ(pp_stat_reg),
 			  I915_READ(pp_ctrl_reg));
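Note: intel_wait_for_register() folds the open-coded _wait_for() poll into a shared helper; the timeout is now expressed in milliseconds (5000) rather than a microsecond budget plus sampling interval. A user-space approximation of the masked-poll contract, with a stubbed register read standing in for I915_READ():

	#include <stdint.h>
	#include <time.h>

	/* Stubbed register read; the driver uses I915_READ(reg) here. */
	static volatile uint32_t fake_status = 0x80000000;

	static uint32_t read_reg(uint32_t reg)
	{
		(void)reg;
		return fake_status;
	}

	/* Poll until (read_reg(reg) & mask) == value, or timeout_ms elapses.
	 * Returns 0 on success, -1 on timeout. */
	static int wait_for_register(uint32_t reg, uint32_t mask,
				     uint32_t value, unsigned int timeout_ms)
	{
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			if ((read_reg(reg) & mask) == value)
				return 0;
			clock_gettime(CLOCK_MONOTONIC, &now);
			if ((now.tv_sec - start.tv_sec) * 1000 +
			    (now.tv_nsec - start.tv_nsec) / 1000000 >=
			    (long)timeout_ms)
				return -1;
		}
	}

	int main(void)
	{
		/* The panel-status wait above maps to mask == value polling. */
		return wait_for_register(0, 0x80000000, 0x80000000, 5000);
	}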
@@ -1772,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 control;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1795,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 	u32 pp;
 	i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -1868,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_digital_port *intel_dig_port =
 		dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1937,8 +1994,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
  */
 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
-	struct drm_i915_private *dev_priv =
-		intel_dp_to_dev(intel_dp)->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
@@ -1959,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 static void edp_panel_on(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pp;
 	i915_reg_t pp_ctrl_reg;
 
@@ -2020,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 	u32 pp;
 	i915_reg_t pp_ctrl_reg;
@@ -2072,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pp;
 	i915_reg_t pp_ctrl_reg;
 
@@ -2113,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pp;
 	i915_reg_t pp_ctrl_reg;
 
@@ -2229,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 	 * 2. Program DP PLL enable
 	 */
 	if (IS_GEN5(dev_priv))
-		intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+		intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
 
 	intel_dp->DP |= DP_PLL_ENABLE;
 
@@ -2294,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 	u32 tmp;
 	bool ret;
@@ -2347,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	u32 tmp, flags = 0;
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
@@ -2385,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
 
-	pipe_config->has_dp_encoder = true;
-
 	pipe_config->lane_count =
 		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
 
@@ -2471,7 +2525,7 @@ static void chv_post_disable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_dp_link_down(intel_dp);
 
@@ -2490,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
 
 	if (HAS_DDI(dev)) {
@@ -2570,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
 static void intel_dp_enable_port(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc =
 		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
 
@@ -2599,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 	enum pipe pipe = crtc->pipe;
@@ -2672,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
 	enum pipe pipe = intel_dp->pps_pipe;
 	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
 
@@ -2698,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
 static void vlv_steal_power_sequencer(struct drm_device *dev,
 				      enum pipe pipe)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2736,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *encoder = &intel_dig_port->base;
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2824,7 +2878,7 @@ uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = dp_to_dig_port(intel_dp)->port;
 
 	if (IS_BROXTON(dev))
@@ -3242,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
 	uint32_t val;
 
@@ -3264,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
 	if (port == PORT_A)
 		return;
 
-	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
-		     1))
+	if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+				    DP_TP_STATUS_IDLE_DONE,
+				    DP_TP_STATUS_IDLE_DONE,
+				    1))
 		DRM_ERROR("Timed out waiting for DP idle patterns\n");
 }
 
@@ -3276,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
 	enum port port = intel_dig_port->port;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t DP = intel_dp->DP;
 
 	if (WARN_ON(HAS_DDI(dev)))
@@ -3328,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 		I915_WRITE(intel_dp->output_reg, DP);
 		POSTING_READ(intel_dp->output_reg);
 
-		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+		intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
@@ -3343,7 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
 			     sizeof(intel_dp->dpcd)) < 0)
@@ -4194,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
 	}
 
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
-		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		intel_encoder->type = INTEL_OUTPUT_DP;
 
 	intel_dp_probe_oui(intel_dp);
 
@@ -4270,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		/* MST devices are disconnected from a monitor POV */
 		intel_dp_unset_edid(intel_dp);
 		if (intel_encoder->type != INTEL_OUTPUT_EDP)
-			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+			intel_encoder->type = INTEL_OUTPUT_DP;
 		return connector_status_disconnected;
 	}
 
@@ -4309,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector)
 	intel_display_power_put(dev_priv, power_domain);
 
 	if (intel_encoder->type != INTEL_OUTPUT_EDP)
-		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+		intel_encoder->type = INTEL_OUTPUT_DP;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4358,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector,
 		     struct drm_property *property,
 		     uint64_t val)
 {
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_connector *intel_connector = to_intel_connector(connector);
 	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4446,6 +4502,25 @@ done:
 	return 0;
 }
 
+static int
+intel_dp_connector_register(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	int ret;
+
+	ret = intel_connector_register(connector);
+	if (ret)
+		return ret;
+
+	i915_debugfs_connector_add(connector);
+
+	DRM_DEBUG_KMS("registering %s bus for %s\n",
+		      intel_dp->aux.name, connector->kdev->kobj.name);
+
+	intel_dp->aux.dev = connector->kdev;
+	return drm_dp_aux_register(&intel_dp->aux);
+}
+
 static void
 intel_dp_connector_unregister(struct drm_connector *connector)
 {
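Note: registering the DP AUX channel now happens in the connector's .late_register hook (wired into intel_dp_connector_funcs below), i.e. only once connector->kdev exists, and is mirrored by .early_unregister on teardown. A runnable sketch of that pairing with simplified types:

	#include <stdio.h>

	/* Simplified model of DRM's late_register/early_unregister pairing:
	 * late_register runs after the userspace-visible device is created,
	 * early_unregister runs before it is torn down. */
	struct connector_funcs {
		int  (*late_register)(void);
		void (*early_unregister)(void);
	};

	static int my_late_register(void)
	{
		printf("register aux bus (device node now exists)\n");
		return 0;
	}

	static void my_early_unregister(void)
	{
		printf("unregister aux bus (device node still exists)\n");
	}

	static const struct connector_funcs funcs = {
		.late_register = my_late_register,
		.early_unregister = my_early_unregister,
	};

	int main(void)
	{
		if (funcs.late_register())
			return 1;
		funcs.early_unregister();
		return 0;
	}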
@@ -4521,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4544,13 +4619,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
 
 void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
-	struct intel_dp *intel_dp;
+	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+	if (!HAS_DDI(dev_priv))
+		intel_dp->DP = I915_READ(intel_dp->output_reg);
 
 	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
 		return;
 
-	intel_dp = enc_to_intel_dp(encoder);
-
 	pps_lock(intel_dp);
 
 	/*
@@ -4572,6 +4649,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_dp_set_property,
 	.atomic_get_property = intel_connector_atomic_get_property,
+	.late_register = intel_dp_connector_register,
 	.early_unregister = intel_dp_connector_unregister,
 	.destroy = intel_dp_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -4594,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
 	struct intel_encoder *intel_encoder = &intel_dig_port->base;
 	struct drm_device *dev = intel_dig_port->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum intel_display_power_domain power_domain;
 	enum irqreturn ret = IRQ_NONE;
 
 	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
 	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
-		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+		intel_dig_port->base.type = INTEL_OUTPUT_DP;
 
 	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
 		/*
@@ -4662,7 +4740,7 @@ put_power:
 /* check the VBT to see whether the eDP is on another port */
 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/*
 	 * eDP not supported on g4x. so bail out early just
@@ -4704,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
 }
 
 static void
-intel_dp_init_panel_power_sequencer(struct drm_device *dev,
-				    struct intel_dp *intel_dp)
+intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
+			   struct intel_dp *intel_dp, struct edp_power_seq *seq)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct edp_power_seq cur, vbt, spec,
-		*final = &intel_dp->pps_delays;
 	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
-	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+	struct pps_registers regs;
 
-	lockdep_assert_held(&dev_priv->pps_mutex);
-
-	/* already initialized? */
-	if (final->t11_t12 != 0)
-		return;
-
-	if (IS_BROXTON(dev)) {
-		/*
-		 * TODO: BXT has 2 sets of PPS registers.
-		 * Correct Register for Broxton need to be identified
-		 * using VBT. hardcoding for now
-		 */
-		pp_ctrl_reg = BXT_PP_CONTROL(0);
-		pp_on_reg = BXT_PP_ON_DELAYS(0);
-		pp_off_reg = BXT_PP_OFF_DELAYS(0);
-	} else if (HAS_PCH_SPLIT(dev)) {
-		pp_ctrl_reg = PCH_PP_CONTROL;
-		pp_on_reg = PCH_PP_ON_DELAYS;
-		pp_off_reg = PCH_PP_OFF_DELAYS;
-		pp_div_reg = PCH_PP_DIVISOR;
-	} else {
-		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
-
-		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
-		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
-		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
-		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
-	}
+	intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
 	/* Workaround: Need to write PP_CONTROL with the unlock key as
 	 * the very first thing. */
 	pp_ctl = ironlake_get_pp_control(intel_dp);
 
-	pp_on = I915_READ(pp_on_reg);
-	pp_off = I915_READ(pp_off_reg);
-	if (!IS_BROXTON(dev)) {
-		I915_WRITE(pp_ctrl_reg, pp_ctl);
-		pp_div = I915_READ(pp_div_reg);
+	pp_on = I915_READ(regs.pp_on);
+	pp_off = I915_READ(regs.pp_off);
+	if (!IS_BROXTON(dev_priv)) {
+		I915_WRITE(regs.pp_ctrl, pp_ctl);
+		pp_div = I915_READ(regs.pp_div);
 	}
 
 	/* Pull timing values out of registers */
-	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+	seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
 		PANEL_POWER_UP_DELAY_SHIFT;
 
-	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+	seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
 		PANEL_LIGHT_ON_DELAY_SHIFT;
 
-	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+	seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
 		PANEL_LIGHT_OFF_DELAY_SHIFT;
 
-	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+	seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
 		PANEL_POWER_DOWN_DELAY_SHIFT;
 
-	if (IS_BROXTON(dev)) {
+	if (IS_BROXTON(dev_priv)) {
 		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
 			BXT_POWER_CYCLE_DELAY_SHIFT;
 		if (tmp > 0)
-			cur.t11_t12 = (tmp - 1) * 1000;
+			seq->t11_t12 = (tmp - 1) * 1000;
 		else
-			cur.t11_t12 = 0;
+			seq->t11_t12 = 0;
 	} else {
-		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+		seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
 			PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
 	}
+}
+
+static void
+intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
+{
+	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      state_name,
+		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
+}
+
+static void
+intel_pps_verify_state(struct drm_i915_private *dev_priv,
+		       struct intel_dp *intel_dp)
+{
+	struct edp_power_seq hw;
+	struct edp_power_seq *sw = &intel_dp->pps_delays;
+
+	intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
+
+	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
+	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
+		DRM_ERROR("PPS state mismatch\n");
+		intel_pps_dump_state("sw", sw);
+		intel_pps_dump_state("hw", &hw);
+	}
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct edp_power_seq cur, vbt, spec,
+		*final = &intel_dp->pps_delays;
+
+	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+	/* already initialized? */
+	if (final->t11_t12 != 0)
+		return;
+
+	intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
+
+	intel_pps_dump_state("cur", &cur);
 
 	vbt = dev_priv->vbt.edp.pps;
 
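Note: intel_pps_verify_state() is invoked from wait_panel_status() on every panel wait, comparing the cached software delays against a fresh hardware readout and dumping both on mismatch. A standalone sketch of that compare-and-dump pattern:

	#include <stdio.h>
	#include <string.h>

	/* Mirror of the edp_power_seq fields compared in the diff. */
	struct edp_power_seq { int t1_t3, t8, t9, t10, t11_t12; };

	static void dump_state(const char *name, const struct edp_power_seq *s)
	{
		printf("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		       name, s->t1_t3, s->t8, s->t9, s->t10, s->t11_t12);
	}

	/* Complain loudly when HW and SW disagree; never "fix up" silently. */
	static void verify_state(const struct edp_power_seq *sw,
				 const struct edp_power_seq *hw)
	{
		if (memcmp(sw, hw, sizeof(*sw)) != 0) {
			fprintf(stderr, "PPS state mismatch\n");
			dump_state("sw", sw);
			dump_state("hw", hw);
		}
	}

	int main(void)
	{
		struct edp_power_seq sw = { 210, 1, 1, 500, 6100 };
		struct edp_power_seq hw = sw;

		verify_state(&sw, &hw);	/* silent: states agree */
		hw.t10 = 50;
		verify_state(&sw, &hw);	/* logs the mismatch */
		return 0;
	}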
@@ -4795,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	 * too. */
 	spec.t11_t12 = (510 + 100) * 10;
 
-	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
-		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+	intel_pps_dump_state("vbt", &vbt);
 
 	/* Use the max of the register settings and vbt. If both are
 	 * unset, fall back to the spec limits. */
@@ -4824,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 
 	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
 		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+	/*
+	 * We override the HW backlight delays to 1 because we do manual waits
+	 * on them. For T8, even BSpec recommends doing it. For T9, if we
+	 * don't do this, we'll end up waiting for the backlight off delay
+	 * twice: once when we do the manual sleep, and once when we disable
+	 * the panel and wait for the PP_STATUS bit to become zero.
+	 */
+	final->t8 = 1;
+	final->t9 = 1;
 }
 
 static void
 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 					      struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 pp_on, pp_off, pp_div, port_sel = 0;
 	int div = dev_priv->rawclk_freq / 1000;
-	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
+	struct pps_registers regs;
 	enum port port = dp_to_dig_port(intel_dp)->port;
 	const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
 	lockdep_assert_held(&dev_priv->pps_mutex);
 
-	if (IS_BROXTON(dev)) {
-		/*
-		 * TODO: BXT has 2 sets of PPS registers.
-		 * Correct Register for Broxton need to be identified
-		 * using VBT. hardcoding for now
-		 */
-		pp_ctrl_reg = BXT_PP_CONTROL(0);
-		pp_on_reg = BXT_PP_ON_DELAYS(0);
-		pp_off_reg = BXT_PP_OFF_DELAYS(0);
-
-	} else if (HAS_PCH_SPLIT(dev)) {
-		pp_on_reg = PCH_PP_ON_DELAYS;
-		pp_off_reg = PCH_PP_OFF_DELAYS;
-		pp_div_reg = PCH_PP_DIVISOR;
-	} else {
-		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+	intel_pps_get_registers(dev_priv, intel_dp, &regs);
 
-		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
-		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
-		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
-	}
-
-	/*
-	 * And finally store the new values in the power sequencer. The
-	 * backlight delays are set to 1 because we do manual waits on them. For
-	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
-	 * we'll end up waiting for the backlight off delay twice: once when we
-	 * do the manual sleep, and once when we disable the panel and wait for
-	 * the PP_STATUS bit to become zero.
-	 */
 	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
-		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
-	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+		(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+	pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
 		(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
 	/* Compute the divisor for the pp clock, simply match the Bspec
 	 * formula. */
 	if (IS_BROXTON(dev)) {
-		pp_div = I915_READ(pp_ctrl_reg);
+		pp_div = I915_READ(regs.pp_ctrl);
 		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
 		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
 			   << BXT_POWER_CYCLE_DELAY_SHIFT);
@@ -4899,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
 
 	pp_on |= port_sel;
 
-	I915_WRITE(pp_on_reg, pp_on);
-	I915_WRITE(pp_off_reg, pp_off);
+	I915_WRITE(regs.pp_on, pp_on);
+	I915_WRITE(regs.pp_off, pp_off);
 	if (IS_BROXTON(dev))
-		I915_WRITE(pp_ctrl_reg, pp_div);
+		I915_WRITE(regs.pp_ctrl, pp_div);
 	else
-		I915_WRITE(pp_div_reg, pp_div);
+		I915_WRITE(regs.pp_div, pp_div);
 
 	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
-		      I915_READ(pp_on_reg),
-		      I915_READ(pp_off_reg),
+		      I915_READ(regs.pp_on),
+		      I915_READ(regs.pp_off),
 		      IS_BROXTON(dev) ?
-		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
-		      I915_READ(pp_div_reg));
+		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
+		      I915_READ(regs.pp_div));
 }
 
 /**
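Note: because intel_dp_init_panel_power_sequencer() now forces the cached t8/t9 delays to 1 up front (see the final->t8/t9 assignments above), the register writer can pack seq->t8/t9 directly instead of hard-coding literal 1s. A sketch of the field packing, with placeholder shift values (the real PANEL_* definitions live in i915_reg.h):

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder shifts; not the hardware's actual bit layout. */
	#define POWER_UP_DELAY_SHIFT	16
	#define LIGHT_ON_DELAY_SHIFT	0

	struct edp_power_seq { uint32_t t1_t3, t8; };

	static uint32_t pack_pp_on(const struct edp_power_seq *seq)
	{
		/* Both delays come from the cached sequence; no literal 1s. */
		return (seq->t1_t3 << POWER_UP_DELAY_SHIFT) |
		       (seq->t8 << LIGHT_ON_DELAY_SHIFT);
	}

	int main(void)
	{
		struct edp_power_seq seq = { .t1_t3 = 210, .t8 = 1 };

		printf("PP_ON %#x\n", pack_pp_on(&seq));
		return 0;
	}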
@@ -4928,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
  */
 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *encoder;
 	struct intel_digital_port *dig_port = NULL;
 	struct intel_dp *intel_dp = dev_priv->drrs.dp;
@@ -5027,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5059,7 +5129,7 @@ unlock:
 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp_to_dev(intel_dp);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5074,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
 	}
 
 	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(dev_priv->dev,
+		intel_dp_set_drrs_state(&dev_priv->drm,
 			intel_dp->attached_connector->panel.
 			fixed_mode->vrefresh);
 
 	dev_priv->drrs.dp = NULL;
 	mutex_unlock(&dev_priv->drrs.mutex);
@@ -5106,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
 		goto unlock;
 
 	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
-		intel_dp_set_drrs_state(dev_priv->dev,
+		intel_dp_set_drrs_state(&dev_priv->drm,
 			intel_dp->attached_connector->panel.
 			downclock_mode->vrefresh);
 
 unlock:
 	mutex_unlock(&dev_priv->drrs.mutex);
@@ -5127,7 +5197,7 @@ unlock:
 void intel_edp_drrs_invalidate(struct drm_device *dev,
 			       unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -5150,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 
 	/* invalidate means busy screen hence upclock */
 	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
-		intel_dp_set_drrs_state(dev_priv->dev,
+		intel_dp_set_drrs_state(&dev_priv->drm,
 			dev_priv->drrs.dp->attached_connector->panel.
 			fixed_mode->vrefresh);
 
 	mutex_unlock(&dev_priv->drrs.mutex);
 }
@@ -5172,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
5172void intel_edp_drrs_flush(struct drm_device *dev, 5242void intel_edp_drrs_flush(struct drm_device *dev,
5173 unsigned frontbuffer_bits) 5243 unsigned frontbuffer_bits)
5174{ 5244{
5175 struct drm_i915_private *dev_priv = dev->dev_private; 5245 struct drm_i915_private *dev_priv = to_i915(dev);
5176 struct drm_crtc *crtc; 5246 struct drm_crtc *crtc;
5177 enum pipe pipe; 5247 enum pipe pipe;
5178 5248
@@ -5195,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
5195 5265
5196 /* flush means busy screen hence upclock */ 5266 /* flush means busy screen hence upclock */
5197 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5267 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5198 intel_dp_set_drrs_state(dev_priv->dev, 5268 intel_dp_set_drrs_state(&dev_priv->drm,
5199 dev_priv->drrs.dp->attached_connector->panel. 5269 dev_priv->drrs.dp->attached_connector->panel.
5200 fixed_mode->vrefresh); 5270 fixed_mode->vrefresh);
5201 5271
5202 /* 5272 /*
5203 * flush also means no more activity hence schedule downclock, if all 5273 * flush also means no more activity hence schedule downclock, if all
@@ -5265,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
5265{ 5335{
5266 struct drm_connector *connector = &intel_connector->base; 5336 struct drm_connector *connector = &intel_connector->base;
5267 struct drm_device *dev = connector->dev; 5337 struct drm_device *dev = connector->dev;
5268 struct drm_i915_private *dev_priv = dev->dev_private; 5338 struct drm_i915_private *dev_priv = to_i915(dev);
5269 struct drm_display_mode *downclock_mode = NULL; 5339 struct drm_display_mode *downclock_mode = NULL;
5270 5340
5271 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 5341 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5303,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5304 struct intel_encoder *intel_encoder = &intel_dig_port->base; 5374 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5305 struct drm_device *dev = intel_encoder->base.dev; 5375 struct drm_device *dev = intel_encoder->base.dev;
5306 struct drm_i915_private *dev_priv = dev->dev_private; 5376 struct drm_i915_private *dev_priv = to_i915(dev);
5307 struct drm_display_mode *fixed_mode = NULL; 5377 struct drm_display_mode *fixed_mode = NULL;
5308 struct drm_display_mode *downclock_mode = NULL; 5378 struct drm_display_mode *downclock_mode = NULL;
5309 bool has_dpcd; 5379 bool has_dpcd;
@@ -5314,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5314 if (!is_edp(intel_dp)) 5384 if (!is_edp(intel_dp))
5315 return true; 5385 return true;
5316 5386
5387 /*
5388 * On IBX/CPT we may get here with LVDS already registered. Since the
5389 * driver uses the only internal power sequencer available for both
5390 * eDP and LVDS bail out early in this case to prevent interfering
5391 * with an already powered-on LVDS power sequencer.
5392 */
5393 if (intel_get_lvds_encoder(dev)) {
5394 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
5395 DRM_INFO("LVDS was detected, not registering eDP\n");
5396
5397 return false;
5398 }
5399
5317 pps_lock(intel_dp); 5400 pps_lock(intel_dp);
5401
5402 intel_dp_init_panel_power_timestamps(intel_dp);
5403
5404 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5405 vlv_initial_power_sequencer_setup(intel_dp);
5406 } else {
5407 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5408 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5409 }
5410
5318 intel_edp_panel_vdd_sanitize(intel_dp); 5411 intel_edp_panel_vdd_sanitize(intel_dp);
5412
5319 pps_unlock(intel_dp); 5413 pps_unlock(intel_dp);
5320 5414
5321 /* Cache DPCD and EDID for edp. */ 5415 /* Cache DPCD and EDID for edp. */
@@ -5329,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5329 } else { 5423 } else {
5330 /* if this fails, presume the device is a ghost */ 5424 /* if this fails, presume the device is a ghost */
5331 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 5425 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5332 return false; 5426 goto out_vdd_off;
5333 } 5427 }
5334 5428
5335 /* We now know it's not a ghost, init power sequence regs. */
5336 pps_lock(intel_dp);
5337 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5338 pps_unlock(intel_dp);
5339
5340 mutex_lock(&dev->mode_config.mutex); 5429 mutex_lock(&dev->mode_config.mutex);
5341 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 5430 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5342 if (edid) { 5431 if (edid) {
@@ -5404,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5404 intel_panel_setup_backlight(connector, pipe); 5493 intel_panel_setup_backlight(connector, pipe);
5405 5494
5406 return true; 5495 return true;
5496
5497out_vdd_off:
5498 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5499 /*
5500 * vdd might still be enabled due to the delayed vdd off.
5501 * Make sure vdd is actually turned off here.
5502 */
5503 pps_lock(intel_dp);
5504 edp_panel_vdd_off_sync(intel_dp);
5505 pps_unlock(intel_dp);
5506
5507 return false;
5407} 5508}
5408 5509
5409bool 5510bool
@@ -5414,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5414 struct intel_dp *intel_dp = &intel_dig_port->dp; 5515 struct intel_dp *intel_dp = &intel_dig_port->dp;
5415 struct intel_encoder *intel_encoder = &intel_dig_port->base; 5516 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5416 struct drm_device *dev = intel_encoder->base.dev; 5517 struct drm_device *dev = intel_encoder->base.dev;
5417 struct drm_i915_private *dev_priv = dev->dev_private; 5518 struct drm_i915_private *dev_priv = to_i915(dev);
5418 enum port port = intel_dig_port->port; 5519 enum port port = intel_dig_port->port;
5419 int type, ret; 5520 int type;
5420 5521
5421 if (WARN(intel_dig_port->max_lanes < 1, 5522 if (WARN(intel_dig_port->max_lanes < 1,
5422 "Not enough lanes (%d) for DP on port %c\n", 5523 "Not enough lanes (%d) for DP on port %c\n",
@@ -5475,11 +5576,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5475 connector->interlace_allowed = true; 5576 connector->interlace_allowed = true;
5476 connector->doublescan_allowed = 0; 5577 connector->doublescan_allowed = 0;
5477 5578
5579 intel_dp_aux_init(intel_dp, intel_connector);
5580
5478 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 5581 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5479 edp_panel_vdd_work); 5582 edp_panel_vdd_work);
5480 5583
5481 intel_connector_attach_encoder(intel_connector, intel_encoder); 5584 intel_connector_attach_encoder(intel_connector, intel_encoder);
5482 drm_connector_register(connector);
5483 5585
5484 if (HAS_DDI(dev)) 5586 if (HAS_DDI(dev))
5485 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 5587 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -5509,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5509 BUG(); 5611 BUG();
5510 } 5612 }
5511 5613
5512 if (is_edp(intel_dp)) {
5513 pps_lock(intel_dp);
5514 intel_dp_init_panel_power_timestamps(intel_dp);
5515 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5516 vlv_initial_power_sequencer_setup(intel_dp);
5517 else
5518 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5519 pps_unlock(intel_dp);
5520 }
5521
5522 ret = intel_dp_aux_init(intel_dp, intel_connector);
5523 if (ret)
5524 goto fail;
5525
5526 /* init MST on ports that can support it */ 5614 /* init MST on ports that can support it */
5527 if (HAS_DP_MST(dev) && 5615 if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
5528 (port == PORT_B || port == PORT_C || port == PORT_D)) 5616 (port == PORT_B || port == PORT_C || port == PORT_D))
5529 intel_dp_mst_encoder_init(intel_dig_port, 5617 intel_dp_mst_encoder_init(intel_dig_port,
5530 intel_connector->base.base.id); 5618 intel_connector->base.base.id);
@@ -5546,22 +5634,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5546 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 5634 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5547 } 5635 }
5548 5636
5549 i915_debugfs_connector_add(connector);
5550
5551 return true; 5637 return true;
5552 5638
5553fail: 5639fail:
5554 if (is_edp(intel_dp)) {
5555 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5556 /*
5557 * vdd might still be enabled due to the delayed vdd off.
5558 * Make sure vdd is actually turned off here.
5559 */
5560 pps_lock(intel_dp);
5561 edp_panel_vdd_off_sync(intel_dp);
5562 pps_unlock(intel_dp);
5563 }
5564 drm_connector_unregister(connector);
5565 drm_connector_cleanup(connector); 5640 drm_connector_cleanup(connector);
5566 5641
5567 return false; 5642 return false;
@@ -5571,7 +5646,7 @@ bool intel_dp_init(struct drm_device *dev,
5571 i915_reg_t output_reg, 5646 i915_reg_t output_reg,
5572 enum port port) 5647 enum port port)
5573{ 5648{
5574 struct drm_i915_private *dev_priv = dev->dev_private; 5649 struct drm_i915_private *dev_priv = to_i915(dev);
5575 struct intel_digital_port *intel_dig_port; 5650 struct intel_digital_port *intel_dig_port;
5576 struct intel_encoder *intel_encoder; 5651 struct intel_encoder *intel_encoder;
5577 struct drm_encoder *encoder; 5652 struct drm_encoder *encoder;
@@ -5619,7 +5694,7 @@ bool intel_dp_init(struct drm_device *dev,
5619 intel_dig_port->dp.output_reg = output_reg; 5694 intel_dig_port->dp.output_reg = output_reg;
5620 intel_dig_port->max_lanes = 4; 5695 intel_dig_port->max_lanes = 4;
5621 5696
5622 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 5697 intel_encoder->type = INTEL_OUTPUT_DP;
5623 if (IS_CHERRYVIEW(dev)) { 5698 if (IS_CHERRYVIEW(dev)) {
5624 if (port == PORT_D) 5699 if (port == PORT_D)
5625 intel_encoder->crtc_mask = 1 << 2; 5700 intel_encoder->crtc_mask = 1 << 2;
@@ -5649,43 +5724,35 @@ err_connector_alloc:
5649 5724
5650void intel_dp_mst_suspend(struct drm_device *dev) 5725void intel_dp_mst_suspend(struct drm_device *dev)
5651{ 5726{
5652 struct drm_i915_private *dev_priv = dev->dev_private; 5727 struct drm_i915_private *dev_priv = to_i915(dev);
5653 int i; 5728 int i;
5654 5729
5655 /* disable MST */ 5730 /* disable MST */
5656 for (i = 0; i < I915_MAX_PORTS; i++) { 5731 for (i = 0; i < I915_MAX_PORTS; i++) {
5657 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; 5732 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5658 if (!intel_dig_port) 5733
5734 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5659 continue; 5735 continue;
5660 5736
5661 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { 5737 if (intel_dig_port->dp.is_mst)
5662 if (!intel_dig_port->dp.can_mst) 5738 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5663 continue;
5664 if (intel_dig_port->dp.is_mst)
5665 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5666 }
5667 } 5739 }
5668} 5740}
5669 5741
5670void intel_dp_mst_resume(struct drm_device *dev) 5742void intel_dp_mst_resume(struct drm_device *dev)
5671{ 5743{
5672 struct drm_i915_private *dev_priv = dev->dev_private; 5744 struct drm_i915_private *dev_priv = to_i915(dev);
5673 int i; 5745 int i;
5674 5746
5675 for (i = 0; i < I915_MAX_PORTS; i++) { 5747 for (i = 0; i < I915_MAX_PORTS; i++) {
5676 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; 5748 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
5677 if (!intel_dig_port) 5749 int ret;
5678 continue;
5679 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5680 int ret;
5681 5750
5682 if (!intel_dig_port->dp.can_mst) 5751 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
5683 continue; 5752 continue;
5684 5753
5685 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); 5754 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5686 if (ret != 0) { 5755 if (ret)
5687 intel_dp_check_mst_status(&intel_dig_port->dp); 5756 intel_dp_check_mst_status(&intel_dig_port->dp);
5688 }
5689 }
5690 } 5757 }
5691} 5758}
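
The dev->dev_private -> to_i915(dev) conversions repeated through this file are mechanical once the accessor exists. A minimal sketch, assuming struct drm_i915_private now embeds its struct drm_device as the drm member (consistent with the &dev_priv->drm hunks above); the exact definition in i915_drv.h may differ:

    static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
    {
        /* upcast from the embedded drm_device to the containing i915 state */
        return container_of(dev, struct drm_i915_private, drm);
    }

With the embedding in place, the reverse direction dev_priv->dev naturally becomes &dev_priv->drm, as in intel_edp_drrs_disable() above.
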
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9646816604be..68a005d729e9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
47 47
48 pipe_config->dp_encoder_is_mst = true; 48 pipe_config->dp_encoder_is_mst = true;
49 pipe_config->has_pch_encoder = false; 49 pipe_config->has_pch_encoder = false;
50 pipe_config->has_dp_encoder = true;
51 bpp = 24; 50 bpp = 24;
52 /* 51 /*
53 * for MST we always configure max link bw - the spec doesn't 52 * for MST we always configure max link bw - the spec doesn't
@@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
140 struct intel_digital_port *intel_dig_port = intel_mst->primary; 139 struct intel_digital_port *intel_dig_port = intel_mst->primary;
141 struct intel_dp *intel_dp = &intel_dig_port->dp; 140 struct intel_dp *intel_dp = &intel_dig_port->dp;
142 struct drm_device *dev = encoder->base.dev; 141 struct drm_device *dev = encoder->base.dev;
143 struct drm_i915_private *dev_priv = dev->dev_private; 142 struct drm_i915_private *dev_priv = to_i915(dev);
144 enum port port = intel_dig_port->port; 143 enum port port = intel_dig_port->port;
145 int ret; 144 int ret;
146 uint32_t temp; 145 uint32_t temp;
@@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
207 struct intel_digital_port *intel_dig_port = intel_mst->primary; 206 struct intel_digital_port *intel_dig_port = intel_mst->primary;
208 struct intel_dp *intel_dp = &intel_dig_port->dp; 207 struct intel_dp *intel_dp = &intel_dig_port->dp;
209 struct drm_device *dev = intel_dig_port->base.base.dev; 208 struct drm_device *dev = intel_dig_port->base.base.dev;
210 struct drm_i915_private *dev_priv = dev->dev_private; 209 struct drm_i915_private *dev_priv = to_i915(dev);
211 enum port port = intel_dig_port->port; 210 enum port port = intel_dig_port->port;
212 int ret; 211 int ret;
213 212
214 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); 213 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
215 214
216 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT), 215 if (intel_wait_for_register(dev_priv,
217 1)) 216 DP_TP_STATUS(port),
217 DP_TP_STATUS_ACT_SENT,
218 DP_TP_STATUS_ACT_SENT,
219 1))
218 DRM_ERROR("Timed out waiting for ACT sent\n"); 220 DRM_ERROR("Timed out waiting for ACT sent\n");
219 221
220 ret = drm_dp_check_act_status(&intel_dp->mst_mgr); 222 ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
239 struct intel_digital_port *intel_dig_port = intel_mst->primary; 241 struct intel_digital_port *intel_dig_port = intel_mst->primary;
240 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 242 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
241 struct drm_device *dev = encoder->base.dev; 243 struct drm_device *dev = encoder->base.dev;
242 struct drm_i915_private *dev_priv = dev->dev_private; 244 struct drm_i915_private *dev_priv = to_i915(dev);
243 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 245 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
244 u32 temp, flags = 0; 246 u32 temp, flags = 0;
245 247
246 pipe_config->has_dp_encoder = true;
247
248 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 248 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
249 if (temp & TRANS_DDI_PHSYNC) 249 if (temp & TRANS_DDI_PHSYNC)
250 flags |= DRM_MODE_FLAG_PHSYNC; 250 flags |= DRM_MODE_FLAG_PHSYNC;
@@ -336,6 +336,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
336 .fill_modes = drm_helper_probe_single_connector_modes, 336 .fill_modes = drm_helper_probe_single_connector_modes,
337 .set_property = intel_dp_mst_set_property, 337 .set_property = intel_dp_mst_set_property,
338 .atomic_get_property = intel_connector_atomic_get_property, 338 .atomic_get_property = intel_connector_atomic_get_property,
339 .late_register = intel_connector_register,
339 .early_unregister = intel_connector_unregister, 340 .early_unregister = intel_connector_unregister,
340 .destroy = intel_dp_mst_connector_destroy, 341 .destroy = intel_dp_mst_connector_destroy,
341 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 342 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
477{ 478{
478 struct intel_connector *intel_connector = to_intel_connector(connector); 479 struct intel_connector *intel_connector = to_intel_connector(connector);
479 struct drm_device *dev = connector->dev; 480 struct drm_device *dev = connector->dev;
481
480 drm_modeset_lock_all(dev); 482 drm_modeset_lock_all(dev);
481 intel_connector_add_to_fbdev(intel_connector); 483 intel_connector_add_to_fbdev(intel_connector);
482 drm_modeset_unlock_all(dev); 484 drm_modeset_unlock_all(dev);
485
483 drm_connector_register(&intel_connector->base); 486 drm_connector_register(&intel_connector->base);
484} 487}
485 488
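
The wait_for(I915_READ(reg) & bit, ...) call sites here and throughout this series migrate to intel_wait_for_register(). Its contract, as the call sites imply (treat this as an illustrative sketch, not the driver's implementation -- the real helper lives in intel_uncore.c and likely handles forcewake more carefully):

    /* Poll @reg until (read & @mask) == @value, or @timeout_ms elapses. */
    static int intel_wait_for_register_sketch(struct drm_i915_private *dev_priv,
                                              i915_reg_t reg,
                                              u32 mask, u32 value,
                                              unsigned int timeout_ms)
    {
        return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
    }

So intel_mst_enable_dp() above waits for DP_TP_STATUS_ACT_SENT to become set (mask and value are both the bit), while passing value == 0 with a non-zero mask, as the DSI conversions below do, waits for the bits to clear.
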
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 288da35572b4..047f48748944 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -168,7 +168,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
168{ 168{
169 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 169 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
170 struct drm_device *dev = encoder->base.dev; 170 struct drm_device *dev = encoder->base.dev;
171 struct drm_i915_private *dev_priv = dev->dev_private; 171 struct drm_i915_private *dev_priv = to_i915(dev);
172 struct intel_crtc *intel_crtc = 172 struct intel_crtc *intel_crtc =
173 to_intel_crtc(encoder->base.crtc); 173 to_intel_crtc(encoder->base.crtc);
174 enum dpio_channel ch = vlv_dport_to_channel(dport); 174 enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -250,7 +250,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
251 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 251 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
252 struct drm_device *dev = encoder->base.dev; 252 struct drm_device *dev = encoder->base.dev;
253 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = to_i915(dev);
254 struct intel_crtc *intel_crtc = 254 struct intel_crtc *intel_crtc =
255 to_intel_crtc(encoder->base.crtc); 255 to_intel_crtc(encoder->base.crtc);
256 enum dpio_channel ch = vlv_dport_to_channel(dport); 256 enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -400,7 +400,7 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
400{ 400{
401 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 401 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
402 struct drm_device *dev = encoder->base.dev; 402 struct drm_device *dev = encoder->base.dev;
403 struct drm_i915_private *dev_priv = dev->dev_private; 403 struct drm_i915_private *dev_priv = to_i915(dev);
404 struct intel_crtc *intel_crtc = 404 struct intel_crtc *intel_crtc =
405 to_intel_crtc(encoder->base.crtc); 405 to_intel_crtc(encoder->base.crtc);
406 enum dpio_channel port = vlv_dport_to_channel(dport); 406 enum dpio_channel port = vlv_dport_to_channel(dport);
@@ -429,7 +429,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
429 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 429 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
430 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 430 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
431 struct drm_device *dev = encoder->base.dev; 431 struct drm_device *dev = encoder->base.dev;
432 struct drm_i915_private *dev_priv = dev->dev_private; 432 struct drm_i915_private *dev_priv = to_i915(dev);
433 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 433 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
434 enum dpio_channel port = vlv_dport_to_channel(dport); 434 enum dpio_channel port = vlv_dport_to_channel(dport);
435 int pipe = intel_crtc->pipe; 435 int pipe = intel_crtc->pipe;
@@ -457,7 +457,7 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
457void vlv_phy_reset_lanes(struct intel_encoder *encoder) 457void vlv_phy_reset_lanes(struct intel_encoder *encoder)
458{ 458{
459 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 459 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
460 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 460 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
461 struct intel_crtc *intel_crtc = 461 struct intel_crtc *intel_crtc =
462 to_intel_crtc(encoder->base.crtc); 462 to_intel_crtc(encoder->base.crtc);
463 enum dpio_channel port = vlv_dport_to_channel(dport); 463 enum dpio_channel port = vlv_dport_to_channel(dport);
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index c0eff1571731..5c1f2d235ffa 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
83void intel_prepare_shared_dpll(struct intel_crtc *crtc) 83void intel_prepare_shared_dpll(struct intel_crtc *crtc)
84{ 84{
85 struct drm_device *dev = crtc->base.dev; 85 struct drm_device *dev = crtc->base.dev;
86 struct drm_i915_private *dev_priv = dev->dev_private; 86 struct drm_i915_private *dev_priv = to_i915(dev);
87 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 87 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
88 88
89 if (WARN_ON(pll == NULL)) 89 if (WARN_ON(pll == NULL))
@@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
112void intel_enable_shared_dpll(struct intel_crtc *crtc) 112void intel_enable_shared_dpll(struct intel_crtc *crtc)
113{ 113{
114 struct drm_device *dev = crtc->base.dev; 114 struct drm_device *dev = crtc->base.dev;
115 struct drm_i915_private *dev_priv = dev->dev_private; 115 struct drm_i915_private *dev_priv = to_i915(dev);
116 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 116 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
117 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); 117 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
118 unsigned old_mask; 118 unsigned old_mask;
@@ -151,7 +151,7 @@ out:
151void intel_disable_shared_dpll(struct intel_crtc *crtc) 151void intel_disable_shared_dpll(struct intel_crtc *crtc)
152{ 152{
153 struct drm_device *dev = crtc->base.dev; 153 struct drm_device *dev = crtc->base.dev;
154 struct drm_i915_private *dev_priv = dev->dev_private; 154 struct drm_i915_private *dev_priv = to_i915(dev);
155 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 155 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
156 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); 156 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
157 157
@@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
191 enum intel_dpll_id range_min, 191 enum intel_dpll_id range_min,
192 enum intel_dpll_id range_max) 192 enum intel_dpll_id range_max)
193{ 193{
194 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 194 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
195 struct intel_shared_dpll *pll; 195 struct intel_shared_dpll *pll;
196 struct intel_shared_dpll_config *shared_dpll; 196 struct intel_shared_dpll_config *shared_dpll;
197 enum intel_dpll_id i; 197 enum intel_dpll_id i;
@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
331static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, 331static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
332 struct intel_shared_dpll *pll) 332 struct intel_shared_dpll *pll)
333{ 333{
334 struct drm_device *dev = dev_priv->dev; 334 struct drm_device *dev = &dev_priv->drm;
335 struct intel_crtc *crtc; 335 struct intel_crtc *crtc;
336 336
337 /* Make sure no transcoder is still depending on us. */ 337
@@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
713 pll = intel_find_shared_dpll(crtc, crtc_state, 713 pll = intel_find_shared_dpll(crtc, crtc_state,
714 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); 714 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
715 715
716 } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 716 } else if (encoder->type == INTEL_OUTPUT_DP ||
717 encoder->type == INTEL_OUTPUT_DP_MST || 717 encoder->type == INTEL_OUTPUT_DP_MST ||
718 encoder->type == INTEL_OUTPUT_EDP) { 718 encoder->type == INTEL_OUTPUT_EDP) {
719 enum intel_dpll_id pll_id; 719 enum intel_dpll_id pll_id;
@@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
856 I915_WRITE(regs[pll->id].ctl, 856 I915_WRITE(regs[pll->id].ctl,
857 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); 857 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
858 858
859 if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5)) 859 if (intel_wait_for_register(dev_priv,
860 DPLL_STATUS,
861 DPLL_LOCK(pll->id),
862 DPLL_LOCK(pll->id),
863 5))
860 DRM_ERROR("DPLL %d not locked\n", pll->id); 864 DRM_ERROR("DPLL %d not locked\n", pll->id);
861} 865}
862 866
@@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1222 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | 1226 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1223 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | 1227 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1224 wrpll_params.central_freq; 1228 wrpll_params.central_freq;
1225 } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 1229 } else if (encoder->type == INTEL_OUTPUT_DP ||
1226 encoder->type == INTEL_OUTPUT_DP_MST || 1230 encoder->type == INTEL_OUTPUT_DP_MST ||
1227 encoder->type == INTEL_OUTPUT_EDP) { 1231 encoder->type == INTEL_OUTPUT_EDP) {
1228 switch (crtc_state->port_clock / 2) { 1232 switch (crtc_state->port_clock / 2) {
@@ -1374,8 +1378,8 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
1374 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); 1378 I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
1375 POSTING_READ(BXT_PORT_PLL_ENABLE(port)); 1379 POSTING_READ(BXT_PORT_PLL_ENABLE(port));
1376 1380
1377 if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & 1381 if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
1378 PORT_PLL_LOCK), 200)) 1382 200))
1379 DRM_ERROR("PLL %d not locked\n", port); 1383 DRM_ERROR("PLL %d not locked\n", port);
1380 1384
1381 /* 1385 /*
@@ -1530,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1530 clk_div.m2_frac_en = clk_div.m2_frac != 0; 1534 clk_div.m2_frac_en = clk_div.m2_frac != 0;
1531 1535
1532 vco = best_clock.vco; 1536 vco = best_clock.vco;
1533 } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 1537 } else if (encoder->type == INTEL_OUTPUT_DP ||
1534 encoder->type == INTEL_OUTPUT_EDP) { 1538 encoder->type == INTEL_OUTPUT_EDP) {
1535 int i; 1539 int i;
1536 1540
@@ -1632,7 +1636,7 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1632 1636
1633static void intel_ddi_pll_init(struct drm_device *dev) 1637static void intel_ddi_pll_init(struct drm_device *dev)
1634{ 1638{
1635 struct drm_i915_private *dev_priv = dev->dev_private; 1639 struct drm_i915_private *dev_priv = to_i915(dev);
1636 1640
1637 if (INTEL_GEN(dev_priv) < 9) { 1641 if (INTEL_GEN(dev_priv) < 9) {
1638 uint32_t val = I915_READ(LCPLL_CTL); 1642 uint32_t val = I915_READ(LCPLL_CTL);
@@ -1719,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
1719 1723
1720void intel_shared_dpll_init(struct drm_device *dev) 1724void intel_shared_dpll_init(struct drm_device *dev)
1721{ 1725{
1722 struct drm_i915_private *dev_priv = dev->dev_private; 1726 struct drm_i915_private *dev_priv = to_i915(dev);
1723 const struct intel_dpll_mgr *dpll_mgr = NULL; 1727 const struct intel_dpll_mgr *dpll_mgr = NULL;
1724 const struct dpll_info *dpll_info; 1728 const struct dpll_info *dpll_info;
1725 int i; 1729 int i;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 089a42577ea3..55aeaf041749 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -69,39 +69,63 @@
69}) 69})
70 70
71#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) 71#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
72#define wait_for_us(COND, US) _wait_for((COND), (US), 1)
73 72
74/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ 73/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
75#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) 74#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
76# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic()) 75# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
77#else 76#else
78# define _WAIT_FOR_ATOMIC_CHECK do { } while (0) 77# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
79#endif 78#endif
80 79
81#define _wait_for_atomic(COND, US) ({ \ 80#define _wait_for_atomic(COND, US, ATOMIC) \
82 unsigned long end__; \ 81({ \
83 int ret__ = 0; \ 82 int cpu, ret, timeout = (US) * 1000; \
84 _WAIT_FOR_ATOMIC_CHECK; \ 83 u64 base; \
84 _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
85 BUILD_BUG_ON((US) > 50000); \ 85 BUILD_BUG_ON((US) > 50000); \
86 end__ = (local_clock() >> 10) + (US) + 1; \ 86 if (!(ATOMIC)) { \
87 while (!(COND)) { \ 87 preempt_disable(); \
88 if (time_after((unsigned long)(local_clock() >> 10), end__)) { \ 88 cpu = smp_processor_id(); \
89 /* Unlike the regular wait_for(), this atomic variant \ 89 } \
90 * cannot be preempted (and we'll just ignore the issue\ 90 base = local_clock(); \
91 * of irq interruptions) and so we know that no time \ 91 for (;;) { \
92 * has passed since the last check of COND and can \ 92 u64 now = local_clock(); \
93 * immediately report the timeout. \ 93 if (!(ATOMIC)) \
94 */ \ 94 preempt_enable(); \
95 ret__ = -ETIMEDOUT; \ 95 if (COND) { \
96 ret = 0; \
97 break; \
98 } \
99 if (now - base >= timeout) { \
100 ret = -ETIMEDOUT; \
96 break; \ 101 break; \
97 } \ 102 } \
98 cpu_relax(); \ 103 cpu_relax(); \
104 if (!(ATOMIC)) { \
105 preempt_disable(); \
106 if (unlikely(cpu != smp_processor_id())) { \
107 timeout -= now - base; \
108 cpu = smp_processor_id(); \
109 base = local_clock(); \
110 } \
111 } \
99 } \ 112 } \
113 ret; \
114})
115
116#define wait_for_us(COND, US) \
117({ \
118 int ret__; \
119 BUILD_BUG_ON(!__builtin_constant_p(US)); \
120 if ((US) > 10) \
121 ret__ = _wait_for((COND), (US), 10); \
122 else \
123 ret__ = _wait_for_atomic((COND), (US), 0); \
100 ret__; \ 124 ret__; \
101}) 125})
102 126
103#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000) 127#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1)
104#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US)) 128#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1)
105 129
106#define KHz(x) (1000 * (x)) 130#define KHz(x) (1000 * (x))
107#define MHz(x) KHz(1000 * (x)) 131#define MHz(x) KHz(1000 * (x))
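
The rewritten _wait_for_atomic() above is dense; the same control flow in plain-function form, with illustrative names. The key point is that local_clock() is only monotonic per CPU, so the new non-atomic mode (used by wait_for_us() for short waits) disables preemption around each sample and, if the task migrated between samples, restarts the clock on the new CPU with whatever budget remains:

    static int wait_for_atomic_sketch(bool (*cond)(void *), void *data,
                                      u64 timeout_ns, bool atomic)
    {
        int cpu = 0;
        u64 base, now;

        if (!atomic) {
            preempt_disable();
            cpu = smp_processor_id();
        }
        base = local_clock();
        for (;;) {
            now = local_clock();
            if (!atomic)
                preempt_enable();   /* COND may be slow; allow preemption */
            if (cond(data))
                return 0;
            if (now - base >= timeout_ns)
                return -ETIMEDOUT;
            cpu_relax();
            if (!atomic) {
                preempt_disable();
                if (unlikely(cpu != smp_processor_id())) {
                    /* migrated: rebase onto the new CPU's clock */
                    timeout_ns -= now - base;
                    cpu = smp_processor_id();
                    base = local_clock();
                }
            }
        }
    }

This is also why wait_for_atomic_us() in bxt_ddi_pll_enable() could become wait_for_us(): callers not in atomic context now get a preemptible busy-wait (or a sleeping _wait_for() when the timeout exceeds 10us) instead of being forced through the atomic path.
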
@@ -135,7 +159,7 @@ enum intel_output_type {
135 INTEL_OUTPUT_LVDS = 4, 159 INTEL_OUTPUT_LVDS = 4,
136 INTEL_OUTPUT_TVOUT = 5, 160 INTEL_OUTPUT_TVOUT = 5,
137 INTEL_OUTPUT_HDMI = 6, 161 INTEL_OUTPUT_HDMI = 6,
138 INTEL_OUTPUT_DISPLAYPORT = 7, 162 INTEL_OUTPUT_DP = 7,
139 INTEL_OUTPUT_EDP = 8, 163 INTEL_OUTPUT_EDP = 8,
140 INTEL_OUTPUT_DSI = 9, 164 INTEL_OUTPUT_DSI = 9,
141 INTEL_OUTPUT_UNKNOWN = 10, 165 INTEL_OUTPUT_UNKNOWN = 10,
@@ -159,6 +183,7 @@ struct intel_framebuffer {
159struct intel_fbdev { 183struct intel_fbdev {
160 struct drm_fb_helper helper; 184 struct drm_fb_helper helper;
161 struct intel_framebuffer *fb; 185 struct intel_framebuffer *fb;
186 async_cookie_t cookie;
162 int preferred_bpp; 187 int preferred_bpp;
163}; 188};
164 189
@@ -497,12 +522,10 @@ struct intel_crtc_state {
497 */ 522 */
498 bool limited_color_range; 523 bool limited_color_range;
499 524
500 /* DP has a bunch of special case unfortunately, so mark the pipe 525 /* Bitmask of encoder types (enum intel_output_type)
501 * accordingly. */ 526 * driven by the pipe.
502 bool has_dp_encoder; 527 */
503 528 unsigned int output_types;
504 /* DSI has special cases */
505 bool has_dsi_encoder;
506 529
507 /* Whether we should send NULL infoframes. Required for audio. */ 530 /* Whether we should send NULL infoframes. Required for audio. */
508 bool has_hdmi_sink; 531 bool has_hdmi_sink;
@@ -861,6 +884,11 @@ struct intel_dp {
861 * this port. Only relevant on VLV/CHV. 884 * this port. Only relevant on VLV/CHV.
862 */ 885 */
863 enum pipe pps_pipe; 886 enum pipe pps_pipe;
887 /*
888 * Set if the sequencer may be reset due to a power transition,
889 * requiring a reinitialization. Only relevant on BXT.
890 */
891 bool pps_reset;
864 struct edp_power_seq pps_delays; 892 struct edp_power_seq pps_delays;
865 893
866 bool can_mst; /* this port supports mst */ 894 bool can_mst; /* this port supports mst */
@@ -957,14 +985,14 @@ vlv_pipe_to_channel(enum pipe pipe)
957static inline struct drm_crtc * 985static inline struct drm_crtc *
958intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 986intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
959{ 987{
960 struct drm_i915_private *dev_priv = dev->dev_private; 988 struct drm_i915_private *dev_priv = to_i915(dev);
961 return dev_priv->pipe_to_crtc_mapping[pipe]; 989 return dev_priv->pipe_to_crtc_mapping[pipe];
962} 990}
963 991
964static inline struct drm_crtc * 992static inline struct drm_crtc *
965intel_get_crtc_for_plane(struct drm_device *dev, int plane) 993intel_get_crtc_for_plane(struct drm_device *dev, int plane)
966{ 994{
967 struct drm_i915_private *dev_priv = dev->dev_private; 995 struct drm_i915_private *dev_priv = to_i915(dev);
968 return dev_priv->plane_to_crtc_mapping[plane]; 996 return dev_priv->plane_to_crtc_mapping[plane];
969} 997}
970 998
@@ -1157,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1157 struct drm_file *file_priv); 1185 struct drm_file *file_priv);
1158enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1186enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1159 enum pipe pipe); 1187 enum pipe pipe);
1160bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type); 1188static inline bool
1189intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
1190 enum intel_output_type type)
1191{
1192 return crtc_state->output_types & (1 << type);
1193}
1194static inline bool
1195intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
1196{
1197 return crtc_state->output_types &
1198 ((1 << INTEL_OUTPUT_DP) |
1199 (1 << INTEL_OUTPUT_DP_MST) |
1200 (1 << INTEL_OUTPUT_EDP));
1201}
1161static inline void 1202static inline void
1162intel_wait_for_vblank(struct drm_device *dev, int pipe) 1203intel_wait_for_vblank(struct drm_device *dev, int pipe)
1163{ 1204{
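
has_dp_encoder (and has_dsi_encoder, removed from intel_dsi.c below) collapse into the output_types bitmask. An illustrative sketch of both sides of the contract; the accumulation site is an assumption (presumably the atomic check code sets one bit per attached encoder), while the predicates are the helpers added above:

    static void sketch_output_types(struct intel_crtc_state *pipe_config,
                                    struct intel_encoder *encoder)
    {
        /* accumulate: one bit per enum intel_output_type driving the pipe */
        pipe_config->output_types |= 1 << encoder->type;

        /* query: the old booleans become derived predicates */
        if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
            ; /* was: pipe_config->has_dsi_encoder */
        if (intel_crtc_has_dp_encoder(pipe_config))
            ; /* was: has_dp_encoder, now covers DP, DP MST and eDP */
    }

One bitmask also makes the INTEL_OUTPUT_DISPLAYPORT -> INTEL_OUTPUT_DP rename cheap to audit: every remaining type check goes through the same enum.
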
@@ -1338,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev);
1338int intel_dp_max_link_rate(struct intel_dp *intel_dp); 1379int intel_dp_max_link_rate(struct intel_dp *intel_dp);
1339int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); 1380int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
1340void intel_dp_hot_plug(struct intel_encoder *intel_encoder); 1381void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
1341void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); 1382void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
1342uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); 1383uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
1343void intel_plane_destroy(struct drm_plane *plane); 1384void intel_plane_destroy(struct drm_plane *plane);
1344void intel_edp_drrs_enable(struct intel_dp *intel_dp); 1385void intel_edp_drrs_enable(struct intel_dp *intel_dp);
@@ -1451,6 +1492,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
1451 1492
1452/* intel_lvds.c */ 1493/* intel_lvds.c */
1453void intel_lvds_init(struct drm_device *dev); 1494void intel_lvds_init(struct drm_device *dev);
1495struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
1454bool intel_is_dual_link_lvds(struct drm_device *dev); 1496bool intel_is_dual_link_lvds(struct drm_device *dev);
1455 1497
1456 1498
@@ -1489,7 +1531,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
1489 int fitting_mode); 1531 int fitting_mode);
1490void intel_panel_set_backlight_acpi(struct intel_connector *connector, 1532void intel_panel_set_backlight_acpi(struct intel_connector *connector,
1491 u32 level, u32 max); 1533 u32 level, u32 max);
1492int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); 1534int intel_panel_setup_backlight(struct drm_connector *connector,
1535 enum pipe pipe);
1493void intel_panel_enable_backlight(struct intel_connector *connector); 1536void intel_panel_enable_backlight(struct intel_connector *connector);
1494void intel_panel_disable_backlight(struct intel_connector *connector); 1537void intel_panel_disable_backlight(struct intel_connector *connector);
1495void intel_panel_destroy_backlight(struct drm_connector *connector); 1538void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1498,11 +1541,15 @@ extern struct drm_display_mode *intel_find_panel_downclock(
1498 struct drm_device *dev, 1541 struct drm_device *dev,
1499 struct drm_display_mode *fixed_mode, 1542 struct drm_display_mode *fixed_mode,
1500 struct drm_connector *connector); 1543 struct drm_connector *connector);
1501void intel_backlight_register(struct drm_device *dev);
1502 1544
1503#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) 1545#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
1546int intel_backlight_device_register(struct intel_connector *connector);
1504void intel_backlight_device_unregister(struct intel_connector *connector); 1547void intel_backlight_device_unregister(struct intel_connector *connector);
1505#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ 1548#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1549static inline int intel_backlight_device_register(struct intel_connector *connector)
1550{
1551 return 0;
1552}
1506static inline void intel_backlight_device_unregister(struct intel_connector *connector) 1553static inline void intel_backlight_device_unregister(struct intel_connector *connector)
1507{ 1554{
1508} 1555}
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index b444d0e35a98..de8e9fb51595 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
84{ 84{
85 struct drm_encoder *encoder = &intel_dsi->base.base; 85 struct drm_encoder *encoder = &intel_dsi->base.base;
86 struct drm_device *dev = encoder->dev; 86 struct drm_device *dev = encoder->dev;
87 struct drm_i915_private *dev_priv = dev->dev_private; 87 struct drm_i915_private *dev_priv = to_i915(dev);
88 u32 mask; 88 u32 mask;
89 89
90 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | 90 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
91 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; 91 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
92 92
93 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100)) 93 if (intel_wait_for_register(dev_priv,
94 MIPI_GEN_FIFO_STAT(port), mask, mask,
95 100))
94 DRM_ERROR("DPI FIFOs are not empty\n"); 96 DRM_ERROR("DPI FIFOs are not empty\n");
95} 97}
96 98
@@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
129{ 131{
130 struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); 132 struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
131 struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev; 133 struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
132 struct drm_i915_private *dev_priv = dev->dev_private; 134 struct drm_i915_private *dev_priv = to_i915(dev);
133 enum port port = intel_dsi_host->port; 135 enum port port = intel_dsi_host->port;
134 struct mipi_dsi_packet packet; 136 struct mipi_dsi_packet packet;
135 ssize_t ret; 137 ssize_t ret;
@@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
158 160
159 /* note: this is never true for reads */ 161 /* note: this is never true for reads */
160 if (packet.payload_length) { 162 if (packet.payload_length) {
161 163 if (intel_wait_for_register(dev_priv,
162 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50)) 164 MIPI_GEN_FIFO_STAT(port),
165 data_mask, 0,
166 50))
163 DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n"); 167 DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
164 168
165 write_data(dev_priv, data_reg, packet.payload, 169 write_data(dev_priv, data_reg, packet.payload,
@@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
170 I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); 174 I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
171 } 175 }
172 176
173 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) { 177 if (intel_wait_for_register(dev_priv,
178 MIPI_GEN_FIFO_STAT(port),
179 ctrl_mask, 0,
180 50)) {
174 DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n"); 181 DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
175 } 182 }
176 183
@@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
179 /* ->rx_len is set only for reads */ 186 /* ->rx_len is set only for reads */
180 if (msg->rx_len) { 187 if (msg->rx_len) {
181 data_mask = GEN_READ_DATA_AVAIL; 188 data_mask = GEN_READ_DATA_AVAIL;
182 if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50)) 189 if (intel_wait_for_register(dev_priv,
190 MIPI_INTR_STAT(port),
191 data_mask, data_mask,
192 50))
183 DRM_ERROR("Timeout waiting for read data.\n"); 193 DRM_ERROR("Timeout waiting for read data.\n");
184 194
185 read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len); 195 read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
250{ 260{
251 struct drm_encoder *encoder = &intel_dsi->base.base; 261 struct drm_encoder *encoder = &intel_dsi->base.base;
252 struct drm_device *dev = encoder->dev; 262 struct drm_device *dev = encoder->dev;
253 struct drm_i915_private *dev_priv = dev->dev_private; 263 struct drm_i915_private *dev_priv = to_i915(dev);
254 u32 mask; 264 u32 mask;
255 265
256 /* XXX: pipe, hs */ 266 /* XXX: pipe, hs */
@@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
269 I915_WRITE(MIPI_DPI_CONTROL(port), cmd); 279 I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
270 280
271 mask = SPL_PKT_SENT_INTERRUPT; 281 mask = SPL_PKT_SENT_INTERRUPT;
272 if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100)) 282 if (intel_wait_for_register(dev_priv,
283 MIPI_INTR_STAT(port), mask, mask,
284 100))
273 DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); 285 DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
274 286
275 return 0; 287 return 0;
@@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
302static bool intel_dsi_compute_config(struct intel_encoder *encoder, 314static bool intel_dsi_compute_config(struct intel_encoder *encoder,
303 struct intel_crtc_state *pipe_config) 315 struct intel_crtc_state *pipe_config)
304{ 316{
305 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 317 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
306 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, 318 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
307 base); 319 base);
308 struct intel_connector *intel_connector = intel_dsi->attached_connector; 320 struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
313 325
314 DRM_DEBUG_KMS("\n"); 326 DRM_DEBUG_KMS("\n");
315 327
316 pipe_config->has_dsi_encoder = true;
317
318 if (fixed_mode) { 328 if (fixed_mode) {
319 intel_fixed_panel_mode(fixed_mode, adjusted_mode); 329 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
320 330
@@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
348 358
349static void bxt_dsi_device_ready(struct intel_encoder *encoder) 359static void bxt_dsi_device_ready(struct intel_encoder *encoder)
350{ 360{
351 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 361 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
352 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 362 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
353 enum port port; 363 enum port port;
354 u32 val; 364 u32 val;
@@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
387 397
388static void vlv_dsi_device_ready(struct intel_encoder *encoder) 398static void vlv_dsi_device_ready(struct intel_encoder *encoder)
389{ 399{
390 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 400 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
391 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 401 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
392 enum port port; 402 enum port port;
393 u32 val; 403 u32 val;
@@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
437static void intel_dsi_port_enable(struct intel_encoder *encoder) 447static void intel_dsi_port_enable(struct intel_encoder *encoder)
438{ 448{
439 struct drm_device *dev = encoder->base.dev; 449 struct drm_device *dev = encoder->base.dev;
440 struct drm_i915_private *dev_priv = dev->dev_private; 450 struct drm_i915_private *dev_priv = to_i915(dev);
441 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 451 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
442 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 452 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
443 enum port port; 453 enum port port;
@@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
478static void intel_dsi_port_disable(struct intel_encoder *encoder) 488static void intel_dsi_port_disable(struct intel_encoder *encoder)
479{ 489{
480 struct drm_device *dev = encoder->base.dev; 490 struct drm_device *dev = encoder->base.dev;
481 struct drm_i915_private *dev_priv = dev->dev_private; 491 struct drm_i915_private *dev_priv = to_i915(dev);
482 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 492 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
483 enum port port; 493 enum port port;
484 494
@@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
497static void intel_dsi_enable(struct intel_encoder *encoder) 507static void intel_dsi_enable(struct intel_encoder *encoder)
498{ 508{
499 struct drm_device *dev = encoder->base.dev; 509 struct drm_device *dev = encoder->base.dev;
500 struct drm_i915_private *dev_priv = dev->dev_private; 510 struct drm_i915_private *dev_priv = to_i915(dev);
501 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 511 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
502 enum port port; 512 enum port port;
503 513
@@ -528,7 +538,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
528static void intel_dsi_pre_enable(struct intel_encoder *encoder) 538static void intel_dsi_pre_enable(struct intel_encoder *encoder)
529{ 539{
530 struct drm_device *dev = encoder->base.dev; 540 struct drm_device *dev = encoder->base.dev;
531 struct drm_i915_private *dev_priv = dev->dev_private; 541 struct drm_i915_private *dev_priv = to_i915(dev);
532 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 542 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
533 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 543 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
534 enum port port; 544 enum port port;
@@ -602,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
602static void intel_dsi_disable(struct intel_encoder *encoder) 612static void intel_dsi_disable(struct intel_encoder *encoder)
603{ 613{
604 struct drm_device *dev = encoder->base.dev; 614 struct drm_device *dev = encoder->base.dev;
605 struct drm_i915_private *dev_priv = dev->dev_private; 615 struct drm_i915_private *dev_priv = to_i915(dev);
606 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 616 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
607 enum port port; 617 enum port port;
608 u32 temp; 618 u32 temp;
@@ -641,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
641static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) 651static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
642{ 652{
643 struct drm_device *dev = encoder->base.dev; 653 struct drm_device *dev = encoder->base.dev;
644 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 654 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
645 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 655 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
646 enum port port; 656 enum port port;
647 657
@@ -667,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
667 /* Wait till Clock lanes are in LP-00 state for MIPI Port A 677 /* Wait till Clock lanes are in LP-00 state for MIPI Port A
668 * only. MIPI Port C has no similar bit for checking 678 * only. MIPI Port C has no similar bit for checking
669 */ 679 */
670 if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT) 680 if (intel_wait_for_register(dev_priv,
671 == 0x00000), 30)) 681 port_ctrl, AFE_LATCHOUT, 0,
682 30))
672 DRM_ERROR("DSI LP not going Low\n"); 683 DRM_ERROR("DSI LP not going Low\n");
673 684
674 /* Disable MIPI PHY transparent latch */ 685 /* Disable MIPI PHY transparent latch */
@@ -685,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
685 696
686static void intel_dsi_post_disable(struct intel_encoder *encoder) 697static void intel_dsi_post_disable(struct intel_encoder *encoder)
687{ 698{
688 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 699 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
689 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 700 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
690 701
691 DRM_DEBUG_KMS("\n"); 702 DRM_DEBUG_KMS("\n");
@@ -720,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
720static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, 731static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
721 enum pipe *pipe) 732 enum pipe *pipe)
722{ 733{
723 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 734 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
724 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 735 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
725 struct drm_device *dev = encoder->base.dev; 736 struct drm_device *dev = encoder->base.dev;
726 enum intel_display_power_domain power_domain; 737 enum intel_display_power_domain power_domain;
@@ -794,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
794 struct intel_crtc_state *pipe_config) 805 struct intel_crtc_state *pipe_config)
795{ 806{
796 struct drm_device *dev = encoder->base.dev; 807 struct drm_device *dev = encoder->base.dev;
797 struct drm_i915_private *dev_priv = dev->dev_private; 808 struct drm_i915_private *dev_priv = to_i915(dev);
798 struct drm_display_mode *adjusted_mode = 809 struct drm_display_mode *adjusted_mode =
799 &pipe_config->base.adjusted_mode; 810 &pipe_config->base.adjusted_mode;
800 struct drm_display_mode *adjusted_mode_sw; 811 struct drm_display_mode *adjusted_mode_sw;
@@ -954,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
954 u32 pclk; 965 u32 pclk;
955 DRM_DEBUG_KMS("\n"); 966 DRM_DEBUG_KMS("\n");
956 967
957 pipe_config->has_dsi_encoder = true;
958
959 if (IS_BROXTON(dev)) 968 if (IS_BROXTON(dev))
960 bxt_dsi_get_pipe_config(encoder, pipe_config); 969 bxt_dsi_get_pipe_config(encoder, pipe_config);
961 970
@@ -1013,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
1013 const struct drm_display_mode *adjusted_mode) 1022 const struct drm_display_mode *adjusted_mode)
1014{ 1023{
1015 struct drm_device *dev = encoder->dev; 1024 struct drm_device *dev = encoder->dev;
1016 struct drm_i915_private *dev_priv = dev->dev_private; 1025 struct drm_i915_private *dev_priv = to_i915(dev);
1017 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1026 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
1018 enum port port; 1027 enum port port;
1019 unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 1028 unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1099,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
1099{ 1108{
1100 struct drm_encoder *encoder = &intel_encoder->base; 1109 struct drm_encoder *encoder = &intel_encoder->base;
1101 struct drm_device *dev = encoder->dev; 1110 struct drm_device *dev = encoder->dev;
1102 struct drm_i915_private *dev_priv = dev->dev_private; 1111 struct drm_i915_private *dev_priv = to_i915(dev);
1103 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 1112 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1104 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1113 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
1105 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1114 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1390,6 +1399,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
1390static const struct drm_connector_funcs intel_dsi_connector_funcs = { 1399static const struct drm_connector_funcs intel_dsi_connector_funcs = {
1391 .dpms = drm_atomic_helper_connector_dpms, 1400 .dpms = drm_atomic_helper_connector_dpms,
1392 .detect = intel_dsi_detect, 1401 .detect = intel_dsi_detect,
1402 .late_register = intel_connector_register,
1393 .early_unregister = intel_connector_unregister, 1403 .early_unregister = intel_connector_unregister,
1394 .destroy = intel_dsi_connector_destroy, 1404 .destroy = intel_dsi_connector_destroy,
1395 .fill_modes = drm_helper_probe_single_connector_modes, 1405 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -1420,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev)
1420 struct intel_connector *intel_connector; 1430 struct intel_connector *intel_connector;
1421 struct drm_connector *connector; 1431 struct drm_connector *connector;
1422 struct drm_display_mode *scan, *fixed_mode = NULL; 1432 struct drm_display_mode *scan, *fixed_mode = NULL;
1423 struct drm_i915_private *dev_priv = dev->dev_private; 1433 struct drm_i915_private *dev_priv = to_i915(dev);
1424 enum port port; 1434 enum port port;
1425 unsigned int i; 1435 unsigned int i;
1426 1436
@@ -1587,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev)
1587 connector->display_info.height_mm = fixed_mode->height_mm; 1597 connector->display_info.height_mm = fixed_mode->height_mm;
1588 1598
1589 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 1599 intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
1600 intel_panel_setup_backlight(connector, INVALID_PIPE);
1590 1601
1591 intel_dsi_add_properties(intel_connector); 1602 intel_dsi_add_properties(intel_connector);
1592 1603
1593 drm_connector_register(connector);
1594
1595 intel_panel_setup_backlight(connector, INVALID_PIPE);
1596
1597 return; 1604 return;
1598 1605
1599err: 1606err:
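
The connector changes in this file (and in intel_dvo.c below) share one shape: the hand-rolled drm_connector_register() call during init is dropped, and registration moves into the connector's .late_register hook, paired with the existing .early_unregister, so the DRM core brings the userspace interfaces up and down at drm_dev_register()/unregister() time. A minimal sketch of the resulting funcs table, assuming i915's intel_connector_register()/intel_connector_unregister() helpers (declared in intel_drv.h):

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_crtc_helper.h>

	/* placeholder detect; a real connector fills in the rest too */
	static enum drm_connector_status
	example_detect(struct drm_connector *connector, bool force)
	{
		return connector_status_connected;
	}

	static const struct drm_connector_funcs example_connector_funcs = {
		.dpms = drm_atomic_helper_connector_dpms,
		.detect = example_detect,
		.late_register = intel_connector_register,	/* sysfs/debugfs up */
		.early_unregister = intel_connector_unregister,	/* ...and down */
		.fill_modes = drm_helper_probe_single_connector_modes,
	};
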
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index f0dc427743f8..ac7c6020c443 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -159,7 +159,7 @@ static int dcs_setup_backlight(struct intel_connector *connector,
159int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) 159int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
160{ 160{
161 struct drm_device *dev = intel_connector->base.dev; 161 struct drm_device *dev = intel_connector->base.dev;
162 struct drm_i915_private *dev_priv = dev->dev_private; 162 struct drm_i915_private *dev_priv = to_i915(dev);
163 struct intel_encoder *encoder = intel_connector->encoder; 163 struct intel_encoder *encoder = intel_connector->encoder;
164 struct intel_panel *panel = &intel_connector->panel; 164 struct intel_panel *panel = &intel_connector->panel;
165 165
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index f122484bedfc..cd154ce6b6c1 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -303,7 +303,7 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
303static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) 303static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
304{ 304{
305 struct drm_device *dev = intel_dsi->base.base.dev; 305 struct drm_device *dev = intel_dsi->base.base.dev;
306 struct drm_i915_private *dev_priv = dev->dev_private; 306 struct drm_i915_private *dev_priv = to_i915(dev);
307 u8 gpio_source, gpio_index; 307 u8 gpio_source, gpio_index;
308 bool value; 308 bool value;
309 309
@@ -469,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
469 struct vbt_panel *vbt_panel = to_vbt_panel(panel); 469 struct vbt_panel *vbt_panel = to_vbt_panel(panel);
470 struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; 470 struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
471 struct drm_device *dev = intel_dsi->base.base.dev; 471 struct drm_device *dev = intel_dsi->base.base.dev;
472 struct drm_i915_private *dev_priv = dev->dev_private; 472 struct drm_i915_private *dev_priv = to_i915(dev);
473 struct drm_display_mode *mode; 473 struct drm_display_mode *mode;
474 474
475 if (!panel->connector) 475 if (!panel->connector)
@@ -497,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
497struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) 497struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
498{ 498{
499 struct drm_device *dev = intel_dsi->base.base.dev; 499 struct drm_device *dev = intel_dsi->base.base.dev;
500 struct drm_i915_private *dev_priv = dev->dev_private; 500 struct drm_i915_private *dev_priv = to_i915(dev);
501 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; 501 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
502 struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; 502 struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
503 struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; 503 struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
@@ -649,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
649 ); 649 );
650 650
651 /* 651 /*
652 * Exit zero count is the unified value for ths_zero and ths_exit 652 * Exit zero count is the unified value for ths_zero and ths_exit
653 * minimum value for ths_exit = 110ns 653 * minimum value for ths_exit = 110ns
654 * min (exit_zero_cnt * 2) = 110/UI 654 * min (exit_zero_cnt * 2) = 110/UI
655 * exit_zero_cnt = 55/UI 655 * exit_zero_cnt = 55/UI
656 */ 656 */
657 if (exit_zero_cnt < (55 * ui_den / ui_num)) 657 if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
658 if ((55 * ui_den) % ui_num) 658 exit_zero_cnt += 1;
659 exit_zero_cnt += 1;
660 659
661 /* clk zero count */ 660 /* clk zero count */
662 clk_zero_cnt = DIV_ROUND_UP( 661 clk_zero_cnt = DIV_ROUND_UP(
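
The conditional collapse above is behaviour-preserving: nested "if (a) if (b) x++;" is exactly "if (a && b) x++;". A standalone check of the rounding step with made-up unit-interval numbers (ui_num/ui_den mirror the driver's names; the values here are hypothetical):

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical unit-interval ratio, not real hardware values */
		unsigned int ui_num = 2, ui_den = 7;
		unsigned int exit_zero_cnt = 24;

		/* round up towards the 55/UI minimum only when the division
		 * left a remainder, exactly as the combined condition does */
		if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
			exit_zero_cnt += 1;

		printf("exit_zero_cnt = %u\n", exit_zero_cnt);
		return 0;
	}
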
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 1765e6e18f2c..6ab58a01b18e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
55 struct intel_crtc_state *config, 55 struct intel_crtc_state *config,
56 int target_dsi_clk) 56 int target_dsi_clk)
57{ 57{
58 unsigned int calc_m = 0, calc_p = 0;
59 unsigned int m_min, m_max, p_min = 2, p_max = 6; 58 unsigned int m_min, m_max, p_min = 2, p_max = 6;
60 unsigned int m, n, p; 59 unsigned int m, n, p;
61 int ref_clk; 60 unsigned int calc_m, calc_p;
62 int delta = target_dsi_clk; 61 int delta, ref_clk;
63 u32 m_seed;
64 62
65 /* target_dsi_clk is expected in kHz */ 63 /* target_dsi_clk is expected in kHz */
66 if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { 64 if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
@@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
80 m_max = 92; 78 m_max = 92;
81 } 79 }
82 80
81 calc_p = p_min;
82 calc_m = m_min;
83 delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));
84
83 for (m = m_min; m <= m_max && delta; m++) { 85 for (m = m_min; m <= m_max && delta; m++) {
84 for (p = p_min; p <= p_max && delta; p++) { 86 for (p = p_min; p <= p_max && delta; p++) {
85 /* 87 /*
@@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
97 } 99 }
98 100
99 /* register has log2(N1), this works fine for powers of two */ 101 /* register has log2(N1), this works fine for powers of two */
100 n = ffs(n) - 1;
101 m_seed = lfsr_converts[calc_m - 62];
102 config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); 102 config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
103 config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT | 103 config->dsi_pll.div =
104 m_seed << DSI_PLL_M1_DIV_SHIFT; 104 (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
105 (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;
105 106
106 return 0; 107 return 0;
107} 108}
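
The functional fix in this hunk is the seeding: delta used to start at target_dsi_clk with calc_m/calc_p zero-initialised, so the "&& delta" early exit could leave the loops without a valid divisor pair. Seeding the best-so-far state from (m_min, p_min) and its actual error makes the early exit safe, including when the very first candidate is exact. (The packing change relies on ffs(n) - 1 being log2(n) for power-of-two n, e.g. ffs(8) - 1 == 3.) A standalone sketch of the search under those assumptions; the clock formula here is illustrative, not the silicon's:

	#include <stdlib.h>

	static void calc_mp(unsigned int m_min, unsigned int m_max,
			    unsigned int p_min, unsigned int p_max,
			    unsigned int n, int ref_clk, int target,
			    unsigned int *best_m, unsigned int *best_p)
	{
		unsigned int m, p;
		/* seed with the first candidate and its true error */
		int delta = abs(target - (int)(m_min * ref_clk / (p_min * n)));

		*best_m = m_min;
		*best_p = p_min;

		for (m = m_min; m <= m_max && delta; m++) {
			for (p = p_min; p <= p_max && delta; p++) {
				int d = abs(target - (int)(m * ref_clk / (p * n)));

				if (d < delta) {
					delta = d;
					*best_m = m;
					*best_p = p;
				}
			}
		}
	}
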
@@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
113static int vlv_compute_dsi_pll(struct intel_encoder *encoder, 114static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
114 struct intel_crtc_state *config) 115 struct intel_crtc_state *config)
115{ 116{
116 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 117 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
117 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 118 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
118 int ret; 119 int ret;
119 u32 dsi_clk; 120 u32 dsi_clk;
@@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
234 * PLL lock should deassert within 200us. 235 * PLL lock should deassert within 200us.
235 * Wait up to 1ms before timing out. 236 * Wait up to 1ms before timing out.
236 */ 237 */
237 if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE) 238 if (intel_wait_for_register(dev_priv,
238 & BXT_DSI_PLL_LOCKED) == 0, 1)) 239 BXT_DSI_PLL_ENABLE,
240 BXT_DSI_PLL_LOCKED,
241 0,
242 1))
239 DRM_ERROR("Timeout waiting for PLL lock deassertion\n"); 243 DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
240} 244}
241 245
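
Several open-coded wait_for(COND, timeout) polls in this series become intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which returns 0 once (read(reg) & mask) == value and an error on timeout; centralising the loop also lets the helper handle uncore forcewake around the reads. A rough userspace-style model of just the semantics, assuming a caller-supplied register-read callback:

	#include <errno.h>
	#include <stdint.h>

	/* toy model only: the real helper sleeps/spins appropriately and
	 * takes forcewake; here we just busy-poll a fixed number of times */
	static int wait_for_register(uint32_t (*read_reg)(uint32_t),
				     uint32_t reg, uint32_t mask,
				     uint32_t value, unsigned int timeout_ms)
	{
		unsigned int polls = timeout_ms * 100;	/* ~10us per poll */

		while (polls--) {
			if ((read_reg(reg) & mask) == value)
				return 0;
			/* udelay(10) would go here in the kernel */
		}
		return -ETIMEDOUT;
	}
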
@@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
321 u32 dsi_clk; 325 u32 dsi_clk;
322 u32 dsi_ratio; 326 u32 dsi_ratio;
323 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 327 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
324 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 328 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
325 329
326 /* Divide by zero */ 330 /* Divide by zero */
327 if (!pipe_bpp) { 331 if (!pipe_bpp) {
@@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
356static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) 360static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
357{ 361{
358 u32 temp; 362 u32 temp;
359 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 363 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
360 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 364 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
361 365
362 temp = I915_READ(MIPI_CTRL(port)); 366 temp = I915_READ(MIPI_CTRL(port));
@@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
370static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, 374static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
371 const struct intel_crtc_state *config) 375 const struct intel_crtc_state *config)
372{ 376{
373 struct drm_i915_private *dev_priv = dev->dev_private; 377 struct drm_i915_private *dev_priv = to_i915(dev);
374 u32 tmp; 378 u32 tmp;
375 u32 dsi_rate = 0; 379 u32 dsi_rate = 0;
376 u32 pll_ratio = 0; 380 u32 pll_ratio = 0;
@@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
465static void bxt_enable_dsi_pll(struct intel_encoder *encoder, 469static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
466 const struct intel_crtc_state *config) 470 const struct intel_crtc_state *config)
467{ 471{
468 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 472 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
469 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 473 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
470 enum port port; 474 enum port port;
471 u32 val; 475 u32 val;
@@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
486 I915_WRITE(BXT_DSI_PLL_ENABLE, val); 490 I915_WRITE(BXT_DSI_PLL_ENABLE, val);
487 491
488 /* Timeout and fail if PLL not locked */ 492 /* Timeout and fail if PLL not locked */
489 if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) { 493 if (intel_wait_for_register(dev_priv,
494 BXT_DSI_PLL_ENABLE,
495 BXT_DSI_PLL_LOCKED,
496 BXT_DSI_PLL_LOCKED,
497 1)) {
490 DRM_ERROR("Timed out waiting for DSI PLL to lock\n"); 498 DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
491 return; 499 return;
492 } 500 }
@@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
542{ 550{
543 u32 tmp; 551 u32 tmp;
544 struct drm_device *dev = encoder->base.dev; 552 struct drm_device *dev = encoder->base.dev;
545 struct drm_i915_private *dev_priv = dev->dev_private; 553 struct drm_i915_private *dev_priv = to_i915(dev);
546 554
547 /* Clear old configurations */ 555 /* Clear old configurations */
548 tmp = I915_READ(BXT_MIPI_CLOCK_CTL); 556 tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 60e4ddf2ec6d..47bdf9dad0d3 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
122static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) 122static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
123{ 123{
124 struct drm_device *dev = connector->base.dev; 124 struct drm_device *dev = connector->base.dev;
125 struct drm_i915_private *dev_priv = dev->dev_private; 125 struct drm_i915_private *dev_priv = to_i915(dev);
126 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); 126 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
127 u32 tmp; 127 u32 tmp;
128 128
@@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
138 enum pipe *pipe) 138 enum pipe *pipe)
139{ 139{
140 struct drm_device *dev = encoder->base.dev; 140 struct drm_device *dev = encoder->base.dev;
141 struct drm_i915_private *dev_priv = dev->dev_private; 141 struct drm_i915_private *dev_priv = to_i915(dev);
142 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 142 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
143 u32 tmp; 143 u32 tmp;
144 144
@@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
155static void intel_dvo_get_config(struct intel_encoder *encoder, 155static void intel_dvo_get_config(struct intel_encoder *encoder,
156 struct intel_crtc_state *pipe_config) 156 struct intel_crtc_state *pipe_config)
157{ 157{
158 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 158 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
159 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 159 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
160 u32 tmp, flags = 0; 160 u32 tmp, flags = 0;
161 161
@@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
176 176
177static void intel_disable_dvo(struct intel_encoder *encoder) 177static void intel_disable_dvo(struct intel_encoder *encoder)
178{ 178{
179 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 179 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
180 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 180 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
181 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; 181 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
182 u32 temp = I915_READ(dvo_reg); 182 u32 temp = I915_READ(dvo_reg);
@@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
188 188
189static void intel_enable_dvo(struct intel_encoder *encoder) 189static void intel_enable_dvo(struct intel_encoder *encoder)
190{ 190{
191 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 191 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
192 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 192 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
193 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 193 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
194 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; 194 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
@@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
256static void intel_dvo_pre_enable(struct intel_encoder *encoder) 256static void intel_dvo_pre_enable(struct intel_encoder *encoder)
257{ 257{
258 struct drm_device *dev = encoder->base.dev; 258 struct drm_device *dev = encoder->base.dev;
259 struct drm_i915_private *dev_priv = dev->dev_private; 259 struct drm_i915_private *dev_priv = to_i915(dev);
260 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 260 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
261 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 261 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
262 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 262 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
305 305
306static int intel_dvo_get_modes(struct drm_connector *connector) 306static int intel_dvo_get_modes(struct drm_connector *connector)
307{ 307{
308 struct drm_i915_private *dev_priv = connector->dev->dev_private; 308 struct drm_i915_private *dev_priv = to_i915(connector->dev);
309 const struct drm_display_mode *fixed_mode = 309 const struct drm_display_mode *fixed_mode =
310 to_intel_connector(connector)->panel.fixed_mode; 310 to_intel_connector(connector)->panel.fixed_mode;
311 311
@@ -341,6 +341,7 @@ static void intel_dvo_destroy(struct drm_connector *connector)
341static const struct drm_connector_funcs intel_dvo_connector_funcs = { 341static const struct drm_connector_funcs intel_dvo_connector_funcs = {
342 .dpms = drm_atomic_helper_connector_dpms, 342 .dpms = drm_atomic_helper_connector_dpms,
343 .detect = intel_dvo_detect, 343 .detect = intel_dvo_detect,
344 .late_register = intel_connector_register,
344 .early_unregister = intel_connector_unregister, 345 .early_unregister = intel_connector_unregister,
345 .destroy = intel_dvo_destroy, 346 .destroy = intel_dvo_destroy,
346 .fill_modes = drm_helper_probe_single_connector_modes, 347 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -378,7 +379,7 @@ static struct drm_display_mode *
378intel_dvo_get_current_mode(struct drm_connector *connector) 379intel_dvo_get_current_mode(struct drm_connector *connector)
379{ 380{
380 struct drm_device *dev = connector->dev; 381 struct drm_device *dev = connector->dev;
381 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = to_i915(dev);
382 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 383 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
383 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); 384 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
384 struct drm_display_mode *mode = NULL; 385 struct drm_display_mode *mode = NULL;
@@ -420,7 +421,7 @@ static char intel_dvo_port_name(i915_reg_t dvo_reg)
420 421
421void intel_dvo_init(struct drm_device *dev) 422void intel_dvo_init(struct drm_device *dev)
422{ 423{
423 struct drm_i915_private *dev_priv = dev->dev_private; 424 struct drm_i915_private *dev_priv = to_i915(dev);
424 struct intel_encoder *intel_encoder; 425 struct intel_encoder *intel_encoder;
425 struct intel_dvo *intel_dvo; 426 struct intel_dvo *intel_dvo;
426 struct intel_connector *intel_connector; 427 struct intel_connector *intel_connector;
@@ -550,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev)
550 intel_dvo->panel_wants_dither = true; 551 intel_dvo->panel_wants_dither = true;
551 } 552 }
552 553
553 drm_connector_register(connector);
554 return; 554 return;
555 } 555 }
556 556
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index a19944b6dc25..6a7ad3ed1463 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
124 I915_WRITE(FBC_CONTROL, fbc_ctl); 124 I915_WRITE(FBC_CONTROL, fbc_ctl);
125 125
126 /* Wait for compressing bit to clear */ 126 /* Wait for compressing bit to clear */
127 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { 127 if (intel_wait_for_register(dev_priv,
128 FBC_STATUS, FBC_STAT_COMPRESSING, 0,
129 10)) {
128 DRM_DEBUG_KMS("FBC idle timed out\n"); 130 DRM_DEBUG_KMS("FBC idle timed out\n");
129 return; 131 return;
130 } 132 }
@@ -390,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
390 struct intel_fbc *fbc = &dev_priv->fbc; 392 struct intel_fbc *fbc = &dev_priv->fbc;
391 struct intel_fbc_work *work = &fbc->work; 393 struct intel_fbc_work *work = &fbc->work;
392 struct intel_crtc *crtc = fbc->crtc; 394 struct intel_crtc *crtc = fbc->crtc;
393 struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; 395 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
394 396
395 if (drm_crtc_vblank_get(&crtc->base)) { 397 if (drm_crtc_vblank_get(&crtc->base)) {
396 DRM_ERROR("vblank not available for FBC on pipe %c\n", 398 DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -443,7 +445,7 @@ out:
443 445
444static void intel_fbc_schedule_activation(struct intel_crtc *crtc) 446static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
445{ 447{
446 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 448 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
447 struct intel_fbc *fbc = &dev_priv->fbc; 449 struct intel_fbc *fbc = &dev_priv->fbc;
448 struct intel_fbc_work *work = &fbc->work; 450 struct intel_fbc_work *work = &fbc->work;
449 451
@@ -553,7 +555,7 @@ again:
553 555
554static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) 556static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
555{ 557{
556 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 558 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
557 struct intel_fbc *fbc = &dev_priv->fbc; 559 struct intel_fbc *fbc = &dev_priv->fbc;
558 struct drm_mm_node *uninitialized_var(compressed_llb); 560 struct drm_mm_node *uninitialized_var(compressed_llb);
559 int size, fb_cpp, ret; 561 int size, fb_cpp, ret;
@@ -684,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
684 */ 686 */
685static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) 687static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
686{ 688{
687 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 689 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
688 struct intel_fbc *fbc = &dev_priv->fbc; 690 struct intel_fbc *fbc = &dev_priv->fbc;
689 unsigned int effective_w, effective_h, max_w, max_h; 691 unsigned int effective_w, effective_h, max_w, max_h;
690 692
@@ -711,7 +713,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
711 struct intel_crtc_state *crtc_state, 713 struct intel_crtc_state *crtc_state,
712 struct intel_plane_state *plane_state) 714 struct intel_plane_state *plane_state)
713{ 715{
714 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 716 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
715 struct intel_fbc *fbc = &dev_priv->fbc; 717 struct intel_fbc *fbc = &dev_priv->fbc;
716 struct intel_fbc_state_cache *cache = &fbc->state_cache; 718 struct intel_fbc_state_cache *cache = &fbc->state_cache;
717 struct drm_framebuffer *fb = plane_state->base.fb; 719 struct drm_framebuffer *fb = plane_state->base.fb;
@@ -744,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
744 746
745static bool intel_fbc_can_activate(struct intel_crtc *crtc) 747static bool intel_fbc_can_activate(struct intel_crtc *crtc)
746{ 748{
747 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 749 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
748 struct intel_fbc *fbc = &dev_priv->fbc; 750 struct intel_fbc *fbc = &dev_priv->fbc;
749 struct intel_fbc_state_cache *cache = &fbc->state_cache; 751 struct intel_fbc_state_cache *cache = &fbc->state_cache;
750 752
@@ -816,22 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
816 818
817static bool intel_fbc_can_choose(struct intel_crtc *crtc) 819static bool intel_fbc_can_choose(struct intel_crtc *crtc)
818{ 820{
819 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
820 struct intel_fbc *fbc = &dev_priv->fbc; 822 struct intel_fbc *fbc = &dev_priv->fbc;
821 bool enable_by_default = IS_BROADWELL(dev_priv);
822 823
823 if (intel_vgpu_active(dev_priv)) { 824 if (intel_vgpu_active(dev_priv)) {
824 fbc->no_fbc_reason = "VGPU is active"; 825 fbc->no_fbc_reason = "VGPU is active";
825 return false; 826 return false;
826 } 827 }
827 828
828 if (i915.enable_fbc < 0 && !enable_by_default) {
829 fbc->no_fbc_reason = "disabled per chip default";
830 return false;
831 }
832
833 if (!i915.enable_fbc) { 829 if (!i915.enable_fbc) {
834 fbc->no_fbc_reason = "disabled per module param"; 830 fbc->no_fbc_reason = "disabled per module param or by default";
835 return false; 831 return false;
836 } 832 }
837 833
@@ -851,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
851static void intel_fbc_get_reg_params(struct intel_crtc *crtc, 847static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
852 struct intel_fbc_reg_params *params) 848 struct intel_fbc_reg_params *params)
853{ 849{
854 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 850 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
855 struct intel_fbc *fbc = &dev_priv->fbc; 851 struct intel_fbc *fbc = &dev_priv->fbc;
856 struct intel_fbc_state_cache *cache = &fbc->state_cache; 852 struct intel_fbc_state_cache *cache = &fbc->state_cache;
857 853
@@ -884,7 +880,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc,
884 struct intel_crtc_state *crtc_state, 880 struct intel_crtc_state *crtc_state,
885 struct intel_plane_state *plane_state) 881 struct intel_plane_state *plane_state)
886{ 882{
887 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 883 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
888 struct intel_fbc *fbc = &dev_priv->fbc; 884 struct intel_fbc *fbc = &dev_priv->fbc;
889 885
890 if (!fbc_supported(dev_priv)) 886 if (!fbc_supported(dev_priv))
@@ -910,7 +906,7 @@ unlock:
910 906
911static void __intel_fbc_post_update(struct intel_crtc *crtc) 907static void __intel_fbc_post_update(struct intel_crtc *crtc)
912{ 908{
913 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 909 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
914 struct intel_fbc *fbc = &dev_priv->fbc; 910 struct intel_fbc *fbc = &dev_priv->fbc;
915 struct intel_fbc_reg_params old_params; 911 struct intel_fbc_reg_params old_params;
916 912
@@ -943,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
943 939
944void intel_fbc_post_update(struct intel_crtc *crtc) 940void intel_fbc_post_update(struct intel_crtc *crtc)
945{ 941{
946 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 942 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
947 struct intel_fbc *fbc = &dev_priv->fbc; 943 struct intel_fbc *fbc = &dev_priv->fbc;
948 944
949 if (!fbc_supported(dev_priv)) 945 if (!fbc_supported(dev_priv))
@@ -992,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
992 if (!fbc_supported(dev_priv)) 988 if (!fbc_supported(dev_priv))
993 return; 989 return;
994 990
995 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
996 return;
997
998 mutex_lock(&fbc->lock); 991 mutex_lock(&fbc->lock);
999 992
1000 fbc->busy_bits &= ~frontbuffer_bits; 993 fbc->busy_bits &= ~frontbuffer_bits;
1001 994
995 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
996 goto out;
997
1002 if (!fbc->busy_bits && fbc->enabled && 998 if (!fbc->busy_bits && fbc->enabled &&
1003 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { 999 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1004 if (fbc->active) 1000 if (fbc->active)
@@ -1007,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
1007 __intel_fbc_post_update(fbc->crtc); 1003 __intel_fbc_post_update(fbc->crtc);
1008 } 1004 }
1009 1005
1006out:
1010 mutex_unlock(&fbc->lock); 1007 mutex_unlock(&fbc->lock);
1011} 1008}
1012 1009
@@ -1088,7 +1085,7 @@ void intel_fbc_enable(struct intel_crtc *crtc,
1088 struct intel_crtc_state *crtc_state, 1085 struct intel_crtc_state *crtc_state,
1089 struct intel_plane_state *plane_state) 1086 struct intel_plane_state *plane_state)
1090{ 1087{
1091 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1088 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1092 struct intel_fbc *fbc = &dev_priv->fbc; 1089 struct intel_fbc *fbc = &dev_priv->fbc;
1093 1090
1094 if (!fbc_supported(dev_priv)) 1091 if (!fbc_supported(dev_priv))
@@ -1159,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
1159 */ 1156 */
1160void intel_fbc_disable(struct intel_crtc *crtc) 1157void intel_fbc_disable(struct intel_crtc *crtc)
1161{ 1158{
1162 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1159 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1163 struct intel_fbc *fbc = &dev_priv->fbc; 1160 struct intel_fbc *fbc = &dev_priv->fbc;
1164 1161
1165 if (!fbc_supported(dev_priv)) 1162 if (!fbc_supported(dev_priv))
@@ -1213,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
1213 if (!no_fbc_on_multiple_pipes(dev_priv)) 1210 if (!no_fbc_on_multiple_pipes(dev_priv))
1214 return; 1211 return;
1215 1212
1216 for_each_intel_crtc(dev_priv->dev, crtc) 1213 for_each_intel_crtc(&dev_priv->drm, crtc)
1217 if (intel_crtc_active(&crtc->base) && 1214 if (intel_crtc_active(&crtc->base) &&
1218 to_intel_plane_state(crtc->base.primary->state)->visible) 1215 to_intel_plane_state(crtc->base.primary->state)->visible)
1219 dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); 1216 dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
1220} 1217}
1221 1218
1219/*
1220 * The DDX driver changes its behavior depending on the value it reads from
1221 * i915.enable_fbc, so sanitize it at load time by translating the default
1222 * value (-1) into either 0 or 1, so the DDX always reads a definite setting.
1223 *
1224 * Notice that this is done at driver initialization and we still allow user
1225 * space to change the value during runtime without sanitizing it again. IGT
1226 * relies on being able to change i915.enable_fbc at runtime.
1227 */
1228static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1229{
1230 if (i915.enable_fbc >= 0)
1231 return !!i915.enable_fbc;
1232
1233 if (IS_BROADWELL(dev_priv))
1234 return 1;
1235
1236 return 0;
1237}
1238
1222/** 1239/**
1223 * intel_fbc_init - Initialize FBC 1240 * intel_fbc_init - Initialize FBC
1224 * @dev_priv: the i915 device 1241 * @dev_priv: the i915 device
@@ -1236,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1236 fbc->active = false; 1253 fbc->active = false;
1237 fbc->work.scheduled = false; 1254 fbc->work.scheduled = false;
1238 1255
1256 i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1257 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
1258
1239 if (!HAS_FBC(dev_priv)) { 1259 if (!HAS_FBC(dev_priv)) {
1240 fbc->no_fbc_reason = "unsupported by this chipset"; 1260 fbc->no_fbc_reason = "unsupported by this chipset";
1241 return; 1261 return;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 4babefc51eb2..86b00c6db1a6 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -362,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
362 bool *enabled, int width, int height) 362 bool *enabled, int width, int height)
363{ 363{
364 struct drm_device *dev = fb_helper->dev; 364 struct drm_device *dev = fb_helper->dev;
365 unsigned long conn_configured, mask;
366 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
365 int i, j; 367 int i, j;
366 bool *save_enabled; 368 bool *save_enabled;
367 bool fallback = true; 369 bool fallback = true;
368 int num_connectors_enabled = 0; 370 int num_connectors_enabled = 0;
369 int num_connectors_detected = 0; 371 int num_connectors_detected = 0;
370 uint64_t conn_configured = 0, mask;
371 int pass = 0; 372 int pass = 0;
372 373
373 save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool), 374 save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
374 GFP_KERNEL);
375 if (!save_enabled) 375 if (!save_enabled)
376 return false; 376 return false;
377 377
378 memcpy(save_enabled, enabled, fb_helper->connector_count); 378 memcpy(save_enabled, enabled, count);
379 mask = (1 << fb_helper->connector_count) - 1; 379 mask = BIT(count) - 1;
380 conn_configured = 0;
380retry: 381retry:
381 for (i = 0; i < fb_helper->connector_count; i++) { 382 for (i = 0; i < count; i++) {
382 struct drm_fb_helper_connector *fb_conn; 383 struct drm_fb_helper_connector *fb_conn;
383 struct drm_connector *connector; 384 struct drm_connector *connector;
384 struct drm_encoder *encoder; 385 struct drm_encoder *encoder;
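
The retyping above is the substance of this hunk: the old mask math shifted a plain int ("1 << connector_count"), which is undefined behaviour from 31 connectors up, and storing the result in a uint64_t did not widen the shift itself. Switching to unsigned long with BIT() and clamping the iteration count to BITS_PER_LONG keeps every shift in range. A stripped-down illustration, with an extra ~0UL branch for the count == BITS_PER_LONG edge that BIT(count) - 1 alone would still over-shift:

	#include <limits.h>

	#define EX_BITS_PER_LONG	(CHAR_BIT * sizeof(long))
	#define EX_BIT(nr)		(1UL << (nr))

	static unsigned long config_mask(unsigned int connector_count)
	{
		unsigned int count = connector_count < EX_BITS_PER_LONG ?
				     connector_count : EX_BITS_PER_LONG;

		return count == EX_BITS_PER_LONG ? ~0UL : EX_BIT(count) - 1;
	}
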
@@ -388,7 +389,7 @@ retry:
388 fb_conn = fb_helper->connector_info[i]; 389 fb_conn = fb_helper->connector_info[i];
389 connector = fb_conn->connector; 390 connector = fb_conn->connector;
390 391
391 if (conn_configured & (1 << i)) 392 if (conn_configured & BIT(i))
392 continue; 393 continue;
393 394
394 if (pass == 0 && !connector->has_tile) 395 if (pass == 0 && !connector->has_tile)
@@ -400,7 +401,7 @@ retry:
400 if (!enabled[i]) { 401 if (!enabled[i]) {
401 DRM_DEBUG_KMS("connector %s not enabled, skipping\n", 402 DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
402 connector->name); 403 connector->name);
403 conn_configured |= (1 << i); 404 conn_configured |= BIT(i);
404 continue; 405 continue;
405 } 406 }
406 407
@@ -419,7 +420,7 @@ retry:
419 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", 420 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
420 connector->name); 421 connector->name);
421 enabled[i] = false; 422 enabled[i] = false;
422 conn_configured |= (1 << i); 423 conn_configured |= BIT(i);
423 continue; 424 continue;
424 } 425 }
425 426
@@ -432,14 +433,15 @@ retry:
432 intel_crtc->lut_b[j] = j; 433 intel_crtc->lut_b[j] = j;
433 } 434 }
434 435
435 new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc); 436 new_crtc = intel_fb_helper_crtc(fb_helper,
437 connector->state->crtc);
436 438
437 /* 439 /*
438 * Make sure we're not trying to drive multiple connectors 440 * Make sure we're not trying to drive multiple connectors
439 * with a single CRTC, since our cloning support may not 441 * with a single CRTC, since our cloning support may not
440 * match the BIOS. 442 * match the BIOS.
441 */ 443 */
442 for (j = 0; j < fb_helper->connector_count; j++) { 444 for (j = 0; j < count; j++) {
443 if (crtcs[j] == new_crtc) { 445 if (crtcs[j] == new_crtc) {
444 DRM_DEBUG_KMS("fallback: cloned configuration\n"); 446 DRM_DEBUG_KMS("fallback: cloned configuration\n");
445 goto bail; 447 goto bail;
@@ -498,7 +500,7 @@ retry:
498 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 500 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
499 501
500 fallback = false; 502 fallback = false;
501 conn_configured |= (1 << i); 503 conn_configured |= BIT(i);
502 } 504 }
503 505
504 if ((conn_configured & mask) != mask) { 506 if ((conn_configured & mask) != mask) {
@@ -522,7 +524,7 @@ retry:
522 if (fallback) { 524 if (fallback) {
523bail: 525bail:
524 DRM_DEBUG_KMS("Not using firmware configuration\n"); 526 DRM_DEBUG_KMS("Not using firmware configuration\n");
525 memcpy(enabled, save_enabled, fb_helper->connector_count); 527 memcpy(enabled, save_enabled, count);
526 kfree(save_enabled); 528 kfree(save_enabled);
527 return false; 529 return false;
528 } 530 }
@@ -538,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
538 .fb_probe = intelfb_create, 540 .fb_probe = intelfb_create,
539}; 541};
540 542
541static void intel_fbdev_destroy(struct drm_device *dev, 543static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
542 struct intel_fbdev *ifbdev)
543{ 544{
544 /* We rely on the object-free to release the VMA pinning for 545 /* We rely on the object-free to release the VMA pinning for
545 * the info->screen_base mmaping. Leaking the VMA is simpler than 546 * the info->screen_base mmaping. Leaking the VMA is simpler than
@@ -552,12 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev,
552 drm_fb_helper_fini(&ifbdev->helper); 553 drm_fb_helper_fini(&ifbdev->helper);
553 554
554 if (ifbdev->fb) { 555 if (ifbdev->fb) {
555 mutex_lock(&dev->struct_mutex); 556 mutex_lock(&ifbdev->helper.dev->struct_mutex);
556 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); 557 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
557 mutex_unlock(&dev->struct_mutex); 558 mutex_unlock(&ifbdev->helper.dev->struct_mutex);
558 559
559 drm_framebuffer_remove(&ifbdev->fb->base); 560 drm_framebuffer_remove(&ifbdev->fb->base);
560 } 561 }
562
563 kfree(ifbdev);
561} 564}
562 565
563/* 566/*
@@ -690,9 +693,9 @@ out:
690 693
691static void intel_fbdev_suspend_worker(struct work_struct *work) 694static void intel_fbdev_suspend_worker(struct work_struct *work)
692{ 695{
693 intel_fbdev_set_suspend(container_of(work, 696 intel_fbdev_set_suspend(&container_of(work,
694 struct drm_i915_private, 697 struct drm_i915_private,
695 fbdev_suspend_work)->dev, 698 fbdev_suspend_work)->drm,
696 FBINFO_STATE_RUNNING, 699 FBINFO_STATE_RUNNING,
697 true); 700 true);
698} 701}
@@ -700,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
700int intel_fbdev_init(struct drm_device *dev) 703int intel_fbdev_init(struct drm_device *dev)
701{ 704{
702 struct intel_fbdev *ifbdev; 705 struct intel_fbdev *ifbdev;
703 struct drm_i915_private *dev_priv = dev->dev_private; 706 struct drm_i915_private *dev_priv = to_i915(dev);
704 int ret; 707 int ret;
705 708
706 if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) 709 if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
@@ -732,38 +735,50 @@ int intel_fbdev_init(struct drm_device *dev)
732 735
733static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) 736static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
734{ 737{
735 struct drm_i915_private *dev_priv = data; 738 struct intel_fbdev *ifbdev = data;
736 struct intel_fbdev *ifbdev = dev_priv->fbdev;
737 739
738 /* Due to peculiar init order wrt hpd handling this is separate. */ 740 /* Due to peculiar init order wrt hpd handling this is separate. */
739 if (drm_fb_helper_initial_config(&ifbdev->helper, 741 if (drm_fb_helper_initial_config(&ifbdev->helper,
740 ifbdev->preferred_bpp)) 742 ifbdev->preferred_bpp))
741 intel_fbdev_fini(dev_priv->dev); 743 intel_fbdev_fini(ifbdev->helper.dev);
742} 744}
743 745
744void intel_fbdev_initial_config_async(struct drm_device *dev) 746void intel_fbdev_initial_config_async(struct drm_device *dev)
745{ 747{
746 async_schedule(intel_fbdev_initial_config, to_i915(dev)); 748 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
749
750 ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
751}
752
753static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
754{
755 if (!ifbdev->cookie)
756 return;
757
758 /* Only serialises with all preceding async calls, hence +1 */
759 async_synchronize_cookie(ifbdev->cookie + 1);
760 ifbdev->cookie = 0;
747} 761}
748 762
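
The "+1" in intel_fbdev_sync() follows from the linux/async.h contract: async_synchronize_cookie(c) waits only for work scheduled *before* cookie c, so waiting for the entry whose async_schedule() returned c means passing c + 1. Storing the cookie also lets fini/restore serialise against just this one job instead of async_synchronize_full(). A minimal kernel-side sketch of the same dance, assuming those semantics:

	#include <linux/async.h>

	struct example_dev {
		async_cookie_t cookie;
	};

	static void example_work(void *data, async_cookie_t cookie)
	{
		/* deferred initial config would run here */
	}

	static void example_schedule(struct example_dev *d)
	{
		d->cookie = async_schedule(example_work, d);
	}

	static void example_sync(struct example_dev *d)
	{
		if (!d->cookie)
			return;
		/* +1: include the entry that returned this cookie */
		async_synchronize_cookie(d->cookie + 1);
		d->cookie = 0;
	}
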
749void intel_fbdev_fini(struct drm_device *dev) 763void intel_fbdev_fini(struct drm_device *dev)
750{ 764{
751 struct drm_i915_private *dev_priv = dev->dev_private; 765 struct drm_i915_private *dev_priv = to_i915(dev);
752 if (!dev_priv->fbdev) 766 struct intel_fbdev *ifbdev = dev_priv->fbdev;
767
768 if (!ifbdev)
753 return; 769 return;
754 770
755 flush_work(&dev_priv->fbdev_suspend_work); 771 flush_work(&dev_priv->fbdev_suspend_work);
756
757 if (!current_is_async()) 772 if (!current_is_async())
758 async_synchronize_full(); 773 intel_fbdev_sync(ifbdev);
759 intel_fbdev_destroy(dev, dev_priv->fbdev); 774
760 kfree(dev_priv->fbdev); 775 intel_fbdev_destroy(ifbdev);
761 dev_priv->fbdev = NULL; 776 dev_priv->fbdev = NULL;
762} 777}
763 778
764void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) 779void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
765{ 780{
766 struct drm_i915_private *dev_priv = dev->dev_private; 781 struct drm_i915_private *dev_priv = to_i915(dev);
767 struct intel_fbdev *ifbdev = dev_priv->fbdev; 782 struct intel_fbdev *ifbdev = dev_priv->fbdev;
768 struct fb_info *info; 783 struct fb_info *info;
769 784
@@ -812,7 +827,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
812 827
813void intel_fbdev_output_poll_changed(struct drm_device *dev) 828void intel_fbdev_output_poll_changed(struct drm_device *dev)
814{ 829{
815 struct drm_i915_private *dev_priv = dev->dev_private; 830 struct drm_i915_private *dev_priv = to_i915(dev);
816 if (dev_priv->fbdev) 831 if (dev_priv->fbdev)
817 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 832 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
818} 833}
@@ -820,13 +835,15 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
820void intel_fbdev_restore_mode(struct drm_device *dev) 835void intel_fbdev_restore_mode(struct drm_device *dev)
821{ 836{
822 int ret; 837 int ret;
823 struct drm_i915_private *dev_priv = dev->dev_private; 838 struct drm_i915_private *dev_priv = to_i915(dev);
824 struct intel_fbdev *ifbdev = dev_priv->fbdev; 839 struct intel_fbdev *ifbdev = dev_priv->fbdev;
825 struct drm_fb_helper *fb_helper; 840 struct drm_fb_helper *fb_helper;
826 841
827 if (!ifbdev) 842 if (!ifbdev)
828 return; 843 return;
829 844
845 intel_fbdev_sync(ifbdev);
846
830 fb_helper = &ifbdev->helper; 847 fb_helper = &ifbdev->helper;
831 848
832 ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); 849 ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 9be839a242f9..2aa744081f09 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -50,7 +50,7 @@
50 50
51static bool ivb_can_enable_err_int(struct drm_device *dev) 51static bool ivb_can_enable_err_int(struct drm_device *dev)
52{ 52{
53 struct drm_i915_private *dev_priv = dev->dev_private; 53 struct drm_i915_private *dev_priv = to_i915(dev);
54 struct intel_crtc *crtc; 54 struct intel_crtc *crtc;
55 enum pipe pipe; 55 enum pipe pipe;
56 56
@@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
68 68
69static bool cpt_can_enable_serr_int(struct drm_device *dev) 69static bool cpt_can_enable_serr_int(struct drm_device *dev)
70{ 70{
71 struct drm_i915_private *dev_priv = dev->dev_private; 71 struct drm_i915_private *dev_priv = to_i915(dev);
72 enum pipe pipe; 72 enum pipe pipe;
73 struct intel_crtc *crtc; 73 struct intel_crtc *crtc;
74 74
@@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
105 enum pipe pipe, 105 enum pipe pipe,
106 bool enable, bool old) 106 bool enable, bool old)
107{ 107{
108 struct drm_i915_private *dev_priv = dev->dev_private; 108 struct drm_i915_private *dev_priv = to_i915(dev);
109 i915_reg_t reg = PIPESTAT(pipe); 109 i915_reg_t reg = PIPESTAT(pipe);
110 u32 pipestat = I915_READ(reg) & 0xffff0000; 110 u32 pipestat = I915_READ(reg) & 0xffff0000;
111 111
@@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
123static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 123static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
124 enum pipe pipe, bool enable) 124 enum pipe pipe, bool enable)
125{ 125{
126 struct drm_i915_private *dev_priv = dev->dev_private; 126 struct drm_i915_private *dev_priv = to_i915(dev);
127 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 127 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
128 DE_PIPEB_FIFO_UNDERRUN; 128 DE_PIPEB_FIFO_UNDERRUN;
129 129
@@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
154 enum pipe pipe, 154 enum pipe pipe,
155 bool enable, bool old) 155 bool enable, bool old)
156{ 156{
157 struct drm_i915_private *dev_priv = dev->dev_private; 157 struct drm_i915_private *dev_priv = to_i915(dev);
158 if (enable) { 158 if (enable) {
159 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 159 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
160 160
@@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
176static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 176static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
177 enum pipe pipe, bool enable) 177 enum pipe pipe, bool enable)
178{ 178{
179 struct drm_i915_private *dev_priv = dev->dev_private; 179 struct drm_i915_private *dev_priv = to_i915(dev);
180 180
181 if (enable) 181 if (enable)
182 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); 182 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
188 enum transcoder pch_transcoder, 188 enum transcoder pch_transcoder,
189 bool enable) 189 bool enable)
190{ 190{
191 struct drm_i915_private *dev_priv = dev->dev_private; 191 struct drm_i915_private *dev_priv = to_i915(dev);
192 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 192 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
193 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; 193 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
194 194
@@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
220 enum transcoder pch_transcoder, 220 enum transcoder pch_transcoder,
221 bool enable, bool old) 221 bool enable, bool old)
222{ 222{
223 struct drm_i915_private *dev_priv = dev->dev_private; 223 struct drm_i915_private *dev_priv = to_i915(dev);
224 224
225 if (enable) { 225 if (enable) {
226 I915_WRITE(SERR_INT, 226 I915_WRITE(SERR_INT,
@@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
244static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 244static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
245 enum pipe pipe, bool enable) 245 enum pipe pipe, bool enable)
246{ 246{
247 struct drm_i915_private *dev_priv = dev->dev_private; 247 struct drm_i915_private *dev_priv = to_i915(dev);
248 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 248 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
249 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 249 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
250 bool old; 250 bool old;
@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
289 bool ret; 289 bool ret;
290 290
291 spin_lock_irqsave(&dev_priv->irq_lock, flags); 291 spin_lock_irqsave(&dev_priv->irq_lock, flags);
292 ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, 292 ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
293 enable); 293 enable);
294 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 294 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
295 295
@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
334 intel_crtc->pch_fifo_underrun_disabled = !enable; 334 intel_crtc->pch_fifo_underrun_disabled = !enable;
335 335
336 if (HAS_PCH_IBX(dev_priv)) 336 if (HAS_PCH_IBX(dev_priv))
337 ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 337 ibx_set_fifo_underrun_reporting(&dev_priv->drm,
338 pch_transcoder,
338 enable); 339 enable);
339 else 340 else
340 cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 341 cpt_set_fifo_underrun_reporting(&dev_priv->drm,
342 pch_transcoder,
341 enable, old); 343 enable, old);
342 344
343 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 345 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
405 407
406 spin_lock_irq(&dev_priv->irq_lock); 408 spin_lock_irq(&dev_priv->irq_lock);
407 409
408 for_each_intel_crtc(dev_priv->dev, crtc) { 410 for_each_intel_crtc(&dev_priv->drm, crtc) {
409 if (crtc->cpu_fifo_underrun_disabled) 411 if (crtc->cpu_fifo_underrun_disabled)
410 continue; 412 continue;
411 413
@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
432 434
433 spin_lock_irq(&dev_priv->irq_lock); 435 spin_lock_irq(&dev_priv->irq_lock);
434 436
435 for_each_intel_crtc(dev_priv->dev, crtc) { 437 for_each_intel_crtc(&dev_priv->drm, crtc) {
436 if (crtc->pch_fifo_underrun_disabled) 438 if (crtc->pch_fifo_underrun_disabled)
437 continue; 439 continue;
438 440
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 4df80cc9a291..3e3e743740c0 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -26,6 +26,7 @@
26 26
27#include "intel_guc_fwif.h" 27#include "intel_guc_fwif.h"
28#include "i915_guc_reg.h" 28#include "i915_guc_reg.h"
29#include "intel_ringbuffer.h"
29 30
30struct drm_i915_gem_request; 31struct drm_i915_gem_request;
31 32
@@ -86,7 +87,7 @@ struct i915_guc_client {
86 int retcode; 87 int retcode;
87 88
88 /* Per-engine counts of GuC submissions */ 89 /* Per-engine counts of GuC submissions */
89 uint64_t submissions[GUC_MAX_ENGINES_NUM]; 90 uint64_t submissions[I915_NUM_ENGINES];
90}; 91};
91 92
92enum intel_guc_fw_status { 93enum intel_guc_fw_status {
@@ -143,8 +144,8 @@ struct intel_guc {
143 uint32_t action_fail; /* Total number of failures */ 144 uint32_t action_fail; /* Total number of failures */
144 int32_t action_err; /* Last error code */ 145 int32_t action_err; /* Last error code */
145 146
146 uint64_t submissions[GUC_MAX_ENGINES_NUM]; 147 uint64_t submissions[I915_NUM_ENGINES];
147 uint32_t last_seqno[GUC_MAX_ENGINES_NUM]; 148 uint32_t last_seqno[I915_NUM_ENGINES];
148}; 149};
149 150
150/* intel_guc_loader.c */ 151/* intel_guc_loader.c */
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 8fe96a2d989e..605c69658d2c 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -65,6 +65,9 @@ MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
65#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin" 65#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
66MODULE_FIRMWARE(I915_BXT_GUC_UCODE); 66MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
67 67
68#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
69MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
70
68/* User-friendly representation of an enum */ 71/* User-friendly representation of an enum */
69const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) 72const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
70{ 73{
@@ -87,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
87 struct intel_engine_cs *engine; 90 struct intel_engine_cs *engine;
88 int irqs; 91 int irqs;
89 92
90 /* tell all command streamers NOT to forward interrupts and vblank to GuC */ 93 /* tell all command streamers NOT to forward interrupts or vblank to GuC */
91 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); 94 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
92 irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); 95 irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
93 for_each_engine(engine, dev_priv) 96 for_each_engine(engine, dev_priv)
@@ -105,9 +108,8 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
105 int irqs; 108 int irqs;
106 u32 tmp; 109 u32 tmp;
107 110
108 /* tell all command streamers to forward interrupts and vblank to GuC */ 111 /* tell all command streamers to forward interrupts (but not vblank) to GuC */
109 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); 112 irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
110 irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
111 for_each_engine(engine, dev_priv) 113 for_each_engine(engine, dev_priv)
112 I915_WRITE(RING_MODE_GEN7(engine), irqs); 114 I915_WRITE(RING_MODE_GEN7(engine), irqs);
113 115
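
The interrupt-steering writes use the masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits take effect, and bits whose mask half is 0 are left untouched by the hardware. That is why simply dropping the _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, ...) term turns the write into "leave vblank forwarding alone" rather than "forward vblanks always", matching the updated comment. Simplified forms of the macros (the real i915_reg.h versions add type checking):

	/* simplified sketch; e.g. EX_MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING)
	 * sets that bit and enables only its mask bit in the upper half */
	#define EX_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define EX_MASKED_BIT_ENABLE(a)		EX_MASKED_FIELD((a), (a))
	#define EX_MASKED_BIT_DISABLE(a)	EX_MASKED_FIELD((a), 0)
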
@@ -312,7 +314,7 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
312static int guc_ucode_xfer(struct drm_i915_private *dev_priv) 314static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
313{ 315{
314 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 316 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
315 struct drm_device *dev = dev_priv->dev; 317 struct drm_device *dev = &dev_priv->drm;
316 int ret; 318 int ret;
317 319
318 ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); 320 ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -411,7 +413,7 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
411 */ 413 */
412int intel_guc_setup(struct drm_device *dev) 414int intel_guc_setup(struct drm_device *dev)
413{ 415{
414 struct drm_i915_private *dev_priv = dev->dev_private; 416 struct drm_i915_private *dev_priv = to_i915(dev);
415 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 417 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
416 const char *fw_path = guc_fw->guc_fw_path; 418 const char *fw_path = guc_fw->guc_fw_path;
417 int retries, ret, err; 419 int retries, ret, err;
@@ -606,7 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
606 608
607 /* Header and uCode will be loaded to WOPCM. Size of the two. */ 609 /* Header and uCode will be loaded to WOPCM. Size of the two. */
608 size = guc_fw->header_size + guc_fw->ucode_size; 610 size = guc_fw->header_size + guc_fw->ucode_size;
609 if (size > guc_wopcm_size(dev->dev_private)) { 611 if (size > guc_wopcm_size(to_i915(dev))) {
610 DRM_ERROR("Firmware is too large to fit in WOPCM\n"); 612 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
611 goto fail; 613 goto fail;
612 } 614 }
@@ -679,7 +681,7 @@ fail:
679 */ 681 */
680void intel_guc_init(struct drm_device *dev) 682void intel_guc_init(struct drm_device *dev)
681{ 683{
682 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct drm_i915_private *dev_priv = to_i915(dev);
683 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 685 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
684 const char *fw_path; 686 const char *fw_path;
685 687
@@ -699,6 +701,10 @@ void intel_guc_init(struct drm_device *dev)
699 fw_path = I915_BXT_GUC_UCODE; 701 fw_path = I915_BXT_GUC_UCODE;
700 guc_fw->guc_fw_major_wanted = 8; 702 guc_fw->guc_fw_major_wanted = 8;
701 guc_fw->guc_fw_minor_wanted = 7; 703 guc_fw->guc_fw_minor_wanted = 7;
704 } else if (IS_KABYLAKE(dev)) {
705 fw_path = I915_KBL_GUC_UCODE;
706 guc_fw->guc_fw_major_wanted = 9;
707 guc_fw->guc_fw_minor_wanted = 14;
702 } else { 708 } else {
703 fw_path = ""; /* unknown device */ 709 fw_path = ""; /* unknown device */
704 } 710 }
@@ -728,7 +734,7 @@ void intel_guc_init(struct drm_device *dev)
728 */ 734 */
729void intel_guc_fini(struct drm_device *dev) 735void intel_guc_fini(struct drm_device *dev)
730{ 736{
731 struct drm_i915_private *dev_priv = dev->dev_private; 737 struct drm_i915_private *dev_priv = to_i915(dev);
732 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 738 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
733 739
734 mutex_lock(&dev->struct_mutex); 740 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 9fa458ce40a6..434f4d5c553d 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -63,7 +63,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
63 63
64 if (!is_supported_device(dev_priv)) { 64 if (!is_supported_device(dev_priv)) {
65 DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); 65 DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
66 return 0; 66 goto bail;
67 } 67 }
68 68
69 /* 69 /*
@@ -72,16 +72,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
72 ret = intel_gvt_init_host(); 72 ret = intel_gvt_init_host();
73 if (ret) { 73 if (ret) {
74 DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n"); 74 DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
75 return 0; 75 goto bail;
76 } 76 }
77 77
78 ret = intel_gvt_init_device(dev_priv); 78 ret = intel_gvt_init_device(dev_priv);
79 if (ret) { 79 if (ret) {
80 DRM_DEBUG_DRIVER("Fail to init GVT device\n"); 80 DRM_DEBUG_DRIVER("Fail to init GVT device\n");
81 return 0; 81 goto bail;
82 } 82 }
83 83
84 return 0; 84 return 0;
85
86bail:
87 i915.enable_gvt = 0;
88 return 0;
85} 89}
86 90
87/** 91/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index fb21626ada64..4df9f384910c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -47,7 +47,7 @@ static void
47assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) 47assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
48{ 48{
49 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); 49 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
50 struct drm_i915_private *dev_priv = dev->dev_private; 50 struct drm_i915_private *dev_priv = to_i915(dev);
51 uint32_t enabled_bits; 51 uint32_t enabled_bits;
52 52
53 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 53 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
138{ 138{
139 const uint32_t *data = frame; 139 const uint32_t *data = frame;
140 struct drm_device *dev = encoder->dev; 140 struct drm_device *dev = encoder->dev;
141 struct drm_i915_private *dev_priv = dev->dev_private; 141 struct drm_i915_private *dev_priv = to_i915(dev);
142 u32 val = I915_READ(VIDEO_DIP_CTL); 142 u32 val = I915_READ(VIDEO_DIP_CTL);
143 int i; 143 int i;
144 144
@@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
192{ 192{
193 const uint32_t *data = frame; 193 const uint32_t *data = frame;
194 struct drm_device *dev = encoder->dev; 194 struct drm_device *dev = encoder->dev;
195 struct drm_i915_private *dev_priv = dev->dev_private; 195 struct drm_i915_private *dev_priv = to_i915(dev);
196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
197 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 197 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
198 u32 val = I915_READ(reg); 198 u32 val = I915_READ(reg);
@@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
251{ 251{
252 const uint32_t *data = frame; 252 const uint32_t *data = frame;
253 struct drm_device *dev = encoder->dev; 253 struct drm_device *dev = encoder->dev;
254 struct drm_i915_private *dev_priv = dev->dev_private; 254 struct drm_i915_private *dev_priv = to_i915(dev);
255 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 255 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
256 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 256 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
257 u32 val = I915_READ(reg); 257 u32 val = I915_READ(reg);
@@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
308{ 308{
309 const uint32_t *data = frame; 309 const uint32_t *data = frame;
310 struct drm_device *dev = encoder->dev; 310 struct drm_device *dev = encoder->dev;
311 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = to_i915(dev);
312 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 312 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
313 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 313 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
314 u32 val = I915_READ(reg); 314 u32 val = I915_READ(reg);
@@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
366{ 366{
367 const uint32_t *data = frame; 367 const uint32_t *data = frame;
368 struct drm_device *dev = encoder->dev; 368 struct drm_device *dev = encoder->dev;
369 struct drm_i915_private *dev_priv = dev->dev_private; 369 struct drm_i915_private *dev_priv = to_i915(dev);
370 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 370 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
371 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 371 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
372 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 372 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
508 bool enable, 508 bool enable,
509 const struct drm_display_mode *adjusted_mode) 509 const struct drm_display_mode *adjusted_mode)
510{ 510{
511 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 511 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
512 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 512 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
513 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 513 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
514 i915_reg_t reg = VIDEO_DIP_CTL; 514 i915_reg_t reg = VIDEO_DIP_CTL;
@@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp,
629 629
630static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) 630static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
631{ 631{
632 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 632 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
633 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 633 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
634 i915_reg_t reg; 634 i915_reg_t reg;
635 u32 val = 0; 635 u32 val = 0;
@@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
661 bool enable, 661 bool enable,
662 const struct drm_display_mode *adjusted_mode) 662 const struct drm_display_mode *adjusted_mode)
663{ 663{
664 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 664 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
665 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 665 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
666 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 666 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
667 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 667 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
713 bool enable, 713 bool enable,
714 const struct drm_display_mode *adjusted_mode) 714 const struct drm_display_mode *adjusted_mode)
715{ 715{
716 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 716 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
717 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 717 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
718 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 718 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
719 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 719 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
755 bool enable, 755 bool enable,
756 const struct drm_display_mode *adjusted_mode) 756 const struct drm_display_mode *adjusted_mode)
757{ 757{
758 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 758 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
759 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 759 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
760 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 760 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
761 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 761 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
807 bool enable, 807 bool enable,
808 const struct drm_display_mode *adjusted_mode) 808 const struct drm_display_mode *adjusted_mode)
809{ 809{
810 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 810 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
811 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 811 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
812 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 812 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
813 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); 813 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
@@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
855static void intel_hdmi_prepare(struct intel_encoder *encoder) 855static void intel_hdmi_prepare(struct intel_encoder *encoder)
856{ 856{
857 struct drm_device *dev = encoder->base.dev; 857 struct drm_device *dev = encoder->base.dev;
858 struct drm_i915_private *dev_priv = dev->dev_private; 858 struct drm_i915_private *dev_priv = to_i915(dev);
859 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 859 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
860 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 860 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
861 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 861 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
894 enum pipe *pipe) 894 enum pipe *pipe)
895{ 895{
896 struct drm_device *dev = encoder->base.dev; 896 struct drm_device *dev = encoder->base.dev;
897 struct drm_i915_private *dev_priv = dev->dev_private; 897 struct drm_i915_private *dev_priv = to_i915(dev);
898 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 898 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
899 enum intel_display_power_domain power_domain; 899 enum intel_display_power_domain power_domain;
900 u32 tmp; 900 u32 tmp;
@@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
931{ 931{
932 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 932 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
933 struct drm_device *dev = encoder->base.dev; 933 struct drm_device *dev = encoder->base.dev;
934 struct drm_i915_private *dev_priv = dev->dev_private; 934 struct drm_i915_private *dev_priv = to_i915(dev);
935 u32 tmp, flags = 0; 935 u32 tmp, flags = 0;
936 int dotclock; 936 int dotclock;
937 937
@@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
988static void g4x_enable_hdmi(struct intel_encoder *encoder) 988static void g4x_enable_hdmi(struct intel_encoder *encoder)
989{ 989{
990 struct drm_device *dev = encoder->base.dev; 990 struct drm_device *dev = encoder->base.dev;
991 struct drm_i915_private *dev_priv = dev->dev_private; 991 struct drm_i915_private *dev_priv = to_i915(dev);
992 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 992 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
993 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 993 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
994 u32 temp; 994 u32 temp;
@@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
1009static void ibx_enable_hdmi(struct intel_encoder *encoder) 1009static void ibx_enable_hdmi(struct intel_encoder *encoder)
1010{ 1010{
1011 struct drm_device *dev = encoder->base.dev; 1011 struct drm_device *dev = encoder->base.dev;
1012 struct drm_i915_private *dev_priv = dev->dev_private; 1012 struct drm_i915_private *dev_priv = to_i915(dev);
1013 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1013 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1014 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1014 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1015 u32 temp; 1015 u32 temp;
@@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
1058static void cpt_enable_hdmi(struct intel_encoder *encoder) 1058static void cpt_enable_hdmi(struct intel_encoder *encoder)
1059{ 1059{
1060 struct drm_device *dev = encoder->base.dev; 1060 struct drm_device *dev = encoder->base.dev;
1061 struct drm_i915_private *dev_priv = dev->dev_private; 1061 struct drm_i915_private *dev_priv = to_i915(dev);
1062 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1062 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1063 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1063 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1064 enum pipe pipe = crtc->pipe; 1064 enum pipe pipe = crtc->pipe;
@@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder)
1115static void intel_disable_hdmi(struct intel_encoder *encoder) 1115static void intel_disable_hdmi(struct intel_encoder *encoder)
1116{ 1116{
1117 struct drm_device *dev = encoder->base.dev; 1117 struct drm_device *dev = encoder->base.dev;
1118 struct drm_i915_private *dev_priv = dev->dev_private; 1118 struct drm_i915_private *dev_priv = to_i915(dev);
1119 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1119 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1120 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1120 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1121 u32 temp; 1121 u32 temp;
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1154 I915_WRITE(intel_hdmi->hdmi_reg, temp); 1154 I915_WRITE(intel_hdmi->hdmi_reg, temp);
1155 POSTING_READ(intel_hdmi->hdmi_reg); 1155 POSTING_READ(intel_hdmi->hdmi_reg);
1156 1156
1157 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); 1157 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
1158 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1158 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1159 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1159 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1160 } 1160 }
@@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1273static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) 1273static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
1274{ 1274{
1275 struct drm_device *dev = crtc_state->base.crtc->dev; 1275 struct drm_device *dev = crtc_state->base.crtc->dev;
1276 struct drm_atomic_state *state;
1277 struct intel_encoder *encoder;
1278 struct drm_connector *connector;
1279 struct drm_connector_state *connector_state;
1280 int count = 0, count_hdmi = 0;
1281 int i;
1282 1276
1283 if (HAS_GMCH_DISPLAY(dev)) 1277 if (HAS_GMCH_DISPLAY(dev))
1284 return false; 1278 return false;
1285 1279
1286 state = crtc_state->base.state;
1287
1288 for_each_connector_in_state(state, connector, connector_state, i) {
1289 if (connector_state->crtc != crtc_state->base.crtc)
1290 continue;
1291
1292 encoder = to_intel_encoder(connector_state->best_encoder);
1293
1294 count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
1295 count++;
1296 }
1297
1298 /* 1280 /*
1299 * HDMI 12bpc affects the clocks, so it's only possible 1281 * HDMI 12bpc affects the clocks, so it's only possible
1300 * when not cloning with other encoder types. 1282 * when not cloning with other encoder types.
1301 */ 1283 */
1302 return count_hdmi > 0 && count_hdmi == count; 1284 return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
1303} 1285}
1304 1286
1305bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1287bool intel_hdmi_compute_config(struct intel_encoder *encoder,
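
hdmi_12bpc_possible() above shrinks from a connector-state walk to one comparison because the CRTC state now carries a bitmask of attached output types: 12bpc is allowed only when that mask is exactly the HDMI bit, i.e. no cloning with other encoder types. A small model of the test; the enum values below are illustrative, not the driver's actual INTEL_OUTPUT_* numbering.

	#include <assert.h>
	#include <stdbool.h>

	enum output { OUT_DP, OUT_HDMI, OUT_LVDS };	/* illustrative IDs */

	/* 12bpc is only possible when the CRTC drives HDMI and nothing else:
	 * the cached output-type mask must be exactly the HDMI bit. */
	static bool hdmi_12bpc_possible(unsigned int output_types)
	{
		return output_types == 1u << OUT_HDMI;
	}

	int main(void)
	{
		assert(hdmi_12bpc_possible(1u << OUT_HDMI));	/* HDMI alone: ok */
		assert(!hdmi_12bpc_possible((1u << OUT_HDMI) | (1u << OUT_DP))); /* cloned */
		assert(!hdmi_12bpc_possible(0));		/* no outputs */
		return 0;
	}
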
@@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
1575 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1557 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1576 struct intel_digital_port *intel_dig_port = 1558 struct intel_digital_port *intel_dig_port =
1577 hdmi_to_dig_port(intel_hdmi); 1559 hdmi_to_dig_port(intel_hdmi);
1578 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1560 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1579 int ret; 1561 int ret;
1580 1562
1581 ret = drm_object_property_set_value(&connector->base, property, val); 1563 ret = drm_object_property_set_value(&connector->base, property, val);
@@ -1674,7 +1656,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1674 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1656 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1675 struct intel_hdmi *intel_hdmi = &dport->hdmi; 1657 struct intel_hdmi *intel_hdmi = &dport->hdmi;
1676 struct drm_device *dev = encoder->base.dev; 1658 struct drm_device *dev = encoder->base.dev;
1677 struct drm_i915_private *dev_priv = dev->dev_private; 1659 struct drm_i915_private *dev_priv = to_i915(dev);
1678 struct intel_crtc *intel_crtc = 1660 struct intel_crtc *intel_crtc =
1679 to_intel_crtc(encoder->base.crtc); 1661 to_intel_crtc(encoder->base.crtc);
1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1662 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1722,7 +1704,7 @@ static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1722static void chv_hdmi_post_disable(struct intel_encoder *encoder) 1704static void chv_hdmi_post_disable(struct intel_encoder *encoder)
1723{ 1705{
1724 struct drm_device *dev = encoder->base.dev; 1706 struct drm_device *dev = encoder->base.dev;
1725 struct drm_i915_private *dev_priv = dev->dev_private; 1707 struct drm_i915_private *dev_priv = to_i915(dev);
1726 1708
1727 mutex_lock(&dev_priv->sb_lock); 1709 mutex_lock(&dev_priv->sb_lock);
1728 1710
@@ -1737,7 +1719,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1737 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1719 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1738 struct intel_hdmi *intel_hdmi = &dport->hdmi; 1720 struct intel_hdmi *intel_hdmi = &dport->hdmi;
1739 struct drm_device *dev = encoder->base.dev; 1721 struct drm_device *dev = encoder->base.dev;
1740 struct drm_i915_private *dev_priv = dev->dev_private; 1722 struct drm_i915_private *dev_priv = to_i915(dev);
1741 struct intel_crtc *intel_crtc = 1723 struct intel_crtc *intel_crtc =
1742 to_intel_crtc(encoder->base.crtc); 1724 to_intel_crtc(encoder->base.crtc);
1743 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1725 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1774,6 +1756,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
1774 .fill_modes = drm_helper_probe_single_connector_modes, 1756 .fill_modes = drm_helper_probe_single_connector_modes,
1775 .set_property = intel_hdmi_set_property, 1757 .set_property = intel_hdmi_set_property,
1776 .atomic_get_property = intel_connector_atomic_get_property, 1758 .atomic_get_property = intel_connector_atomic_get_property,
1759 .late_register = intel_connector_register,
1777 .early_unregister = intel_connector_unregister, 1760 .early_unregister = intel_connector_unregister,
1778 .destroy = intel_hdmi_destroy, 1761 .destroy = intel_hdmi_destroy,
1779 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1762 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -1806,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1806 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 1789 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
1807 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1790 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1808 struct drm_device *dev = intel_encoder->base.dev; 1791 struct drm_device *dev = intel_encoder->base.dev;
1809 struct drm_i915_private *dev_priv = dev->dev_private; 1792 struct drm_i915_private *dev_priv = to_i915(dev);
1810 enum port port = intel_dig_port->port; 1793 enum port port = intel_dig_port->port;
1811 uint8_t alternate_ddc_pin; 1794 uint8_t alternate_ddc_pin;
1812 1795
@@ -1914,7 +1897,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1914 intel_hdmi_add_properties(intel_hdmi, connector); 1897 intel_hdmi_add_properties(intel_hdmi, connector);
1915 1898
1916 intel_connector_attach_encoder(intel_connector, intel_encoder); 1899 intel_connector_attach_encoder(intel_connector, intel_encoder);
1917 drm_connector_register(connector);
1918 intel_hdmi->attached_connector = intel_connector; 1900 intel_hdmi->attached_connector = intel_connector;
1919 1901
1920 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 1902 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
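
Two hunks above belong together: the connector funcs gain a .late_register hook while the open-coded drm_connector_register() call in intel_hdmi_init_connector() is dropped, so userspace-visible registration happens when the whole device is registered rather than piecemeal during init. A toy model of such paired hooks; the struct and names are stand-ins for the DRM callbacks, not the DRM API itself.

	#include <stdio.h>

	struct connector_funcs {
		void (*late_register)(void);	/* after the device is registered */
		void (*early_unregister)(void);	/* before the device is torn down */
	};

	static void my_register(void)   { puts("connector: register with userspace"); }
	static void my_unregister(void) { puts("connector: unregister"); }

	static const struct connector_funcs funcs = {
		.late_register = my_register,
		.early_unregister = my_unregister,
	};

	int main(void)
	{
		/* The core, not each connector's init path, drives registration. */
		funcs.late_register();
		funcs.early_unregister();
		return 0;
	}
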
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 38eeca7a6e72..51434ec871f2 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
144 144
145static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) 145static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
146{ 146{
147 struct drm_device *dev = dev_priv->dev; 147 struct drm_device *dev = &dev_priv->drm;
148 struct drm_mode_config *mode_config = &dev->mode_config; 148 struct drm_mode_config *mode_config = &dev->mode_config;
149 struct intel_connector *intel_connector; 149 struct intel_connector *intel_connector;
150 struct intel_encoder *intel_encoder; 150 struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
191 struct drm_i915_private *dev_priv = 191 struct drm_i915_private *dev_priv =
192 container_of(work, typeof(*dev_priv), 192 container_of(work, typeof(*dev_priv),
193 hotplug.reenable_work.work); 193 hotplug.reenable_work.work);
194 struct drm_device *dev = dev_priv->dev; 194 struct drm_device *dev = &dev_priv->drm;
195 struct drm_mode_config *mode_config = &dev->mode_config; 195 struct drm_mode_config *mode_config = &dev->mode_config;
196 int i; 196 int i;
197 197
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
302{ 302{
303 struct drm_i915_private *dev_priv = 303 struct drm_i915_private *dev_priv =
304 container_of(work, struct drm_i915_private, hotplug.hotplug_work); 304 container_of(work, struct drm_i915_private, hotplug.hotplug_work);
305 struct drm_device *dev = dev_priv->dev; 305 struct drm_device *dev = &dev_priv->drm;
306 struct drm_mode_config *mode_config = &dev->mode_config; 306 struct drm_mode_config *mode_config = &dev->mode_config;
307 struct intel_connector *intel_connector; 307 struct intel_connector *intel_connector;
308 struct intel_encoder *intel_encoder; 308 struct intel_encoder *intel_encoder;
@@ -455,7 +455,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
455 */ 455 */
456void intel_hpd_init(struct drm_i915_private *dev_priv) 456void intel_hpd_init(struct drm_i915_private *dev_priv)
457{ 457{
458 struct drm_device *dev = dev_priv->dev; 458 struct drm_device *dev = &dev_priv->drm;
459 struct drm_mode_config *mode_config = &dev->mode_config; 459 struct drm_mode_config *mode_config = &dev->mode_config;
460 struct drm_connector *connector; 460 struct drm_connector *connector;
461 int i; 461 int i;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 81de23098be7..1f266d7df2ec 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
113void 113void
114intel_i2c_reset(struct drm_device *dev) 114intel_i2c_reset(struct drm_device *dev)
115{ 115{
116 struct drm_i915_private *dev_priv = dev->dev_private; 116 struct drm_i915_private *dev_priv = to_i915(dev);
117 117
118 I915_WRITE(GMBUS0, 0); 118 I915_WRITE(GMBUS0, 0);
119 I915_WRITE(GMBUS4, 0); 119 I915_WRITE(GMBUS4, 0);
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
138static u32 get_reserved(struct intel_gmbus *bus) 138static u32 get_reserved(struct intel_gmbus *bus)
139{ 139{
140 struct drm_i915_private *dev_priv = bus->dev_priv; 140 struct drm_i915_private *dev_priv = bus->dev_priv;
141 struct drm_device *dev = dev_priv->dev; 141 struct drm_device *dev = &dev_priv->drm;
142 u32 reserved = 0; 142 u32 reserved = 0;
143 143
144 /* On most chips, these bits must be preserved in software. */ 144 /* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
212 adapter); 212 adapter);
213 struct drm_i915_private *dev_priv = bus->dev_priv; 213 struct drm_i915_private *dev_priv = bus->dev_priv;
214 214
215 intel_i2c_reset(dev_priv->dev); 215 intel_i2c_reset(&dev_priv->drm);
216 intel_i2c_quirk_set(dev_priv, true); 216 intel_i2c_quirk_set(dev_priv, true);
217 set_data(bus, 1); 217 set_data(bus, 1);
218 set_clock(bus, 1); 218 set_clock(bus, 1);
@@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
298{ 298{
299 int ret; 299 int ret;
300 300
301#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
302
303 if (!HAS_GMBUS_IRQ(dev_priv)) 301 if (!HAS_GMBUS_IRQ(dev_priv))
304 return wait_for(C, 10); 302 return intel_wait_for_register(dev_priv,
303 GMBUS2, GMBUS_ACTIVE, 0,
304 10);
305 305
306 /* Important: The hw handles only the first bit, so set only one! */ 306 /* Important: The hw handles only the first bit, so set only one! */
307 I915_WRITE(GMBUS4, GMBUS_IDLE_EN); 307 I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
308 308
309 ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 309 ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
310 (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
310 msecs_to_jiffies_timeout(10)); 311 msecs_to_jiffies_timeout(10));
311 312
312 I915_WRITE(GMBUS4, 0); 313 I915_WRITE(GMBUS4, 0);
@@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
315 return 0; 316 return 0;
316 else 317 else
317 return -ETIMEDOUT; 318 return -ETIMEDOUT;
318#undef C
319} 319}
320 320
321static int 321static int
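
This patch repeatedly swaps open-coded polling — the old wait_for(C, 10) above, the ring-stop and LVDS panel-power waits later in the patch — for intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which succeeds once (read(reg) & mask) == value or gives up after the timeout. A minimal user-space model of that contract, with a fake register read standing in for MMIO:

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	static uint32_t fake_reg = 0x2;	/* pretend the busy bit is set */

	static uint32_t read_reg(void)
	{
		/* Stand-in for an MMIO read; drops the busy bit after a while. */
		static int calls;
		if (++calls > 3)
			fake_reg &= ~0x2u;
		return fake_reg;
	}

	/* Model of intel_wait_for_register(): poll until (reg & mask) == value,
	 * or give up after timeout_ms.  Returns 0 on success, -1 on timeout. */
	static int wait_for_register(uint32_t mask, uint32_t value, int timeout_ms)
	{
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		for (;;) {
			if ((read_reg() & mask) == value)
				return 0;
			clock_gettime(CLOCK_MONOTONIC, &now);
			long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
					  (now.tv_nsec - start.tv_nsec) / 1000000;
			if (elapsed_ms > timeout_ms)
				return -1;
		}
	}

	int main(void)
	{
		/* Wait for the busy bit (0x2) to drop, like gmbus_wait_idle(). */
		printf("idle wait: %s\n",
		       wait_for_register(0x2, 0, 10) ? "timeout" : "ok");
		return 0;
	}
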
@@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
632 */ 632 */
633int intel_setup_gmbus(struct drm_device *dev) 633int intel_setup_gmbus(struct drm_device *dev)
634{ 634{
635 struct drm_i915_private *dev_priv = dev->dev_private; 635 struct drm_i915_private *dev_priv = to_i915(dev);
636 struct intel_gmbus *bus; 636 struct intel_gmbus *bus;
637 unsigned int pin; 637 unsigned int pin;
638 int ret; 638 int ret;
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
688 goto err; 688 goto err;
689 } 689 }
690 690
691 intel_i2c_reset(dev_priv->dev); 691 intel_i2c_reset(&dev_priv->drm);
692 692
693 return 0; 693 return 0;
694 694
@@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
736 736
737void intel_teardown_gmbus(struct drm_device *dev) 737void intel_teardown_gmbus(struct drm_device *dev)
738{ 738{
739 struct drm_i915_private *dev_priv = dev->dev_private; 739 struct drm_i915_private *dev_priv = to_i915(dev);
740 struct intel_gmbus *bus; 740 struct intel_gmbus *bus;
741 unsigned int pin; 741 unsigned int pin;
742 742
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index debed011a958..70c699043d0e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -789,9 +789,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
789 intel_logical_ring_emit(ringbuf, MI_NOOP); 789 intel_logical_ring_emit(ringbuf, MI_NOOP);
790 intel_logical_ring_advance(ringbuf); 790 intel_logical_ring_advance(ringbuf);
791 791
792 if (intel_engine_stopped(engine))
793 return 0;
794
795 /* We keep the previous context alive until we retire the following 792 /* We keep the previous context alive until we retire the following
796 * request. This ensures that the context object is still pinned 793
797 * for any residual writes the HW makes into it on the context switch 794 * for any residual writes the HW makes into it on the context switch
@@ -826,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
826{ 823{
827 struct drm_device *dev = params->dev; 824 struct drm_device *dev = params->dev;
828 struct intel_engine_cs *engine = params->engine; 825 struct intel_engine_cs *engine = params->engine;
829 struct drm_i915_private *dev_priv = dev->dev_private; 826 struct drm_i915_private *dev_priv = to_i915(dev);
830 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; 827 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
831 u64 exec_start; 828 u64 exec_start;
832 int instp_mode; 829 int instp_mode;
@@ -902,7 +899,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
902 struct drm_i915_gem_request *req, *tmp; 899 struct drm_i915_gem_request *req, *tmp;
903 LIST_HEAD(cancel_list); 900 LIST_HEAD(cancel_list);
904 901
905 WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex)); 902 WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
906 903
907 spin_lock_bh(&engine->execlist_lock); 904 spin_lock_bh(&engine->execlist_lock);
908 list_replace_init(&engine->execlist_queue, &cancel_list); 905 list_replace_init(&engine->execlist_queue, &cancel_list);
@@ -929,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
929 926
930 /* TODO: Is this correct with Execlists enabled? */ 927 /* TODO: Is this correct with Execlists enabled? */
931 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 928 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
932 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 929 if (intel_wait_for_register(dev_priv,
930 RING_MI_MODE(engine->mmio_base),
931 MODE_IDLE, MODE_IDLE,
932 1000)) {
933 DRM_ERROR("%s: timed out trying to stop ring\n", engine->name); 933 DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
934 return; 934 return;
935 } 935 }
@@ -961,7 +961,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
961 u32 *lrc_reg_state; 961 u32 *lrc_reg_state;
962 int ret; 962 int ret;
963 963
964 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 964 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
965 965
966 if (ce->pin_count++) 966 if (ce->pin_count++)
967 return 0; 967 return 0;
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
1011{ 1011{
1012 struct intel_context *ce = &ctx->engine[engine->id]; 1012 struct intel_context *ce = &ctx->engine[engine->id];
1013 1013
1014 lockdep_assert_held(&ctx->i915->dev->struct_mutex); 1014 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1015 GEM_BUG_ON(ce->pin_count == 0); 1015 GEM_BUG_ON(ce->pin_count == 0);
1016 1016
1017 if (--ce->pin_count) 1017 if (--ce->pin_count)
@@ -1296,6 +1296,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1296 wa_ctx_emit(batch, index, 0); 1296 wa_ctx_emit(batch, index, 0);
1297 wa_ctx_emit(batch, index, 0); 1297 wa_ctx_emit(batch, index, 0);
1298 } 1298 }
1299
1300 /* WaMediaPoolStateCmdInWABB:bxt */
1301 if (HAS_POOLED_EU(engine->i915)) {
1302 /*
1303 * EU pool configuration is setup along with golden context
1304 * during context initialization. This value depends on
1305 * device type (2x6 or 3x6) and needs to be updated based
1306 * on which subslice is disabled, especially for 2x6
1307 * devices. However, it is safe to load the default
1308 * configuration of a 3x6 device instead of masking off
1309 * corresponding bits, because the HW ignores bits of a disabled
1310 * subslice and drops down to appropriate config. Please
1311 * see render_state_setup() in i915_gem_render_state.c for
1312 * possible configurations, to avoid duplication they are
1313 * not shown here again.
1314 */
1315 u32 eu_pool_config = 0x00777000;
1316 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1317 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1318 wa_ctx_emit(batch, index, eu_pool_config);
1319 wa_ctx_emit(batch, index, 0);
1320 wa_ctx_emit(batch, index, 0);
1321 wa_ctx_emit(batch, index, 0);
1322 }
1323
1299 /* Pad to end of cacheline */ 1324 /* Pad to end of cacheline */
1300 while (index % CACHELINE_DWORDS) 1325 while (index % CACHELINE_DWORDS)
1301 wa_ctx_emit(batch, index, MI_NOOP); 1326 wa_ctx_emit(batch, index, MI_NOOP);
@@ -1353,8 +1378,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1353{ 1378{
1354 int ret; 1379 int ret;
1355 1380
1356 engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev, 1381 engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
1357 PAGE_ALIGN(size)); 1382 PAGE_ALIGN(size));
1358 if (IS_ERR(engine->wa_ctx.obj)) { 1383 if (IS_ERR(engine->wa_ctx.obj)) {
1359 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1384 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1360 ret = PTR_ERR(engine->wa_ctx.obj); 1385 ret = PTR_ERR(engine->wa_ctx.obj);
@@ -1614,36 +1639,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1614 return 0; 1639 return 0;
1615} 1640}
1616 1641
1617static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) 1642static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1618{ 1643{
1619 struct drm_i915_private *dev_priv = engine->i915; 1644 struct drm_i915_private *dev_priv = engine->i915;
1620 unsigned long flags; 1645 I915_WRITE_IMR(engine,
1621 1646 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1622 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1647 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1623 return false;
1624
1625 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1626 if (engine->irq_refcount++ == 0) {
1627 I915_WRITE_IMR(engine,
1628 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1629 POSTING_READ(RING_IMR(engine->mmio_base));
1630 }
1631 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1632
1633 return true;
1634} 1648}
1635 1649
1636static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) 1650static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1637{ 1651{
1638 struct drm_i915_private *dev_priv = engine->i915; 1652 struct drm_i915_private *dev_priv = engine->i915;
1639 unsigned long flags; 1653 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1640
1641 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1642 if (--engine->irq_refcount == 0) {
1643 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1644 POSTING_READ(RING_IMR(engine->mmio_base));
1645 }
1646 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1647} 1654}
1648 1655
1649static int gen8_emit_flush(struct drm_i915_gem_request *request, 1656static int gen8_emit_flush(struct drm_i915_gem_request *request,
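
The hunk above retires the refcounted, irq_lock-guarded get/put pair: with a central waiter-tracking layer (note the intel_engine_init_breadcrumbs/fini_breadcrumbs calls elsewhere in this file) deciding when interrupts are needed, the engine callback is invoked once per transition and only has to write the interrupt mask. A stripped-down model of the new mask writes; the masks and IMR variable are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t imr = ~0u;		/* all interrupt sources masked */
	static const uint32_t enable_mask = 0x1;
	static const uint32_t keep_mask = 0x2;	/* always-on sources */

	/* Old shape: every caller took irq_lock and bumped a refcount before
	 * touching the register (omitted here).  New shape: one central
	 * waiter-tracker calls these exactly once per transition, so the
	 * hook just writes IMR. */
	static void irq_enable(void)  { imr = ~(enable_mask | keep_mask); }
	static void irq_disable(void) { imr = ~keep_mask; }

	int main(void)
	{
		irq_enable();
		printf("IMR after enable:  0x%08x\n", imr);
		irq_disable();
		printf("IMR after disable: 0x%08x\n", imr);
		return 0;
	}
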
@@ -1780,16 +1787,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1780 return 0; 1787 return 0;
1781} 1788}
1782 1789
1783static u32 gen8_get_seqno(struct intel_engine_cs *engine)
1784{
1785 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1786}
1787
1788static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1789{
1790 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1791}
1792
1793static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) 1790static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1794{ 1791{
1795 /* 1792 /*
@@ -1805,14 +1802,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1805 intel_flush_status_page(engine, I915_GEM_HWS_INDEX); 1802 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1806} 1803}
1807 1804
1808static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1809{
1810 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1811
1812 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1813 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1814}
1815
1816/* 1805/*
1817 * Reserve space for 2 NOOPs at the end of each request to be 1806 * Reserve space for 2 NOOPs at the end of each request to be
1818 * used as a workaround for not being allowed to do lite 1807 * used as a workaround for not being allowed to do lite
@@ -1838,7 +1827,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
1838 intel_hws_seqno_address(request->engine) | 1827 intel_hws_seqno_address(request->engine) |
1839 MI_FLUSH_DW_USE_GTT); 1828 MI_FLUSH_DW_USE_GTT);
1840 intel_logical_ring_emit(ringbuf, 0); 1829 intel_logical_ring_emit(ringbuf, 0);
1841 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1830 intel_logical_ring_emit(ringbuf, request->seqno);
1842 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1831 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1843 intel_logical_ring_emit(ringbuf, MI_NOOP); 1832 intel_logical_ring_emit(ringbuf, MI_NOOP);
1844 return intel_logical_ring_advance_and_submit(request); 1833 return intel_logical_ring_advance_and_submit(request);
@@ -1958,6 +1947,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1958 i915_cmd_parser_fini_ring(engine); 1947 i915_cmd_parser_fini_ring(engine);
1959 i915_gem_batch_pool_fini(&engine->batch_pool); 1948 i915_gem_batch_pool_fini(&engine->batch_pool);
1960 1949
1950 intel_engine_fini_breadcrumbs(engine);
1951
1961 if (engine->status_page.obj) { 1952 if (engine->status_page.obj) {
1962 i915_gem_object_unpin_map(engine->status_page.obj); 1953 i915_gem_object_unpin_map(engine->status_page.obj);
1963 engine->status_page.obj = NULL; 1954 engine->status_page.obj = NULL;
@@ -1979,15 +1970,11 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1979 engine->init_hw = gen8_init_common_ring; 1970 engine->init_hw = gen8_init_common_ring;
1980 engine->emit_request = gen8_emit_request; 1971 engine->emit_request = gen8_emit_request;
1981 engine->emit_flush = gen8_emit_flush; 1972 engine->emit_flush = gen8_emit_flush;
1982 engine->irq_get = gen8_logical_ring_get_irq; 1973 engine->irq_enable = gen8_logical_ring_enable_irq;
1983 engine->irq_put = gen8_logical_ring_put_irq; 1974 engine->irq_disable = gen8_logical_ring_disable_irq;
1984 engine->emit_bb_start = gen8_emit_bb_start; 1975 engine->emit_bb_start = gen8_emit_bb_start;
1985 engine->get_seqno = gen8_get_seqno; 1976 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1986 engine->set_seqno = gen8_set_seqno;
1987 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1988 engine->irq_seqno_barrier = bxt_a_seqno_barrier; 1977 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
1989 engine->set_seqno = bxt_a_set_seqno;
1990 }
1991} 1978}
1992 1979
1993static inline void 1980static inline void
@@ -1995,7 +1982,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
1995{ 1982{
1996 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 1983 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1997 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 1984 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
1998 init_waitqueue_head(&engine->irq_queue);
1999} 1985}
2000 1986
2001static int 1987static int
@@ -2016,12 +2002,94 @@ lrc_setup_hws(struct intel_engine_cs *engine,
2016 return 0; 2002 return 0;
2017} 2003}
2018 2004
2005static int
2006logical_ring_init(struct intel_engine_cs *engine)
2007{
2008 struct i915_gem_context *dctx = engine->i915->kernel_context;
2009 int ret;
2010
2011 ret = intel_engine_init_breadcrumbs(engine);
2012 if (ret)
2013 goto error;
2014
2015 ret = i915_cmd_parser_init_ring(engine);
2016 if (ret)
2017 goto error;
2018
2019 ret = execlists_context_deferred_alloc(dctx, engine);
2020 if (ret)
2021 goto error;
2022
2023 /* As this is the default context, always pin it */
2024 ret = intel_lr_context_pin(dctx, engine);
2025 if (ret) {
2026 DRM_ERROR("Failed to pin context for %s: %d\n",
2027 engine->name, ret);
2028 goto error;
2029 }
2030
2031 /* And setup the hardware status page. */
2032 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2033 if (ret) {
2034 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2035 goto error;
2036 }
2037
2038 return 0;
2039
2040error:
2041 intel_logical_ring_cleanup(engine);
2042 return ret;
2043}
2044
2045static int logical_render_ring_init(struct intel_engine_cs *engine)
2046{
2047 struct drm_i915_private *dev_priv = engine->i915;
2048 int ret;
2049
2050 if (HAS_L3_DPF(dev_priv))
2051 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2052
2053 /* Override some for render ring. */
2054 if (INTEL_GEN(dev_priv) >= 9)
2055 engine->init_hw = gen9_init_render_ring;
2056 else
2057 engine->init_hw = gen8_init_render_ring;
2058 engine->init_context = gen8_init_rcs_context;
2059 engine->cleanup = intel_fini_pipe_control;
2060 engine->emit_flush = gen8_emit_flush_render;
2061 engine->emit_request = gen8_emit_request_render;
2062
2063 ret = intel_init_pipe_control(engine, 4096);
2064 if (ret)
2065 return ret;
2066
2067 ret = intel_init_workaround_bb(engine);
2068 if (ret) {
2069 /*
2070 * We continue even if we fail to initialize WA batch
2071 * because we only expect rare glitches but nothing
2072 * critical to prevent us from using GPU
2073 */
2074 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2075 ret);
2076 }
2077
2078 ret = logical_ring_init(engine);
2079 if (ret) {
2080 lrc_destroy_wa_ctx_obj(engine);
2081 }
2082
2083 return ret;
2084}
2085
2019static const struct logical_ring_info { 2086static const struct logical_ring_info {
2020 const char *name; 2087 const char *name;
2021 unsigned exec_id; 2088 unsigned exec_id;
2022 unsigned guc_id; 2089 unsigned guc_id;
2023 u32 mmio_base; 2090 u32 mmio_base;
2024 unsigned irq_shift; 2091 unsigned irq_shift;
2092 int (*init)(struct intel_engine_cs *engine);
2025} logical_rings[] = { 2093} logical_rings[] = {
2026 [RCS] = { 2094 [RCS] = {
2027 .name = "render ring", 2095 .name = "render ring",
@@ -2029,6 +2097,7 @@ static const struct logical_ring_info {
2029 .guc_id = GUC_RENDER_ENGINE, 2097 .guc_id = GUC_RENDER_ENGINE,
2030 .mmio_base = RENDER_RING_BASE, 2098 .mmio_base = RENDER_RING_BASE,
2031 .irq_shift = GEN8_RCS_IRQ_SHIFT, 2099 .irq_shift = GEN8_RCS_IRQ_SHIFT,
2100 .init = logical_render_ring_init,
2032 }, 2101 },
2033 [BCS] = { 2102 [BCS] = {
2034 .name = "blitter ring", 2103 .name = "blitter ring",
@@ -2036,6 +2105,7 @@ static const struct logical_ring_info {
2036 .guc_id = GUC_BLITTER_ENGINE, 2105 .guc_id = GUC_BLITTER_ENGINE,
2037 .mmio_base = BLT_RING_BASE, 2106 .mmio_base = BLT_RING_BASE,
2038 .irq_shift = GEN8_BCS_IRQ_SHIFT, 2107 .irq_shift = GEN8_BCS_IRQ_SHIFT,
2108 .init = logical_ring_init,
2039 }, 2109 },
2040 [VCS] = { 2110 [VCS] = {
2041 .name = "bsd ring", 2111 .name = "bsd ring",
@@ -2043,6 +2113,7 @@ static const struct logical_ring_info {
2043 .guc_id = GUC_VIDEO_ENGINE, 2113 .guc_id = GUC_VIDEO_ENGINE,
2044 .mmio_base = GEN6_BSD_RING_BASE, 2114 .mmio_base = GEN6_BSD_RING_BASE,
2045 .irq_shift = GEN8_VCS1_IRQ_SHIFT, 2115 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
2116 .init = logical_ring_init,
2046 }, 2117 },
2047 [VCS2] = { 2118 [VCS2] = {
2048 .name = "bsd2 ring", 2119 .name = "bsd2 ring",
@@ -2050,6 +2121,7 @@ static const struct logical_ring_info {
2050 .guc_id = GUC_VIDEO_ENGINE2, 2121 .guc_id = GUC_VIDEO_ENGINE2,
2051 .mmio_base = GEN8_BSD2_RING_BASE, 2122 .mmio_base = GEN8_BSD2_RING_BASE,
2052 .irq_shift = GEN8_VCS2_IRQ_SHIFT, 2123 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
2124 .init = logical_ring_init,
2053 }, 2125 },
2054 [VECS] = { 2126 [VECS] = {
2055 .name = "video enhancement ring", 2127 .name = "video enhancement ring",
@@ -2057,14 +2129,14 @@ static const struct logical_ring_info {
2057 .guc_id = GUC_VIDEOENHANCE_ENGINE, 2129 .guc_id = GUC_VIDEOENHANCE_ENGINE,
2058 .mmio_base = VEBOX_RING_BASE, 2130 .mmio_base = VEBOX_RING_BASE,
2059 .irq_shift = GEN8_VECS_IRQ_SHIFT, 2131 .irq_shift = GEN8_VECS_IRQ_SHIFT,
2132 .init = logical_ring_init,
2060 }, 2133 },
2061}; 2134};
2062 2135
2063static struct intel_engine_cs * 2136static struct intel_engine_cs *
2064logical_ring_setup(struct drm_device *dev, enum intel_engine_id id) 2137logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
2065{ 2138{
2066 const struct logical_ring_info *info = &logical_rings[id]; 2139 const struct logical_ring_info *info = &logical_rings[id];
2067 struct drm_i915_private *dev_priv = to_i915(dev);
2068 struct intel_engine_cs *engine = &dev_priv->engine[id]; 2140 struct intel_engine_cs *engine = &dev_priv->engine[id];
2069 enum forcewake_domains fw_domains; 2141 enum forcewake_domains fw_domains;
2070 2142
@@ -2107,169 +2179,62 @@ logical_ring_setup(struct drm_device *dev, enum intel_engine_id id)
2107 logical_ring_default_irqs(engine, info->irq_shift); 2179 logical_ring_default_irqs(engine, info->irq_shift);
2108 2180
2109 intel_engine_init_hangcheck(engine); 2181 intel_engine_init_hangcheck(engine);
2110 i915_gem_batch_pool_init(dev, &engine->batch_pool); 2182 i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
2111 2183
2112 return engine; 2184 return engine;
2113} 2185}
2114 2186
2115static int
2116logical_ring_init(struct intel_engine_cs *engine)
2117{
2118 struct i915_gem_context *dctx = engine->i915->kernel_context;
2119 int ret;
2120
2121 ret = i915_cmd_parser_init_ring(engine);
2122 if (ret)
2123 goto error;
2124
2125 ret = execlists_context_deferred_alloc(dctx, engine);
2126 if (ret)
2127 goto error;
2128
2129 /* As this is the default context, always pin it */
2130 ret = intel_lr_context_pin(dctx, engine);
2131 if (ret) {
2132 DRM_ERROR("Failed to pin context for %s: %d\n",
2133 engine->name, ret);
2134 goto error;
2135 }
2136
2137 /* And setup the hardware status page. */
2138 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2139 if (ret) {
2140 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2141 goto error;
2142 }
2143
2144 return 0;
2145
2146error:
2147 intel_logical_ring_cleanup(engine);
2148 return ret;
2149}
2150
2151static int logical_render_ring_init(struct drm_device *dev)
2152{
2153 struct intel_engine_cs *engine = logical_ring_setup(dev, RCS);
2154 int ret;
2155
2156 if (HAS_L3_DPF(dev))
2157 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2158
2159 /* Override some for render ring. */
2160 if (INTEL_INFO(dev)->gen >= 9)
2161 engine->init_hw = gen9_init_render_ring;
2162 else
2163 engine->init_hw = gen8_init_render_ring;
2164 engine->init_context = gen8_init_rcs_context;
2165 engine->cleanup = intel_fini_pipe_control;
2166 engine->emit_flush = gen8_emit_flush_render;
2167 engine->emit_request = gen8_emit_request_render;
2168
2169 ret = intel_init_pipe_control(engine);
2170 if (ret)
2171 return ret;
2172
2173 ret = intel_init_workaround_bb(engine);
2174 if (ret) {
2175 /*
2176 * We continue even if we fail to initialize WA batch
2177 * because we only expect rare glitches but nothing
2178 * critical to prevent us from using GPU
2179 */
2180 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2181 ret);
2182 }
2183
2184 ret = logical_ring_init(engine);
2185 if (ret) {
2186 lrc_destroy_wa_ctx_obj(engine);
2187 }
2188
2189 return ret;
2190}
2191
2192static int logical_bsd_ring_init(struct drm_device *dev)
2193{
2194 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS);
2195
2196 return logical_ring_init(engine);
2197}
2198
2199static int logical_bsd2_ring_init(struct drm_device *dev)
2200{
2201 struct intel_engine_cs *engine = logical_ring_setup(dev, VCS2);
2202
2203 return logical_ring_init(engine);
2204}
2205
2206static int logical_blt_ring_init(struct drm_device *dev)
2207{
2208 struct intel_engine_cs *engine = logical_ring_setup(dev, BCS);
2209
2210 return logical_ring_init(engine);
2211}
2212
2213static int logical_vebox_ring_init(struct drm_device *dev)
2214{
2215 struct intel_engine_cs *engine = logical_ring_setup(dev, VECS);
2216
2217 return logical_ring_init(engine);
2218}
2219
2220/** 2187/**
2221 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers 2188 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2222 * @dev: DRM device. 2189 * @dev: DRM device.
2223 * 2190 *
2224 * This function inits the engines for an Execlists submission style (the equivalent in the 2191 * This function inits the engines for an Execlists submission style (the
2225 * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for 2192 * equivalent in the legacy ringbuffer submission world would be
2226 * those engines that are present in the hardware. 2193 * i915_gem_init_engines). It does it only for those engines that are present in
2194 * the hardware.
2227 * 2195 *
2228 * Return: non-zero if the initialization failed. 2196 * Return: non-zero if the initialization failed.
2229 */ 2197 */
2230int intel_logical_rings_init(struct drm_device *dev) 2198int intel_logical_rings_init(struct drm_device *dev)
2231{ 2199{
2232 struct drm_i915_private *dev_priv = dev->dev_private; 2200 struct drm_i915_private *dev_priv = to_i915(dev);
2201 unsigned int mask = 0;
2202 unsigned int i;
2233 int ret; 2203 int ret;
2234 2204
2235 ret = logical_render_ring_init(dev); 2205 WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
2236 if (ret) 2206 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
2237 return ret;
2238 2207
2239 if (HAS_BSD(dev)) { 2208 for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
2240 ret = logical_bsd_ring_init(dev); 2209 if (!HAS_ENGINE(dev_priv, i))
2241 if (ret) 2210 continue;
2242 goto cleanup_render_ring;
2243 }
2244 2211
2245 if (HAS_BLT(dev)) { 2212 if (!logical_rings[i].init)
2246 ret = logical_blt_ring_init(dev); 2213 continue;
2247 if (ret)
2248 goto cleanup_bsd_ring;
2249 }
2250 2214
2251 if (HAS_VEBOX(dev)) { 2215 ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
2252 ret = logical_vebox_ring_init(dev);
2253 if (ret) 2216 if (ret)
2254 goto cleanup_blt_ring; 2217 goto cleanup;
2218
2219 mask |= ENGINE_MASK(i);
2255 } 2220 }
2256 2221
2257 if (HAS_BSD2(dev)) { 2222 /*
2258 ret = logical_bsd2_ring_init(dev); 2223 * Catch failures to update logical_rings table when the new engines
2259 if (ret) 2224 * are added to the driver by a warning and disabling the forgotten
2260 goto cleanup_vebox_ring; 2225 * engines.
2226 */
2227 if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
2228 struct intel_device_info *info =
2229 (struct intel_device_info *)&dev_priv->info;
2230 info->ring_mask = mask;
2261 } 2231 }
2262 2232
2263 return 0; 2233 return 0;
2264 2234
2265cleanup_vebox_ring: 2235cleanup:
2266 intel_logical_ring_cleanup(&dev_priv->engine[VECS]); 2236 for (i = 0; i < I915_NUM_ENGINES; i++)
2267cleanup_blt_ring: 2237 intel_logical_ring_cleanup(&dev_priv->engine[i]);
2268 intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
2269cleanup_bsd_ring:
2270 intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
2271cleanup_render_ring:
2272 intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
2273 2238
2274 return ret; 2239 return ret;
2275} 2240}
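
The rewritten intel_logical_rings_init() above is table-driven: every engine present in hardware is initialized through logical_rings[i].init, its bit accumulated into a mask, and a final check warns and shrinks the advertised ring_mask if the table forgot an engine. A compilable model of that loop; the engine IDs, masks, and always-succeeding init hook are illustrative, and VECS is deliberately left out of the table to trigger the fixup.

	#include <stdio.h>

	enum { RCS, BCS, VCS, VCS2, VECS, NUM_ENGINES };
	#define ENGINE_MASK(id) (1u << (id))

	/* What the device info claims is present (illustrative). */
	static unsigned int hw_ring_mask = ENGINE_MASK(RCS) | ENGINE_MASK(BCS) |
					   ENGINE_MASK(VCS) | ENGINE_MASK(VECS);

	static int init_engine(int id) { (void)id; return 0; }	/* always succeeds */

	/* VECS is deliberately missing to show the mask fixup below. */
	static int (*const init_table[NUM_ENGINES])(int) = {
		[RCS] = init_engine, [BCS] = init_engine, [VCS] = init_engine,
	};

	int main(void)
	{
		unsigned int mask = 0;

		for (int i = 0; i < NUM_ENGINES; i++) {
			if (!(hw_ring_mask & ENGINE_MASK(i)))
				continue;	/* engine not present in hw */
			if (!init_table[i])
				continue;	/* no init hook: skip engine */
			if (init_table[i](i))
				return 1;	/* error path (cleanup elided) */
			mask |= ENGINE_MASK(i);
		}

		/* Catch a stale table: trust what actually initialized and
		 * shrink the advertised mask, as the hunk above does. */
		if (mask != hw_ring_mask) {
			fprintf(stderr, "engine table out of date, fixing mask\n");
			hw_ring_mask = mask;
		}
		printf("ring_mask=0x%x\n", hw_ring_mask);
		return 0;
	}
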
@@ -2546,7 +2511,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2546 /* One extra page as the sharing data between driver and GuC */ 2511 /* One extra page as the sharing data between driver and GuC */
2547 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2512 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2548 2513
2549 ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size); 2514 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
2550 if (IS_ERR(ctx_obj)) { 2515 if (IS_ERR(ctx_obj)) {
2551 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2516 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2552 return PTR_ERR(ctx_obj); 2517 return PTR_ERR(ctx_obj);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e9082185a375..49550470483e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
72 enum pipe *pipe) 72 enum pipe *pipe)
73{ 73{
74 struct drm_device *dev = encoder->base.dev; 74 struct drm_device *dev = encoder->base.dev;
75 struct drm_i915_private *dev_priv = dev->dev_private; 75 struct drm_i915_private *dev_priv = to_i915(dev);
76 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 76 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
77 enum intel_display_power_domain power_domain; 77 enum intel_display_power_domain power_domain;
78 u32 tmp; 78 u32 tmp;
@@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
106 struct intel_crtc_state *pipe_config) 106 struct intel_crtc_state *pipe_config)
107{ 107{
108 struct drm_device *dev = encoder->base.dev; 108 struct drm_device *dev = encoder->base.dev;
109 struct drm_i915_private *dev_priv = dev->dev_private; 109 struct drm_i915_private *dev_priv = to_i915(dev);
110 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 110 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
111 u32 tmp, flags = 0; 111 u32 tmp, flags = 0;
112 112
@@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
140{ 140{
141 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 141 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
142 struct drm_device *dev = encoder->base.dev; 142 struct drm_device *dev = encoder->base.dev;
143 struct drm_i915_private *dev_priv = dev->dev_private; 143 struct drm_i915_private *dev_priv = to_i915(dev);
144 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 144 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
145 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 145 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
146 int pipe = crtc->pipe; 146 int pipe = crtc->pipe;
@@ -184,8 +184,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
 	 * panels behave in the two modes. For now, let's just maintain the
 	 * value we got from the BIOS.
 	 */
 	temp &= ~LVDS_A3_POWER_MASK;
 	temp |= lvds_encoder->a3_power;
 
 	/* Set the dithering flag on LVDS as needed, note that there is no
 	 * special lvds dither control bit on pch-split platforms, dithering is
@@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
 	struct intel_connector *intel_connector =
 		&lvds_encoder->attached_connector->base;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
 	POSTING_READ(lvds_encoder->reg);
-	if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
+	if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
 		DRM_ERROR("timed out waiting for panel to power on\n");
 
 	intel_panel_enable_backlight(intel_connector);
@@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	i915_reg_t ctl_reg, stat_reg;
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
 	}
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-	if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
+	if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
 		DRM_ERROR("timed out waiting for panel to power off\n");
 
 	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
 		container_of(nb, struct intel_lvds_connector, lid_notifier);
 	struct drm_connector *connector = &lvds_connector->base.base;
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
 		return NOTIFY_OK;
@@ -555,6 +555,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_lvds_set_property,
 	.atomic_get_property = intel_connector_atomic_get_property,
+	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
 	.destroy = intel_lvds_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
 	{ }	/* terminating entry */
 };
 
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
 {
-	struct intel_encoder *encoder;
-	struct intel_lvds_encoder *lvds_encoder;
+	struct intel_encoder *intel_encoder;
 
-	for_each_intel_encoder(dev, encoder) {
-		if (encoder->type == INTEL_OUTPUT_LVDS) {
-			lvds_encoder = to_lvds_encoder(&encoder->base);
+	for_each_intel_encoder(dev, intel_encoder)
+		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+			return intel_encoder;
 
-			return lvds_encoder->is_dual_link;
-		}
-	}
+	return NULL;
+}
 
-	return false;
+bool intel_is_dual_link_lvds(struct drm_device *dev)
+{
+	struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+
+	return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
 }
 
 static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 {
 	struct drm_device *dev = lvds_encoder->base.base.dev;
 	unsigned int val;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* use the module option value if specified */
 	if (i915.lvds_channel_mode > 0)
@@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
  */
 void intel_lvds_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_lvds_encoder *lvds_encoder;
 	struct intel_encoder *intel_encoder;
 	struct intel_lvds_connector *lvds_connector;
@@ -1118,6 +1121,7 @@ out:
 	mutex_unlock(&dev->mode_config.mutex);
 
 	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
 	DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
@@ -1130,9 +1134,6 @@ out:
 		DRM_DEBUG_KMS("lid notifier registration failed\n");
 		lvds_connector->lid_notifier.notifier_call = NULL;
 	}
-	drm_connector_register(connector);
-
-	intel_panel_setup_backlight(connector, INVALID_PIPE);
 
 	return;
 
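For reference, the two open-coded wait_for() polls above become
intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms): wait until
the masked register bits equal the requested value (PP_ON set for power-on,
0 for power-off), with the real helper returning 0 on success and an error
on timeout. A minimal userspace sketch of those semantics -- not the
driver's implementation, which also busy-waits briefly before sleeping --
with read_reg() standing in for I915_READ():

	#include <stdbool.h>
	#include <stdint.h>
	#include <time.h>

	/* Poll until (read_reg(reg) & mask) == value or timeout_ms expires. */
	static bool wait_for_register(uint32_t (*read_reg)(uint32_t),
				      uint32_t reg, uint32_t mask,
				      uint32_t value, unsigned int timeout_ms)
	{
		struct timespec step = { .tv_nsec = 1000 * 1000 };	/* 1 ms */
		unsigned int waited_ms = 0;

		while ((read_reg(reg) & mask) != value) {
			if (waited_ms++ >= timeout_ms)
				return false;	/* caller reports a timeout */
			nanosleep(&step, NULL);
		}
		return true;
	}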
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 38a4c8ce7e63..f2584d0a01ab 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -82,7 +82,7 @@ void
 intel_attach_force_audio_property(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_property *prop;
 
 	prop = dev_priv->force_audio_property;
@@ -109,7 +109,7 @@ void
 intel_attach_broadcast_rgb_property(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_property *prop;
 
 	prop = dev_priv->broadcast_rgb_property;
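The recurring dev->dev_private -> to_i915(dev) conversion throughout this
patch relies on struct drm_device now being embedded inside struct
drm_i915_private, so the cast is pointer arithmetic rather than chasing a
stored pointer. A self-contained sketch of the pattern (field and helper
names mirror the driver, but this block is illustrative, not the kernel
source):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct drm_device { int mode_config; /* placeholder */ };

	struct drm_i915_private {
		struct drm_device drm;	/* embedded, not separately allocated */
		/* ... driver-private state ... */
	};

	static inline struct drm_i915_private *to_i915(struct drm_device *dev)
	{
		return container_of(dev, struct drm_i915_private, drm);
	}

Going the other way is why dev_priv->dev becomes &dev_priv->drm in the same
series: the drm_device is simply a member, so no extra pointer is needed.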
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f6d8a21d2c49..c27d5eb063d0 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -232,11 +232,28 @@ struct opregion_asle_ext {
 #define SWSCI_SBCB_POST_VBE_PM		SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
 #define SWSCI_SBCB_ENABLE_DISABLE_AUDIO	SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
 
-#define ACPI_OTHER_OUTPUT (0<<8)
-#define ACPI_VGA_OUTPUT (1<<8)
-#define ACPI_TV_OUTPUT (2<<8)
-#define ACPI_DIGITAL_OUTPUT (3<<8)
-#define ACPI_LVDS_OUTPUT (4<<8)
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT		0
+#define ACPI_DISPLAY_INDEX_MASK			(0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT	4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK	(0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT			8
+#define ACPI_DISPLAY_TYPE_MASK			(0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER			(0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA			(1 << 8)
+#define ACPI_DISPLAY_TYPE_TV			(2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL	(3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL	(4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT		12
+#define ACPI_VENDOR_SPECIFIC_MASK		(0xf << 12)
+#define ACPI_BIOS_CAN_DETECT			(1 << 16)
+#define ACPI_DEPENDS_ON_VGA			(1 << 17)
+#define ACPI_PIPE_ID_SHIFT			18
+#define ACPI_PIPE_ID_MASK			(7 << 18)
+#define ACPI_DEVICE_ID_SCHEME			(1 << 31)
 
 #define MAX_DSLP	1500
 
@@ -244,7 +261,7 @@ static int swsci(struct drm_i915_private *dev_priv,
 		 u32 function, u32 parm, u32 *parm_out)
 {
 	struct opregion_swsci *swsci = dev_priv->opregion.swsci;
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u32 main_function, sub_function, scic;
 	u16 swsci_val;
 	u32 dslp;
@@ -366,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
 		type = DISPLAY_TYPE_CRT;
 		break;
 	case INTEL_OUTPUT_UNKNOWN:
-	case INTEL_OUTPUT_DISPLAYPORT:
+	case INTEL_OUTPUT_DP:
 	case INTEL_OUTPUT_HDMI:
 	case INTEL_OUTPUT_DP_MST:
 		type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
@@ -418,7 +435,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
 {
 	struct intel_connector *connector;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
@@ -657,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
 	}
 }
 
+static u32 acpi_display_type(struct drm_connector *connector)
+{
+	u32 display_type;
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_VGA:
+	case DRM_MODE_CONNECTOR_DVIA:
+		display_type = ACPI_DISPLAY_TYPE_VGA;
+		break;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_Component:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+	case DRM_MODE_CONNECTOR_TV:
+		display_type = ACPI_DISPLAY_TYPE_TV;
+		break;
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_HDMIB:
+		display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+	case DRM_MODE_CONNECTOR_eDP:
+	case DRM_MODE_CONNECTOR_DSI:
+		display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+		break;
+	case DRM_MODE_CONNECTOR_Unknown:
+	case DRM_MODE_CONNECTOR_VIRTUAL:
+		display_type = ACPI_DISPLAY_TYPE_OTHER;
+		break;
+	default:
+		MISSING_CASE(connector->connector_type);
+		display_type = ACPI_DISPLAY_TYPE_OTHER;
+		break;
+	}
+
+	return display_type;
+}
+
 static void intel_didl_outputs(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	struct drm_connector *connector;
 	acpi_handle handle;
 	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -724,37 +782,18 @@ end:
 
 blind_set:
 	i = 0;
-	list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
-		int output_type = ACPI_OTHER_OUTPUT;
+	list_for_each_entry(connector,
+			    &dev_priv->drm.mode_config.connector_list, head) {
+		int display_type = acpi_display_type(connector);
+
 		if (i >= max_outputs) {
 			DRM_DEBUG_KMS("More than %u outputs in connector list\n",
 				      max_outputs);
 			return;
 		}
-		switch (connector->connector_type) {
-		case DRM_MODE_CONNECTOR_VGA:
-		case DRM_MODE_CONNECTOR_DVIA:
-			output_type = ACPI_VGA_OUTPUT;
-			break;
-		case DRM_MODE_CONNECTOR_Composite:
-		case DRM_MODE_CONNECTOR_SVIDEO:
-		case DRM_MODE_CONNECTOR_Component:
-		case DRM_MODE_CONNECTOR_9PinDIN:
-			output_type = ACPI_TV_OUTPUT;
-			break;
-		case DRM_MODE_CONNECTOR_DVII:
-		case DRM_MODE_CONNECTOR_DVID:
-		case DRM_MODE_CONNECTOR_DisplayPort:
-		case DRM_MODE_CONNECTOR_HDMIA:
-		case DRM_MODE_CONNECTOR_HDMIB:
-			output_type = ACPI_DIGITAL_OUTPUT;
-			break;
-		case DRM_MODE_CONNECTOR_LVDS:
-			output_type = ACPI_LVDS_OUTPUT;
-			break;
-		}
+
 		temp = get_did(opregion, i);
-		set_did(opregion, i, temp | (1 << 31) | output_type | i);
+		set_did(opregion, i, temp | (1 << 31) | display_type | i);
 		i++;
 	}
 	goto end;
@@ -916,7 +955,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
 int intel_opregion_setup(struct drm_i915_private *dev_priv)
 {
 	struct intel_opregion *opregion = &dev_priv->opregion;
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u32 asls, mboxes;
 	char buf[sizeof(OPREGION_SIGNATURE)];
 	int err = 0;
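The new macros replace the five ad-hoc output defines with the full _DOD
layout from the ACPI spec, and blind_set now derives the type via
acpi_display_type(). A standalone sketch of how one device id word is
composed for set_did(), mirroring the temp | (1 << 31) | display_type | i
expression above (the index value here is illustrative):

	#include <stdint.h>

	#define ACPI_DISPLAY_INDEX_SHIFT		0
	#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL	(4u << 8)
	#define ACPI_DEVICE_ID_SCHEME			(1u << 31)

	/* bit 31: id follows the ACPI scheme; bits 11:8: display type;
	 * bits 3:0: per-type index */
	static uint32_t make_dod_entry(uint32_t index, uint32_t display_type)
	{
		return ACPI_DEVICE_ID_SCHEME | display_type |
		       (index << ACPI_DISPLAY_INDEX_SHIFT);
	}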
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index eb93f90bb74d..3212d8806b5a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
 	/* Only wait if there is actually an old frame to release to
 	 * guarantee forward progress.
@@ -741,8 +741,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	u32 swidth, swidthsw, sheight, ostride;
 	enum pipe pipe = overlay->crtc->pipe;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
 	ret = intel_overlay_release_old_vid(overlay);
 	if (ret != 0)
@@ -836,7 +836,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	overlay->old_vid_bo = overlay->vid_bo;
 	overlay->vid_bo = new_bo;
 
-	intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
+	intel_frontbuffer_flip(&dev_priv->drm,
+			       INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	return 0;
 
@@ -851,8 +852,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 	struct overlay_registers __iomem *regs;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->dev->struct_mutex);
-	WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
 
 	ret = intel_overlay_recover_from_interrupt(overlay);
 	if (ret != 0)
@@ -1084,7 +1085,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
 	struct drm_intel_overlay_put_image *put_image_rec = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_overlay *overlay;
 	struct drm_crtc *drmmode_crtc;
 	struct intel_crtc *crtc;
@@ -1282,7 +1283,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
 	struct drm_intel_overlay_attrs *attrs = data;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_overlay *overlay;
 	struct overlay_registers __iomem *regs;
 	int ret;
@@ -1379,7 +1380,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	if (!overlay)
 		return;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 	if (WARN_ON(dev_priv->overlay))
 		goto out_free;
 
@@ -1387,9 +1388,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
 	reg_bo = NULL;
 	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
+		reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
+						       PAGE_SIZE);
 	if (reg_bo == NULL)
-		reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
+		reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
 	if (IS_ERR(reg_bo))
 		goto out_free;
 	overlay->reg_bo = reg_bo;
@@ -1434,7 +1436,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	intel_overlay_unmap_regs(overlay, regs);
 
 	dev_priv->overlay = overlay;
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 	DRM_INFO("initialized overlay support\n");
 	return;
 
@@ -1444,7 +1446,7 @@ out_unpin_bo:
 out_free_bo:
 	drm_gem_object_unreference(&reg_bo->base);
 out_free:
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 	kfree(overlay);
 	return;
 }
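The overlay code keeps asserting its locking rules; only the spelling of
the lock's location changes (dev_priv->drm.struct_mutex). As a userspace
sketch of what lockdep_assert_held() expresses -- the requirement is
checked at runtime rather than merely documented; in the kernel the check
is only compiled in with CONFIG_LOCKDEP:

	#include <assert.h>
	#include <stdbool.h>

	struct overlay_ctx {
		/* stand-in for struct_mutex; lockdep tracks real mutexes */
		bool struct_mutex_held;
	};

	static void release_old_vid(struct overlay_ctx *ctx)
	{
		/* ~ lockdep_assert_held(&dev_priv->drm.struct_mutex) */
		assert(ctx->struct_mutex_held);
		/* ... work that is only safe under the mutex ... */
	}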
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bf721781c259..96c65d77e886 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -377,7 +377,7 @@ out:
 enum drm_connector_status
 intel_panel_detect(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Assume that the BIOS does not lie through the OpRegion... */
 	if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
 	if (panel->backlight.combination_mode) {
 		u8 lbpc;
 
-		pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
+		pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
 		val *= lbpc;
 	}
 
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
 
 		lbpc = level * 0xfe / panel->backlight.max + 1;
 		level /= lbpc;
-		pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
+		pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
 	}
 
 	if (IS_GEN4(dev_priv)) {
@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
 	 * backlight. This will leave the backlight on unnecessarily when
 	 * another client is not activated.
 	 */
-	if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+	if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
 		DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
 		return;
 	}
@@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
 {
 	struct intel_connector *connector = bl_get_data(bd);
 	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 hw_level;
 	int ret;
 
@@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
 	.get_brightness = intel_backlight_device_get_brightness,
 };
 
-static int intel_backlight_device_register(struct intel_connector *connector)
+int intel_backlight_device_register(struct intel_connector *connector)
 {
 	struct intel_panel *panel = &connector->panel;
 	struct backlight_properties props;
@@ -1225,11 +1225,6 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
 		panel->backlight.device = NULL;
 	}
 }
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
-{
-	return 0;
-}
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
 /*
@@ -1321,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
 {
 	struct drm_device *dev = connector->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int clock;
 
 	if (IS_G4X(dev_priv))
@@ -1736,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 		panel->backlight.set = bxt_set_backlight;
 		panel->backlight.get = bxt_get_backlight;
 		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-	} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+	} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+		   HAS_PCH_KBP(dev_priv)) {
 		panel->backlight.setup = lpt_setup_backlight;
 		panel->backlight.enable = lpt_enable_backlight;
 		panel->backlight.disable = lpt_disable_backlight;
@@ -1809,11 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel)
 		drm_mode_destroy(intel_connector->base.dev,
 				 panel->downclock_mode);
 }
-
-void intel_backlight_register(struct drm_device *dev)
-{
-	struct intel_connector *connector;
-
-	for_each_intel_connector(dev, connector)
-		intel_backlight_device_register(connector);
-}
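The deleted driver-wide intel_backlight_register() pass pairs with the
->late_register hook wired up in the LVDS connector funcs earlier in this
patch: backlight devices now appear to be created when a connector is
registered with userspace and torn down from ->early_unregister, which is
why intel_backlight_device_register() loses its static. A sketch of that
pairing (hook names follow the DRM core; the body is illustrative):

	struct drm_connector;	/* opaque here */

	struct connector_hooks {
		int (*late_register)(struct drm_connector *connector);
		void (*early_unregister)(struct drm_connector *connector);
	};

	/* runs once the connector is visible to userspace */
	static int register_backlight(struct drm_connector *connector)
	{
		/* e.g. call intel_backlight_device_register() here */
		return 0;
	}

	static const struct connector_hooks hooks = {
		.late_register = register_backlight,
		/* .early_unregister would remove the backlight device */
	};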
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 658a75659657..5a8ee0c76593 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -57,7 +57,7 @@
 
 static void gen9_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
 	I915_WRITE(CHICKEN_PAR1_1,
@@ -83,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev)
 
 static void bxt_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	gen9_init_clock_gating(dev);
 
@@ -109,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp;
 
 	tmp = I915_READ(CLKCFG);
@@ -148,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
 
 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 ddrpll, csipll;
 
 	ddrpll = I915_READ16(DDRMPLL1);
@@ -319,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 
 void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 val;
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -375,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
 static int vlv_get_fifo_size(struct drm_device *dev,
 			     enum pipe pipe, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int sprite0_start, sprite1_start, size;
 
 	switch (pipe) {
@@ -426,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -442,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -459,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i845_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -637,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
 	u32 reg;
@@ -934,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 
 static void vlv_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* all latencies in usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1325,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
 static void vlv_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct vlv_wm_values wm = {};
@@ -1381,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
@@ -1438,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;
@@ -1512,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
 	uint32_t fwater_hi;
@@ -1642,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
 	uint32_t fwater_lo;
@@ -2070,7 +2070,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 
 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (IS_GEN9(dev)) {
 		uint32_t val;
@@ -2236,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 				    uint16_t wm[5], uint16_t min)
 {
-	int level, max_level = ilk_wm_max_level(dev_priv->dev);
+	int level, max_level = ilk_wm_max_level(&dev_priv->drm);
 
 	if (wm[0] >= min)
 		return false;
@@ -2250,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 
 static void snb_wm_latency_quirk(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool changed;
 
 	/*
@@ -2272,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
 
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
 
@@ -2294,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 
 static void skl_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
 	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
@@ -2330,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *pristate = NULL;
 	struct intel_plane_state *sprstate = NULL;
@@ -2505,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 			 const struct ilk_wm_maximums *max,
 			 struct intel_pipe_wm *merged)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int level, max_level = ilk_wm_max_level(dev);
 	int last_enabled_level = max_level;
 
@@ -2565,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 /* The value we need to program into the WM_LPx latency field */
 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		return 2 * level;
@@ -2765,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 				struct ilk_wm_values *results)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
 	unsigned int dirty;
 	uint32_t val;
@@ -2840,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 
 bool ilk_disable_lp_wm(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
@@ -3498,7 +3498,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 		     int level,
 		     struct skl_wm_level *result)
 {
-	struct drm_device *dev = dev_priv->dev;
 	struct drm_atomic_state *state = cstate->base.state;
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_plane *plane;
@@ -3514,7 +3513,9 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 	 */
 	memset(result, 0, sizeof(*result));
 
-	for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
+	for_each_intel_plane_mask(&dev_priv->drm,
+				  intel_plane,
+				  cstate->base.plane_mask) {
 		int i = skl_wm_plane_id(intel_plane);
 
 		plane = &intel_plane->base;
@@ -3595,7 +3596,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 			     struct skl_pipe_wm *pipe_wm)
 {
 	struct drm_device *dev = cstate->base.crtc->dev;
-	const struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
 	int level, max_level = ilk_wm_max_level(dev);
 	int ret;
 
@@ -3682,7 +3683,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
 				const struct skl_wm_values *new)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 
 	for_each_intel_crtc(dev, crtc) {
@@ -3779,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
 				struct skl_wm_values *new_values)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct skl_ddb_allocation *cur_ddb, *new_ddb;
 	bool reallocated[I915_MAX_PIPES] = {};
 	struct intel_crtc *crtc;
@@ -3879,6 +3880,19 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
 	return 0;
 }
 
+static uint32_t
+pipes_modified(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *cstate;
+	uint32_t i, ret = 0;
+
+	for_each_crtc_in_state(state, crtc, cstate, i)
+		ret |= drm_crtc_mask(crtc);
+
+	return ret;
+}
+
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {
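pipes_modified() narrows DDB reallocation from every active pipe to just
the pipes present in the atomic state, OR-ing drm_crtc_mask() (1 << index)
per CRTC. A standalone mock of that accumulation -- not the DRM API:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t crtc_mask(unsigned int index)
	{
		return 1u << index;	/* what drm_crtc_mask() boils down to */
	}

	int main(void)
	{
		unsigned int pipes_in_state[] = { 0, 2 };	/* pipes A and C */
		uint32_t realloc_pipes = 0;

		for (unsigned int i = 0; i < 2; i++)
			realloc_pipes |= crtc_mask(pipes_in_state[i]);

		printf("realloc_pipes = 0x%x\n", realloc_pipes);	/* 0x5 */
		return 0;
	}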
@@ -3887,7 +3901,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct intel_crtc *intel_crtc;
 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
-	unsigned realloc_pipes = dev_priv->active_crtcs;
+	uint32_t realloc_pipes = pipes_modified(state);
 	int ret;
 
 	/*
@@ -4002,7 +4016,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
@@ -4043,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 
 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct ilk_wm_maximums max;
 	struct intel_wm_config config = {};
@@ -4145,7 +4159,7 @@ static void skl_pipe_wm_active_state(uint32_t val,
 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@@ -4199,7 +4213,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 
 void skl_wm_get_hw_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct drm_crtc *crtc;
 
@@ -4219,7 +4233,7 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@@ -4423,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct drm_crtc *crtc;
 
@@ -4485,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
  */
 void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 
 	if (dev_priv->display.update_wm)
 		dev_priv->display.update_wm(crtc);
@@ -4654,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
-		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+		if (val > dev_priv->rps.efficient_freq + 1 &&
+		    val > dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 
 	case BETWEEN:
-		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+		if (val <= dev_priv->rps.efficient_freq &&
+		    val < dev_priv->rps.cur_freq)
 			new_power = LOW_POWER;
-		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+		else if (val >= dev_priv->rps.rp0_freq &&
+			 val > dev_priv->rps.cur_freq)
 			new_power = HIGH_POWER;
 		break;
 
 	case HIGH_POWER:
-		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
+		    val < dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 	}
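The rewrapped conditionals above are purely a line-length cleanup; the
logic is the same three-band hysteresis. Restated compactly with plain
values in place of the dev_priv->rps fields (same comparisons):

	enum rps_power { LOW_POWER, BETWEEN, HIGH_POWER };

	static enum rps_power next_power(enum rps_power cur, unsigned int val,
					 unsigned int cur_freq,
					 unsigned int efficient_freq,
					 unsigned int rp1_freq,
					 unsigned int rp0_freq)
	{
		switch (cur) {
		case LOW_POWER:
			if (val > efficient_freq + 1 && val > cur_freq)
				return BETWEEN;
			break;
		case BETWEEN:
			if (val <= efficient_freq && val < cur_freq)
				return LOW_POWER;
			if (val >= rp0_freq && val > cur_freq)
				return HIGH_POWER;
			break;
		case HIGH_POWER:
			if (val < (rp1_freq + rp0_freq) >> 1 && val < cur_freq)
				return BETWEEN;
			break;
		}
		return cur;	/* no band change */
	}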
@@ -4712,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	}
 
 	I915_WRITE(GEN6_RP_UP_EI,
 		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+		   GT_INTERVAL_FROM_US(dev_priv,
+				       ei_up * threshold_up / 100));
 
 	I915_WRITE(GEN6_RP_DOWN_EI,
 		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+		   GT_INTERVAL_FROM_US(dev_priv,
+				       ei_down * threshold_down / 100));
 
 	I915_WRITE(GEN6_RP_CONTROL,
 		   GEN6_RP_MEDIA_TURBO |
 		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
 		   GEN6_RP_MEDIA_IS_GFX |
 		   GEN6_RP_ENABLE |
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
 
 	dev_priv->rps.power = new_power;
 	dev_priv->rps.up_threshold = threshold_up;
@@ -4844,12 +4864,27 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 		gen6_rps_reset_ei(dev_priv);
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+
+		gen6_enable_rps_interrupts(dev_priv);
+
+		/* Ensure we start at the user's desired frequency */
+		intel_set_rps(dev_priv,
+			      clamp(dev_priv->rps.cur_freq,
+				    dev_priv->rps.min_freq_softlimit,
+				    dev_priv->rps.max_freq_softlimit));
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+	/* Flush our bottom-half so that it does not race with us
+	 * setting the idle frequency and so that it is bounded by
+	 * our rpm wakeref. And then disable the interrupts to stop any
+	 * further RPS reclocking whilst we are asleep.
+	 */
+	gen6_disable_rps_interrupts(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -4874,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 	/* This is intentionally racy! We peek at the state here, then
 	 * validate inside the RPS worker.
 	 */
-	if (!(dev_priv->mm.busy &&
+	if (!(dev_priv->gt.awake &&
 	      dev_priv->rps.enabled &&
 	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
 		return;
@@ -4890,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->rps.interrupts_enabled) {
 		dev_priv->rps.client_boost = true;
-		queue_work(dev_priv->wq, &dev_priv->rps.work);
+		schedule_work(&dev_priv->rps.work);
 	}
 	spin_unlock_irq(&dev_priv->irq_lock);
 
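Two behavioural notes in the hunks above: the busy check now keys off
dev_priv->gt.awake, and the RPS worker is kicked via schedule_work(), i.e.
the shared system workqueue, instead of the driver-private dev_priv->wq.
Minimal usage pattern of that kernel API (the work body is illustrative,
and this only builds in a kernel tree):

	#include <linux/workqueue.h>

	static void rps_work_fn(struct work_struct *work)
	{
		/* ... re-evaluate the RPS frequency ... */
	}

	static DECLARE_WORK(rps_work, rps_work_fn);

	static void kick_rps(void)
	{
		/* was: queue_work(dev_priv->wq, &dev_priv->rps.work) */
		schedule_work(&rps_work);
	}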
@@ -4954,14 +4989,15 @@ static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
 		mode = 0;
 	}
 	if (HAS_RC6p(dev_priv))
-		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
-			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
-			      onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
-			      onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
+		DRM_DEBUG_DRIVER("Enabling RC6 states: "
+				 "RC6 %s RC6p %s RC6pp %s\n",
+				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 
 	else
-		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
-			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
+				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }
 
 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
@@ -4969,9 +5005,20 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool enable_rc6 = true;
 	unsigned long rc6_ctx_base;
+	u32 rc_ctl;
+	int rc_sw_target;
+
+	rc_ctl = I915_READ(GEN6_RC_CONTROL);
+	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
+		       RC_SW_TARGET_STATE_SHIFT;
+	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+			 rc_sw_target);
 
 	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-		DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
 		enable_rc6 = false;
 	}
 
@@ -4983,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
 	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
 					ggtt->stolen_reserved_size))) {
-		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
 		enable_rc6 = false;
 	}
 
@@ -4991,15 +5038,24 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
 	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
 	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
-		DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
 		enable_rc6 = false;
 	}
 
-	if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
-					    GEN6_RC_CTL_HW_ENABLE)) &&
-	    ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
-	     !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
-		DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
+	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
+	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
+		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!I915_READ(GEN6_GFXPAUSE)) {
+		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!I915_READ(GEN8_MISC_CTRL0)) {
+		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
 		enable_rc6 = false;
 	}
 
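The extra BIOS sanity checks all follow the same shape: read a register,
decode a field with mask-then-shift (as with the RC_SW_TARGET_STATE read
above), or reject an all-zero setup register. A standalone sketch of the
decode step, using a made-up field layout rather than the real RC_STATE
one:

	#include <stdint.h>
	#include <stdio.h>

	#define FIELD_MASK	(0x7u << 16)	/* illustrative placement */
	#define FIELD_SHIFT	16

	int main(void)
	{
		uint32_t reg = 0x00050000;	/* pretend register readout */
		uint32_t field = (reg & FIELD_MASK) >> FIELD_SHIFT;

		printf("decoded field = %u\n", field);	/* prints 5 */
		return 0;
	}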
@@ -5031,8 +5087,9 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
5031 mask = INTEL_RC6_ENABLE; 5087 mask = INTEL_RC6_ENABLE;
5032 5088
5033 if ((enable_rc6 & mask) != enable_rc6) 5089 if ((enable_rc6 & mask) != enable_rc6)
5034 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", 5090 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
5035 enable_rc6 & mask, enable_rc6, mask); 5091 "(requested %d, valid %d)\n",
5092 enable_rc6 & mask, enable_rc6, mask);
5036 5093
5037 return enable_rc6 & mask; 5094 return enable_rc6 & mask;
5038 } 5095 }
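sanitize_rc6_option() clamps the user-requested RC6 bitmask to the modes the platform supports; bits outside the valid mask are dropped and reported once. A worked example using the driver's RC6 option bits (values as defined in the driver headers):

	#define INTEL_RC6_ENABLE	(1 << 0)
	#define INTEL_RC6p_ENABLE	(1 << 1)
	#define INTEL_RC6pp_ENABLE	(1 << 2)

	int requested = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE;	/* 3 */
	int mask = INTEL_RC6_ENABLE;		/* only plain RC6 is valid */
	int effective = requested & mask;	/* 1: the RC6p bit is dropped */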
@@ -5643,7 +5700,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5643 u32 pcbr; 5700 u32 pcbr;
5644 int pctx_size = 24*1024; 5701 int pctx_size = 24*1024;
5645 5702
5646 mutex_lock(&dev_priv->dev->struct_mutex); 5703 mutex_lock(&dev_priv->drm.struct_mutex);
5647 5704
5648 pcbr = I915_READ(VLV_PCBR); 5705 pcbr = I915_READ(VLV_PCBR);
5649 if (pcbr) { 5706 if (pcbr) {
@@ -5651,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5651 int pcbr_offset; 5708 int pcbr_offset;
5652 5709
5653 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 5710 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5654 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, 5711 pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
5655 pcbr_offset, 5712 pcbr_offset,
5656 I915_GTT_OFFSET_NONE, 5713 I915_GTT_OFFSET_NONE,
5657 pctx_size); 5714 pctx_size);
@@ -5668,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5668 * overlap with other ranges, such as the frame buffer, protected 5725 * overlap with other ranges, such as the frame buffer, protected
5669 * memory, or any other relevant ranges. 5726 * memory, or any other relevant ranges.
5670 */ 5727 */
5671 pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size); 5728 pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
5672 if (!pctx) { 5729 if (!pctx) {
5673 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5730 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5674 goto out; 5731 goto out;
@@ -5680,7 +5737,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5680out: 5737out:
5681 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5738 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5682 dev_priv->vlv_pctx = pctx; 5739 dev_priv->vlv_pctx = pctx;
5683 mutex_unlock(&dev_priv->dev->struct_mutex); 5740 mutex_unlock(&dev_priv->drm.struct_mutex);
5684} 5741}
5685 5742
5686static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) 5743static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
@@ -6624,9 +6681,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6624 6681
6625 if (IS_IRONLAKE_M(dev_priv)) { 6682 if (IS_IRONLAKE_M(dev_priv)) {
6626 ironlake_enable_drps(dev_priv); 6683 ironlake_enable_drps(dev_priv);
6627 mutex_lock(&dev_priv->dev->struct_mutex); 6684 mutex_lock(&dev_priv->drm.struct_mutex);
6628 intel_init_emon(dev_priv); 6685 intel_init_emon(dev_priv);
6629 mutex_unlock(&dev_priv->dev->struct_mutex); 6686 mutex_unlock(&dev_priv->drm.struct_mutex);
6630 } else if (INTEL_INFO(dev_priv)->gen >= 6) { 6687 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6631 /* 6688 /*
6632 * PCU communication is slow and this doesn't need to be 6689 * PCU communication is slow and this doesn't need to be
@@ -6657,7 +6714,7 @@ void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
6657 6714
6658static void ibx_init_clock_gating(struct drm_device *dev) 6715static void ibx_init_clock_gating(struct drm_device *dev)
6659{ 6716{
6660 struct drm_i915_private *dev_priv = dev->dev_private; 6717 struct drm_i915_private *dev_priv = to_i915(dev);
6661 6718
6662 /* 6719 /*
6663 * On Ibex Peak and Cougar Point, we need to disable clock 6720 * On Ibex Peak and Cougar Point, we need to disable clock
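The recurring dev->dev_private to to_i915(dev) conversion in the clock-gating hooks below reflects this series embedding the drm_device inside drm_i915_private. Assuming that layout, to_i915() is just a container_of() accessor, roughly:

	static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
	{
		return container_of(dev, struct drm_i915_private, drm);
	}

which is also why &dev_priv->drm replaces dev_priv->dev in the same hunks: the pointer chase through dev_private becomes compile-time pointer arithmetic.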
@@ -6669,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
6669 6726
6670static void g4x_disable_trickle_feed(struct drm_device *dev) 6727static void g4x_disable_trickle_feed(struct drm_device *dev)
6671{ 6728{
6672 struct drm_i915_private *dev_priv = dev->dev_private; 6729 struct drm_i915_private *dev_priv = to_i915(dev);
6673 enum pipe pipe; 6730 enum pipe pipe;
6674 6731
6675 for_each_pipe(dev_priv, pipe) { 6732 for_each_pipe(dev_priv, pipe) {
@@ -6684,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
6684 6741
6685static void ilk_init_lp_watermarks(struct drm_device *dev) 6742static void ilk_init_lp_watermarks(struct drm_device *dev)
6686{ 6743{
6687 struct drm_i915_private *dev_priv = dev->dev_private; 6744 struct drm_i915_private *dev_priv = to_i915(dev);
6688 6745
6689 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 6746 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6690 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 6747 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6698,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
6698 6755
6699static void ironlake_init_clock_gating(struct drm_device *dev) 6756static void ironlake_init_clock_gating(struct drm_device *dev)
6700{ 6757{
6701 struct drm_i915_private *dev_priv = dev->dev_private; 6758 struct drm_i915_private *dev_priv = to_i915(dev);
6702 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6759 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6703 6760
6704 /* 6761 /*
@@ -6772,7 +6829,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
6772 6829
6773static void cpt_init_clock_gating(struct drm_device *dev) 6830static void cpt_init_clock_gating(struct drm_device *dev)
6774{ 6831{
6775 struct drm_i915_private *dev_priv = dev->dev_private; 6832 struct drm_i915_private *dev_priv = to_i915(dev);
6776 int pipe; 6833 int pipe;
6777 uint32_t val; 6834 uint32_t val;
6778 6835
@@ -6809,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
6809 6866
6810static void gen6_check_mch_setup(struct drm_device *dev) 6867static void gen6_check_mch_setup(struct drm_device *dev)
6811{ 6868{
6812 struct drm_i915_private *dev_priv = dev->dev_private; 6869 struct drm_i915_private *dev_priv = to_i915(dev);
6813 uint32_t tmp; 6870 uint32_t tmp;
6814 6871
6815 tmp = I915_READ(MCH_SSKPD); 6872 tmp = I915_READ(MCH_SSKPD);
@@ -6820,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
6820 6877
6821static void gen6_init_clock_gating(struct drm_device *dev) 6878static void gen6_init_clock_gating(struct drm_device *dev)
6822{ 6879{
6823 struct drm_i915_private *dev_priv = dev->dev_private; 6880 struct drm_i915_private *dev_priv = to_i915(dev);
6824 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6881 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6825 6882
6826 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6883 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -6935,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6935 6992
6936static void lpt_init_clock_gating(struct drm_device *dev) 6993static void lpt_init_clock_gating(struct drm_device *dev)
6937{ 6994{
6938 struct drm_i915_private *dev_priv = dev->dev_private; 6995 struct drm_i915_private *dev_priv = to_i915(dev);
6939 6996
6940 /* 6997 /*
6941 * TODO: this bit should only be enabled when really needed, then 6998 * TODO: this bit should only be enabled when really needed, then
@@ -6954,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
6954 7011
6955static void lpt_suspend_hw(struct drm_device *dev) 7012static void lpt_suspend_hw(struct drm_device *dev)
6956{ 7013{
6957 struct drm_i915_private *dev_priv = dev->dev_private; 7014 struct drm_i915_private *dev_priv = to_i915(dev);
6958 7015
6959 if (HAS_PCH_LPT_LP(dev)) { 7016 if (HAS_PCH_LPT_LP(dev)) {
6960 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 7017 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -6989,7 +7046,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
6989 7046
6990static void kabylake_init_clock_gating(struct drm_device *dev) 7047static void kabylake_init_clock_gating(struct drm_device *dev)
6991{ 7048{
6992 struct drm_i915_private *dev_priv = dev->dev_private; 7049 struct drm_i915_private *dev_priv = to_i915(dev);
6993 7050
6994 gen9_init_clock_gating(dev); 7051 gen9_init_clock_gating(dev);
6995 7052
@@ -7010,7 +7067,7 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
7010 7067
7011static void skylake_init_clock_gating(struct drm_device *dev) 7068static void skylake_init_clock_gating(struct drm_device *dev)
7012{ 7069{
7013 struct drm_i915_private *dev_priv = dev->dev_private; 7070 struct drm_i915_private *dev_priv = to_i915(dev);
7014 7071
7015 gen9_init_clock_gating(dev); 7072 gen9_init_clock_gating(dev);
7016 7073
@@ -7025,7 +7082,7 @@ static void skylake_init_clock_gating(struct drm_device *dev)
7025 7082
7026static void broadwell_init_clock_gating(struct drm_device *dev) 7083static void broadwell_init_clock_gating(struct drm_device *dev)
7027{ 7084{
7028 struct drm_i915_private *dev_priv = dev->dev_private; 7085 struct drm_i915_private *dev_priv = to_i915(dev);
7029 enum pipe pipe; 7086 enum pipe pipe;
7030 7087
7031 ilk_init_lp_watermarks(dev); 7088 ilk_init_lp_watermarks(dev);
@@ -7076,7 +7133,7 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
7076 7133
7077static void haswell_init_clock_gating(struct drm_device *dev) 7134static void haswell_init_clock_gating(struct drm_device *dev)
7078{ 7135{
7079 struct drm_i915_private *dev_priv = dev->dev_private; 7136 struct drm_i915_private *dev_priv = to_i915(dev);
7080 7137
7081 ilk_init_lp_watermarks(dev); 7138 ilk_init_lp_watermarks(dev);
7082 7139
@@ -7132,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
7132 7189
7133static void ivybridge_init_clock_gating(struct drm_device *dev) 7190static void ivybridge_init_clock_gating(struct drm_device *dev)
7134{ 7191{
7135 struct drm_i915_private *dev_priv = dev->dev_private; 7192 struct drm_i915_private *dev_priv = to_i915(dev);
7136 uint32_t snpcr; 7193 uint32_t snpcr;
7137 7194
7138 ilk_init_lp_watermarks(dev); 7195 ilk_init_lp_watermarks(dev);
@@ -7230,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
7230 7287
7231static void valleyview_init_clock_gating(struct drm_device *dev) 7288static void valleyview_init_clock_gating(struct drm_device *dev)
7232{ 7289{
7233 struct drm_i915_private *dev_priv = dev->dev_private; 7290 struct drm_i915_private *dev_priv = to_i915(dev);
7234 7291
7235 /* WaDisableEarlyCull:vlv */ 7292 /* WaDisableEarlyCull:vlv */
7236 I915_WRITE(_3D_CHICKEN3, 7293 I915_WRITE(_3D_CHICKEN3,
@@ -7312,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
7312 7369
7313static void cherryview_init_clock_gating(struct drm_device *dev) 7370static void cherryview_init_clock_gating(struct drm_device *dev)
7314{ 7371{
7315 struct drm_i915_private *dev_priv = dev->dev_private; 7372 struct drm_i915_private *dev_priv = to_i915(dev);
7316 7373
7317 /* WaVSRefCountFullforceMissDisable:chv */ 7374 /* WaVSRefCountFullforceMissDisable:chv */
7318 /* WaDSRefCountFullforceMissDisable:chv */ 7375 /* WaDSRefCountFullforceMissDisable:chv */
@@ -7348,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
7348 7405
7349static void g4x_init_clock_gating(struct drm_device *dev) 7406static void g4x_init_clock_gating(struct drm_device *dev)
7350{ 7407{
7351 struct drm_i915_private *dev_priv = dev->dev_private; 7408 struct drm_i915_private *dev_priv = to_i915(dev);
7352 uint32_t dspclk_gate; 7409 uint32_t dspclk_gate;
7353 7410
7354 I915_WRITE(RENCLK_GATE_D1, 0); 7411 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7375,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
7375 7432
7376static void crestline_init_clock_gating(struct drm_device *dev) 7433static void crestline_init_clock_gating(struct drm_device *dev)
7377{ 7434{
7378 struct drm_i915_private *dev_priv = dev->dev_private; 7435 struct drm_i915_private *dev_priv = to_i915(dev);
7379 7436
7380 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 7437 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7381 I915_WRITE(RENCLK_GATE_D2, 0); 7438 I915_WRITE(RENCLK_GATE_D2, 0);
@@ -7391,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
7391 7448
7392static void broadwater_init_clock_gating(struct drm_device *dev) 7449static void broadwater_init_clock_gating(struct drm_device *dev)
7393{ 7450{
7394 struct drm_i915_private *dev_priv = dev->dev_private; 7451 struct drm_i915_private *dev_priv = to_i915(dev);
7395 7452
7396 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 7453 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7397 I965_RCC_CLOCK_GATE_DISABLE | 7454 I965_RCC_CLOCK_GATE_DISABLE |
@@ -7408,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
7408 7465
7409static void gen3_init_clock_gating(struct drm_device *dev) 7466static void gen3_init_clock_gating(struct drm_device *dev)
7410{ 7467{
7411 struct drm_i915_private *dev_priv = dev->dev_private; 7468 struct drm_i915_private *dev_priv = to_i915(dev);
7412 u32 dstate = I915_READ(D_STATE); 7469 u32 dstate = I915_READ(D_STATE);
7413 7470
7414 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 7471 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -7433,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
7433 7490
7434static void i85x_init_clock_gating(struct drm_device *dev) 7491static void i85x_init_clock_gating(struct drm_device *dev)
7435{ 7492{
7436 struct drm_i915_private *dev_priv = dev->dev_private; 7493 struct drm_i915_private *dev_priv = to_i915(dev);
7437 7494
7438 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7495 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7439 7496
@@ -7447,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
7447 7504
7448static void i830_init_clock_gating(struct drm_device *dev) 7505static void i830_init_clock_gating(struct drm_device *dev)
7449{ 7506{
7450 struct drm_i915_private *dev_priv = dev->dev_private; 7507 struct drm_i915_private *dev_priv = to_i915(dev);
7451 7508
7452 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 7509 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7453 7510
@@ -7458,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
7458 7515
7459void intel_init_clock_gating(struct drm_device *dev) 7516void intel_init_clock_gating(struct drm_device *dev)
7460{ 7517{
7461 struct drm_i915_private *dev_priv = dev->dev_private; 7518 struct drm_i915_private *dev_priv = to_i915(dev);
7462 7519
7463 dev_priv->display.init_clock_gating(dev); 7520 dev_priv->display.init_clock_gating(dev);
7464} 7521}
@@ -7526,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7526/* Set up chip specific power management-related functions */ 7583/* Set up chip specific power management-related functions */
7527void intel_init_pm(struct drm_device *dev) 7584void intel_init_pm(struct drm_device *dev)
7528{ 7585{
7529 struct drm_i915_private *dev_priv = dev->dev_private; 7586 struct drm_i915_private *dev_priv = to_i915(dev);
7530 7587
7531 intel_fbc_init(dev_priv); 7588 intel_fbc_init(dev_priv);
7532 7589
@@ -7604,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
7604{ 7661{
7605 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7662 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7606 7663
7607 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 7664 /* GEN6_PCODE_* are outside of the forcewake domain, so we can
7665 * use the I915_READ_FW variants to reduce the amount of work
7666 * required when reading/writing.
7667 */
7668
7669 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7608 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); 7670 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7609 return -EAGAIN; 7671 return -EAGAIN;
7610 } 7672 }
7611 7673
7612 I915_WRITE(GEN6_PCODE_DATA, *val); 7674 I915_WRITE_FW(GEN6_PCODE_DATA, *val);
7613 I915_WRITE(GEN6_PCODE_DATA1, 0); 7675 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
7614 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7676 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7615 7677
7616 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7678 if (intel_wait_for_register_fw(dev_priv,
7617 500)) { 7679 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7680 500)) {
7618 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); 7681 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7619 return -ETIMEDOUT; 7682 return -ETIMEDOUT;
7620 } 7683 }
7621 7684
7622 *val = I915_READ(GEN6_PCODE_DATA); 7685 *val = I915_READ_FW(GEN6_PCODE_DATA);
7623 I915_WRITE(GEN6_PCODE_DATA, 0); 7686 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7624 7687
7625 return 0; 7688 return 0;
7626} 7689}
7627 7690
7628int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) 7691int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7692 u32 mbox, u32 val)
7629{ 7693{
7630 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7694 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7631 7695
7632 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 7696 /* GEN6_PCODE_* are outside of the forcewake domain, so we can
7697 * use the I915_READ_FW variants to reduce the amount of work
7698 * required when reading/writing.
7699 */
7700
7701 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7633 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); 7702 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7634 return -EAGAIN; 7703 return -EAGAIN;
7635 } 7704 }
7636 7705
7637 I915_WRITE(GEN6_PCODE_DATA, val); 7706 I915_WRITE_FW(GEN6_PCODE_DATA, val);
7638 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7707 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7639 7708
7640 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7709 if (intel_wait_for_register_fw(dev_priv,
7641 500)) { 7710 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7711 500)) {
7642 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); 7712 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7643 return -ETIMEDOUT; 7713 return -ETIMEDOUT;
7644 } 7714 }
7645 7715
7646 I915_WRITE(GEN6_PCODE_DATA, 0); 7716 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7647 7717
7648 return 0; 7718 return 0;
7649} 7719}
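Both mailbox helpers assume the caller already holds rps.hw_lock, hence the WARN_ON at the top of each. A hypothetical caller sketch, using a pcode command that exists in i915_reg.h:

	u32 val = 0;
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv,
				     GEN6_PCODE_READ_MIN_FREQ_TABLE, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret)
		DRM_DEBUG_DRIVER("pcode read failed: %d\n", ret);

The _FW accessors and intel_wait_for_register_fw() are safe here precisely because the GEN6_PCODE_* registers sit outside every forcewake domain, so no forcewake reference needs to be taken or held across the 500 ms poll.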
@@ -7713,7 +7783,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
7713 struct request_boost *boost = container_of(work, struct request_boost, work); 7783 struct request_boost *boost = container_of(work, struct request_boost, work);
7714 struct drm_i915_gem_request *req = boost->req; 7784 struct drm_i915_gem_request *req = boost->req;
7715 7785
7716 if (!i915_gem_request_completed(req, true)) 7786 if (!i915_gem_request_completed(req))
7717 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies); 7787 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
7718 7788
7719 i915_gem_request_unreference(req); 7789 i915_gem_request_unreference(req);
@@ -7727,7 +7797,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
7727 if (req == NULL || INTEL_GEN(req->i915) < 6) 7797 if (req == NULL || INTEL_GEN(req->i915) < 6)
7728 return; 7798 return;
7729 7799
7730 if (i915_gem_request_completed(req, true)) 7800 if (i915_gem_request_completed(req))
7731 return; 7801 return;
7732 7802
7733 boost = kmalloc(sizeof(*boost), GFP_ATOMIC); 7803 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
@@ -7743,7 +7813,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
7743 7813
7744void intel_pm_setup(struct drm_device *dev) 7814void intel_pm_setup(struct drm_device *dev)
7745{ 7815{
7746 struct drm_i915_private *dev_priv = dev->dev_private; 7816 struct drm_i915_private *dev_priv = to_i915(dev);
7747 7817
7748 mutex_init(&dev_priv->rps.hw_lock); 7818 mutex_init(&dev_priv->rps.hw_lock);
7749 spin_lock_init(&dev_priv->rps.client_lock); 7819 spin_lock_init(&dev_priv->rps.client_lock);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 29a09bf6bd18..68bd0bb34817 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
63 63
64static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) 64static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
65{ 65{
66 struct drm_i915_private *dev_priv = dev->dev_private; 66 struct drm_i915_private *dev_priv = to_i915(dev);
67 uint32_t val; 67 uint32_t val;
68 68
69 val = I915_READ(VLV_PSRSTAT(pipe)) & 69 val = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
77{ 77{
78 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 78 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
79 struct drm_device *dev = dig_port->base.base.dev; 79 struct drm_device *dev = dig_port->base.base.dev;
80 struct drm_i915_private *dev_priv = dev->dev_private; 80 struct drm_i915_private *dev_priv = to_i915(dev);
81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
82 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 82 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
83 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 83 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
107{ 107{
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109 struct drm_device *dev = intel_dig_port->base.base.dev; 109 struct drm_device *dev = intel_dig_port->base.base.dev;
110 struct drm_i915_private *dev_priv = dev->dev_private; 110 struct drm_i915_private *dev_priv = to_i915(dev);
111 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 111 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
112 enum pipe pipe = to_intel_crtc(crtc)->pipe; 112 enum pipe pipe = to_intel_crtc(crtc)->pipe;
113 uint32_t val; 113 uint32_t val;
@@ -173,7 +173,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
173{ 173{
174 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 174 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
175 struct drm_device *dev = dig_port->base.base.dev; 175 struct drm_device *dev = dig_port->base.base.dev;
176 struct drm_i915_private *dev_priv = dev->dev_private; 176 struct drm_i915_private *dev_priv = to_i915(dev);
177 uint32_t aux_clock_divider; 177 uint32_t aux_clock_divider;
178 i915_reg_t aux_ctl_reg; 178 i915_reg_t aux_ctl_reg;
179 static const uint8_t aux_msg[] = { 179 static const uint8_t aux_msg[] = {
@@ -220,7 +220,7 @@ static void vlv_psr_enable_source(struct intel_dp *intel_dp)
220{ 220{
221 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 221 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
222 struct drm_device *dev = dig_port->base.base.dev; 222 struct drm_device *dev = dig_port->base.base.dev;
223 struct drm_i915_private *dev_priv = dev->dev_private; 223 struct drm_i915_private *dev_priv = to_i915(dev);
224 struct drm_crtc *crtc = dig_port->base.base.crtc; 224 struct drm_crtc *crtc = dig_port->base.base.crtc;
225 enum pipe pipe = to_intel_crtc(crtc)->pipe; 225 enum pipe pipe = to_intel_crtc(crtc)->pipe;
226 226
@@ -235,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
235{ 235{
236 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 236 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
237 struct drm_device *dev = dig_port->base.base.dev; 237 struct drm_device *dev = dig_port->base.base.dev;
238 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = to_i915(dev);
239 struct drm_crtc *crtc = dig_port->base.base.crtc; 239 struct drm_crtc *crtc = dig_port->base.base.crtc;
240 enum pipe pipe = to_intel_crtc(crtc)->pipe; 240 enum pipe pipe = to_intel_crtc(crtc)->pipe;
241 241
@@ -252,7 +252,7 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
252{ 252{
253 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 253 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
254 struct drm_device *dev = dig_port->base.base.dev; 254 struct drm_device *dev = dig_port->base.base.dev;
255 struct drm_i915_private *dev_priv = dev->dev_private; 255 struct drm_i915_private *dev_priv = to_i915(dev);
256 256
257 uint32_t max_sleep_time = 0x1f; 257 uint32_t max_sleep_time = 0x1f;
258 /* Lately it was identified that depending on panel idle frame count 258 /* Lately it was identified that depending on panel idle frame count
@@ -324,7 +324,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
324{ 324{
325 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 325 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
326 struct drm_device *dev = dig_port->base.base.dev; 326 struct drm_device *dev = dig_port->base.base.dev;
327 struct drm_i915_private *dev_priv = dev->dev_private; 327 struct drm_i915_private *dev_priv = to_i915(dev);
328 struct drm_crtc *crtc = dig_port->base.base.crtc; 328 struct drm_crtc *crtc = dig_port->base.base.crtc;
329 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 329 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
330 330
@@ -378,7 +378,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
378{ 378{
379 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 379 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
380 struct drm_device *dev = intel_dig_port->base.base.dev; 380 struct drm_device *dev = intel_dig_port->base.base.dev;
381 struct drm_i915_private *dev_priv = dev->dev_private; 381 struct drm_i915_private *dev_priv = to_i915(dev);
382 382
383 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); 383 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
384 WARN_ON(dev_priv->psr.active); 384 WARN_ON(dev_priv->psr.active);
@@ -407,7 +407,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
407{ 407{
408 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 408 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
409 struct drm_device *dev = intel_dig_port->base.base.dev; 409 struct drm_device *dev = intel_dig_port->base.base.dev;
410 struct drm_i915_private *dev_priv = dev->dev_private; 410 struct drm_i915_private *dev_priv = to_i915(dev);
411 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 411 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
412 412
413 if (!HAS_PSR(dev)) { 413 if (!HAS_PSR(dev)) {
@@ -494,15 +494,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp)
494{ 494{
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev; 496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private; 497 struct drm_i915_private *dev_priv = to_i915(dev);
498 struct intel_crtc *intel_crtc = 498 struct intel_crtc *intel_crtc =
499 to_intel_crtc(intel_dig_port->base.base.crtc); 499 to_intel_crtc(intel_dig_port->base.base.crtc);
500 uint32_t val; 500 uint32_t val;
501 501
502 if (dev_priv->psr.active) { 502 if (dev_priv->psr.active) {
503 /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */ 503 /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
504 if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) & 504 if (intel_wait_for_register(dev_priv,
505 VLV_EDP_PSR_IN_TRANS) == 0, 1)) 505 VLV_PSRSTAT(intel_crtc->pipe),
506 VLV_EDP_PSR_IN_TRANS,
507 0,
508 1))
506 WARN(1, "PSR transition took longer than expected\n"); 509 WARN(1, "PSR transition took longer than expected\n");
507 510
508 val = I915_READ(VLV_PSRCTL(intel_crtc->pipe)); 511 val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
@@ -521,16 +524,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
521{ 524{
522 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 525 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
523 struct drm_device *dev = intel_dig_port->base.base.dev; 526 struct drm_device *dev = intel_dig_port->base.base.dev;
524 struct drm_i915_private *dev_priv = dev->dev_private; 527 struct drm_i915_private *dev_priv = to_i915(dev);
525 528
526 if (dev_priv->psr.active) { 529 if (dev_priv->psr.active) {
527 I915_WRITE(EDP_PSR_CTL, 530 I915_WRITE(EDP_PSR_CTL,
528 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); 531 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
529 532
530 /* Wait till PSR is idle */ 533 /* Wait till PSR is idle */
531 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 534 if (intel_wait_for_register(dev_priv,
532 EDP_PSR_STATUS_STATE_MASK) == 0, 535 EDP_PSR_STATUS_CTL,
533 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) 536 EDP_PSR_STATUS_STATE_MASK,
537 0,
538 2000))
534 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 539 DRM_ERROR("Timed out waiting for PSR Idle State\n");
535 540
536 dev_priv->psr.active = false; 541 dev_priv->psr.active = false;
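The conversions in this patch all follow one shape: an open-coded wait_for((I915_READ(reg) & mask) == value, timeout) poll becomes a call to the new helper, whose contract is roughly (signature as introduced elsewhere in this series):

	int intel_wait_for_register(struct drm_i915_private *dev_priv,
				    i915_reg_t reg,
				    u32 mask, u32 value,
				    unsigned int timeout_ms);
	/* returns 0 as soon as (I915_READ(reg) & mask) == value,
	 * or -ETIMEDOUT after timeout_ms milliseconds. */

Note the hsw_psr_disable() conversion above also normalises the timeout units: the old _wait_for() took 2 * USEC_PER_SEC microseconds, the new call takes 2000 milliseconds, the same two seconds. Waiting for a bit to become set is expressed by passing the same bit for both mask and value, as the stop_ring() conversion later in this patch does with MODE_IDLE.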
@@ -549,7 +554,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
549{ 554{
550 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
551 struct drm_device *dev = intel_dig_port->base.base.dev; 556 struct drm_device *dev = intel_dig_port->base.base.dev;
552 struct drm_i915_private *dev_priv = dev->dev_private; 557 struct drm_i915_private *dev_priv = to_i915(dev);
553 558
554 mutex_lock(&dev_priv->psr.lock); 559 mutex_lock(&dev_priv->psr.lock);
555 if (!dev_priv->psr.enabled) { 560 if (!dev_priv->psr.enabled) {
@@ -586,14 +591,20 @@ static void intel_psr_work(struct work_struct *work)
586 * and be ready for re-enable. 591 * and be ready for re-enable.
587 */ 592 */
588 if (HAS_DDI(dev_priv)) { 593 if (HAS_DDI(dev_priv)) {
589 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 594 if (intel_wait_for_register(dev_priv,
590 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { 595 EDP_PSR_STATUS_CTL,
596 EDP_PSR_STATUS_STATE_MASK,
597 0,
598 50)) {
591 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); 599 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
592 return; 600 return;
593 } 601 }
594 } else { 602 } else {
595 if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) & 603 if (intel_wait_for_register(dev_priv,
596 VLV_EDP_PSR_IN_TRANS) == 0, 1)) { 604 VLV_PSRSTAT(pipe),
605 VLV_EDP_PSR_IN_TRANS,
606 0,
607 1)) {
597 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); 608 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
598 return; 609 return;
599 } 610 }
@@ -619,7 +630,7 @@ unlock:
619 630
620static void intel_psr_exit(struct drm_device *dev) 631static void intel_psr_exit(struct drm_device *dev)
621{ 632{
622 struct drm_i915_private *dev_priv = dev->dev_private; 633 struct drm_i915_private *dev_priv = to_i915(dev);
623 struct intel_dp *intel_dp = dev_priv->psr.enabled; 634 struct intel_dp *intel_dp = dev_priv->psr.enabled;
624 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 635 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
625 enum pipe pipe = to_intel_crtc(crtc)->pipe; 636 enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -674,7 +685,7 @@ static void intel_psr_exit(struct drm_device *dev)
674void intel_psr_single_frame_update(struct drm_device *dev, 685void intel_psr_single_frame_update(struct drm_device *dev,
675 unsigned frontbuffer_bits) 686 unsigned frontbuffer_bits)
676{ 687{
677 struct drm_i915_private *dev_priv = dev->dev_private; 688 struct drm_i915_private *dev_priv = to_i915(dev);
678 struct drm_crtc *crtc; 689 struct drm_crtc *crtc;
679 enum pipe pipe; 690 enum pipe pipe;
680 u32 val; 691 u32 val;
@@ -722,7 +733,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
722void intel_psr_invalidate(struct drm_device *dev, 733void intel_psr_invalidate(struct drm_device *dev,
723 unsigned frontbuffer_bits) 734 unsigned frontbuffer_bits)
724{ 735{
725 struct drm_i915_private *dev_priv = dev->dev_private; 736 struct drm_i915_private *dev_priv = to_i915(dev);
726 struct drm_crtc *crtc; 737 struct drm_crtc *crtc;
727 enum pipe pipe; 738 enum pipe pipe;
728 739
@@ -760,7 +771,7 @@ void intel_psr_invalidate(struct drm_device *dev,
760void intel_psr_flush(struct drm_device *dev, 771void intel_psr_flush(struct drm_device *dev,
761 unsigned frontbuffer_bits, enum fb_op_origin origin) 772 unsigned frontbuffer_bits, enum fb_op_origin origin)
762{ 773{
763 struct drm_i915_private *dev_priv = dev->dev_private; 774 struct drm_i915_private *dev_priv = to_i915(dev);
764 struct drm_crtc *crtc; 775 struct drm_crtc *crtc;
765 enum pipe pipe; 776 enum pipe pipe;
766 777
@@ -796,7 +807,7 @@ void intel_psr_flush(struct drm_device *dev,
796 */ 807 */
797void intel_psr_init(struct drm_device *dev) 808void intel_psr_init(struct drm_device *dev)
798{ 809{
799 struct drm_i915_private *dev_priv = dev->dev_private; 810 struct drm_i915_private *dev_priv = to_i915(dev);
800 811
801 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? 812 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
802 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; 813 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fedd27049814..61e00bf9e87f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -58,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
58 ringbuf->tail, ringbuf->size); 58 ringbuf->tail, ringbuf->size);
59} 59}
60 60
61bool intel_engine_stopped(struct intel_engine_cs *engine)
62{
63 struct drm_i915_private *dev_priv = engine->i915;
64 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
65}
66
67static void __intel_ring_advance(struct intel_engine_cs *engine) 61static void __intel_ring_advance(struct intel_engine_cs *engine)
68{ 62{
69 struct intel_ringbuffer *ringbuf = engine->buffer; 63 struct intel_ringbuffer *ringbuf = engine->buffer;
70 ringbuf->tail &= ringbuf->size - 1; 64 ringbuf->tail &= ringbuf->size - 1;
71 if (intel_engine_stopped(engine))
72 return;
73 engine->write_tail(engine, ringbuf->tail); 65 engine->write_tail(engine, ringbuf->tail);
74} 66}
75 67
@@ -515,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
515 I915_WRITE(reg, 507 I915_WRITE(reg,
516 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 508 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
517 INSTPM_SYNC_FLUSH)); 509 INSTPM_SYNC_FLUSH));
518 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 510 if (intel_wait_for_register(dev_priv,
519 1000)) 511 reg, INSTPM_SYNC_FLUSH, 0,
512 1000))
520 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 513 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
521 engine->name); 514 engine->name);
522 } 515 }
@@ -528,7 +521,11 @@ static bool stop_ring(struct intel_engine_cs *engine)
528 521
529 if (!IS_GEN2(dev_priv)) { 522 if (!IS_GEN2(dev_priv)) {
530 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 523 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
531 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 524 if (intel_wait_for_register(dev_priv,
525 RING_MI_MODE(engine->mmio_base),
526 MODE_IDLE,
527 MODE_IDLE,
528 1000)) {
532 DRM_ERROR("%s : timed out trying to stop ring\n", 529 DRM_ERROR("%s : timed out trying to stop ring\n",
533 engine->name); 530 engine->name);
534 /* Sometimes we observe that the idle flag is not 531 /* Sometimes we observe that the idle flag is not
@@ -643,58 +640,42 @@ out:
643 return ret; 640 return ret;
644} 641}
645 642
646void 643void intel_fini_pipe_control(struct intel_engine_cs *engine)
647intel_fini_pipe_control(struct intel_engine_cs *engine)
648{ 644{
649 if (engine->scratch.obj == NULL) 645 if (engine->scratch.obj == NULL)
650 return; 646 return;
651 647
652 if (INTEL_GEN(engine->i915) >= 5) { 648 i915_gem_object_ggtt_unpin(engine->scratch.obj);
653 kunmap(sg_page(engine->scratch.obj->pages->sgl));
654 i915_gem_object_ggtt_unpin(engine->scratch.obj);
655 }
656
657 drm_gem_object_unreference(&engine->scratch.obj->base); 649 drm_gem_object_unreference(&engine->scratch.obj->base);
658 engine->scratch.obj = NULL; 650 engine->scratch.obj = NULL;
659} 651}
660 652
661int 653int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
662intel_init_pipe_control(struct intel_engine_cs *engine)
663{ 654{
655 struct drm_i915_gem_object *obj;
664 int ret; 656 int ret;
665 657
666 WARN_ON(engine->scratch.obj); 658 WARN_ON(engine->scratch.obj);
667 659
668 engine->scratch.obj = i915_gem_object_create(engine->i915->dev, 4096); 660 obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
669 if (IS_ERR(engine->scratch.obj)) { 661 if (!obj)
670 DRM_ERROR("Failed to allocate seqno page\n"); 662 obj = i915_gem_object_create(&engine->i915->drm, size);
671 ret = PTR_ERR(engine->scratch.obj); 663 if (IS_ERR(obj)) {
672 engine->scratch.obj = NULL; 664 DRM_ERROR("Failed to allocate scratch page\n");
665 ret = PTR_ERR(obj);
673 goto err; 666 goto err;
674 } 667 }
675 668
676 ret = i915_gem_object_set_cache_level(engine->scratch.obj, 669 ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
677 I915_CACHE_LLC);
678 if (ret) 670 if (ret)
679 goto err_unref; 671 goto err_unref;
680 672
681 ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0); 673 engine->scratch.obj = obj;
682 if (ret) 674 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
683 goto err_unref;
684
685 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
686 engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
687 if (engine->scratch.cpu_page == NULL) {
688 ret = -ENOMEM;
689 goto err_unpin;
690 }
691
692 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 675 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
693 engine->name, engine->scratch.gtt_offset); 676 engine->name, engine->scratch.gtt_offset);
694 return 0; 677 return 0;
695 678
696err_unpin:
697 i915_gem_object_ggtt_unpin(engine->scratch.obj);
698err_unref: 679err_unref:
699 drm_gem_object_unreference(&engine->scratch.obj->base); 680 drm_gem_object_unreference(&engine->scratch.obj->base);
700err: 681err:
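The rewritten intel_init_pipe_control() prefers stolen memory and only falls back to a shmem-backed object. Note that the two allocators signal failure differently, which the hunk's control flow relies on:

	obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
	if (!obj)		/* stolen allocator returns NULL on failure */
		obj = i915_gem_object_create(&engine->i915->drm, size);
	if (IS_ERR(obj)) {	/* shmem allocator returns an ERR_PTR */
		ret = PTR_ERR(obj);
		goto err;
	}

The CPU-side kmap of the scratch page goes away together with pc_render_add_request() below, so the object no longer needs CPU access; pinning with PIN_HIGH places it high in the GGTT, away from the scarce mappable aperture.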
@@ -1324,8 +1305,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
1324 if (IS_GEN(dev_priv, 6, 7)) 1305 if (IS_GEN(dev_priv, 6, 7))
1325 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1306 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1326 1307
1327 if (HAS_L3_DPF(dev_priv)) 1308 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1328 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
1329 1309
1330 return init_workarounds_ring(engine); 1310 return init_workarounds_ring(engine);
1331} 1311}
@@ -1362,19 +1342,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
1362 return ret; 1342 return ret;
1363 1343
1364 for_each_engine_id(waiter, dev_priv, id) { 1344 for_each_engine_id(waiter, dev_priv, id) {
1365 u32 seqno;
1366 u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; 1345 u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
1367 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1346 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1368 continue; 1347 continue;
1369 1348
1370 seqno = i915_gem_request_get_seqno(signaller_req);
1371 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); 1349 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
1372 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | 1350 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
1373 PIPE_CONTROL_QW_WRITE | 1351 PIPE_CONTROL_QW_WRITE |
1374 PIPE_CONTROL_CS_STALL); 1352 PIPE_CONTROL_CS_STALL);
1375 intel_ring_emit(signaller, lower_32_bits(gtt_offset)); 1353 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
1376 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1354 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1377 intel_ring_emit(signaller, seqno); 1355 intel_ring_emit(signaller, signaller_req->seqno);
1378 intel_ring_emit(signaller, 0); 1356 intel_ring_emit(signaller, 0);
1379 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1357 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1380 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1358 MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1403,18 +1381,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
1403 return ret; 1381 return ret;
1404 1382
1405 for_each_engine_id(waiter, dev_priv, id) { 1383 for_each_engine_id(waiter, dev_priv, id) {
1406 u32 seqno;
1407 u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; 1384 u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
1408 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1385 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1409 continue; 1386 continue;
1410 1387
1411 seqno = i915_gem_request_get_seqno(signaller_req);
1412 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | 1388 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
1413 MI_FLUSH_DW_OP_STOREDW); 1389 MI_FLUSH_DW_OP_STOREDW);
1414 intel_ring_emit(signaller, lower_32_bits(gtt_offset) | 1390 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
1415 MI_FLUSH_DW_USE_GTT); 1391 MI_FLUSH_DW_USE_GTT);
1416 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1392 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1417 intel_ring_emit(signaller, seqno); 1393 intel_ring_emit(signaller, signaller_req->seqno);
1418 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1394 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1419 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1395 MI_SEMAPHORE_TARGET(waiter->hw_id));
1420 intel_ring_emit(signaller, 0); 1396 intel_ring_emit(signaller, 0);
@@ -1445,11 +1421,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
1445 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; 1421 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
1446 1422
1447 if (i915_mmio_reg_valid(mbox_reg)) { 1423 if (i915_mmio_reg_valid(mbox_reg)) {
1448 u32 seqno = i915_gem_request_get_seqno(signaller_req);
1449
1450 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 1424 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
1451 intel_ring_emit_reg(signaller, mbox_reg); 1425 intel_ring_emit_reg(signaller, mbox_reg);
1452 intel_ring_emit(signaller, seqno); 1426 intel_ring_emit(signaller, signaller_req->seqno);
1453 } 1427 }
1454 } 1428 }
1455 1429
@@ -1485,7 +1459,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
1485 intel_ring_emit(engine, MI_STORE_DWORD_INDEX); 1459 intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
1486 intel_ring_emit(engine, 1460 intel_ring_emit(engine,
1487 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1461 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1488 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1462 intel_ring_emit(engine, req->seqno);
1489 intel_ring_emit(engine, MI_USER_INTERRUPT); 1463 intel_ring_emit(engine, MI_USER_INTERRUPT);
1490 __intel_ring_advance(engine); 1464 __intel_ring_advance(engine);
1491 1465
@@ -1542,6 +1516,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1542{ 1516{
1543 struct intel_engine_cs *waiter = waiter_req->engine; 1517 struct intel_engine_cs *waiter = waiter_req->engine;
1544 struct drm_i915_private *dev_priv = waiter_req->i915; 1518 struct drm_i915_private *dev_priv = waiter_req->i915;
1519 u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
1545 struct i915_hw_ppgtt *ppgtt; 1520 struct i915_hw_ppgtt *ppgtt;
1546 int ret; 1521 int ret;
1547 1522
@@ -1553,10 +1528,8 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1553 MI_SEMAPHORE_GLOBAL_GTT | 1528 MI_SEMAPHORE_GLOBAL_GTT |
1554 MI_SEMAPHORE_SAD_GTE_SDD); 1529 MI_SEMAPHORE_SAD_GTE_SDD);
1555 intel_ring_emit(waiter, seqno); 1530 intel_ring_emit(waiter, seqno);
1556 intel_ring_emit(waiter, 1531 intel_ring_emit(waiter, lower_32_bits(offset));
1557 lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); 1532 intel_ring_emit(waiter, upper_32_bits(offset));
1558 intel_ring_emit(waiter,
1559 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1560 intel_ring_advance(waiter); 1533 intel_ring_advance(waiter);
1561 1534
1562 /* When the !RCS engines idle waiting upon a semaphore, they lose their 1535 /* When the !RCS engines idle waiting upon a semaphore, they lose their
@@ -1611,66 +1584,22 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
1611 return 0; 1584 return 0;
1612} 1585}
1613 1586
1614#define PIPE_CONTROL_FLUSH(ring__, addr__) \ 1587static void
1615do { \ 1588gen5_seqno_barrier(struct intel_engine_cs *ring)
1616 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
1617 PIPE_CONTROL_DEPTH_STALL); \
1618 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
1619 intel_ring_emit(ring__, 0); \
1620 intel_ring_emit(ring__, 0); \
1621} while (0)
1622
1623static int
1624pc_render_add_request(struct drm_i915_gem_request *req)
1625{ 1589{
1626 struct intel_engine_cs *engine = req->engine; 1590 /* MI_STORE writes are internally buffered by the GPU and not flushed
1627 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 1591 * either by MI_FLUSH or SyncFlush or any other combination of
1628 int ret; 1592 * MI commands.
1629
1630 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
1631 * incoherent with writes to memory, i.e. completely fubar,
1632 * so we need to use PIPE_NOTIFY instead.
1633 * 1593 *
1634 * However, we also need to workaround the qword write 1594 * "Only the submission of the store operation is guaranteed.
1635 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to 1595 * The write result will be complete (coherent) some time later
1636 * memory before requesting an interrupt. 1596 * (this is practically a finite period but there is no guaranteed
1597 * latency)."
1598 *
1599 * Empirically, we observe that we need a delay of at least 75us to
1600 * be sure that the seqno write is visible to the CPU.
1637 */ 1601 */
1638 ret = intel_ring_begin(req, 32); 1602 usleep_range(125, 250);
1639 if (ret)
1640 return ret;
1641
1642 intel_ring_emit(engine,
1643 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1644 PIPE_CONTROL_WRITE_FLUSH |
1645 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
1646 intel_ring_emit(engine,
1647 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1648 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1649 intel_ring_emit(engine, 0);
1650 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1651 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
1652 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1653 scratch_addr += 2 * CACHELINE_BYTES;
1654 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1655 scratch_addr += 2 * CACHELINE_BYTES;
1656 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1657 scratch_addr += 2 * CACHELINE_BYTES;
1658 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1659 scratch_addr += 2 * CACHELINE_BYTES;
1660 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1661
1662 intel_ring_emit(engine,
1663 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1664 PIPE_CONTROL_WRITE_FLUSH |
1665 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
1666 PIPE_CONTROL_NOTIFY);
1667 intel_ring_emit(engine,
1668 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1669 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1670 intel_ring_emit(engine, 0);
1671 __intel_ring_advance(engine);
1672
1673 return 0;
1674} 1603}
1675 1604
1676static void 1605static void
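gen5_seqno_barrier() replaces the heavyweight PIPE_CONTROL dance above: instead of forcing the write out from the GPU side, the driver now waits on the CPU side before trusting the status page. A hypothetical sampling sequence, with the hook and helper names this series uses:

	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);	/* e.g. gen5_seqno_barrier */
	seqno = intel_read_status_page(engine, I915_GEM_HWS_INDEX);

usleep_range(125, 250) guarantees at least 125us of delay, comfortably above the ~75us the comment reports as empirically necessary, while allowing the scheduler to coalesce the wakeup anywhere up to 250us.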
@@ -1698,127 +1627,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
1698 spin_unlock_irq(&dev_priv->uncore.lock); 1627 spin_unlock_irq(&dev_priv->uncore.lock);
1699} 1628}
1700 1629
1701static u32
1702ring_get_seqno(struct intel_engine_cs *engine)
1703{
1704 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1705}
1706
1707static void 1630static void
1708ring_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1631gen5_irq_enable(struct intel_engine_cs *engine)
1709{
1710 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1711}
1712
1713static u32
1714pc_render_get_seqno(struct intel_engine_cs *engine)
1715{ 1632{
1716 return engine->scratch.cpu_page[0]; 1633 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
1717} 1634}
1718 1635
1719static void 1636static void
1720pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1637gen5_irq_disable(struct intel_engine_cs *engine)
1721{ 1638{
1722 engine->scratch.cpu_page[0] = seqno; 1639 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
1723}
1724
1725static bool
1726gen5_ring_get_irq(struct intel_engine_cs *engine)
1727{
1728 struct drm_i915_private *dev_priv = engine->i915;
1729 unsigned long flags;
1730
1731 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1732 return false;
1733
1734 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1735 if (engine->irq_refcount++ == 0)
1736 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1737 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1738
1739 return true;
1740} 1640}
1741 1641
1742static void 1642static void
1743gen5_ring_put_irq(struct intel_engine_cs *engine) 1643i9xx_irq_enable(struct intel_engine_cs *engine)
1744{ 1644{
1745 struct drm_i915_private *dev_priv = engine->i915; 1645 struct drm_i915_private *dev_priv = engine->i915;
1746 unsigned long flags;
1747
1748 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1749 if (--engine->irq_refcount == 0)
1750 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1751 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1752}
1753
1754static bool
1755i9xx_ring_get_irq(struct intel_engine_cs *engine)
1756{
1757 struct drm_i915_private *dev_priv = engine->i915;
1758 unsigned long flags;
1759
1760 if (!intel_irqs_enabled(dev_priv))
1761 return false;
1762 1646
1763 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1647 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1764 if (engine->irq_refcount++ == 0) { 1648 I915_WRITE(IMR, dev_priv->irq_mask);
1765 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1649 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1766 I915_WRITE(IMR, dev_priv->irq_mask);
1767 POSTING_READ(IMR);
1768 }
1769 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1770
1771 return true;
1772} 1650}
1773 1651
1774static void 1652static void
1775i9xx_ring_put_irq(struct intel_engine_cs *engine) 1653i9xx_irq_disable(struct intel_engine_cs *engine)
1776{ 1654{
1777 struct drm_i915_private *dev_priv = engine->i915; 1655 struct drm_i915_private *dev_priv = engine->i915;
1778 unsigned long flags;
1779 1656
1780 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1657 dev_priv->irq_mask |= engine->irq_enable_mask;
1781 if (--engine->irq_refcount == 0) { 1658 I915_WRITE(IMR, dev_priv->irq_mask);
1782 dev_priv->irq_mask |= engine->irq_enable_mask;
1783 I915_WRITE(IMR, dev_priv->irq_mask);
1784 POSTING_READ(IMR);
1785 }
1786 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1787} 1659}
1788 1660
1789static bool 1661static void
1790i8xx_ring_get_irq(struct intel_engine_cs *engine) 1662i8xx_irq_enable(struct intel_engine_cs *engine)
1791{ 1663{
1792 struct drm_i915_private *dev_priv = engine->i915; 1664 struct drm_i915_private *dev_priv = engine->i915;
1793 unsigned long flags;
1794
1795 if (!intel_irqs_enabled(dev_priv))
1796 return false;
1797 1665
1798 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1666 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1799 if (engine->irq_refcount++ == 0) { 1667 I915_WRITE16(IMR, dev_priv->irq_mask);
1800 dev_priv->irq_mask &= ~engine->irq_enable_mask; 1668 POSTING_READ16(RING_IMR(engine->mmio_base));
1801 I915_WRITE16(IMR, dev_priv->irq_mask);
1802 POSTING_READ16(IMR);
1803 }
1804 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1805
1806 return true;
1807} 1669}
1808 1670
1809static void 1671static void
1810i8xx_ring_put_irq(struct intel_engine_cs *engine) 1672i8xx_irq_disable(struct intel_engine_cs *engine)
1811{ 1673{
1812 struct drm_i915_private *dev_priv = engine->i915; 1674 struct drm_i915_private *dev_priv = engine->i915;
1813 unsigned long flags;
1814 1675
1815 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1676 dev_priv->irq_mask |= engine->irq_enable_mask;
1816 if (--engine->irq_refcount == 0) { 1677 I915_WRITE16(IMR, dev_priv->irq_mask);
1817 dev_priv->irq_mask |= engine->irq_enable_mask;
1818 I915_WRITE16(IMR, dev_priv->irq_mask);
1819 POSTING_READ16(IMR);
1820 }
1821 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1822} 1678}
1823 1679
1824static int 1680static int
@@ -1852,129 +1708,68 @@ i9xx_add_request(struct drm_i915_gem_request *req)
1852 intel_ring_emit(engine, MI_STORE_DWORD_INDEX); 1708 intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
1853 intel_ring_emit(engine, 1709 intel_ring_emit(engine,
1854 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1710 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1855 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1711 intel_ring_emit(engine, req->seqno);
1856 intel_ring_emit(engine, MI_USER_INTERRUPT); 1712 intel_ring_emit(engine, MI_USER_INTERRUPT);
1857 __intel_ring_advance(engine); 1713 __intel_ring_advance(engine);
1858 1714
1859 return 0; 1715 return 0;
1860} 1716}
1861 1717
1862static bool 1718static void
1863gen6_ring_get_irq(struct intel_engine_cs *engine) 1719gen6_irq_enable(struct intel_engine_cs *engine)
1864{ 1720{
1865 struct drm_i915_private *dev_priv = engine->i915; 1721 struct drm_i915_private *dev_priv = engine->i915;
1866 unsigned long flags;
1867
1868 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1869 return false;
1870
1871 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1872 if (engine->irq_refcount++ == 0) {
1873 if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
1874 I915_WRITE_IMR(engine,
1875 ~(engine->irq_enable_mask |
1876 GT_PARITY_ERROR(dev_priv)));
1877 else
1878 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1879 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1880 }
1881 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1882 1722
1883 return true; 1723 I915_WRITE_IMR(engine,
1724 ~(engine->irq_enable_mask |
1725 engine->irq_keep_mask));
1726 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1884} 1727}
1885 1728
1886static void 1729static void
1887gen6_ring_put_irq(struct intel_engine_cs *engine) 1730gen6_irq_disable(struct intel_engine_cs *engine)
1888{ 1731{
1889 struct drm_i915_private *dev_priv = engine->i915; 1732 struct drm_i915_private *dev_priv = engine->i915;
1890 unsigned long flags;
1891 1733
1892 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1734 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1893 if (--engine->irq_refcount == 0) { 1735 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1894 if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
1895 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
1896 else
1897 I915_WRITE_IMR(engine, ~0);
1898 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1899 }
1900 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1901} 1736}
1902 1737
1903static bool 1738static void
1904hsw_vebox_get_irq(struct intel_engine_cs *engine) 1739hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1905{ 1740{
1906 struct drm_i915_private *dev_priv = engine->i915; 1741 struct drm_i915_private *dev_priv = engine->i915;
1907 unsigned long flags;
1908 1742
1909 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1743 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1910 return false; 1744 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1911
1912 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1913 if (engine->irq_refcount++ == 0) {
1914 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1915 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1916 }
1917 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1918
1919 return true;
1920} 1745}
1921 1746
1922static void 1747static void
1923hsw_vebox_put_irq(struct intel_engine_cs *engine) 1748hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1924{ 1749{
1925 struct drm_i915_private *dev_priv = engine->i915; 1750 struct drm_i915_private *dev_priv = engine->i915;
1926 unsigned long flags;
1927 1751
1928 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1752 I915_WRITE_IMR(engine, ~0);
1929 if (--engine->irq_refcount == 0) { 1753 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1930 I915_WRITE_IMR(engine, ~0);
1931 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1932 }
1933 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1934} 1754}
1935 1755
1936static bool 1756static void
1937gen8_ring_get_irq(struct intel_engine_cs *engine) 1757gen8_irq_enable(struct intel_engine_cs *engine)
1938{ 1758{
1939 struct drm_i915_private *dev_priv = engine->i915; 1759 struct drm_i915_private *dev_priv = engine->i915;
1940 unsigned long flags;
1941 1760
1942 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 1761 I915_WRITE_IMR(engine,
1943 return false; 1762 ~(engine->irq_enable_mask |
1944 1763 engine->irq_keep_mask));
1945 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1764 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1946 if (engine->irq_refcount++ == 0) {
1947 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
1948 I915_WRITE_IMR(engine,
1949 ~(engine->irq_enable_mask |
1950 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1951 } else {
1952 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1953 }
1954 POSTING_READ(RING_IMR(engine->mmio_base));
1955 }
1956 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1957
1958 return true;
1959} 1765}
1960 1766
1961static void 1767static void
1962gen8_ring_put_irq(struct intel_engine_cs *engine) 1768gen8_irq_disable(struct intel_engine_cs *engine)
1963{ 1769{
1964 struct drm_i915_private *dev_priv = engine->i915; 1770 struct drm_i915_private *dev_priv = engine->i915;
1965 unsigned long flags;
1966 1771
1967 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1772 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1968 if (--engine->irq_refcount == 0) {
1969 if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
1970 I915_WRITE_IMR(engine,
1971 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1972 } else {
1973 I915_WRITE_IMR(engine, ~0);
1974 }
1975 POSTING_READ(RING_IMR(engine->mmio_base));
1976 }
1977 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1978} 1773}
1979 1774
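The refcounted irq_get/irq_put pairs above become bare enable/disable hooks: the WARN_ON(!intel_irqs_enabled()) check, the irq_refcount bookkeeping and the irq_lock acquisition all move out of the per-engine callbacks and into the one caller that now owns the interrupt, the breadcrumbs bottom-half. A minimal sketch of that caller, using the breadcrumbs fields introduced later in this series; irq_enable_breadcrumbs() is a hypothetical name for illustration only:

	static void irq_enable_breadcrumbs(struct intel_engine_cs *engine)
	{
		struct drm_i915_private *dev_priv = engine->i915;

		/* the first waiter arms the user interrupt exactly once */
		spin_lock_irq(&dev_priv->irq_lock);
		if (!engine->breadcrumbs.irq_enabled) {
			engine->breadcrumbs.irq_enabled = true;
			engine->irq_enable(engine);
		}
		spin_unlock_irq(&dev_priv->irq_lock);
	}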
1980static int 1775static int
@@ -2093,7 +1888,7 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
2093 if (!dev_priv->status_page_dmah) 1888 if (!dev_priv->status_page_dmah)
2094 return; 1889 return;
2095 1890
2096 drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah); 1891 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
2097 engine->status_page.page_addr = NULL; 1892 engine->status_page.page_addr = NULL;
2098} 1893}
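The drm_pci_free() change above is one instance of a conversion repeated throughout this patch: struct drm_device is now embedded in struct drm_i915_private rather than pointed to, so the two directions of the lookup become (a sketch of the idiom, not new driver code):

	struct drm_device *dev = &dev_priv->drm;	/* was: dev_priv->dev */
	struct drm_i915_private *i915 = to_i915(dev);	/* was: dev->dev_private */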
2099 1894
@@ -2119,7 +1914,7 @@ static int init_status_page(struct intel_engine_cs *engine)
2119 unsigned flags; 1914 unsigned flags;
2120 int ret; 1915 int ret;
2121 1916
2122 obj = i915_gem_object_create(engine->i915->dev, 4096); 1917 obj = i915_gem_object_create(&engine->i915->drm, 4096);
2123 if (IS_ERR(obj)) { 1918 if (IS_ERR(obj)) {
2124 DRM_ERROR("Failed to allocate status page\n"); 1919 DRM_ERROR("Failed to allocate status page\n");
2125 return PTR_ERR(obj); 1920 return PTR_ERR(obj);
@@ -2168,7 +1963,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
2168 1963
2169 if (!dev_priv->status_page_dmah) { 1964 if (!dev_priv->status_page_dmah) {
2170 dev_priv->status_page_dmah = 1965 dev_priv->status_page_dmah =
2171 drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE); 1966 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
2172 if (!dev_priv->status_page_dmah) 1967 if (!dev_priv->status_page_dmah)
2173 return -ENOMEM; 1968 return -ENOMEM;
2174 } 1969 }
@@ -2301,7 +2096,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2301 ring->last_retired_head = -1; 2096 ring->last_retired_head = -1;
2302 intel_ring_update_space(ring); 2097 intel_ring_update_space(ring);
2303 2098
2304 ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring); 2099 ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
2305 if (ret) { 2100 if (ret) {
2306 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", 2101 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2307 engine->name, ret); 2102 engine->name, ret);
@@ -2321,6 +2116,57 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
2321 kfree(ring); 2116 kfree(ring);
2322} 2117}
2323 2118
2119static int intel_ring_context_pin(struct i915_gem_context *ctx,
2120 struct intel_engine_cs *engine)
2121{
2122 struct intel_context *ce = &ctx->engine[engine->id];
2123 int ret;
2124
2125 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2126
2127 if (ce->pin_count++)
2128 return 0;
2129
2130 if (ce->state) {
2131 ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
2132 if (ret)
2133 goto error;
2134 }
2135
2136 /* The kernel context is only used as a placeholder for flushing the
2137 * active context. It is never used for submitting user rendering and
2138 * as such never requires the golden render context, and so we can skip
2139 * emitting it when we switch to the kernel context. This is required
2140 * as during eviction we cannot allocate and pin the renderstate in
2141 * order to initialise the context.
2142 */
2143 if (ctx == ctx->i915->kernel_context)
2144 ce->initialised = true;
2145
2146 i915_gem_context_reference(ctx);
2147 return 0;
2148
2149error:
2150 ce->pin_count = 0;
2151 return ret;
2152}
2153
2154static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2155 struct intel_engine_cs *engine)
2156{
2157 struct intel_context *ce = &ctx->engine[engine->id];
2158
2159 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2160
2161 if (--ce->pin_count)
2162 return;
2163
2164 if (ce->state)
2165 i915_gem_object_ggtt_unpin(ce->state);
2166
2167 i915_gem_context_unreference(ctx);
2168}
2169
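A sketch of the intended pairing for the two helpers above; the lockdep_assert_held() means struct_mutex must already be held around both calls, and use_context() is a placeholder for whatever work the caller does with the pinned context:

	mutex_lock(&dev_priv->drm.struct_mutex);
	err = intel_ring_context_pin(ctx, engine);
	if (err == 0) {
		use_context(ctx);	/* placeholder for real work */
		intel_ring_context_unpin(ctx, engine);
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);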
2324static int intel_init_ring_buffer(struct drm_device *dev, 2170static int intel_init_ring_buffer(struct drm_device *dev,
2325 struct intel_engine_cs *engine) 2171 struct intel_engine_cs *engine)
2326{ 2172{
@@ -2339,7 +2185,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2339 memset(engine->semaphore.sync_seqno, 0, 2185 memset(engine->semaphore.sync_seqno, 0,
2340 sizeof(engine->semaphore.sync_seqno)); 2186 sizeof(engine->semaphore.sync_seqno));
2341 2187
2342 init_waitqueue_head(&engine->irq_queue); 2188 ret = intel_engine_init_breadcrumbs(engine);
2189 if (ret)
2190 goto error;
2191
2192 /* We may need to do things with the shrinker which
2193 * require us to immediately switch back to the default
2194 * context. This can cause a problem as pinning the
2195 * default context also requires GTT space which may not
2196 * be available. To avoid this we always pin the default
2197 * context.
2198 */
2199 ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2200 if (ret)
2201 goto error;
2343 2202
2344 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); 2203 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
2345 if (IS_ERR(ringbuf)) { 2204 if (IS_ERR(ringbuf)) {
@@ -2408,6 +2267,10 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2408 2267
2409 i915_cmd_parser_fini_ring(engine); 2268 i915_cmd_parser_fini_ring(engine);
2410 i915_gem_batch_pool_fini(&engine->batch_pool); 2269 i915_gem_batch_pool_fini(&engine->batch_pool);
2270 intel_engine_fini_breadcrumbs(engine);
2271
2272 intel_ring_context_unpin(dev_priv->kernel_context, engine);
2273
2411 engine->i915 = NULL; 2274 engine->i915 = NULL;
2412} 2275}
2413 2276
@@ -2603,10 +2466,19 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2603 memset(engine->semaphore.sync_seqno, 0, 2466 memset(engine->semaphore.sync_seqno, 0,
2604 sizeof(engine->semaphore.sync_seqno)); 2467 sizeof(engine->semaphore.sync_seqno));
2605 2468
2606 engine->set_seqno(engine, seqno); 2469 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
2470 if (engine->irq_seqno_barrier)
2471 engine->irq_seqno_barrier(engine);
2607 engine->last_submitted_seqno = seqno; 2472 engine->last_submitted_seqno = seqno;
2608 2473
2609 engine->hangcheck.seqno = seqno; 2474 engine->hangcheck.seqno = seqno;
2475
2476 /* After manually advancing the seqno, fake the interrupt in case
2477 * there are any waiters for that seqno.
2478 */
2479 rcu_read_lock();
2480 intel_engine_wakeup(engine);
2481 rcu_read_unlock();
2610} 2482}
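The explicit wakeup closes a race the old wait-queue code did not have: rewinding the seqno can satisfy sleeping waiters without any real user interrupt being raised. As a sketch of the hazard being handled (comment form only):

	/*
	 * waiter:	sleeps until i915_seqno_passed(hw_seqno, wait->seqno)
	 * init_seqno:	writes hw_seqno directly, no interrupt is generated
	 * wakeup:	intel_engine_wakeup() stands in for the missing IRQ,
	 *		called under rcu_read_lock() so the waiter's
	 *		task_struct cannot be freed beneath wake_up_process()
	 */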
2611 2483
2612static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, 2484static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
@@ -2614,32 +2486,38 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
2614{ 2486{
2615 struct drm_i915_private *dev_priv = engine->i915; 2487 struct drm_i915_private *dev_priv = engine->i915;
2616 2488
2489 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2490
2617 /* Every tail move must follow the sequence below */ 2491 /* Every tail move must follow the sequence below */
2618 2492
2619 /* Disable notification that the ring is IDLE. The GT 2493 /* Disable notification that the ring is IDLE. The GT
2620 * will then assume that it is busy and bring it out of rc6. 2494 * will then assume that it is busy and bring it out of rc6.
2621 */ 2495 */
2622 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2496 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2623 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2497 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2624 2498
2625 /* Clear the context id. Here be magic! */ 2499 /* Clear the context id. Here be magic! */
2626 I915_WRITE64(GEN6_BSD_RNCID, 0x0); 2500 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2627 2501
2628 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 2502 /* Wait for the ring not to be idle, i.e. for it to wake up. */
2629 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 2503 if (intel_wait_for_register_fw(dev_priv,
2630 GEN6_BSD_SLEEP_INDICATOR) == 0, 2504 GEN6_BSD_SLEEP_PSMI_CONTROL,
2631 50)) 2505 GEN6_BSD_SLEEP_INDICATOR,
2506 0,
2507 50))
2632 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2508 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2633 2509
2634 /* Now that the ring is fully powered up, update the tail */ 2510 /* Now that the ring is fully powered up, update the tail */
2635 I915_WRITE_TAIL(engine, value); 2511 I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
2636 POSTING_READ(RING_TAIL(engine->mmio_base)); 2512 POSTING_READ_FW(RING_TAIL(engine->mmio_base));
2637 2513
2638 /* Let the ring send IDLE messages to the GT again, 2514 /* Let the ring send IDLE messages to the GT again,
2639 * and so let it sleep to conserve power when idle. 2515 * and so let it sleep to conserve power when idle.
2640 */ 2516 */
2641 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2517 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2642 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2518 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2519
2520 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2643} 2521}
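Taking FORCEWAKE_ALL once around the whole tail-write sequence lets every access use the raw _FW register accessors, instead of each I915_READ/I915_WRITE acquiring and releasing forcewake on its own. The general shape, as a sketch:

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	I915_WRITE_FW(GEN6_BSD_RNCID, 0);	/* raw, no implicit forcewake */
	(void)I915_READ_FW(GEN6_BSD_SLEEP_PSMI_CONTROL);	/* ditto for reads */
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);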
2644 2522
2645static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, 2523static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
@@ -2808,11 +2686,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2808 return 0; 2686 return 0;
2809} 2687}
2810 2688
2689static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2690 struct intel_engine_cs *engine)
2691{
2692 struct drm_i915_gem_object *obj;
2693 int ret, i;
2694
2695 if (!i915_semaphore_is_enabled(dev_priv))
2696 return;
2697
2698 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
2699 obj = i915_gem_object_create(&dev_priv->drm, 4096);
2700 if (IS_ERR(obj)) {
2701 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2702 i915.semaphores = 0;
2703 } else {
2704 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2705 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2706 if (ret != 0) {
2707 drm_gem_object_unreference(&obj->base);
2708 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2709 i915.semaphores = 0;
2710 } else {
2711 dev_priv->semaphore_obj = obj;
2712 }
2713 }
2714 }
2715
2716 if (!i915_semaphore_is_enabled(dev_priv))
2717 return;
2718
2719 if (INTEL_GEN(dev_priv) >= 8) {
2720 u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
2721
2722 engine->semaphore.sync_to = gen8_ring_sync;
2723 engine->semaphore.signal = gen8_xcs_signal;
2724
2725 for (i = 0; i < I915_NUM_ENGINES; i++) {
2726 u64 ring_offset;
2727
2728 if (i != engine->id)
2729 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2730 else
2731 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2732
2733 engine->semaphore.signal_ggtt[i] = ring_offset;
2734 }
2735 } else if (INTEL_GEN(dev_priv) >= 6) {
2736 engine->semaphore.sync_to = gen6_ring_sync;
2737 engine->semaphore.signal = gen6_signal;
2738
2739 /*
2740 * The current semaphore mechanism is only used on pre-gen8
2741 * platforms, and there is no VCS2 ring on those platforms,
2742 * so the semaphore between RCS and VCS2 is initialized as
2743 * INVALID. Gen8 will initialize the semaphore between VCS2
2744 * and RCS later.
2745 */
2746 for (i = 0; i < I915_NUM_ENGINES; i++) {
2747 static const struct {
2748 u32 wait_mbox;
2749 i915_reg_t mbox_reg;
2750 } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
2751 [RCS] = {
2752 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2753 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2754 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2755 },
2756 [VCS] = {
2757 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2758 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2759 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2760 },
2761 [BCS] = {
2762 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2763 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2764 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2765 },
2766 [VECS] = {
2767 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2768 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2769 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2770 },
2771 };
2772 u32 wait_mbox;
2773 i915_reg_t mbox_reg;
2774
2775 if (i == engine->id || i == VCS2) {
2776 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2777 mbox_reg = GEN6_NOSYNC;
2778 } else {
2779 wait_mbox = sem_data[engine->id][i].wait_mbox;
2780 mbox_reg = sem_data[engine->id][i].mbox_reg;
2781 }
2782
2783 engine->semaphore.mbox.wait[i] = wait_mbox;
2784 engine->semaphore.mbox.signal[i] = mbox_reg;
2785 }
2786 }
2787}
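The table-driven loop above replaces ten hand-written assignments per engine, and any entry can be checked against the code it supersedes. For the render engine paired with the video engine, for example:

	/* sem_data[RCS][VCS] = { MI_SEMAPHORE_SYNC_RV, GEN6_VRSYNC }
	 * reproduces the old explicit pre-gen8 setup:
	 *	engine->semaphore.mbox.wait[VCS]   = MI_SEMAPHORE_SYNC_RV;
	 *	engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
	 */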
2788
2789static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2790 struct intel_engine_cs *engine)
2791{
2792 if (INTEL_GEN(dev_priv) >= 8) {
2793 engine->irq_enable = gen8_irq_enable;
2794 engine->irq_disable = gen8_irq_disable;
2795 engine->irq_seqno_barrier = gen6_seqno_barrier;
2796 } else if (INTEL_GEN(dev_priv) >= 6) {
2797 engine->irq_enable = gen6_irq_enable;
2798 engine->irq_disable = gen6_irq_disable;
2799 engine->irq_seqno_barrier = gen6_seqno_barrier;
2800 } else if (INTEL_GEN(dev_priv) >= 5) {
2801 engine->irq_enable = gen5_irq_enable;
2802 engine->irq_disable = gen5_irq_disable;
2803 engine->irq_seqno_barrier = gen5_seqno_barrier;
2804 } else if (INTEL_GEN(dev_priv) >= 3) {
2805 engine->irq_enable = i9xx_irq_enable;
2806 engine->irq_disable = i9xx_irq_disable;
2807 } else {
2808 engine->irq_enable = i8xx_irq_enable;
2809 engine->irq_disable = i8xx_irq_disable;
2810 }
2811}
2812
2813static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2814 struct intel_engine_cs *engine)
2815{
2816 engine->init_hw = init_ring_common;
2817 engine->write_tail = ring_write_tail;
2818
2819 engine->add_request = i9xx_add_request;
2820 if (INTEL_GEN(dev_priv) >= 6)
2821 engine->add_request = gen6_add_request;
2822
2823 if (INTEL_GEN(dev_priv) >= 8)
2824 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2825 else if (INTEL_GEN(dev_priv) >= 6)
2826 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2827 else if (INTEL_GEN(dev_priv) >= 4)
2828 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
2829 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
2830 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
2831 else
2832 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
2833
2834 intel_ring_init_irq(dev_priv, engine);
2835 intel_ring_init_semaphores(dev_priv, engine);
2836}
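Every intel_init_*_ring_buffer() below now follows the same shape: install the shared defaults, then override only what the engine actually does differently. A sketch of the pattern, taken from the BSD engine setup:

	intel_ring_default_vfuncs(dev_priv, engine);
	engine->flush = gen6_bsd_ring_flush;	/* example per-engine override */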
2837
2811int intel_init_render_ring_buffer(struct drm_device *dev) 2838int intel_init_render_ring_buffer(struct drm_device *dev)
2812{ 2839{
2813 struct drm_i915_private *dev_priv = dev->dev_private; 2840 struct drm_i915_private *dev_priv = to_i915(dev);
2814 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 2841 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
2815 struct drm_i915_gem_object *obj;
2816 int ret; 2842 int ret;
2817 2843
2818 engine->name = "render ring"; 2844 engine->name = "render ring";
@@ -2821,139 +2847,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2821 engine->hw_id = 0; 2847 engine->hw_id = 0;
2822 engine->mmio_base = RENDER_RING_BASE; 2848 engine->mmio_base = RENDER_RING_BASE;
2823 2849
2824 if (INTEL_GEN(dev_priv) >= 8) { 2850 intel_ring_default_vfuncs(dev_priv, engine);
2825 if (i915_semaphore_is_enabled(dev_priv)) { 2851
2826 obj = i915_gem_object_create(dev, 4096); 2852 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2827 if (IS_ERR(obj)) { 2853 if (HAS_L3_DPF(dev_priv))
2828 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); 2854 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2829 i915.semaphores = 0;
2830 } else {
2831 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2832 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2833 if (ret != 0) {
2834 drm_gem_object_unreference(&obj->base);
2835 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2836 i915.semaphores = 0;
2837 } else
2838 dev_priv->semaphore_obj = obj;
2839 }
2840 }
2841 2855
2856 if (INTEL_GEN(dev_priv) >= 8) {
2842 engine->init_context = intel_rcs_ctx_init; 2857 engine->init_context = intel_rcs_ctx_init;
2843 engine->add_request = gen8_render_add_request; 2858 engine->add_request = gen8_render_add_request;
2844 engine->flush = gen8_render_ring_flush; 2859 engine->flush = gen8_render_ring_flush;
2845 engine->irq_get = gen8_ring_get_irq; 2860 if (i915_semaphore_is_enabled(dev_priv))
2846 engine->irq_put = gen8_ring_put_irq;
2847 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2848 engine->get_seqno = ring_get_seqno;
2849 engine->set_seqno = ring_set_seqno;
2850 if (i915_semaphore_is_enabled(dev_priv)) {
2851 WARN_ON(!dev_priv->semaphore_obj);
2852 engine->semaphore.sync_to = gen8_ring_sync;
2853 engine->semaphore.signal = gen8_rcs_signal; 2861 engine->semaphore.signal = gen8_rcs_signal;
2854 GEN8_RING_SEMAPHORE_INIT(engine);
2855 }
2856 } else if (INTEL_GEN(dev_priv) >= 6) { 2862 } else if (INTEL_GEN(dev_priv) >= 6) {
2857 engine->init_context = intel_rcs_ctx_init; 2863 engine->init_context = intel_rcs_ctx_init;
2858 engine->add_request = gen6_add_request;
2859 engine->flush = gen7_render_ring_flush; 2864 engine->flush = gen7_render_ring_flush;
2860 if (IS_GEN6(dev_priv)) 2865 if (IS_GEN6(dev_priv))
2861 engine->flush = gen6_render_ring_flush; 2866 engine->flush = gen6_render_ring_flush;
2862 engine->irq_get = gen6_ring_get_irq;
2863 engine->irq_put = gen6_ring_put_irq;
2864 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2865 engine->irq_seqno_barrier = gen6_seqno_barrier;
2866 engine->get_seqno = ring_get_seqno;
2867 engine->set_seqno = ring_set_seqno;
2868 if (i915_semaphore_is_enabled(dev_priv)) {
2869 engine->semaphore.sync_to = gen6_ring_sync;
2870 engine->semaphore.signal = gen6_signal;
2871 /*
2872 * The current semaphore is only applied on pre-gen8
2873 * platform. And there is no VCS2 ring on the pre-gen8
2874 * platform. So the semaphore between RCS and VCS2 is
2875 * initialized as INVALID. Gen8 will initialize the
2876 * sema between VCS2 and RCS later.
2877 */
2878 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2879 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2880 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2881 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2882 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2883 engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2884 engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2885 engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2886 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2887 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2888 }
2889 } else if (IS_GEN5(dev_priv)) { 2867 } else if (IS_GEN5(dev_priv)) {
2890 engine->add_request = pc_render_add_request;
2891 engine->flush = gen4_render_ring_flush; 2868 engine->flush = gen4_render_ring_flush;
2892 engine->get_seqno = pc_render_get_seqno;
2893 engine->set_seqno = pc_render_set_seqno;
2894 engine->irq_get = gen5_ring_get_irq;
2895 engine->irq_put = gen5_ring_put_irq;
2896 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2897 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2898 } else { 2869 } else {
2899 engine->add_request = i9xx_add_request;
2900 if (INTEL_GEN(dev_priv) < 4) 2870 if (INTEL_GEN(dev_priv) < 4)
2901 engine->flush = gen2_render_ring_flush; 2871 engine->flush = gen2_render_ring_flush;
2902 else 2872 else
2903 engine->flush = gen4_render_ring_flush; 2873 engine->flush = gen4_render_ring_flush;
2904 engine->get_seqno = ring_get_seqno;
2905 engine->set_seqno = ring_set_seqno;
2906 if (IS_GEN2(dev_priv)) {
2907 engine->irq_get = i8xx_ring_get_irq;
2908 engine->irq_put = i8xx_ring_put_irq;
2909 } else {
2910 engine->irq_get = i9xx_ring_get_irq;
2911 engine->irq_put = i9xx_ring_put_irq;
2912 }
2913 engine->irq_enable_mask = I915_USER_INTERRUPT; 2874 engine->irq_enable_mask = I915_USER_INTERRUPT;
2914 } 2875 }
2915 engine->write_tail = ring_write_tail;
2916 2876
2917 if (IS_HASWELL(dev_priv)) 2877 if (IS_HASWELL(dev_priv))
2918 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2878 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2919 else if (IS_GEN8(dev_priv)) 2879
2920 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2921 else if (INTEL_GEN(dev_priv) >= 6)
2922 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2923 else if (INTEL_GEN(dev_priv) >= 4)
2924 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
2925 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
2926 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
2927 else
2928 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
2929 engine->init_hw = init_render_ring; 2880 engine->init_hw = init_render_ring;
2930 engine->cleanup = render_ring_cleanup; 2881 engine->cleanup = render_ring_cleanup;
2931 2882
2932 /* Workaround batchbuffer to combat CS tlb bug. */
2933 if (HAS_BROKEN_CS_TLB(dev_priv)) {
2934 obj = i915_gem_object_create(dev, I830_WA_SIZE);
2935 if (IS_ERR(obj)) {
2936 DRM_ERROR("Failed to allocate batch bo\n");
2937 return PTR_ERR(obj);
2938 }
2939
2940 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
2941 if (ret != 0) {
2942 drm_gem_object_unreference(&obj->base);
2943 DRM_ERROR("Failed to pin batch bo\n");
2944 return ret;
2945 }
2946
2947 engine->scratch.obj = obj;
2948 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
2949 }
2950
2951 ret = intel_init_ring_buffer(dev, engine); 2883 ret = intel_init_ring_buffer(dev, engine);
2952 if (ret) 2884 if (ret)
2953 return ret; 2885 return ret;
2954 2886
2955 if (INTEL_GEN(dev_priv) >= 5) { 2887 if (INTEL_GEN(dev_priv) >= 6) {
2956 ret = intel_init_pipe_control(engine); 2888 ret = intel_init_pipe_control(engine, 4096);
2889 if (ret)
2890 return ret;
2891 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2892 ret = intel_init_pipe_control(engine, I830_WA_SIZE);
2957 if (ret) 2893 if (ret)
2958 return ret; 2894 return ret;
2959 } 2895 }
@@ -2963,7 +2899,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2963 2899
2964int intel_init_bsd_ring_buffer(struct drm_device *dev) 2900int intel_init_bsd_ring_buffer(struct drm_device *dev)
2965{ 2901{
2966 struct drm_i915_private *dev_priv = dev->dev_private; 2902 struct drm_i915_private *dev_priv = to_i915(dev);
2967 struct intel_engine_cs *engine = &dev_priv->engine[VCS]; 2903 struct intel_engine_cs *engine = &dev_priv->engine[VCS];
2968 2904
2969 engine->name = "bsd ring"; 2905 engine->name = "bsd ring";
@@ -2971,68 +2907,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2971 engine->exec_id = I915_EXEC_BSD; 2907 engine->exec_id = I915_EXEC_BSD;
2972 engine->hw_id = 1; 2908 engine->hw_id = 1;
2973 2909
2974 engine->write_tail = ring_write_tail; 2910 intel_ring_default_vfuncs(dev_priv, engine);
2911
2975 if (INTEL_GEN(dev_priv) >= 6) { 2912 if (INTEL_GEN(dev_priv) >= 6) {
2976 engine->mmio_base = GEN6_BSD_RING_BASE; 2913 engine->mmio_base = GEN6_BSD_RING_BASE;
2977 /* gen6 bsd needs a special wa for tail updates */ 2914 /* gen6 bsd needs a special wa for tail updates */
2978 if (IS_GEN6(dev_priv)) 2915 if (IS_GEN6(dev_priv))
2979 engine->write_tail = gen6_bsd_ring_write_tail; 2916 engine->write_tail = gen6_bsd_ring_write_tail;
2980 engine->flush = gen6_bsd_ring_flush; 2917 engine->flush = gen6_bsd_ring_flush;
2981 engine->add_request = gen6_add_request; 2918 if (INTEL_GEN(dev_priv) >= 8)
2982 engine->irq_seqno_barrier = gen6_seqno_barrier;
2983 engine->get_seqno = ring_get_seqno;
2984 engine->set_seqno = ring_set_seqno;
2985 if (INTEL_GEN(dev_priv) >= 8) {
2986 engine->irq_enable_mask = 2919 engine->irq_enable_mask =
2987 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2920 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2988 engine->irq_get = gen8_ring_get_irq; 2921 else
2989 engine->irq_put = gen8_ring_put_irq;
2990 engine->dispatch_execbuffer =
2991 gen8_ring_dispatch_execbuffer;
2992 if (i915_semaphore_is_enabled(dev_priv)) {
2993 engine->semaphore.sync_to = gen8_ring_sync;
2994 engine->semaphore.signal = gen8_xcs_signal;
2995 GEN8_RING_SEMAPHORE_INIT(engine);
2996 }
2997 } else {
2998 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2922 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2999 engine->irq_get = gen6_ring_get_irq;
3000 engine->irq_put = gen6_ring_put_irq;
3001 engine->dispatch_execbuffer =
3002 gen6_ring_dispatch_execbuffer;
3003 if (i915_semaphore_is_enabled(dev_priv)) {
3004 engine->semaphore.sync_to = gen6_ring_sync;
3005 engine->semaphore.signal = gen6_signal;
3006 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
3007 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
3008 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
3009 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
3010 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3011 engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
3012 engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
3013 engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
3014 engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
3015 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
3016 }
3017 }
3018 } else { 2923 } else {
3019 engine->mmio_base = BSD_RING_BASE; 2924 engine->mmio_base = BSD_RING_BASE;
3020 engine->flush = bsd_ring_flush; 2925 engine->flush = bsd_ring_flush;
3021 engine->add_request = i9xx_add_request; 2926 if (IS_GEN5(dev_priv))
3022 engine->get_seqno = ring_get_seqno;
3023 engine->set_seqno = ring_set_seqno;
3024 if (IS_GEN5(dev_priv)) {
3025 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2927 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
3026 engine->irq_get = gen5_ring_get_irq; 2928 else
3027 engine->irq_put = gen5_ring_put_irq;
3028 } else {
3029 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2929 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
3030 engine->irq_get = i9xx_ring_get_irq;
3031 engine->irq_put = i9xx_ring_put_irq;
3032 }
3033 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
3034 } 2930 }
3035 engine->init_hw = init_ring_common;
3036 2931
3037 return intel_init_ring_buffer(dev, engine); 2932 return intel_init_ring_buffer(dev, engine);
3038} 2933}
@@ -3042,147 +2937,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
3042 */ 2937 */
3043int intel_init_bsd2_ring_buffer(struct drm_device *dev) 2938int intel_init_bsd2_ring_buffer(struct drm_device *dev)
3044{ 2939{
3045 struct drm_i915_private *dev_priv = dev->dev_private; 2940 struct drm_i915_private *dev_priv = to_i915(dev);
3046 struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; 2941 struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
3047 2942
3048 engine->name = "bsd2 ring"; 2943 engine->name = "bsd2 ring";
3049 engine->id = VCS2; 2944 engine->id = VCS2;
3050 engine->exec_id = I915_EXEC_BSD; 2945 engine->exec_id = I915_EXEC_BSD;
3051 engine->hw_id = 4; 2946 engine->hw_id = 4;
3052
3053 engine->write_tail = ring_write_tail;
3054 engine->mmio_base = GEN8_BSD2_RING_BASE; 2947 engine->mmio_base = GEN8_BSD2_RING_BASE;
2948
2949 intel_ring_default_vfuncs(dev_priv, engine);
2950
3055 engine->flush = gen6_bsd_ring_flush; 2951 engine->flush = gen6_bsd_ring_flush;
3056 engine->add_request = gen6_add_request;
3057 engine->irq_seqno_barrier = gen6_seqno_barrier;
3058 engine->get_seqno = ring_get_seqno;
3059 engine->set_seqno = ring_set_seqno;
3060 engine->irq_enable_mask = 2952 engine->irq_enable_mask =
3061 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; 2953 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
3062 engine->irq_get = gen8_ring_get_irq;
3063 engine->irq_put = gen8_ring_put_irq;
3064 engine->dispatch_execbuffer =
3065 gen8_ring_dispatch_execbuffer;
3066 if (i915_semaphore_is_enabled(dev_priv)) {
3067 engine->semaphore.sync_to = gen8_ring_sync;
3068 engine->semaphore.signal = gen8_xcs_signal;
3069 GEN8_RING_SEMAPHORE_INIT(engine);
3070 }
3071 engine->init_hw = init_ring_common;
3072 2954
3073 return intel_init_ring_buffer(dev, engine); 2955 return intel_init_ring_buffer(dev, engine);
3074} 2956}
3075 2957
3076int intel_init_blt_ring_buffer(struct drm_device *dev) 2958int intel_init_blt_ring_buffer(struct drm_device *dev)
3077{ 2959{
3078 struct drm_i915_private *dev_priv = dev->dev_private; 2960 struct drm_i915_private *dev_priv = to_i915(dev);
3079 struct intel_engine_cs *engine = &dev_priv->engine[BCS]; 2961 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
3080 2962
3081 engine->name = "blitter ring"; 2963 engine->name = "blitter ring";
3082 engine->id = BCS; 2964 engine->id = BCS;
3083 engine->exec_id = I915_EXEC_BLT; 2965 engine->exec_id = I915_EXEC_BLT;
3084 engine->hw_id = 2; 2966 engine->hw_id = 2;
3085
3086 engine->mmio_base = BLT_RING_BASE; 2967 engine->mmio_base = BLT_RING_BASE;
3087 engine->write_tail = ring_write_tail; 2968
2969 intel_ring_default_vfuncs(dev_priv, engine);
2970
3088 engine->flush = gen6_ring_flush; 2971 engine->flush = gen6_ring_flush;
3089 engine->add_request = gen6_add_request; 2972 if (INTEL_GEN(dev_priv) >= 8)
3090 engine->irq_seqno_barrier = gen6_seqno_barrier;
3091 engine->get_seqno = ring_get_seqno;
3092 engine->set_seqno = ring_set_seqno;
3093 if (INTEL_GEN(dev_priv) >= 8) {
3094 engine->irq_enable_mask = 2973 engine->irq_enable_mask =
3095 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 2974 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
3096 engine->irq_get = gen8_ring_get_irq; 2975 else
3097 engine->irq_put = gen8_ring_put_irq;
3098 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3099 if (i915_semaphore_is_enabled(dev_priv)) {
3100 engine->semaphore.sync_to = gen8_ring_sync;
3101 engine->semaphore.signal = gen8_xcs_signal;
3102 GEN8_RING_SEMAPHORE_INIT(engine);
3103 }
3104 } else {
3105 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2976 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
3106 engine->irq_get = gen6_ring_get_irq;
3107 engine->irq_put = gen6_ring_put_irq;
3108 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3109 if (i915_semaphore_is_enabled(dev_priv)) {
3110 engine->semaphore.signal = gen6_signal;
3111 engine->semaphore.sync_to = gen6_ring_sync;
3112 /*
3113 * The current semaphore is only applied on pre-gen8
3114 * platform. And there is no VCS2 ring on the pre-gen8
3115 * platform. So the semaphore between BCS and VCS2 is
3116 * initialized as INVALID. Gen8 will initialize the
3117 * sema between BCS and VCS2 later.
3118 */
3119 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
3120 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
3121 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
3122 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
3123 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3124 engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
3125 engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
3126 engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
3127 engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
3128 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
3129 }
3130 }
3131 engine->init_hw = init_ring_common;
3132 2977
3133 return intel_init_ring_buffer(dev, engine); 2978 return intel_init_ring_buffer(dev, engine);
3134} 2979}
3135 2980
3136int intel_init_vebox_ring_buffer(struct drm_device *dev) 2981int intel_init_vebox_ring_buffer(struct drm_device *dev)
3137{ 2982{
3138 struct drm_i915_private *dev_priv = dev->dev_private; 2983 struct drm_i915_private *dev_priv = to_i915(dev);
3139 struct intel_engine_cs *engine = &dev_priv->engine[VECS]; 2984 struct intel_engine_cs *engine = &dev_priv->engine[VECS];
3140 2985
3141 engine->name = "video enhancement ring"; 2986 engine->name = "video enhancement ring";
3142 engine->id = VECS; 2987 engine->id = VECS;
3143 engine->exec_id = I915_EXEC_VEBOX; 2988 engine->exec_id = I915_EXEC_VEBOX;
3144 engine->hw_id = 3; 2989 engine->hw_id = 3;
3145
3146 engine->mmio_base = VEBOX_RING_BASE; 2990 engine->mmio_base = VEBOX_RING_BASE;
3147 engine->write_tail = ring_write_tail; 2991
2992 intel_ring_default_vfuncs(dev_priv, engine);
2993
3148 engine->flush = gen6_ring_flush; 2994 engine->flush = gen6_ring_flush;
3149 engine->add_request = gen6_add_request;
3150 engine->irq_seqno_barrier = gen6_seqno_barrier;
3151 engine->get_seqno = ring_get_seqno;
3152 engine->set_seqno = ring_set_seqno;
3153 2995
3154 if (INTEL_GEN(dev_priv) >= 8) { 2996 if (INTEL_GEN(dev_priv) >= 8) {
3155 engine->irq_enable_mask = 2997 engine->irq_enable_mask =
3156 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 2998 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
3157 engine->irq_get = gen8_ring_get_irq;
3158 engine->irq_put = gen8_ring_put_irq;
3159 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3160 if (i915_semaphore_is_enabled(dev_priv)) {
3161 engine->semaphore.sync_to = gen8_ring_sync;
3162 engine->semaphore.signal = gen8_xcs_signal;
3163 GEN8_RING_SEMAPHORE_INIT(engine);
3164 }
3165 } else { 2999 } else {
3166 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 3000 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
3167 engine->irq_get = hsw_vebox_get_irq; 3001 engine->irq_enable = hsw_vebox_irq_enable;
3168 engine->irq_put = hsw_vebox_put_irq; 3002 engine->irq_disable = hsw_vebox_irq_disable;
3169 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3170 if (i915_semaphore_is_enabled(dev_priv)) {
3171 engine->semaphore.sync_to = gen6_ring_sync;
3172 engine->semaphore.signal = gen6_signal;
3173 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
3174 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
3175 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
3176 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
3177 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3178 engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
3179 engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
3180 engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
3181 engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
3182 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
3183 }
3184 } 3003 }
3185 engine->init_hw = init_ring_common;
3186 3004
3187 return intel_init_ring_buffer(dev, engine); 3005 return intel_init_ring_buffer(dev, engine);
3188} 3006}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b33c876fed20..12cb7ed90014 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -62,18 +62,6 @@ struct intel_hw_status_page {
62 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ 62 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
63 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) 63 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
64 64
65#define GEN8_RING_SEMAPHORE_INIT(e) do { \
66 if (!dev_priv->semaphore_obj) { \
67 break; \
68 } \
69 (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
70 (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
71 (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
72 (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
73 (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
74 (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
75 } while(0)
76
77enum intel_ring_hangcheck_action { 65enum intel_ring_hangcheck_action {
78 HANGCHECK_IDLE = 0, 66 HANGCHECK_IDLE = 0,
79 HANGCHECK_WAIT, 67 HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
86 74
87struct intel_ring_hangcheck { 75struct intel_ring_hangcheck {
88 u64 acthd; 76 u64 acthd;
77 unsigned long user_interrupts;
89 u32 seqno; 78 u32 seqno;
90 unsigned user_interrupts;
91 int score; 79 int score;
92 enum intel_ring_hangcheck_action action; 80 enum intel_ring_hangcheck_action action;
93 int deadlock; 81 int deadlock;
@@ -141,6 +129,8 @@ struct i915_ctx_workarounds {
141 struct drm_i915_gem_object *obj; 129 struct drm_i915_gem_object *obj;
142}; 130};
143 131
132struct drm_i915_gem_request;
133
144struct intel_engine_cs { 134struct intel_engine_cs {
145 struct drm_i915_private *i915; 135 struct drm_i915_private *i915;
146 const char *name; 136 const char *name;
@@ -160,6 +150,39 @@ struct intel_engine_cs {
160 struct intel_ringbuffer *buffer; 150 struct intel_ringbuffer *buffer;
161 struct list_head buffers; 151 struct list_head buffers;
162 152
153 /* Rather than have every client wait upon all user interrupts,
154 * with the herd waking after every interrupt and each doing the
155 * heavyweight seqno dance, we delegate the task (of being the
156 * bottom-half of the user interrupt) to the first client. After
157 * every interrupt, we wake up one client, who does the heavyweight
158 * coherent seqno read and either goes back to sleep (if incomplete),
159 * or wakes up all the completed clients in parallel, before then
160 * transferring the bottom-half status to the next client in the queue.
161 *
162 * Compared to walking the entire list of waiters in a single dedicated
163 * bottom-half, we reduce the latency of the first waiter by avoiding
164 * a context switch, but incur additional coherent seqno reads when
165 * following the chain of request breadcrumbs. Since it is most likely
 166 * that we have a single client waiting on each seqno, reducing
167 * the overhead of waking that client is much preferred.
168 */
169 struct intel_breadcrumbs {
170 struct task_struct *irq_seqno_bh; /* bh for user interrupts */
171 unsigned long irq_wakeups;
172 bool irq_posted;
173
174 spinlock_t lock; /* protects the lists of requests */
175 struct rb_root waiters; /* sorted by retirement, priority */
176 struct rb_root signals; /* sorted by retirement */
177 struct intel_wait *first_wait; /* oldest waiter by retirement */
178 struct task_struct *signaler; /* used for fence signalling */
179 struct drm_i915_gem_request *first_signal;
180 struct timer_list fake_irq; /* used after a missed interrupt */
181
182 bool irq_enabled : 1;
183 bool rpm_wakelock : 1;
184 } breadcrumbs;
185
163 /* 186 /*
164 * A pool of objects to use as shadow copies of client batch buffers 187 * A pool of objects to use as shadow copies of client batch buffers
165 * when the command parser is enabled. Prevents the client from 188 * when the command parser is enabled. Prevents the client from
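The delegation scheme described in the breadcrumbs comment above is easiest to follow from the waiter's side. A minimal sketch using the helpers declared at the bottom of this header plus i915_seqno_passed() from i915_drv.h; it assumes intel_engine_add_wait() returning true means this task just became the bottom-half (error handling, timeouts and signals omitted):

	struct intel_wait wait;

	intel_wait_init(&wait, seqno);
	intel_engine_add_wait(engine, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		schedule();	/* woken by the interrupt, or handed the bh role */
	}
	__set_current_state(TASK_RUNNING);
	intel_engine_remove_wait(engine, &wait);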
@@ -170,11 +193,10 @@ struct intel_engine_cs {
170 struct intel_hw_status_page status_page; 193 struct intel_hw_status_page status_page;
171 struct i915_ctx_workarounds wa_ctx; 194 struct i915_ctx_workarounds wa_ctx;
172 195
173 unsigned irq_refcount; /* protected by dev_priv->irq_lock */ 196 u32 irq_keep_mask; /* always keep these interrupts */
174 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 197 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
175 struct drm_i915_gem_request *trace_irq_req; 198 void (*irq_enable)(struct intel_engine_cs *ring);
176 bool __must_check (*irq_get)(struct intel_engine_cs *ring); 199 void (*irq_disable)(struct intel_engine_cs *ring);
177 void (*irq_put)(struct intel_engine_cs *ring);
178 200
179 int (*init_hw)(struct intel_engine_cs *ring); 201 int (*init_hw)(struct intel_engine_cs *ring);
180 202
@@ -193,9 +215,6 @@ struct intel_engine_cs {
193 * monotonic, even if not coherent. 215 * monotonic, even if not coherent.
194 */ 216 */
195 void (*irq_seqno_barrier)(struct intel_engine_cs *ring); 217 void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
196 u32 (*get_seqno)(struct intel_engine_cs *ring);
197 void (*set_seqno)(struct intel_engine_cs *ring,
198 u32 seqno);
199 int (*dispatch_execbuffer)(struct drm_i915_gem_request *req, 218 int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
200 u64 offset, u32 length, 219 u64 offset, u32 length,
201 unsigned dispatch_flags); 220 unsigned dispatch_flags);
@@ -272,7 +291,6 @@ struct intel_engine_cs {
272 unsigned int idle_lite_restore_wa; 291 unsigned int idle_lite_restore_wa;
273 bool disable_lite_restore_wa; 292 bool disable_lite_restore_wa;
274 u32 ctx_desc_template; 293 u32 ctx_desc_template;
275 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
276 int (*emit_request)(struct drm_i915_gem_request *request); 294 int (*emit_request)(struct drm_i915_gem_request *request);
277 int (*emit_flush)(struct drm_i915_gem_request *request, 295 int (*emit_flush)(struct drm_i915_gem_request *request,
278 u32 invalidate_domains, 296 u32 invalidate_domains,
@@ -304,12 +322,9 @@ struct intel_engine_cs {
304 * inspecting request list. 322 * inspecting request list.
305 */ 323 */
306 u32 last_submitted_seqno; 324 u32 last_submitted_seqno;
307 unsigned user_interrupts;
308 325
309 bool gpu_caches_dirty; 326 bool gpu_caches_dirty;
310 327
311 wait_queue_head_t irq_queue;
312
313 struct i915_gem_context *last_context; 328 struct i915_gem_context *last_context;
314 329
315 struct intel_ring_hangcheck hangcheck; 330 struct intel_ring_hangcheck hangcheck;
@@ -317,7 +332,6 @@ struct intel_engine_cs {
317 struct { 332 struct {
318 struct drm_i915_gem_object *obj; 333 struct drm_i915_gem_object *obj;
319 u32 gtt_offset; 334 u32 gtt_offset;
320 volatile u32 *cpu_page;
321 } scratch; 335 } scratch;
322 336
323 bool needs_cmd_parser; 337 bool needs_cmd_parser;
@@ -348,13 +362,13 @@ struct intel_engine_cs {
348}; 362};
349 363
350static inline bool 364static inline bool
351intel_engine_initialized(struct intel_engine_cs *engine) 365intel_engine_initialized(const struct intel_engine_cs *engine)
352{ 366{
353 return engine->i915 != NULL; 367 return engine->i915 != NULL;
354} 368}
355 369
356static inline unsigned 370static inline unsigned
357intel_engine_flag(struct intel_engine_cs *engine) 371intel_engine_flag(const struct intel_engine_cs *engine)
358{ 372{
359 return 1 << engine->id; 373 return 1 << engine->id;
360} 374}
@@ -456,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
456} 470}
457int __intel_ring_space(int head, int tail, int size); 471int __intel_ring_space(int head, int tail, int size);
458void intel_ring_update_space(struct intel_ringbuffer *ringbuf); 472void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
459bool intel_engine_stopped(struct intel_engine_cs *engine);
460 473
461int __must_check intel_engine_idle(struct intel_engine_cs *engine); 474int __must_check intel_engine_idle(struct intel_engine_cs *engine);
462void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno); 475void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
463int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); 476int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
464int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); 477int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
465 478
479int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
466void intel_fini_pipe_control(struct intel_engine_cs *engine); 480void intel_fini_pipe_control(struct intel_engine_cs *engine);
467int intel_init_pipe_control(struct intel_engine_cs *engine);
468 481
469int intel_init_render_ring_buffer(struct drm_device *dev); 482int intel_init_render_ring_buffer(struct drm_device *dev);
470int intel_init_bsd_ring_buffer(struct drm_device *dev); 483int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -473,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
473int intel_init_vebox_ring_buffer(struct drm_device *dev); 486int intel_init_vebox_ring_buffer(struct drm_device *dev);
474 487
475u64 intel_ring_get_active_head(struct intel_engine_cs *engine); 488u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
489static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
490{
491 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
492}
476 493
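With the get_seqno()/set_seqno() vfuncs gone, every reader takes the same path through the status page. A sketch of what the inline above resolves to, assuming the usual intel_read_status_page() helper (a READ_ONCE of the CPU mapping):

	u32 seqno = engine->status_page.page_addr[I915_GEM_HWS_INDEX];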
477int init_workarounds_ring(struct intel_engine_cs *engine); 494int init_workarounds_ring(struct intel_engine_cs *engine);
478 495
@@ -495,4 +512,62 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
495 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR; 512 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
496} 513}
497 514
515/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
516struct intel_wait {
517 struct rb_node node;
518 struct task_struct *tsk;
519 u32 seqno;
520};
521
522struct intel_signal_node {
523 struct rb_node node;
524 struct intel_wait wait;
525};
526
527int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
528
529static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
530{
531 wait->tsk = current;
532 wait->seqno = seqno;
533}
534
535static inline bool intel_wait_complete(const struct intel_wait *wait)
536{
537 return RB_EMPTY_NODE(&wait->node);
538}
539
540bool intel_engine_add_wait(struct intel_engine_cs *engine,
541 struct intel_wait *wait);
542void intel_engine_remove_wait(struct intel_engine_cs *engine,
543 struct intel_wait *wait);
544void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
545
546static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
547{
548 return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
549}
550
551static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
552{
553 bool wakeup = false;
554 struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
555 /* Note that for this not to dangerously chase a dangling pointer,
 556 * the caller is responsible for ensuring that the task remains valid for
557 * wake_up_process() i.e. that the RCU grace period cannot expire.
558 *
559 * Also note that tsk is likely to be in !TASK_RUNNING state so an
560 * early test for tsk->state != TASK_RUNNING before wake_up_process()
561 * is unlikely to be beneficial.
562 */
563 if (tsk)
564 wakeup = wake_up_process(tsk);
565 return wakeup;
566}
567
568void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
569void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
570unsigned int intel_kick_waiters(struct drm_i915_private *i915);
571unsigned int intel_kick_signalers(struct drm_i915_private *i915);
572
498#endif /* _INTEL_RINGBUFFER_H_ */ 573#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index e856d49d6dc3..6b78295f53db 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -287,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
287 */ 287 */
288static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 288static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
289{ 289{
290 struct drm_device *dev = dev_priv->dev; 290 struct drm_device *dev = &dev_priv->drm;
291 291
292 /* 292 /*
293 * After we re-enable the power well, if we touch VGA register 0x3d5 293 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -318,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
318static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, 318static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
319 struct i915_power_well *power_well) 319 struct i915_power_well *power_well)
320{ 320{
321 struct drm_device *dev = dev_priv->dev; 321 struct drm_device *dev = &dev_priv->drm;
322 322
323 /* 323 /*
324 * After we re-enable the power well, if we touch VGA register 0x3d5 324 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -365,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
365 365
366 if (!is_enabled) { 366 if (!is_enabled) {
367 DRM_DEBUG_KMS("Enabling power well\n"); 367 DRM_DEBUG_KMS("Enabling power well\n");
368 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 368 if (intel_wait_for_register(dev_priv,
369 HSW_PWR_WELL_STATE_ENABLED), 20)) 369 HSW_PWR_WELL_DRIVER,
370 HSW_PWR_WELL_STATE_ENABLED,
371 HSW_PWR_WELL_STATE_ENABLED,
372 20))
370 DRM_ERROR("Timeout enabling power well\n"); 373 DRM_ERROR("Timeout enabling power well\n");
371 hsw_power_well_post_enable(dev_priv); 374 hsw_power_well_post_enable(dev_priv);
372 } 375 }
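Each conversion in this file has the same shape: the open-coded wait_for((I915_READ(reg) & mask) == value, timeout) becomes a call into the uncore helper. Its assumed contract, as a sketch:

	/*
	 * intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms)
	 * polls until (I915_READ(reg) & mask) == value, returning 0 on
	 * success and presumably -ETIMEDOUT once timeout_ms expires, so
	 * a non-zero result feeds straight into the DRM_ERROR() paths.
	 */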
@@ -578,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
578 581
579 DRM_DEBUG_KMS("Enabling DC9\n"); 582 DRM_DEBUG_KMS("Enabling DC9\n");
580 583
584 intel_power_sequencer_reset(dev_priv);
581 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 585 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
582} 586}
583 587
@@ -699,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
699 703
700 switch (power_well->data) { 704 switch (power_well->data) {
701 case SKL_DISP_PW_1: 705 case SKL_DISP_PW_1:
702 if (wait_for((I915_READ(SKL_FUSE_STATUS) & 706 if (intel_wait_for_register(dev_priv,
703 SKL_FUSE_PG0_DIST_STATUS), 1)) { 707 SKL_FUSE_STATUS,
708 SKL_FUSE_PG0_DIST_STATUS,
709 SKL_FUSE_PG0_DIST_STATUS,
710 1)) {
704 DRM_ERROR("PG0 not enabled\n"); 711 DRM_ERROR("PG0 not enabled\n");
705 return; 712 return;
706 } 713 }
@@ -761,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
761 768
762 if (check_fuse_status) { 769 if (check_fuse_status) {
763 if (power_well->data == SKL_DISP_PW_1) { 770 if (power_well->data == SKL_DISP_PW_1) {
764 if (wait_for((I915_READ(SKL_FUSE_STATUS) & 771 if (intel_wait_for_register(dev_priv,
765 SKL_FUSE_PG1_DIST_STATUS), 1)) 772 SKL_FUSE_STATUS,
773 SKL_FUSE_PG1_DIST_STATUS,
774 SKL_FUSE_PG1_DIST_STATUS,
775 1))
766 DRM_ERROR("PG1 distributing status timeout\n"); 776 DRM_ERROR("PG1 distributing status timeout\n");
767 } else if (power_well->data == SKL_DISP_PW_2) { 777 } else if (power_well->data == SKL_DISP_PW_2) {
768 if (wait_for((I915_READ(SKL_FUSE_STATUS) & 778 if (intel_wait_for_register(dev_priv,
769 SKL_FUSE_PG2_DIST_STATUS), 1)) 779 SKL_FUSE_STATUS,
780 SKL_FUSE_PG2_DIST_STATUS,
781 SKL_FUSE_PG2_DIST_STATUS,
782 1))
770 DRM_ERROR("PG2 distributing status timeout\n"); 783 DRM_ERROR("PG2 distributing status timeout\n");
771 } 784 }
772 } 785 }
@@ -917,7 +930,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
917 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 930 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
918 931
919 WARN_ON(dev_priv->cdclk_freq != 932 WARN_ON(dev_priv->cdclk_freq !=
920 dev_priv->display.get_display_clock_speed(dev_priv->dev)); 933 dev_priv->display.get_display_clock_speed(&dev_priv->drm));
921 934
922 gen9_assert_dbuf_enabled(dev_priv); 935 gen9_assert_dbuf_enabled(dev_priv);
923 936
@@ -1075,7 +1088,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1075 * 1088 *
1076 * CHV DPLL B/C have some issues if VGA mode is enabled. 1089 * CHV DPLL B/C have some issues if VGA mode is enabled.
1077 */ 1090 */
1078 for_each_pipe(dev_priv->dev, pipe) { 1091 for_each_pipe(&dev_priv->drm, pipe) {
1079 u32 val = I915_READ(DPLL(pipe)); 1092 u32 val = I915_READ(DPLL(pipe));
1080 1093
1081 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1094 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1100,7 +1113,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
1100 1113
1101 intel_hpd_init(dev_priv); 1114 intel_hpd_init(dev_priv);
1102 1115
1103 i915_redisable_vga_power_on(dev_priv->dev); 1116 i915_redisable_vga_power_on(&dev_priv->drm);
1104} 1117}
1105 1118
1106static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1119static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -1110,9 +1123,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
1110 spin_unlock_irq(&dev_priv->irq_lock); 1123 spin_unlock_irq(&dev_priv->irq_lock);
1111 1124
1112 /* make sure we're done processing display irqs */ 1125 /* make sure we're done processing display irqs */
1113 synchronize_irq(dev_priv->dev->irq); 1126 synchronize_irq(dev_priv->drm.irq);
1114 1127
1115 vlv_power_sequencer_reset(dev_priv); 1128 intel_power_sequencer_reset(dev_priv);
1116} 1129}
1117 1130
1118static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1131static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1205,7 +1218,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1205 u32 phy_control = dev_priv->chv_phy_control; 1218 u32 phy_control = dev_priv->chv_phy_control;
1206 u32 phy_status = 0; 1219 u32 phy_status = 0;
1207 u32 phy_status_mask = 0xffffffff; 1220 u32 phy_status_mask = 0xffffffff;
1208 u32 tmp;
1209 1221
1210 /* 1222 /*
1211 * The BIOS can leave the PHY in some weird state 1223
@@ -1293,10 +1305,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1293 * The PHY may be busy with some initial calibration and whatnot, 1305 * The PHY may be busy with some initial calibration and whatnot,
1294 * so the power state can take a while to actually change. 1306 * so the power state can take a while to actually change.
1295 */ 1307 */
1296 if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10)) 1308 if (intel_wait_for_register(dev_priv,
1297 WARN(phy_status != tmp, 1309 DISPLAY_PHY_STATUS,
1298 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1310 phy_status_mask,
1299 tmp, phy_status, dev_priv->chv_phy_control); 1311 phy_status,
1312 10))
1313 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1314 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1315 phy_status, dev_priv->chv_phy_control);
1300} 1316}
1301 1317
1302#undef BITS_SET 1318#undef BITS_SET
@@ -1324,7 +1340,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1324 vlv_set_power_well(dev_priv, power_well, true); 1340 vlv_set_power_well(dev_priv, power_well, true);
1325 1341
1326 /* Poll for phypwrgood signal */ 1342 /* Poll for phypwrgood signal */
1327 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) 1343 if (intel_wait_for_register(dev_priv,
1344 DISPLAY_PHY_STATUS,
1345 PHY_POWERGOOD(phy),
1346 PHY_POWERGOOD(phy),
1347 1))
1328 DRM_ERROR("Display PHY %d is not powered up\n", phy); 1348
1329 1349
1330 mutex_lock(&dev_priv->sb_lock); 1350 mutex_lock(&dev_priv->sb_lock);
@@ -2255,7 +2275,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 {
-	struct device *device = &dev_priv->dev->pdev->dev;
+	struct device *device = &dev_priv->drm.pdev->dev;
 
 	/*
 	 * The i915.ko module is still not prepared to be loaded when
@@ -2556,7 +2576,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
  */
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
 	power_domains->initializing = true;
@@ -2618,7 +2638,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct device *device = &dev->pdev->dev;
 
 	pm_runtime_get_sync(device);
@@ -2639,7 +2659,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
  */
 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct device *device = &dev->pdev->dev;
 
 	if (IS_ENABLED(CONFIG_PM)) {
@@ -2681,7 +2701,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct device *device = &dev->pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
@@ -2700,7 +2720,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct device *device = &dev->pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
@@ -2723,7 +2743,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
  */
 void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct device *device = &dev->pdev->dev;
 
 	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
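[Editor's note] The dev_priv->dev to &dev_priv->drm and dev->dev_private to
to_i915(dev) conversions that run through this diff work because struct
drm_i915_private now embeds its struct drm_device instead of pointing at
one. A minimal model of the relationship (a sketch, not the full upstream
definitions):

	struct drm_i915_private {
		struct drm_device drm;	/* embedded, no longer a pointer */
		/* ... */
	};

	/* Recover the containing i915 private from the embedded drm_device. */
	static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
	{
		return container_of(dev, struct drm_i915_private, drm);
	}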
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 02b4a6695528..e378f35365a2 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 {
 	struct drm_device *dev = intel_sdvo->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 bval = val, cval = val;
 	int i;
 
@@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_device *dev = intel_encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
 	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
 	struct drm_display_mode *mode = &crtc->config->base.mode;
@@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
 				    enum pipe *pipe)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	u16 active_outputs = 0;
 	u32 tmp;
@@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 				  struct intel_crtc_state *pipe_config)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_sdvo_dtd dtd;
 	int encoder_pixel_multiplier = 0;
@@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 
 static void intel_disable_sdvo(struct intel_encoder *encoder)
 {
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 	u32 temp;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
 		temp &= ~SDVO_ENABLE;
 		intel_sdvo_write_sdvox(intel_sdvo, temp);
 
-		intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
+		intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
 		intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 		intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
 	}
@@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder)
 static void intel_enable_sdvo(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	u32 temp;
@@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector)
 static struct edid *
 intel_sdvo_get_analog_edid(struct drm_connector *connector)
 {
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 
 	return drm_get_edid(connector,
 			    intel_gmbus_get_adapter(dev_priv,
@@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct drm_display_mode *newmode;
 
 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	uint16_t temp_value;
 	uint8_t cmd;
 	int ret;
@@ -2177,6 +2177,21 @@ done:
 #undef CHECK_PROPERTY
 }
 
+static int
+intel_sdvo_connector_register(struct drm_connector *connector)
+{
+	struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
+	int ret;
+
+	ret = intel_connector_register(connector);
+	if (ret)
+		return ret;
+
+	return sysfs_create_link(&connector->kdev->kobj,
+				 &sdvo->ddc.dev.kobj,
+				 sdvo->ddc.dev.kobj.name);
+}
+
 static void
 intel_sdvo_connector_unregister(struct drm_connector *connector)
 {
@@ -2193,6 +2208,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.set_property = intel_sdvo_set_property,
 	.atomic_get_property = intel_connector_atomic_get_property,
+	.late_register = intel_sdvo_connector_register,
 	.early_unregister = intel_sdvo_connector_unregister,
 	.destroy = intel_sdvo_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
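[Editor's note] The new .late_register hook moves connector registration out
of init time and into drm_connector_register(), which the DRM core calls
once the device as a whole is registered; .early_unregister undoes it first
on teardown. The unregister side is not shown in this hunk, but it would be
expected to remove the sysfs DDC link before dropping the connector,
roughly as follows (a sketch mirroring the register hook above, not the
verbatim upstream body):

	static void
	intel_sdvo_connector_unregister(struct drm_connector *connector)
	{
		struct intel_sdvo *sdvo = intel_attached_sdvo(connector);

		/* Drop the symlink created in intel_sdvo_connector_register(). */
		sysfs_remove_link(&connector->kdev->kobj,
				  sdvo->ddc.dev.kobj.name);
		intel_connector_unregister(connector);
	}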
@@ -2322,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 static u8
 intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
 	if (sdvo->port == PORT_B) {
@@ -2380,24 +2396,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
 	connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
 
 	intel_connector_attach_encoder(&connector->base, &encoder->base);
-	ret = drm_connector_register(drm_connector);
-	if (ret < 0)
-		goto err1;
-
-	ret = sysfs_create_link(&drm_connector->kdev->kobj,
-				&encoder->ddc.dev.kobj,
-				encoder->ddc.dev.kobj.name);
-	if (ret < 0)
-		goto err2;
 
 	return 0;
-
-err2:
-	drm_connector_unregister(drm_connector);
-err1:
-	drm_connector_cleanup(drm_connector);
-
-	return ret;
 }
 
 static void
@@ -2524,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 		return true;
 
 err:
-	drm_connector_unregister(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2603,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 		return true;
 
 err:
-	drm_connector_unregister(connector);
 	intel_sdvo_destroy(connector);
 	return false;
 }
@@ -2954,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
 bool intel_sdvo_init(struct drm_device *dev,
 		     i915_reg_t sdvo_reg, enum port port)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_encoder *intel_encoder;
 	struct intel_sdvo *intel_sdvo;
 	int i;
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index c3998188cf35..1a840bf92eea 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+	if (intel_wait_for_register(dev_priv,
+				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+				    5)) {
 		DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
 				 is_read ? "read" : "write");
 		return -EAGAIN;
@@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
 		I915_WRITE(VLV_IOSF_DATA, *val);
 	I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
 
-	if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) {
+	if (intel_wait_for_register(dev_priv,
+				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+				    5)) {
 		DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
 				 is_read ? "read" : "write");
 		return -ETIMEDOUT;
@@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 	u32 value = 0;
 	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-		     100)) {
+	if (intel_wait_for_register(dev_priv,
+				    SBI_CTL_STAT, SBI_BUSY, 0,
+				    100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
 		return 0;
 	}
@@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 	value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
 	I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
-	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-		     100)) {
+	if (intel_wait_for_register(dev_priv,
+				    SBI_CTL_STAT,
+				    SBI_BUSY | SBI_RESPONSE_FAIL,
+				    0,
+				    100)) {
 		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
 		return 0;
 	}
@@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
 
-	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
-		     100)) {
+	if (intel_wait_for_register(dev_priv,
+				    SBI_CTL_STAT, SBI_BUSY, 0,
+				    100)) {
 		DRM_ERROR("timeout waiting for SBI to become ready\n");
 		return;
 	}
@@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 	tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
 	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
-	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
-		     100)) {
+	if (intel_wait_for_register(dev_priv,
+				    SBI_CTL_STAT,
+				    SBI_BUSY | SBI_RESPONSE_FAIL,
+				    0,
+				    100)) {
 		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
 		return;
 	}
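[Editor's note] Every sideband conversion above waits for bits to *clear*,
which the new API expresses as value == 0 under the given mask. If the
pattern kept recurring it could be wrapped in a tiny helper; the function
below is hypothetical, purely to illustrate the mask/value semantics:

	/* Hypothetical convenience wrapper: wait until all of @bits clear. */
	static inline int wait_for_clear(struct drm_i915_private *dev_priv,
					 i915_reg_t reg, u32 bits,
					 unsigned long timeout_ms)
	{
		return intel_wait_for_register(dev_priv, reg, bits, 0,
					       timeout_ms);
	}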
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index fc654173c491..0de935ad01c2 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -199,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = drm_plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -303,7 +303,7 @@ static void
 skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
 	const int pipe = intel_plane->pipe;
 	const int plane = intel_plane->plane + 1;
@@ -317,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 static void
 chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
 {
-	struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
 	int plane = intel_plane->plane;
 
 	/* Seems RGB data bypasses the CSC always */
@@ -359,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = dplane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -485,7 +485,7 @@ static void
 vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = dplane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(dplane);
 	int pipe = intel_plane->pipe;
 	int plane = intel_plane->plane;
@@ -502,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -624,7 +624,7 @@ static void
 ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int pipe = intel_plane->pipe;
 
@@ -643,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	struct drm_framebuffer *fb = plane_state->base.fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -753,7 +753,7 @@ static void
 ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 {
 	struct drm_device *dev = plane->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	int pipe = intel_plane->pipe;
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 4ce70a9f9df2..49136ad5473e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -826,7 +826,7 @@ static bool
 intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp = I915_READ(TV_CTL);
 
 	if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
 intel_enable_tv(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Prevents vblank waits from timing out in intel_tv_detect_type() */
 	intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
 intel_disable_tv(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
 }
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
 static void intel_tv_pre_enable(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
 	struct intel_tv *intel_tv = enc_to_tv(encoder);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 	struct drm_crtc *crtc = connector->state->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
 	int type;
@@ -1501,6 +1501,7 @@ out:
 static const struct drm_connector_funcs intel_tv_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_tv_detect,
+	.late_register = intel_connector_register,
 	.early_unregister = intel_connector_unregister,
 	.destroy = intel_tv_destroy,
 	.set_property = intel_tv_set_property,
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 void
 intel_tv_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_connector *connector;
 	struct intel_tv *intel_tv;
 	struct intel_encoder *intel_encoder;
@@ -1641,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
 	drm_object_attach_property(&connector->base,
 				   dev->mode_config.tv_bottom_margin_property,
 				   intel_tv->margin[TV_MARGIN_BOTTOM]);
-	drm_connector_register(connector);
 }
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c1ca458d688e..ff80a81b1a84 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1299,9 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
+		spin_lock_irq(&dev_priv->uncore.lock);
 		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
 		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
 		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+		spin_unlock_irq(&dev_priv->uncore.lock);
 
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
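[Editor's note] fw_domains_get_with_thread_status() and
fw_domains_put_with_fifo() are the inner helpers that assume
dev_priv->uncore.lock is already held; the hunk above adds the missing
locking around this boot-time ECOBUS probe. The public API performs the
same dance internally, approximately like this (a sketch, not the exact
upstream body):

	void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
	{
		unsigned long irqflags;

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
		fw_domains_get_with_thread_status(dev_priv, fw_domains);
		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	}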
@@ -1407,7 +1409,7 @@ static const struct register_whitelist {
 int i915_reg_read_ioctl(struct drm_device *dev,
 			void *data, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_reg_read *reg = data;
 	struct register_whitelist const *entry = whitelist;
 	unsigned size;
@@ -1469,7 +1471,7 @@ static int i915_reset_complete(struct pci_dev *pdev)
 
 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 
 	/* assert reset for at least 20 usec */
 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1488,14 +1490,14 @@ static int g4x_reset_complete(struct pci_dev *pdev)
 
 static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
 	return wait_for(g4x_reset_complete(pdev), 500);
 }
 
 static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
 {
-	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	int ret;
 
 	pci_write_config_byte(pdev, I915_GDRST,
@@ -1530,15 +1532,17 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 
 	I915_WRITE(ILK_GDSR,
 		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
-	ret = wait_for((I915_READ(ILK_GDSR) &
-			ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	ret = intel_wait_for_register(dev_priv,
+				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+				      500);
 	if (ret)
 		return ret;
 
 	I915_WRITE(ILK_GDSR,
 		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
-	ret = wait_for((I915_READ(ILK_GDSR) &
-			ILK_GRDOM_RESET_ENABLE) == 0, 500);
+	ret = intel_wait_for_register(dev_priv,
+				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+				      500);
 	if (ret)
 		return ret;
 
@@ -1551,20 +1555,16 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
 static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 				u32 hw_domain_mask)
 {
-	int ret;
-
 	/* GEN6_GDRST is not in the gt power well, no need to check
 	 * for fifo space for the write or forcewake the chip for
 	 * the read
 	 */
 	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
 
-#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
 	/* Spin waiting for the device to ack the reset requests */
-	ret = wait_for(ACKED, 500);
-#undef ACKED
-
-	return ret;
+	return intel_wait_for_register_fw(dev_priv,
+					  GEN6_GDRST, hw_domain_mask, 0,
+					  500);
 }
 
 /**
@@ -1609,13 +1609,74 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
 	return ret;
 }
 
-static int wait_for_register_fw(struct drm_i915_private *dev_priv,
-				i915_reg_t reg,
-				const u32 mask,
-				const u32 value,
-				const unsigned long timeout_ms)
+/**
+ * intel_wait_for_register_fw - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in millisecond
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ * (I915_READ_FW(@reg) & @mask) == @value
+ * Otherwise, the wait will timeout after @timeout_ms milliseconds.
+ *
+ * Note that this routine assumes the caller holds forcewake asserted, it is
+ * not suitable for very long waits. See intel_wait_for_register() if you
+ * wish to wait without holding forcewake for the duration (i.e. you expect
+ * the wait to be slow).
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ */
+int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+			       i915_reg_t reg,
+			       const u32 mask,
+			       const u32 value,
+			       const unsigned long timeout_ms)
+{
+#define done ((I915_READ_FW(reg) & mask) == value)
+	int ret = wait_for_us(done, 2);
+	if (ret)
+		ret = wait_for(done, timeout_ms);
+	return ret;
+#undef done
+}
+
+/**
+ * intel_wait_for_register - wait until register matches expected state
+ * @dev_priv: the i915 device
+ * @reg: the register to read
+ * @mask: mask to apply to register value
+ * @value: expected value
+ * @timeout_ms: timeout in millisecond
+ *
+ * This routine waits until the target register @reg contains the expected
+ * @value after applying the @mask, i.e. it waits until
+ * (I915_READ(@reg) & @mask) == @value
+ * Otherwise, the wait will timeout after @timeout_ms milliseconds.
+ *
+ * Returns 0 if the register matches the desired condition, or -ETIMEOUT.
+ */
+int intel_wait_for_register(struct drm_i915_private *dev_priv,
+			    i915_reg_t reg,
+			    const u32 mask,
+			    const u32 value,
+			    const unsigned long timeout_ms)
 {
-	return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
+
+	unsigned fw =
+		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+	int ret;
+
+	intel_uncore_forcewake_get(dev_priv, fw);
+	ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
+	intel_uncore_forcewake_put(dev_priv, fw);
+	if (ret)
+		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
+			       timeout_ms);
+
+	return ret;
 }
 
 static int gen8_request_engine_reset(struct intel_engine_cs *engine)
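[Editor's note] The two variants split along forcewake ownership:
intel_wait_for_register_fw() assumes the caller already holds forcewake (or
is polling a register outside the GT power well, as gen6_hw_domain_reset()
does above), while intel_wait_for_register() grabs forcewake only for the
fast 2 us spin and drops it before the slow sleeping wait. A usage sketch,
reusing a conversion from intel_sideband.c earlier in this diff:

	/* Sleeping wait, helper manages forcewake: up to 100 ms for
	 * SBI_BUSY to clear. */
	if (intel_wait_for_register(dev_priv,
				    SBI_CTL_STAT, SBI_BUSY, 0,
				    100))
		DRM_ERROR("timeout waiting for SBI to become ready\n");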
@@ -1626,11 +1687,11 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
 	I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
 		      _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
 
-	ret = wait_for_register_fw(dev_priv,
-				   RING_RESET_CTL(engine->mmio_base),
-				   RESET_CTL_READY_TO_RESET,
-				   RESET_CTL_READY_TO_RESET,
-				   700);
+	ret = intel_wait_for_register_fw(dev_priv,
+					 RING_RESET_CTL(engine->mmio_base),
+					 RESET_CTL_READY_TO_RESET,
+					 RESET_CTL_READY_TO_RESET,
+					 700);
 	if (ret)
 		DRM_ERROR("%s: reset request timeout\n", engine->name);
 
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 9094599a1150..33466bfc6440 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -309,6 +309,7 @@
 	INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
 	INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
 	INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
+	INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
 	INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
 	INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
 
@@ -322,15 +323,12 @@
 	INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
 
 #define INTEL_KBL_GT3_IDS(info) \
+	INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
-	INTEL_VGA_DEVICE(0x592B, info), /* Halo GT3 */ \
-	INTEL_VGA_DEVICE(0x592A, info) /* SRV GT3 */
+	INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
 
 #define INTEL_KBL_GT4_IDS(info) \
-	INTEL_VGA_DEVICE(0x5932, info), /* DT GT4 */ \
-	INTEL_VGA_DEVICE(0x593B, info), /* Halo GT4 */ \
-	INTEL_VGA_DEVICE(0x593A, info), /* SRV GT4 */ \
-	INTEL_VGA_DEVICE(0x593D, info) /* WKS GT4 */
+	INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
 
 #define INTEL_KBL_IDS(info) \
 	INTEL_KBL_GT1_IDS(info), \
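[Editor's note] These macros are consumed by the PCI match table in
i915_pci.c: each INTEL_VGA_DEVICE(id, info) expands to a pci_device_id
initializer whose driver_data points at the per-platform device info.
Roughly (a sketch; the info symbol name is illustrative):

	static const struct pci_device_id pciidlist[] = {
		/* ... earlier platforms ... */
		INTEL_KBL_IDS(&intel_kabylake_info),
		{0, 0, 0}
	};
	MODULE_DEVICE_TABLE(pci, pciidlist);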
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index c17d63d8b543..d7e81a3886fd 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -361,6 +361,8 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_GPU_RESET	 35
 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
 #define I915_PARAM_HAS_EXEC_SOFTPIN	 37
+#define I915_PARAM_HAS_POOLED_EU	 38
+#define I915_PARAM_MIN_EU_IN_POOL	 39
 
 typedef struct drm_i915_getparam {
 	__s32 param;
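[Editor's note] Userspace reads these through DRM_IOCTL_I915_GETPARAM. A
hedged sketch of querying the new pooled-EU parameters (fd is an open i915
device node; error handling trimmed):

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int i915_getparam(int fd, int param)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = param,
			.value = &value,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;	/* parameter unknown or unsupported */
		return value;
	}

	/* If i915_getparam(fd, I915_PARAM_HAS_POOLED_EU) > 0, then
	 * I915_PARAM_MIN_EU_IN_POOL reports the minimum pool size. */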
@@ -1171,6 +1173,7 @@ struct drm_i915_gem_context_param {
 #define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define I915_CONTEXT_PARAM_GTT_SIZE	0x3
+#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
 	__u64 value;
 };
 
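[Editor's note] The new context parameter is set through
DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM; a hedged sketch of opting a context
out of GPU error-state capture (ctx_id names an existing GEM context):

	static int context_disable_error_capture(int fd, __u32 ctx_id)
	{
		struct drm_i915_gem_context_param p = {
			.ctx_id = ctx_id,
			.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE,
			.value = 1,	/* nonzero: skip this context in error dumps */
		};

		return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
	}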