 drivers/acpi/video.c                    |   7
 drivers/cpufreq/cpufreq.c               |  20
 drivers/gpu/drm/drm_crtc_helper.c       |   5
 drivers/gpu/drm/i915/i915_debugfs.c     |  41
 drivers/gpu/drm/i915/i915_dma.c         |  18
 drivers/gpu/drm/i915/i915_drv.c         |  70
 drivers/gpu/drm/i915/i915_drv.h         |  46
 drivers/gpu/drm/i915/i915_gem.c         | 191
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  39
 drivers/gpu/drm/i915/i915_irq.c         |  13
 drivers/gpu/drm/i915/i915_reg.h         |   6
 drivers/gpu/drm/i915/i915_suspend.c     |   8
 drivers/gpu/drm/i915/intel_bios.c       | 142
 drivers/gpu/drm/i915/intel_display.c    | 699
 drivers/gpu/drm/i915/intel_dp.c         | 135
 drivers/gpu/drm/i915/intel_drv.h        |   9
 drivers/gpu/drm/i915/intel_hdmi.c       |   8
 drivers/gpu/drm/i915/intel_opregion.c   |  15
 drivers/gpu/drm/i915/intel_overlay.c    |   6
 drivers/gpu/drm/i915/intel_ringbuffer.c |   6
 drivers/gpu/drm/i915/intel_ringbuffer.h |   2
 drivers/gpu/drm/i915/intel_tv.c         |  46
 include/acpi/video.h                    |   2
 include/linux/cpufreq.h                 |   5
 24 files changed, 1132 insertions(+), 407 deletions(-)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index db39e9e607d8..ada4b4d9bdc8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -46,7 +46,6 @@
 
 #define PREFIX "ACPI: "
 
-#define ACPI_VIDEO_CLASS		"video"
 #define ACPI_VIDEO_BUS_NAME		"Video Bus"
 #define ACPI_VIDEO_DEVICE_NAME		"Video Device"
 #define ACPI_VIDEO_NOTIFY_SWITCH	0x80
@@ -1445,7 +1444,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 	case ACPI_VIDEO_NOTIFY_SWITCH:	/* User requested a switch,
 					 * most likely via hotkey. */
 		acpi_bus_generate_proc_event(device, event, 0);
-		keycode = KEY_SWITCHVIDEOMODE;
+		if (!acpi_notifier_call_chain(device, event, 0))
+			keycode = KEY_SWITCHVIDEOMODE;
 		break;
 
 	case ACPI_VIDEO_NOTIFY_PROBE:	/* User plugged in or removed a video
@@ -1475,7 +1475,8 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
 		break;
 	}
 
-	acpi_notifier_call_chain(device, event, 0);
+	if (event != ACPI_VIDEO_NOTIFY_SWITCH)
+		acpi_notifier_call_chain(device, event, 0);
 
 	if (keycode) {
 		input_report_key(input, keycode, 1);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9e3585..987a165ede26 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 }
 EXPORT_SYMBOL(cpufreq_quick_get);
 
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	unsigned int ret_freq = 0;
+
+	if (policy) {
+		ret_freq = policy->max;
+		cpufreq_cpu_put(policy);
+	}
+
+	return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
 
 static unsigned int __cpufreq_get(unsigned int cpu)
 {
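
A minimal sketch (not part of this patch) of how a driver could consume the
newly exported helper; show_max_cpu_freq() is an illustrative name, only
cpufreq_quick_get_max() comes from the patch:

	#include <linux/cpufreq.h>
	#include <linux/printk.h>

	static void show_max_cpu_freq(unsigned int cpu)
	{
		/* policy->max in kHz, or 0 if no policy is registered */
		unsigned int max_khz = cpufreq_quick_get_max(cpu);

		if (max_khz)
			pr_info("cpu%u: max frequency %u kHz\n", cpu, max_khz);
		else
			pr_info("cpu%u: cpufreq not active\n", cpu);
	}
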
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 92369655dca3..f88a9b2c977b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -560,6 +560,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			mode_changed = true;
 		} else if (set->fb == NULL) {
 			mode_changed = true;
+		} else if (set->fb->depth != set->crtc->fb->depth) {
+			mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->fb->bits_per_pixel) {
+			mode_changed = true;
 		} else
 			fb_changed = true;
 	}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a893f7400fa..e2662497d50f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 			   MEMSTAT_VID_SHIFT);
 		seq_printf(m, "Current P-state: %d\n",
 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
-	} else if (IS_GEN6(dev)) {
+	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	int gpu_freq, ia_freq;
+
+	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+		seq_printf(m, "unsupported on this chipset\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	     gpu_freq++) {
+		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+			   GEN6_PCODE_READ_MIN_FREQ_TABLE);
+		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+			      GEN6_PCODE_READY) == 0, 10)) {
+			DRM_ERROR("pcode read of freq table timed out\n");
+			continue;
+		}
+		ia_freq = I915_READ(GEN6_PCODE_DATA);
+		seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i915_gfxec(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1430,6 +1468,7 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_inttoext_table", i915_inttoext_table, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_emon_status", i915_emon_status, 0},
+	{"i915_ring_freq_table", i915_ring_freq_table, 0},
 	{"i915_gfxec", i915_gfxec, 0},
 	{"i915_fbc_status", i915_fbc_status, 0},
 	{"i915_sr_status", i915_sr_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e1787022d6c8..12712824a6d2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1073,6 +1073,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 	unsigned long cfb_base;
 	unsigned long ll_base = 0;
 
+	/* Just in case the BIOS is doing something questionable. */
+	intel_disable_fbc(dev);
+
 	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
 	if (compressed_fb)
 		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
@@ -1099,7 +1102,6 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
 	dev_priv->cfb_size = size;
 
-	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
 	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
@@ -1943,7 +1945,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!dev_priv->mm.gtt) {
 		DRM_ERROR("Failed to initialize GTT\n");
 		ret = -ENODEV;
-		goto out_iomapfree;
+		goto out_rmmap;
 	}
 
 	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -1987,7 +1989,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
-		goto out_iomapfree;
+		goto out_mtrrfree;
 	}
 
 	/* enable GEM by default */
@@ -2074,13 +2076,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	return 0;
 
 out_gem_unload:
+	if (dev_priv->mm.inactive_shrinker.shrink)
+		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+	if (dev_priv->mm.gtt_mtrr >= 0) {
+		mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+			 dev->agp->agp_info.aper_size * 1024 * 1024);
+		dev_priv->mm.gtt_mtrr = -1;
+	}
 	io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
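
The relabelling above restores the usual kernel error-unwind pattern: each
failure jumps to the label that tears down exactly what has been set up so
far, in reverse order of acquisition. A standalone sketch with invented
names (device_ctx and the helpers are illustrative, not from the driver):

	struct device_ctx;
	int map_registers(struct device_ctx *ctx);
	void unmap_registers(struct device_ctx *ctx);
	int reserve_mtrr(struct device_ctx *ctx);
	void release_mtrr(struct device_ctx *ctx);
	int start_workqueue(struct device_ctx *ctx);

	static int example_load(struct device_ctx *ctx)
	{
		int ret;

		ret = map_registers(ctx);
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = reserve_mtrr(ctx);
		if (ret)
			goto out_unmap;		/* only the mapping exists */

		ret = start_workqueue(ctx);
		if (ret)
			goto out_mtrr;		/* undo MTRR, then mapping */

		return 0;

	out_mtrr:
		release_mtrr(ctx);
	out_unmap:
		unmap_registers(ctx);
		return ret;
	}
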
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 013d304455b9..ce045a8cf82c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -37,38 +37,70 @@
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
 
-static int i915_modeset = -1;
+static int i915_modeset __read_mostly = -1;
 module_param_named(modeset, i915_modeset, int, 0400);
+MODULE_PARM_DESC(modeset,
+		"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
+		"1=on, -1=force vga console preference [default])");
 
-unsigned int i915_fbpercrtc = 0;
+unsigned int i915_fbpercrtc __always_unused = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
-int i915_panel_ignore_lid = 0;
+int i915_panel_ignore_lid __read_mostly = 0;
 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+MODULE_PARM_DESC(panel_ignore_lid,
+		"Override lid status (0=autodetect [default], 1=lid open, "
+		"-1=lid closed)");
 
-unsigned int i915_powersave = 1;
+unsigned int i915_powersave __read_mostly = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
+MODULE_PARM_DESC(powersave,
+		"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores = 0;
+unsigned int i915_semaphores __read_mostly = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
+MODULE_PARM_DESC(semaphores,
+		"Use semaphores for inter-ring sync (default: false)");
 
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 __read_mostly = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+MODULE_PARM_DESC(i915_enable_rc6,
+		"Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc = 0;
+unsigned int i915_enable_fbc __read_mostly = 1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
+MODULE_PARM_DESC(i915_enable_fbc,
+		"Enable frame buffer compression for power savings "
+		"(default: false)");
 
-unsigned int i915_lvds_downclock = 0;
+unsigned int i915_lvds_downclock __read_mostly = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+MODULE_PARM_DESC(lvds_downclock,
+		"Use panel (LVDS/eDP) downclocking for power savings "
+		"(default: false)");
 
-unsigned int i915_panel_use_ssc = 1;
+unsigned int i915_panel_use_ssc __read_mostly = 1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
+MODULE_PARM_DESC(lvds_use_ssc,
+		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
+		"(default: true)");
 
-int i915_vbt_sdvo_panel_type = -1;
+int i915_vbt_sdvo_panel_type __read_mostly = -1;
 module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+MODULE_PARM_DESC(vbt_sdvo_panel_type,
+		"Override selection of SDVO panel mode in the VBT "
+		"(default: auto)");
 
-static bool i915_try_reset = true;
+static bool i915_try_reset __read_mostly = true;
 module_param_named(reset, i915_try_reset, bool, 0600);
+MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
+
+bool i915_enable_hangcheck __read_mostly = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+MODULE_PARM_DESC(enable_hangcheck,
+		"Periodically check GPU activity for detecting hangs. "
+		"WARNING: Disabling this can cause system wide hangs. "
+		"(default: true)");
 
 static struct drm_driver driver;
 extern int intel_agp_enabled;
@@ -345,12 +377,17 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
-	int loop = 500;
-	u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-	while (fifo < 20 && loop--) {
-		udelay(10);
-		fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+		int loop = 500;
+		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+			udelay(10);
+			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+		}
+		WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
+		dev_priv->gt_fifo_count = fifo;
 	}
+	dev_priv->gt_fifo_count--;
 }
 
 static int i915_drm_freeze(struct drm_device *dev)
@@ -577,6 +614,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
 	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
 	} else switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		ret = gen6_do_reset(dev, flags);
 		/* If reset with a user forcewake, try to restore */
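
All of the MODULE_PARM_DESC() additions above follow the same pairing, shown
here as a minimal sketch with invented names (the real i915 options are in the
hunks above); a 0600 mode exposes the knob read/write under
/sys/module/<name>/parameters/:

	#include <linux/module.h>

	static bool example_enable __read_mostly = true;
	module_param_named(enable, example_enable, bool, 0600);
	MODULE_PARM_DESC(enable, "Enable the example feature (default: true)");
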
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f245c588ae95..78cdd158287a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -214,6 +214,8 @@ struct drm_i915_display_funcs {
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj);
+	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			    int x, int y);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -264,6 +266,7 @@ enum intel_pch {
 #define QUIRK_PIPEA_FORCE (1<<0)
 
 struct intel_fbdev;
+struct intel_fbc_work;
 
 typedef struct drm_i915_private {
 	struct drm_device *dev;
@@ -274,6 +277,7 @@ typedef struct drm_i915_private {
 	int relative_constants_mode;
 
 	void __iomem *regs;
+	u32 gt_fifo_count;
 
 	struct intel_gmbus {
 		struct i2c_adapter adapter;
@@ -328,11 +332,10 @@ typedef struct drm_i915_private {
 	uint32_t last_instdone1;
 
 	unsigned long cfb_size;
-	unsigned long cfb_pitch;
-	unsigned long cfb_offset;
-	int cfb_fence;
-	int cfb_plane;
+	unsigned int cfb_fb;
+	enum plane cfb_plane;
 	int cfb_y;
+	struct intel_fbc_work *fbc_work;
 
 	struct intel_opregion opregion;
 
@@ -985,15 +988,16 @@ struct drm_i915_file_private {
 
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
-extern unsigned int i915_fbpercrtc;
-extern int i915_panel_ignore_lid;
-extern unsigned int i915_powersave;
-extern unsigned int i915_semaphores;
-extern unsigned int i915_lvds_downclock;
-extern unsigned int i915_panel_use_ssc;
-extern int i915_vbt_sdvo_panel_type;
-extern unsigned int i915_enable_rc6;
-extern unsigned int i915_enable_fbc;
+extern unsigned int i915_fbpercrtc __always_unused;
+extern int i915_panel_ignore_lid __read_mostly;
+extern unsigned int i915_powersave __read_mostly;
+extern unsigned int i915_semaphores __read_mostly;
+extern unsigned int i915_lvds_downclock __read_mostly;
+extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_vbt_sdvo_panel_type __read_mostly;
+extern unsigned int i915_enable_rc6 __read_mostly;
+extern unsigned int i915_enable_fbc __read_mostly;
+extern bool i915_enable_hangcheck __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
@@ -1163,7 +1167,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 					    uint32_t read_domains,
 					    uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 void i915_gem_do_init(struct drm_device *dev,
@@ -1182,7 +1186,8 @@ int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
 				     struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
@@ -1196,9 +1201,14 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
@@ -1280,12 +1290,8 @@ extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void i8xx_disable_fbc(struct drm_device *dev);
-extern void g4x_disable_fbc(struct drm_device *dev);
-extern void ironlake_disable_fbc(struct drm_device *dev);
-extern void intel_disable_fbc(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c0d1247f453..e9d1d5c3a696 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1771,8 +1771,11 @@ i915_add_request(struct intel_ring_buffer *ring,
 	ring->outstanding_lazy_request = false;
 
 	if (!dev_priv->mm.suspended) {
-		mod_timer(&dev_priv->hangcheck_timer,
-			  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+		if (i915_enable_hangcheck) {
+			mod_timer(&dev_priv->hangcheck_timer,
+				  jiffies +
+				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+		}
 		if (was_empty)
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
@@ -2143,6 +2146,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+	u32 old_write_domain, old_read_domains;
+
+	/* Act a barrier for all accesses through the GTT */
+	mb();
+
+	/* Force a pagefault for domain tracking on next user access */
+	i915_gem_release_mmap(obj);
+
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+		return;
+
+	old_read_domains = obj->base.read_domains;
+	old_write_domain = obj->base.write_domain;
+
+	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+	trace_i915_gem_object_change_domain(obj,
+					    old_read_domains,
+					    old_write_domain);
+}
+
 /**
  * Unbinds an object from the GTT aperture.
  */
@@ -2159,23 +2186,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 		return -EINVAL;
 	}
 
-	/* blow away mappings if mapped through GTT */
-	i915_gem_release_mmap(obj);
-
-	/* Move the object to the CPU domain to ensure that
-	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it. This will
-	 * also ensure that all pending GPU writes are finished
-	 * before we unbind.
-	 */
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	ret = i915_gem_object_finish_gpu(obj);
 	if (ret == -ERESTARTSYS)
 		return ret;
 	/* Continue on if we fail due to EIO, the GPU is hung so we
 	 * should be safe and we need to cleanup or else we might
 	 * cause memory corruption through use-after-free.
 	 */
+
+	i915_gem_object_finish_gtt(obj);
+
+	/* Move the object to the CPU domain to ensure that
+	 * any possible CPU writes while it's not in the GTT
+	 * are flushed when we go to remap it.
+	 */
+	if (ret == 0)
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+	if (ret == -ERESTARTSYS)
+		return ret;
 	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
 		i915_gem_clflush_object(obj);
 		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
@@ -2997,51 +3029,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	return 0;
 }
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level)
+{
+	int ret;
+
+	if (obj->cache_level == cache_level)
+		return 0;
+
+	if (obj->pin_count) {
+		DRM_DEBUG("can not change the cache level of pinned objects\n");
+		return -EBUSY;
+	}
+
+	if (obj->gtt_space) {
+		ret = i915_gem_object_finish_gpu(obj);
+		if (ret)
+			return ret;
+
+		i915_gem_object_finish_gtt(obj);
+
+		/* Before SandyBridge, you could not use tiling or fence
+		 * registers with snooped memory, so relinquish any fences
+		 * currently pointing to our region in the aperture.
+		 */
+		if (INTEL_INFO(obj->base.dev)->gen < 6) {
+			ret = i915_gem_object_put_fence(obj);
+			if (ret)
+				return ret;
+		}
+
+		i915_gem_gtt_rebind_object(obj, cache_level);
+	}
+
+	if (cache_level == I915_CACHE_NONE) {
+		u32 old_read_domains, old_write_domain;
+
+		/* If we're coming from LLC cached, then we haven't
+		 * actually been tracking whether the data is in the
+		 * CPU cache or not, since we only allow one bit set
+		 * in obj->write_domain and have been skipping the clflushes.
+		 * Just set it to the CPU cache for now.
+		 */
+		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+		WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+		old_read_domains = obj->base.read_domains;
+		old_write_domain = obj->base.write_domain;
+
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+		trace_i915_gem_object_change_domain(obj,
+						    old_read_domains,
+						    old_write_domain);
+	}
+
+	obj->cache_level = cache_level;
+	return 0;
+}
+
 /*
- * Prepare buffer for display plane. Use uninterruptible for possible flush
- * wait, as in modesetting process we're not supposed to be interrupted.
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ *
+ * For the display plane, we want to be in the GTT but out of any write
+ * domains. So in many ways this looks like set_to_gtt_domain() apart from the
+ * ability to pipeline the waits, pinning and any additional subtleties
+ * that may differentiate the display plane from ordinary buffers.
 */
 int
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+				     u32 alignment,
 				     struct intel_ring_buffer *pipelined)
 {
-	uint32_t old_read_domains;
+	u32 old_read_domains, old_write_domain;
 	int ret;
 
-	/* Not valid to be called on unbound objects. */
-	if (obj->gtt_space == NULL)
-		return -EINVAL;
-
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
-
-	/* Currently, we are always called from an non-interruptible context. */
 	if (pipelined != obj->ring) {
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret)
 			return ret;
 	}
 
+	/* The display engine is not coherent with the LLC cache on gen6. As
+	 * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is lowest common denominator for all
+	 * chipsets.
+	 *
+	 * However for gen6+, we could do better by using the GFDT bit instead
+	 * of uncaching, which would allow us to flush all the LLC-cached data
+	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+	 */
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+	if (ret)
+		return ret;
+
+	/* As the user may map the buffer once pinned in the display plane
+	 * (e.g. libkms for the bootup splash), we have to ensure that we
+	 * always use map_and_fenceable for all scanout buffers.
+	 */
+	ret = i915_gem_object_pin(obj, alignment, true);
+	if (ret)
+		return ret;
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
+	old_write_domain = obj->base.write_domain;
 	old_read_domains = obj->base.read_domains;
+
+	/* It should now be out of any other write domains, and we can update
+	 * the domain values for our changes.
+	 */
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->base.write_domain);
+					    old_write_domain);
 
 	return 0;
 }
 
 int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
-	if (!obj->active)
+	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3050,6 +3170,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
 		return ret;
 	}
 
+	/* Ensure that we invalidate the GPU's caches and TLBs. */
+	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
 	return i915_gem_object_wait_rendering(obj);
 }
 
@@ -3576,7 +3699,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	obj->cache_level = I915_CACHE_NONE;
+	if (IS_GEN6(dev)) {
+		/* On Gen6, we can have the GPU use the LLC (the CPU
+		 * cache) for about a 10% performance improvement
+		 * compared to uncached. Graphics requests other than
+		 * display scanout are coherent with the CPU in
+		 * accessing this cache. This means in this mode we
+		 * don't need to clflush on the CPU side, and on the
+		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
+		 *
+		 * However, we maintain the display planes as UC, and so
+		 * need to rebind when first used as such.
+		 */
+		obj->cache_level = I915_CACHE_LLC;
+	} else
+		obj->cache_level = I915_CACHE_NONE;
+
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->mm_list);
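
A hedged sketch of a call site for the renamed helper: callers now pass the
scanout alignment and get the transition to uncached PTEs plus the pin in one
call. The wrapper name and alignment value are illustrative, not from this
patch:

	/* assumes the declarations from i915_drv.h above */
	static int example_pin_scanout(struct drm_i915_gem_object *obj,
				       struct intel_ring_buffer *pipelined)
	{
		u32 alignment = 64 * 1024;	/* e.g. an untiled scanout surface */

		return i915_gem_object_pin_to_display_plane(obj, alignment,
							    pipelined);
	}
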
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e46b645773cf..7a709cd8d543 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
-		unsigned int agp_type =
-			cache_level_to_agp_type(dev, obj->cache_level);
-
 		i915_gem_clflush_object(obj);
-
-		if (dev_priv->mm.gtt->needs_dmar) {
-			BUG_ON(!obj->sg_list);
-
-			intel_gtt_insert_sg_entries(obj->sg_list,
-						    obj->num_sg,
-						    obj->gtt_space->start >> PAGE_SHIFT,
-						    agp_type);
-		} else
-			intel_gtt_insert_pages(obj->gtt_space->start
-					       >> PAGE_SHIFT,
-					       obj->base.size >> PAGE_SHIFT,
-					       obj->pages,
-					       agp_type);
+		i915_gem_gtt_rebind_object(obj, obj->cache_level);
 	}
 
 	intel_gtt_chipset_flush();
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+
+	if (dev_priv->mm.gtt->needs_dmar) {
+		BUG_ON(!obj->sg_list);
+
+		intel_gtt_insert_sg_entries(obj->sg_list,
+					    obj->num_sg,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    agp_type);
+	} else
+		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+				       obj->base.size >> PAGE_SHIFT,
+				       obj->pages,
+				       agp_type);
+}
+
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3b03f85ea627..23d1ae67d279 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -361,10 +361,12 @@ static void notify_ring(struct drm_device *dev,
 
 	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
-
-	dev_priv->hangcheck_count = 0;
-	mod_timer(&dev_priv->hangcheck_timer,
-		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+	if (i915_enable_hangcheck) {
+		dev_priv->hangcheck_count = 0;
+		mod_timer(&dev_priv->hangcheck_timer,
+			  jiffies +
+			  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+	}
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
@@ -1664,6 +1666,9 @@ void i915_hangcheck_elapsed(unsigned long data)
 	uint32_t acthd, instdone, instdone1;
 	bool err = false;
 
+	if (!i915_enable_hangcheck)
+		return;
+
 	/* If all work is done then ACTHD clearly hasn't advanced. */
 	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
 	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5d5def756c9e..02db299f621a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -579,6 +579,7 @@
 #define   DPFC_CTL_PLANEA	(0<<30)
 #define   DPFC_CTL_PLANEB	(1<<30)
 #define   DPFC_CTL_FENCE_EN	(1<<29)
+#define   DPFC_CTL_PERSISTENT_MODE	(1<<25)
 #define   DPFC_SR_EN		(1<<10)
 #define   DPFC_CTL_LIMIT_1X	(0<<6)
 #define   DPFC_CTL_LIMIT_2X	(1<<6)
@@ -3360,6 +3361,7 @@
 #define FORCEWAKE_ACK				0x130090
 
 #define GT_FIFO_FREE_ENTRIES			0x120008
+#define GT_FIFO_NUM_RESERVED_ENTRIES		20
 
 #define GEN6_RPNSWREQ				0xA008
 #define GEN6_TURBO_DISABLE			(1<<31)
@@ -3434,7 +3436,9 @@
 #define GEN6_PCODE_MAILBOX			0x138124
 #define   GEN6_PCODE_READY			(1<<31)
 #define   GEN6_READ_OC_PARAMS			0xc
-#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x9
+#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x8
+#define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
 #define GEN6_PCODE_DATA				0x138128
+#define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5257cfc34c35..285758603ac8 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -760,15 +760,13 @@ static void i915_restore_display(struct drm_device *dev)
 	/* FIXME: restore TV & SDVO state */
 
 	/* only restore FBC info on the platform that supports FBC*/
+	intel_disable_fbc(dev);
 	if (I915_HAS_FBC(dev)) {
 		if (HAS_PCH_SPLIT(dev)) {
-			ironlake_disable_fbc(dev);
 			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
 		} else if (IS_GM45(dev)) {
-			g4x_disable_fbc(dev);
 			I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
 		} else {
-			i8xx_disable_fbc(dev);
 			I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
 			I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
 			I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
@@ -878,8 +876,10 @@ int i915_restore_state(struct drm_device *dev)
 		intel_init_emon(dev);
 	}
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev)) {
 		gen6_enable_rps(dev_priv);
+		gen6_update_ring_freq(dev_priv);
+	}
 
 	mutex_lock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 927442a11925..61abef8a8119 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -74,7 +74,7 @@ get_blocksize(void *p)
 
 static void
 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
-			struct lvds_dvo_timing *dvo_timing)
+			const struct lvds_dvo_timing *dvo_timing)
 {
 	panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
 		dvo_timing->hactive_lo;
@@ -115,20 +115,75 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
 	drm_mode_set_name(panel_fixed_mode);
 }
 
+static bool
+lvds_dvo_timing_equal_size(const struct lvds_dvo_timing *a,
+			   const struct lvds_dvo_timing *b)
+{
+	if (a->hactive_hi != b->hactive_hi ||
+	    a->hactive_lo != b->hactive_lo)
+		return false;
+
+	if (a->hsync_off_hi != b->hsync_off_hi ||
+	    a->hsync_off_lo != b->hsync_off_lo)
+		return false;
+
+	if (a->hsync_pulse_width != b->hsync_pulse_width)
+		return false;
+
+	if (a->hblank_hi != b->hblank_hi ||
+	    a->hblank_lo != b->hblank_lo)
+		return false;
+
+	if (a->vactive_hi != b->vactive_hi ||
+	    a->vactive_lo != b->vactive_lo)
+		return false;
+
+	if (a->vsync_off != b->vsync_off)
+		return false;
+
+	if (a->vsync_pulse_width != b->vsync_pulse_width)
+		return false;
+
+	if (a->vblank_hi != b->vblank_hi ||
+	    a->vblank_lo != b->vblank_lo)
+		return false;
+
+	return true;
+}
+
+static const struct lvds_dvo_timing *
+get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
+		    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs,
+		    int index)
+{
+	/*
+	 * the size of fp_timing varies on the different platform.
+	 * So calculate the DVO timing relative offset in LVDS data
+	 * entry to get the DVO timing entry
+	 */
+
+	int lfp_data_size =
+		lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
+		lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
+	int dvo_timing_offset =
+		lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
+		lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
+	char *entry = (char *)lvds_lfp_data->data + lfp_data_size * index;
+
+	return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
+}
+
 /* Try to find integrated panel data */
 static void
 parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 		     struct bdb_header *bdb)
 {
-	struct bdb_lvds_options *lvds_options;
-	struct bdb_lvds_lfp_data *lvds_lfp_data;
-	struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
-	struct bdb_lvds_lfp_data_entry *entry;
-	struct lvds_dvo_timing *dvo_timing;
+	const struct bdb_lvds_options *lvds_options;
+	const struct bdb_lvds_lfp_data *lvds_lfp_data;
+	const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
+	const struct lvds_dvo_timing *panel_dvo_timing;
 	struct drm_display_mode *panel_fixed_mode;
-	int lfp_data_size, dvo_timing_offset;
-	int i, temp_downclock;
-	struct drm_display_mode *temp_mode;
+	int i, downclock;
 
 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
@@ -150,75 +205,44 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
 	dev_priv->lvds_vbt = 1;
 
-	lfp_data_size = lvds_lfp_data_ptrs->ptr[1].dvo_timing_offset -
-		lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset;
-	entry = (struct bdb_lvds_lfp_data_entry *)
-		((uint8_t *)lvds_lfp_data->data + (lfp_data_size *
-						   lvds_options->panel_type));
-	dvo_timing_offset = lvds_lfp_data_ptrs->ptr[0].dvo_timing_offset -
-		lvds_lfp_data_ptrs->ptr[0].fp_timing_offset;
-
-	/*
-	 * the size of fp_timing varies on the different platform.
-	 * So calculate the DVO timing relative offset in LVDS data
-	 * entry to get the DVO timing entry
-	 */
-	dvo_timing = (struct lvds_dvo_timing *)
-		((unsigned char *)entry + dvo_timing_offset);
+	panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+					       lvds_lfp_data_ptrs,
+					       lvds_options->panel_type);
 
 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
 	if (!panel_fixed_mode)
 		return;
 
-	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
+	fill_detail_timing_data(panel_fixed_mode, panel_dvo_timing);
 
 	dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
 
 	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
-	temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
-	temp_downclock = panel_fixed_mode->clock;
 	/*
-	 * enumerate the LVDS panel timing info entry in VBT to check whether
-	 * the LVDS downclock is found.
+	 * Iterate over the LVDS panel timing info to find the lowest clock
+	 * for the native resolution.
 	 */
+	downclock = panel_dvo_timing->clock;
 	for (i = 0; i < 16; i++) {
-		entry = (struct bdb_lvds_lfp_data_entry *)
-			((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
-		dvo_timing = (struct lvds_dvo_timing *)
-			((unsigned char *)entry + dvo_timing_offset);
-
-		fill_detail_timing_data(temp_mode, dvo_timing);
-
-		if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
-		    temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
-		    temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
-		    temp_mode->htotal == panel_fixed_mode->htotal &&
-		    temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
-		    temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
-		    temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
-		    temp_mode->vtotal == panel_fixed_mode->vtotal &&
-		    temp_mode->clock < temp_downclock) {
-			/*
-			 * downclock is already found. But we expect
-			 * to find the lower downclock.
-			 */
-			temp_downclock = temp_mode->clock;
-		}
-		/* clear it to zero */
-		memset(temp_mode, 0, sizeof(*temp_mode));
+		const struct lvds_dvo_timing *dvo_timing;
+
+		dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
+						 lvds_lfp_data_ptrs,
+						 i);
+		if (lvds_dvo_timing_equal_size(dvo_timing, panel_dvo_timing) &&
+		    dvo_timing->clock < downclock)
+			downclock = dvo_timing->clock;
 	}
-	kfree(temp_mode);
-	if (temp_downclock < panel_fixed_mode->clock &&
-	    i915_lvds_downclock) {
+
+	if (downclock < panel_dvo_timing->clock && i915_lvds_downclock) {
 		dev_priv->lvds_downclock_avail = 1;
-		dev_priv->lvds_downclock = temp_downclock;
+		dev_priv->lvds_downclock = downclock * 10;
 		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
 			      "Normal Clock %dKHz, downclock %dKHz\n",
-			      temp_downclock, panel_fixed_mode->clock);
+			      panel_fixed_mode->clock, 10*downclock);
 	}
-	return;
 }
 
 /* Try to find sdvo panel data */
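
The offset arithmetic in get_lvds_dvo_timing() generalizes to any table of
variable-sized records indexed via a side table of per-record field offsets.
A standalone illustration with invented names (only the arithmetic mirrors
the patch):

	#include <stddef.h>

	/* record_size = ptr[1].dvo_timing_offset - ptr[0].dvo_timing_offset,
	 * field_offset = dvo_timing_offset - fp_timing_offset of record 0. */
	static const void *nth_field(const char *data, size_t record_size,
				     size_t field_offset, int index)
	{
		return data + record_size * index + field_offset;
	}
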
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 21b6f93fe919..b5b15bda71d9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/cpufreq.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/input.h> 29#include <linux/input.h>
29#include <linux/i2c.h> 30#include <linux/i2c.h>
@@ -1157,12 +1158,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1157 1158
1158 reg = TRANSCONF(pipe); 1159 reg = TRANSCONF(pipe);
1159 val = I915_READ(reg); 1160 val = I915_READ(reg);
1160 /* 1161
1161 * make the BPC in transcoder be consistent with 1162 if (HAS_PCH_IBX(dev_priv->dev)) {
1162 * that in pipeconf reg. 1163 /*
1163 */ 1164 * make the BPC in transcoder be consistent with
1164 val &= ~PIPE_BPC_MASK; 1165 * that in pipeconf reg.
1165 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; 1166 */
1167 val &= ~PIPE_BPC_MASK;
1168 val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
1169 }
1166 I915_WRITE(reg, val | TRANS_ENABLE); 1170 I915_WRITE(reg, val | TRANS_ENABLE);
1167 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1171 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1168 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1172 DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -1380,6 +1384,28 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1380 disable_pch_hdmi(dev_priv, pipe, HDMID); 1384 disable_pch_hdmi(dev_priv, pipe, HDMID);
1381} 1385}
1382 1386
1387static void i8xx_disable_fbc(struct drm_device *dev)
1388{
1389 struct drm_i915_private *dev_priv = dev->dev_private;
1390 u32 fbc_ctl;
1391
1392 /* Disable compression */
1393 fbc_ctl = I915_READ(FBC_CONTROL);
1394 if ((fbc_ctl & FBC_CTL_EN) == 0)
1395 return;
1396
1397 fbc_ctl &= ~FBC_CTL_EN;
1398 I915_WRITE(FBC_CONTROL, fbc_ctl);
1399
1400 /* Wait for compressing bit to clear */
1401 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1402 DRM_DEBUG_KMS("FBC idle timed out\n");
1403 return;
1404 }
1405
1406 DRM_DEBUG_KMS("disabled FBC\n");
1407}
1408
1383static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1409static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1384{ 1410{
1385 struct drm_device *dev = crtc->dev; 1411 struct drm_device *dev = crtc->dev;
@@ -1388,36 +1414,25 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1388 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1414 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1389 struct drm_i915_gem_object *obj = intel_fb->obj; 1415 struct drm_i915_gem_object *obj = intel_fb->obj;
1390 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1416 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1417 int cfb_pitch;
1391 int plane, i; 1418 int plane, i;
1392 u32 fbc_ctl, fbc_ctl2; 1419 u32 fbc_ctl, fbc_ctl2;
1393 1420
1394 if (fb->pitch == dev_priv->cfb_pitch && 1421 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1395 obj->fence_reg == dev_priv->cfb_fence && 1422 if (fb->pitch < cfb_pitch)
1396 intel_crtc->plane == dev_priv->cfb_plane && 1423 cfb_pitch = fb->pitch;
1397 I915_READ(FBC_CONTROL) & FBC_CTL_EN)
1398 return;
1399
1400 i8xx_disable_fbc(dev);
1401
1402 dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1403
1404 if (fb->pitch < dev_priv->cfb_pitch)
1405 dev_priv->cfb_pitch = fb->pitch;
1406 1424
1407 /* FBC_CTL wants 64B units */ 1425 /* FBC_CTL wants 64B units */
1408 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1426 cfb_pitch = (cfb_pitch / 64) - 1;
1409 dev_priv->cfb_fence = obj->fence_reg; 1427 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1410 dev_priv->cfb_plane = intel_crtc->plane;
1411 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1412 1428
1413 /* Clear old tags */ 1429 /* Clear old tags */
1414 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 1430 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1415 I915_WRITE(FBC_TAG + (i * 4), 0); 1431 I915_WRITE(FBC_TAG + (i * 4), 0);
1416 1432
1417 /* Set it up... */ 1433 /* Set it up... */
1418 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; 1434 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1419 if (obj->tiling_mode != I915_TILING_NONE) 1435 fbc_ctl2 |= plane;
1420 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
1421 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 1436 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1422 I915_WRITE(FBC_FENCE_OFF, crtc->y); 1437 I915_WRITE(FBC_FENCE_OFF, crtc->y);
1423 1438
@@ -1425,36 +1440,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1425 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 1440 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1426 if (IS_I945GM(dev)) 1441 if (IS_I945GM(dev))
1427 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 1442 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1428 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1443 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1429 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1444 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1430 if (obj->tiling_mode != I915_TILING_NONE) 1445 fbc_ctl |= obj->fence_reg;
1431 fbc_ctl |= dev_priv->cfb_fence;
1432 I915_WRITE(FBC_CONTROL, fbc_ctl);
1433
1434 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
1435 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
1436}
1437
1438void i8xx_disable_fbc(struct drm_device *dev)
1439{
1440 struct drm_i915_private *dev_priv = dev->dev_private;
1441 u32 fbc_ctl;
1442
1443 /* Disable compression */
1444 fbc_ctl = I915_READ(FBC_CONTROL);
1445 if ((fbc_ctl & FBC_CTL_EN) == 0)
1446 return;
1447
1448 fbc_ctl &= ~FBC_CTL_EN;
1449 I915_WRITE(FBC_CONTROL, fbc_ctl); 1446 I915_WRITE(FBC_CONTROL, fbc_ctl);
1450 1447
1451 /* Wait for compressing bit to clear */ 1448 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1452 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { 1449 cfb_pitch, crtc->y, intel_crtc->plane);
1453 DRM_DEBUG_KMS("FBC idle timed out\n");
1454 return;
1455 }
1456
1457 DRM_DEBUG_KMS("disabled FBC\n");
1458} 1450}
1459 1451
1460static bool i8xx_fbc_enabled(struct drm_device *dev) 1452static bool i8xx_fbc_enabled(struct drm_device *dev)
@@ -1476,30 +1468,9 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1476 unsigned long stall_watermark = 200; 1468 unsigned long stall_watermark = 200;
1477 u32 dpfc_ctl; 1469 u32 dpfc_ctl;
1478 1470
1479 dpfc_ctl = I915_READ(DPFC_CONTROL);
1480 if (dpfc_ctl & DPFC_CTL_EN) {
1481 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1482 dev_priv->cfb_fence == obj->fence_reg &&
1483 dev_priv->cfb_plane == intel_crtc->plane &&
1484 dev_priv->cfb_y == crtc->y)
1485 return;
1486
1487 I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1488 intel_wait_for_vblank(dev, intel_crtc->pipe);
1489 }
1490
1491 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1492 dev_priv->cfb_fence = obj->fence_reg;
1493 dev_priv->cfb_plane = intel_crtc->plane;
1494 dev_priv->cfb_y = crtc->y;
1495
1496 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1471 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1497 if (obj->tiling_mode != I915_TILING_NONE) { 1472 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1498 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; 1473 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1499 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1500 } else {
1501 I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1502 }
1503 1474
1504 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1475 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1505 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1476 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1512,7 +1483,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1512 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1483 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1513} 1484}
1514 1485
1515void g4x_disable_fbc(struct drm_device *dev) 1486static void g4x_disable_fbc(struct drm_device *dev)
1516{ 1487{
1517 struct drm_i915_private *dev_priv = dev->dev_private; 1488 struct drm_i915_private *dev_priv = dev->dev_private;
1518 u32 dpfc_ctl; 1489 u32 dpfc_ctl;
@@ -1567,32 +1538,12 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1567 u32 dpfc_ctl; 1538 u32 dpfc_ctl;
1568 1539
1569 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1540 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1570 if (dpfc_ctl & DPFC_CTL_EN) {
1571 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1572 dev_priv->cfb_fence == obj->fence_reg &&
1573 dev_priv->cfb_plane == intel_crtc->plane &&
1574 dev_priv->cfb_offset == obj->gtt_offset &&
1575 dev_priv->cfb_y == crtc->y)
1576 return;
1577
1578 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1579 intel_wait_for_vblank(dev, intel_crtc->pipe);
1580 }
1581
1582 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1583 dev_priv->cfb_fence = obj->fence_reg;
1584 dev_priv->cfb_plane = intel_crtc->plane;
1585 dev_priv->cfb_offset = obj->gtt_offset;
1586 dev_priv->cfb_y = crtc->y;
1587
1588 dpfc_ctl &= DPFC_RESERVED; 1541 dpfc_ctl &= DPFC_RESERVED;
1589 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 1542 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1590 if (obj->tiling_mode != I915_TILING_NONE) { 1543 /* Set persistent mode for front-buffer rendering, a la X. */
1591 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); 1544 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1592 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 1545 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1593 } else { 1546 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1594 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1595 }
1596 1547
1597 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 1548 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1598 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1549 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
@@ -1604,7 +1555,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1604 1555
1605 if (IS_GEN6(dev)) { 1556 if (IS_GEN6(dev)) {
1606 I915_WRITE(SNB_DPFC_CTL_SA, 1557 I915_WRITE(SNB_DPFC_CTL_SA,
1607 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); 1558 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1608 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 1559 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1609 sandybridge_blit_fbc_update(dev); 1560 sandybridge_blit_fbc_update(dev);
1610 } 1561 }
@@ -1612,7 +1563,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1612 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1563 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1613} 1564}
1614 1565
1615void ironlake_disable_fbc(struct drm_device *dev) 1566static void ironlake_disable_fbc(struct drm_device *dev)
1616{ 1567{
1617 struct drm_i915_private *dev_priv = dev->dev_private; 1568 struct drm_i915_private *dev_priv = dev->dev_private;
1618 u32 dpfc_ctl; 1569 u32 dpfc_ctl;
@@ -1644,24 +1595,109 @@ bool intel_fbc_enabled(struct drm_device *dev)
1644 return dev_priv->display.fbc_enabled(dev); 1595 return dev_priv->display.fbc_enabled(dev);
1645} 1596}
1646 1597
1647void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1598static void intel_fbc_work_fn(struct work_struct *__work)
1648{ 1599{
1649 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1600 struct intel_fbc_work *work =
1601 container_of(to_delayed_work(__work),
1602 struct intel_fbc_work, work);
1603 struct drm_device *dev = work->crtc->dev;
1604 struct drm_i915_private *dev_priv = dev->dev_private;
1605
1606 mutex_lock(&dev->struct_mutex);
1607 if (work == dev_priv->fbc_work) {
1608 /* Double check that we haven't switched fb without cancelling
1609 * the prior work.
1610 */
1611 if (work->crtc->fb == work->fb) {
1612 dev_priv->display.enable_fbc(work->crtc,
1613 work->interval);
1614
1615 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1616 dev_priv->cfb_fb = work->crtc->fb->base.id;
1617 dev_priv->cfb_y = work->crtc->y;
1618 }
1619
1620 dev_priv->fbc_work = NULL;
1621 }
1622 mutex_unlock(&dev->struct_mutex);
1623
1624 kfree(work);
1625}
1626
1627static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1628{
1629 if (dev_priv->fbc_work == NULL)
1630 return;
1631
1632 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1633
1634 /* Synchronisation is provided by struct_mutex and checking of
1635 * dev_priv->fbc_work, so we can perform the cancellation
1636 * entirely asynchronously.
1637 */
1638 if (cancel_delayed_work(&dev_priv->fbc_work->work))
1639 /* work was cancelled before being run, clean up */
1640 kfree(dev_priv->fbc_work);
1641
1642 /* Mark the work as no longer wanted so that if it does
1643 * wake-up (because the work was already running and waiting
1644 * for our mutex), it will discover that is no longer
1645 * necessary to run.
1646 */
1647 dev_priv->fbc_work = NULL;
1648}
1649
1650static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1651{
1652 struct intel_fbc_work *work;
1653 struct drm_device *dev = crtc->dev;
1654 struct drm_i915_private *dev_priv = dev->dev_private;
1650 1655
1651 if (!dev_priv->display.enable_fbc) 1656 if (!dev_priv->display.enable_fbc)
1652 return; 1657 return;
1653 1658
1654 dev_priv->display.enable_fbc(crtc, interval); 1659 intel_cancel_fbc_work(dev_priv);
1660
1661 work = kzalloc(sizeof *work, GFP_KERNEL);
1662 if (work == NULL) {
1663 dev_priv->display.enable_fbc(crtc, interval);
1664 return;
1665 }
1666
1667 work->crtc = crtc;
1668 work->fb = crtc->fb;
1669 work->interval = interval;
1670 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1671
1672 dev_priv->fbc_work = work;
1673
1674 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1675
1676 /* Delay the actual enabling to let pageflipping cease and the
1677 * display to settle before starting the compression. Note that
1678 * this delay also serves a second purpose: it allows for a
1679 * vblank to pass after disabling the FBC before we attempt
1680 * to modify the control registers.
1681 *
1682 * A more complicated solution would involve tracking vblanks
1683 * following the termination of the page-flipping sequence
1684 * and indeed performing the enable as a co-routine and not
1685 * waiting synchronously upon the vblank.
1686 */
1687 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1655} 1688}
1656 1689
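The scheme above is a single-slot deferred apply: only the most recent request survives, and the delayed work applies it only if it is still the current one when the 50ms delay expires. A condensed sketch of the pattern (kernel context assumed; foo_work, apply() and some_mutex are illustrative stand-ins, not driver symbols):

struct foo_work {
	struct delayed_work work;
	void *payload;
};

static DEFINE_MUTEX(some_mutex);
static struct foo_work *pending;		/* the single slot */

static void apply(void *payload);		/* stand-in for the real update */

static void foo_work_fn(struct work_struct *w)
{
	struct foo_work *fw =
		container_of(to_delayed_work(w), struct foo_work, work);

	mutex_lock(&some_mutex);
	if (fw == pending) {			/* still the latest request? */
		apply(fw->payload);
		pending = NULL;
	}
	mutex_unlock(&some_mutex);
	kfree(fw);				/* applied or stale: free either way */
}

static void foo_schedule(void *payload)		/* caller holds some_mutex */
{
	struct foo_work *fw;

	if (pending && cancel_delayed_work(&pending->work))
		kfree(pending);			/* cancelled before it ever ran */
	pending = NULL;

	fw = kzalloc(sizeof(*fw), GFP_KERNEL);
	if (!fw)
		return;
	fw->payload = payload;
	INIT_DELAYED_WORK(&fw->work, foo_work_fn);
	pending = fw;
	schedule_delayed_work(&fw->work, msecs_to_jiffies(50));
}

If the callback is already running when a new request arrives, cancel_delayed_work() returns 0; the callback then sees fw != pending, skips the apply and frees itself, which is exactly how intel_fbc_work_fn() and intel_cancel_fbc_work() cooperate.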
1657void intel_disable_fbc(struct drm_device *dev) 1690void intel_disable_fbc(struct drm_device *dev)
1658{ 1691{
1659 struct drm_i915_private *dev_priv = dev->dev_private; 1692 struct drm_i915_private *dev_priv = dev->dev_private;
1660 1693
1694 intel_cancel_fbc_work(dev_priv);
1695
1661 if (!dev_priv->display.disable_fbc) 1696 if (!dev_priv->display.disable_fbc)
1662 return; 1697 return;
1663 1698
1664 dev_priv->display.disable_fbc(dev); 1699 dev_priv->display.disable_fbc(dev);
1700 dev_priv->cfb_plane = -1;
1665} 1701}
1666 1702
1667/** 1703/**
@@ -1760,8 +1796,13 @@ static void intel_update_fbc(struct drm_device *dev)
1760 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 1796 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1761 goto out_disable; 1797 goto out_disable;
1762 } 1798 }
1763 if (obj->tiling_mode != I915_TILING_X) { 1799
1764 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1800 /* The use of a CPU fence is mandatory in order to detect writes
1801 * by the CPU to the scanout and trigger updates to the FBC.
1802 */
1803 if (obj->tiling_mode != I915_TILING_X ||
1804 obj->fence_reg == I915_FENCE_REG_NONE) {
1805 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1765 dev_priv->no_fbc_reason = FBC_NOT_TILED; 1806 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1766 goto out_disable; 1807 goto out_disable;
1767 } 1808 }
@@ -1770,6 +1811,44 @@ static void intel_update_fbc(struct drm_device *dev)
1770 if (in_dbg_master()) 1811 if (in_dbg_master())
1771 goto out_disable; 1812 goto out_disable;
1772 1813
1814 /* If the scanout has not changed, don't modify the FBC settings.
1815 * Note that we make the fundamental assumption that the fb->obj
1816 * cannot be unpinned (and have its GTT offset and fence revoked)
1817 * without first being decoupled from the scanout and FBC disabled.
1818 */
1819 if (dev_priv->cfb_plane == intel_crtc->plane &&
1820 dev_priv->cfb_fb == fb->base.id &&
1821 dev_priv->cfb_y == crtc->y)
1822 return;
1823
1824 if (intel_fbc_enabled(dev)) {
1825 /* We update FBC along two paths, after changing fb/crtc
1826 * configuration (modeswitching) and after page-flipping
1827 * finishes. For the latter, we know that not only did
1828 * we disable the FBC at the start of the page-flip
1829 * sequence, but also more than one vblank has passed.
1830 *
1831 * For the former case of modeswitching, it is possible
1832 * to switch between two valid FBC configurations
1833 * instantaneously so we do need to disable the FBC
1834 * before we can modify its control registers. We also
1835 * have to wait for the next vblank for that to take
1836 * effect. However, since we delay enabling FBC we can
1837 * assume that a vblank has passed since disabling and
1838 * that we can safely alter the registers in the deferred
1839 * callback.
1840 *
1841 * In the scenario that we go from a valid to invalid
1842 * and then back to valid FBC configuration we have
1843 * no strict enforcement that a vblank occurred since
1844 * disabling the FBC. However, along all current pipe
1845 * disabling paths we do need to wait for a vblank at
1846 * some point. And we wait before enabling FBC anyway.
1847 */
1848 DRM_DEBUG_KMS("disabling active FBC for update\n");
1849 intel_disable_fbc(dev);
1850 }
1851
1773 intel_enable_fbc(crtc, 500); 1852 intel_enable_fbc(crtc, 500);
1774 return; 1853 return;
1775 1854
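The early return added above makes intel_update_fbc() a change detector: the last-programmed (plane, framebuffer id, y offset) triple is cached in dev_priv and the disable/re-enable dance is skipped when nothing the hardware cares about has moved. The test itself is a plain tuple comparison; a standalone restatement (names illustrative):

struct fbc_cache { int plane; unsigned int fb_id; int y; };

static int fbc_unchanged(const struct fbc_cache *c,
			 int plane, unsigned int fb_id, int y)
{
	return c->plane == plane && c->fb_id == fb_id && c->y == y;
}

Since the cached fields are only written back from the deferred worker once an enable actually lands, a comparison made while an enable is still pending simply fails and the update path is retaken.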
@@ -1812,14 +1891,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1812 } 1891 }
1813 1892
1814 dev_priv->mm.interruptible = false; 1893 dev_priv->mm.interruptible = false;
1815 ret = i915_gem_object_pin(obj, alignment, true); 1894 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1816 if (ret) 1895 if (ret)
1817 goto err_interruptible; 1896 goto err_interruptible;
1818 1897
1819 ret = i915_gem_object_set_to_display_plane(obj, pipelined);
1820 if (ret)
1821 goto err_unpin;
1822
1823 /* Install a fence for tiled scan-out. Pre-i965 always needs a 1898 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1824 * fence, whereas 965+ only requires a fence if using 1899 * fence, whereas 965+ only requires a fence if using
1825 * framebuffer compression. For simplicity, we always install 1900 * framebuffer compression. For simplicity, we always install
@@ -1841,10 +1916,8 @@ err_interruptible:
1841 return ret; 1916 return ret;
1842} 1917}
1843 1918
1844/* Assume fb object is pinned & idle & fenced and just update base pointers */ 1919static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1845static int 1920 int x, int y)
1846intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1847 int x, int y, enum mode_set_atomic state)
1848{ 1921{
1849 struct drm_device *dev = crtc->dev; 1922 struct drm_device *dev = crtc->dev;
1850 struct drm_i915_private *dev_priv = dev->dev_private; 1923 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1887,7 +1960,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1887 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 1960 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1888 break; 1961 break;
1889 default: 1962 default:
1890 DRM_ERROR("Unknown color depth\n"); 1963 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1891 return -EINVAL; 1964 return -EINVAL;
1892 } 1965 }
1893 if (INTEL_INFO(dev)->gen >= 4) { 1966 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1897,10 +1970,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1897 dspcntr &= ~DISPPLANE_TILED; 1970 dspcntr &= ~DISPPLANE_TILED;
1898 } 1971 }
1899 1972
1900 if (HAS_PCH_SPLIT(dev))
1901 /* must disable */
1902 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1903
1904 I915_WRITE(reg, dspcntr); 1973 I915_WRITE(reg, dspcntr);
1905 1974
1906 Start = obj->gtt_offset; 1975 Start = obj->gtt_offset;
@@ -1917,6 +1986,99 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1917 I915_WRITE(DSPADDR(plane), Start + Offset); 1986 I915_WRITE(DSPADDR(plane), Start + Offset);
1918 POSTING_READ(reg); 1987 POSTING_READ(reg);
1919 1988
1989 return 0;
1990}
1991
1992static int ironlake_update_plane(struct drm_crtc *crtc,
1993 struct drm_framebuffer *fb, int x, int y)
1994{
1995 struct drm_device *dev = crtc->dev;
1996 struct drm_i915_private *dev_priv = dev->dev_private;
1997 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1998 struct intel_framebuffer *intel_fb;
1999 struct drm_i915_gem_object *obj;
2000 int plane = intel_crtc->plane;
2001 unsigned long Start, Offset;
2002 u32 dspcntr;
2003 u32 reg;
2004
2005 switch (plane) {
2006 case 0:
2007 case 1:
2008 break;
2009 default:
2010 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2011 return -EINVAL;
2012 }
2013
2014 intel_fb = to_intel_framebuffer(fb);
2015 obj = intel_fb->obj;
2016
2017 reg = DSPCNTR(plane);
2018 dspcntr = I915_READ(reg);
2019 /* Mask out pixel format bits in case we change it */
2020 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2021 switch (fb->bits_per_pixel) {
2022 case 8:
2023 dspcntr |= DISPPLANE_8BPP;
2024 break;
2025 case 16:
2026 if (fb->depth != 16)
2027 return -EINVAL;
2028
2029 dspcntr |= DISPPLANE_16BPP;
2030 break;
2031 case 24:
2032 case 32:
2033 if (fb->depth == 24)
2034 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2035 else if (fb->depth == 30)
2036 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2037 else
2038 return -EINVAL;
2039 break;
2040 default:
2041 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2042 return -EINVAL;
2043 }
2044
2045 if (obj->tiling_mode != I915_TILING_NONE)
2046 dspcntr |= DISPPLANE_TILED;
2047 else
2048 dspcntr &= ~DISPPLANE_TILED;
2049
2050 /* must disable */
2051 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2052
2053 I915_WRITE(reg, dspcntr);
2054
2055 Start = obj->gtt_offset;
2056 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
2057
2058 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2059 Start, Offset, x, y, fb->pitch);
2060 I915_WRITE(DSPSTRIDE(plane), fb->pitch);
2061 I915_WRITE(DSPSURF(plane), Start);
2062 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2063 I915_WRITE(DSPADDR(plane), Offset);
2064 POSTING_READ(reg);
2065
2066 return 0;
2067}
2068
2069/* Assume fb object is pinned & idle & fenced and just update base pointers */
2070static int
2071intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2072 int x, int y, enum mode_set_atomic state)
2073{
2074 struct drm_device *dev = crtc->dev;
2075 struct drm_i915_private *dev_priv = dev->dev_private;
2076 int ret;
2077
2078 ret = dev_priv->display.update_plane(crtc, fb, x, y);
2079 if (ret)
2080 return ret;
2081
1920 intel_update_fbc(dev); 2082 intel_update_fbc(dev);
1921 intel_increase_pllclock(crtc); 2083 intel_increase_pllclock(crtc);
1922 2084
@@ -1971,7 +2133,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1971 * This should only fail upon a hung GPU, in which case we 2133 * This should only fail upon a hung GPU, in which case we
1972 * can safely continue. 2134 * can safely continue.
1973 */ 2135 */
1974 ret = i915_gem_object_flush_gpu(obj); 2136 ret = i915_gem_object_finish_gpu(obj);
1975 (void) ret; 2137 (void) ret;
1976 } 2138 }
1977 2139
@@ -2622,6 +2784,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2622 /* For PCH DP, enable TRANS_DP_CTL */ 2784 /* For PCH DP, enable TRANS_DP_CTL */
2623 if (HAS_PCH_CPT(dev) && 2785 if (HAS_PCH_CPT(dev) &&
2624 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2786 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2787 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2625 reg = TRANS_DP_CTL(pipe); 2788 reg = TRANS_DP_CTL(pipe);
2626 temp = I915_READ(reg); 2789 temp = I915_READ(reg);
2627 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2790 temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -2629,7 +2792,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2629 TRANS_DP_BPC_MASK); 2792 TRANS_DP_BPC_MASK);
2630 temp |= (TRANS_DP_OUTPUT_ENABLE | 2793 temp |= (TRANS_DP_OUTPUT_ENABLE |
2631 TRANS_DP_ENH_FRAMING); 2794 TRANS_DP_ENH_FRAMING);
2632 temp |= TRANS_DP_8BPC; 2795 temp |= bpc << 9; /* same format but at 11:9 */
2633 2796
2634 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2797 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2635 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2798 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
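The handoff above reads the pipe's 3-bit depth field out of PIPECONF (bits 7:5, hence the >> 5) and re-plants the same encoding into TRANS_DP_CTL at bits 11:9, rather than hard-coding TRANS_DP_8BPC. A standalone bit-shuffle check (the 0x1 value standing in for the 10bpc encoding is an assumption for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int pipeconf = 0x1 << 5;		/* depth field at bits 7:5 */
	unsigned int bpc = (pipeconf & (0x7 << 5)) >> 5;

	printf("TRANS_DP_CTL bits = 0x%x\n", bpc << 9);	/* same field at 11:9 */
	return 0;
}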
@@ -2732,9 +2895,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2732 2895
2733 intel_disable_plane(dev_priv, plane, pipe); 2896 intel_disable_plane(dev_priv, plane, pipe);
2734 2897
2735 if (dev_priv->cfb_plane == plane && 2898 if (dev_priv->cfb_plane == plane)
2736 dev_priv->display.disable_fbc) 2899 intel_disable_fbc(dev);
2737 dev_priv->display.disable_fbc(dev);
2738 2900
2739 intel_disable_pipe(dev_priv, pipe); 2901 intel_disable_pipe(dev_priv, pipe);
2740 2902
@@ -2898,9 +3060,8 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
2898 intel_crtc_dpms_overlay(intel_crtc, false); 3060 intel_crtc_dpms_overlay(intel_crtc, false);
2899 intel_crtc_update_cursor(crtc, false); 3061 intel_crtc_update_cursor(crtc, false);
2900 3062
2901 if (dev_priv->cfb_plane == plane && 3063 if (dev_priv->cfb_plane == plane)
2902 dev_priv->display.disable_fbc) 3064 intel_disable_fbc(dev);
2903 dev_priv->display.disable_fbc(dev);
2904 3065
2905 intel_disable_plane(dev_priv, plane, pipe); 3066 intel_disable_plane(dev_priv, plane, pipe);
2906 intel_disable_pipe(dev_priv, pipe); 3067 intel_disable_pipe(dev_priv, pipe);
@@ -4308,6 +4469,133 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4308 return dev_priv->lvds_use_ssc && i915_panel_use_ssc; 4469 return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
4309} 4470}
4310 4471
4472/**
4473 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4474 * @crtc: CRTC structure
4475 *
4476 * A pipe may be connected to one or more outputs. Based on the depth of the
4477 * attached framebuffer, choose a good color depth to use on the pipe.
4478 *
4479 * If possible, match the pipe depth to the fb depth. In some cases, this
4480 * isn't ideal, because the connected output supports a lesser or restricted
4481 * set of depths. Resolve that here:
4482 * LVDS typically supports only 6bpc, so clamp down in that case
4483 * HDMI supports only 8bpc or 12bpc, so an in-between value is rounded up to 12bpc
4484 * Displays may support a restricted set as well, check EDID and clamp as
4485 * appropriate.
4486 *
4487 * RETURNS:
4488 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4489 * true if they don't match).
4490 */
4491static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4492 unsigned int *pipe_bpp)
4493{
4494 struct drm_device *dev = crtc->dev;
4495 struct drm_i915_private *dev_priv = dev->dev_private;
4496 struct drm_encoder *encoder;
4497 struct drm_connector *connector;
4498 unsigned int display_bpc = UINT_MAX, bpc;
4499
4500 /* Walk the encoders & connectors on this crtc, get min bpc */
4501 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4502 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
4503
4504 if (encoder->crtc != crtc)
4505 continue;
4506
4507 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4508 unsigned int lvds_bpc;
4509
4510 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4511 LVDS_A3_POWER_UP)
4512 lvds_bpc = 8;
4513 else
4514 lvds_bpc = 6;
4515
4516 if (lvds_bpc < display_bpc) {
4517 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4518 display_bpc = lvds_bpc;
4519 }
4520 continue;
4521 }
4522
4523 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4524 /* Use VBT settings if we have an eDP panel */
4525 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4526
4527 if (edp_bpc < display_bpc) {
4528 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4529 display_bpc = edp_bpc;
4530 }
4531 continue;
4532 }
4533
4534 /* Not one of the known troublemakers, check the EDID */
4535 list_for_each_entry(connector, &dev->mode_config.connector_list,
4536 head) {
4537 if (connector->encoder != encoder)
4538 continue;
4539
4540 if (connector->display_info.bpc < display_bpc) {
4541 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4542 display_bpc = connector->display_info.bpc;
4543 }
4544 }
4545
4546 /*
4547 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4548 * through, clamp it down. (Note: >12bpc will be caught below.)
4549 */
4550 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4551 if (display_bpc > 8 && display_bpc < 12) {
4552 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
4553 display_bpc = 12;
4554 } else {
4555 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
4556 display_bpc = 8;
4557 }
4558 }
4559 }
4560
4561 /*
4562 * We could just drive the pipe at the highest bpc all the time and
4563 * enable dithering as needed, but that costs bandwidth. So choose
4564 * the minimum value that expresses the full color range of the fb but
4565 * also stays within the max display bpc discovered above.
4566 */
4567
4568 switch (crtc->fb->depth) {
4569 case 8:
4570 bpc = 8; /* since we go through a colormap */
4571 break;
4572 case 15:
4573 case 16:
4574 bpc = 6; /* min is 18bpp */
4575 break;
4576 case 24:
4577 bpc = min((unsigned int)8, display_bpc);
4578 break;
4579 case 30:
4580 bpc = min((unsigned int)10, display_bpc);
4581 break;
4582 case 48:
4583 bpc = min((unsigned int)12, display_bpc);
4584 break;
4585 default:
4586 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4587 bpc = min((unsigned int)8, display_bpc);
4588 break;
4589 }
4590
4591 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
4592 bpc, display_bpc);
4593
4594 *pipe_bpp = bpc * 3;
4595
4596 return display_bpc != bpc;
4597}
4598
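A worked instance of the selection above, runnable standalone: with a depth-24 framebuffer (8 bits per channel), an HDMI sink that was forced to 12bpc yields bpc = min(8, 12) = 8 and dither = (12 != 8) = true, while an LVDS panel clamped to 6bpc yields bpc = min(8, 6) = 6 and dither = false, matching the RETURNS rule in the comment block.

#include <stdio.h>

static unsigned int pick_bpc(unsigned int fb_bpc, unsigned int display_bpc,
			     int *dither)
{
	unsigned int bpc = fb_bpc < display_bpc ? fb_bpc : display_bpc;

	*dither = display_bpc != bpc;
	return bpc * 3;					/* *pipe_bpp */
}

int main(void)
{
	int dither;
	unsigned int bpp = pick_bpc(8, 12, &dither);	/* HDMI case */

	printf("pipe_bpp=%u dither=%d\n", bpp, dither);	/* 24 1 */
	bpp = pick_bpc(8, 6, &dither);			/* LVDS case */
	printf("pipe_bpp=%u dither=%d\n", bpp, dither);	/* 18 0 */
	return 0;
}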
4311static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4599static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4312 struct drm_display_mode *mode, 4600 struct drm_display_mode *mode,
4313 struct drm_display_mode *adjusted_mode, 4601 struct drm_display_mode *adjusted_mode,
@@ -4720,7 +5008,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4720 struct fdi_m_n m_n = {0}; 5008 struct fdi_m_n m_n = {0};
4721 u32 temp; 5009 u32 temp;
4722 u32 lvds_sync = 0; 5010 u32 lvds_sync = 0;
4723 int target_clock, pixel_multiplier, lane, link_bw, bpp, factor; 5011 int target_clock, pixel_multiplier, lane, link_bw, factor;
5012 unsigned int pipe_bpp;
5013 bool dither;
4724 5014
4725 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 5015 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4726 if (encoder->base.crtc != crtc) 5016 if (encoder->base.crtc != crtc)
@@ -4847,56 +5137,37 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4847 /* determine panel color depth */ 5137 /* determine panel color depth */
4848 temp = I915_READ(PIPECONF(pipe)); 5138 temp = I915_READ(PIPECONF(pipe));
4849 temp &= ~PIPE_BPC_MASK; 5139 temp &= ~PIPE_BPC_MASK;
4850 if (is_lvds) { 5140 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
4851 /* the BPC will be 6 if it is 18-bit LVDS panel */ 5141 switch (pipe_bpp) {
4852 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 5142 case 18:
4853 temp |= PIPE_8BPC; 5143 temp |= PIPE_6BPC;
4854 else
4855 temp |= PIPE_6BPC;
4856 } else if (has_edp_encoder) {
4857 switch (dev_priv->edp.bpp/3) {
4858 case 8:
4859 temp |= PIPE_8BPC;
4860 break;
4861 case 10:
4862 temp |= PIPE_10BPC;
4863 break;
4864 case 6:
4865 temp |= PIPE_6BPC;
4866 break;
4867 case 12:
4868 temp |= PIPE_12BPC;
4869 break;
4870 }
4871 } else
4872 temp |= PIPE_8BPC;
4873 I915_WRITE(PIPECONF(pipe), temp);
4874
4875 switch (temp & PIPE_BPC_MASK) {
4876 case PIPE_8BPC:
4877 bpp = 24;
4878 break; 5144 break;
4879 case PIPE_10BPC: 5145 case 24:
4880 bpp = 30; 5146 temp |= PIPE_8BPC;
4881 break; 5147 break;
4882 case PIPE_6BPC: 5148 case 30:
4883 bpp = 18; 5149 temp |= PIPE_10BPC;
4884 break; 5150 break;
4885 case PIPE_12BPC: 5151 case 36:
4886 bpp = 36; 5152 temp |= PIPE_12BPC;
4887 break; 5153 break;
4888 default: 5154 default:
4889 DRM_ERROR("unknown pipe bpc value\n"); 5155 WARN(1, "intel_choose_pipe_bpp returned invalid value\n");
4890 bpp = 24; 5156 temp |= PIPE_8BPC;
5157 pipe_bpp = 24;
5158 break;
4891 } 5159 }
4892 5160
5161 intel_crtc->bpp = pipe_bpp;
5162 I915_WRITE(PIPECONF(pipe), temp);
5163
4893 if (!lane) { 5164 if (!lane) {
4894 /* 5165 /*
4895 * Account for spread spectrum to avoid 5166 * Account for spread spectrum to avoid
4896 * oversubscribing the link. Max center spread 5167 * oversubscribing the link. Max center spread
4897 * is 2.5%; use 5% for safety's sake. 5168 * is 2.5%; use 5% for safety's sake.
4898 */ 5169 */
4899 u32 bps = target_clock * bpp * 21 / 20; 5170 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4900 lane = bps / (link_bw * 8) + 1; 5171 lane = bps / (link_bw * 8) + 1;
4901 } 5172 }
4902 5173
@@ -4904,7 +5175,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4904 5175
4905 if (pixel_multiplier > 1) 5176 if (pixel_multiplier > 1)
4906 link_bw *= pixel_multiplier; 5177 link_bw *= pixel_multiplier;
4907 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 5178 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5179 &m_n);
4908 5180
4909 /* Ironlake: try to setup display ref clock before DPLL 5181 /* Ironlake: try to setup display ref clock before DPLL
4910 * enabling. This is only under driver's control after 5182 * enabling. This is only under driver's control after
@@ -5107,14 +5379,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5107 I915_WRITE(PCH_LVDS, temp); 5379 I915_WRITE(PCH_LVDS, temp);
5108 } 5380 }
5109 5381
5110 /* set the dithering flag and clear for anything other than a panel. */
5111 pipeconf &= ~PIPECONF_DITHER_EN; 5382 pipeconf &= ~PIPECONF_DITHER_EN;
5112 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5383 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5113 if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) { 5384 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5114 pipeconf |= PIPECONF_DITHER_EN; 5385 pipeconf |= PIPECONF_DITHER_EN;
5115 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5386 pipeconf |= PIPECONF_DITHER_TYPE_ST1;
5116 } 5387 }
5117
5118 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5388 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5119 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5389 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5120 } else { 5390 } else {
@@ -5434,21 +5704,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
5434 goto fail_locked; 5704 goto fail_locked;
5435 } 5705 }
5436 5706
5437 ret = i915_gem_object_pin(obj, PAGE_SIZE, true); 5707 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
5438 if (ret) {
5439 DRM_ERROR("failed to pin cursor bo\n");
5440 goto fail_locked;
5441 }
5442
5443 ret = i915_gem_object_set_to_gtt_domain(obj, 0);
5444 if (ret) { 5708 if (ret) {
5445 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5709 DRM_ERROR("failed to move cursor bo into the GTT\n");
5446 goto fail_unpin; 5710 goto fail_locked;
5447 } 5711 }
5448 5712
5449 ret = i915_gem_object_put_fence(obj); 5713 ret = i915_gem_object_put_fence(obj);
5450 if (ret) { 5714 if (ret) {
5451 DRM_ERROR("failed to move cursor bo into the GTT\n"); 5715 DRM_ERROR("failed to release fence for cursor\n");
5452 goto fail_unpin; 5716 goto fail_unpin;
5453 } 5717 }
5454 5718
@@ -6151,6 +6415,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
6151 drm_gem_object_unreference(&work->pending_flip_obj->base); 6415 drm_gem_object_unreference(&work->pending_flip_obj->base);
6152 drm_gem_object_unreference(&work->old_fb_obj->base); 6416 drm_gem_object_unreference(&work->old_fb_obj->base);
6153 6417
6418 intel_update_fbc(work->dev);
6154 mutex_unlock(&work->dev->struct_mutex); 6419 mutex_unlock(&work->dev->struct_mutex);
6155 kfree(work); 6420 kfree(work);
6156} 6421}
@@ -6515,6 +6780,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6515 if (ret) 6780 if (ret)
6516 goto cleanup_pending; 6781 goto cleanup_pending;
6517 6782
6783 intel_disable_fbc(dev);
6518 mutex_unlock(&dev->struct_mutex); 6784 mutex_unlock(&dev->struct_mutex);
6519 6785
6520 trace_i915_flip_request(intel_crtc->plane, obj); 6786 trace_i915_flip_request(intel_crtc->plane, obj);
@@ -6643,6 +6909,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
6643 6909
6644 intel_crtc_reset(&intel_crtc->base); 6910 intel_crtc_reset(&intel_crtc->base);
6645 intel_crtc->active = true; /* force the pipe off on setup_init_config */ 6911 intel_crtc->active = true; /* force the pipe off on setup_init_config */
6912 intel_crtc->bpp = 24; /* default for pre-Ironlake */
6646 6913
6647 if (HAS_PCH_SPLIT(dev)) { 6914 if (HAS_PCH_SPLIT(dev)) {
6648 intel_helper_funcs.prepare = ironlake_crtc_prepare; 6915 intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6869,6 +7136,11 @@ int intel_framebuffer_init(struct drm_device *dev,
6869 switch (mode_cmd->bpp) { 7136 switch (mode_cmd->bpp) {
6870 case 8: 7137 case 8:
6871 case 16: 7138 case 16:
7139 /* Only pre-ILK can handle 5:5:5 */
7140 if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
7141 return -EINVAL;
7142 break;
7143
6872 case 24: 7144 case 24:
6873 case 32: 7145 case 32:
6874 break; 7146 break;
@@ -7283,6 +7555,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
7283 mutex_unlock(&dev_priv->dev->struct_mutex); 7555 mutex_unlock(&dev_priv->dev->struct_mutex);
7284} 7556}
7285 7557
7558void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
7559{
7560 int min_freq = 15;
7561 int gpu_freq, ia_freq, max_ia_freq;
7562 int scaling_factor = 180;
7563
7564 max_ia_freq = cpufreq_quick_get_max(0);
7565 /*
7566 * Default to measured freq if none found, PCU will ensure we don't go
7567 * over
7568 */
7569 if (!max_ia_freq)
7570 max_ia_freq = tsc_khz;
7571
7572 /* Convert from kHz to MHz */
7573 max_ia_freq /= 1000;
7574
7575 mutex_lock(&dev_priv->dev->struct_mutex);
7576
7577 /*
7578 * For each potential GPU frequency, load a ring frequency we'd like
7579 * to use for memory access. We do this by specifying the IA frequency
7580 * the PCU should use as a reference to determine the ring frequency.
7581 */
7582 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
7583 gpu_freq--) {
7584 int diff = dev_priv->max_delay - gpu_freq;
7585
7586 /*
7587 * For GPU frequencies less than 750MHz, just use the lowest
7588 * ring freq.
7589 */
7590 if (gpu_freq < min_freq)
7591 ia_freq = 800;
7592 else
7593 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
7594 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
7595
7596 I915_WRITE(GEN6_PCODE_DATA,
7597 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
7598 gpu_freq);
7599 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
7600 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
7601 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
7602 GEN6_PCODE_READY) == 0, 10)) {
7603 DRM_ERROR("pcode write of freq table timed out\n");
7604 continue;
7605 }
7606 }
7607
7608 mutex_unlock(&dev_priv->dev->struct_mutex);
7609}
7610
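The table the loop programs is easy to recompute by hand. With the constants above (min_freq = 15, scaling_factor = 180) and an assumed 3.4GHz max IA frequency, each step down in GPU frequency lowers the requested IA reference by 90MHz until the 750MHz floor kicks in; a standalone recomputation (the delay limits are made-up sample values, and the register writes are omitted):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int min_freq = 15, scaling_factor = 180;
	int max_ia_freq = 3400;			/* MHz, assumed host CPU */
	int max_delay = 22, min_delay = 11;	/* sample GPU freq limits */

	for (int gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		int diff = max_delay - gpu_freq;
		int ia_freq = gpu_freq < min_freq
			? 800			/* lowest ring freq floor */
			: max_ia_freq - diff * scaling_factor / 2;

		printf("gpu=%d ia=%d (units of 100MHz)\n",
		       gpu_freq, DIV_ROUND_CLOSEST(ia_freq, 100));
	}
	return 0;
}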
7286static void ironlake_init_clock_gating(struct drm_device *dev) 7611static void ironlake_init_clock_gating(struct drm_device *dev)
7287{ 7612{
7288 struct drm_i915_private *dev_priv = dev->dev_private; 7613 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7639,9 +7964,11 @@ static void intel_init_display(struct drm_device *dev)
7639 if (HAS_PCH_SPLIT(dev)) { 7964 if (HAS_PCH_SPLIT(dev)) {
7640 dev_priv->display.dpms = ironlake_crtc_dpms; 7965 dev_priv->display.dpms = ironlake_crtc_dpms;
7641 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 7966 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
7967 dev_priv->display.update_plane = ironlake_update_plane;
7642 } else { 7968 } else {
7643 dev_priv->display.dpms = i9xx_crtc_dpms; 7969 dev_priv->display.dpms = i9xx_crtc_dpms;
7644 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 7970 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
7971 dev_priv->display.update_plane = i9xx_update_plane;
7645 } 7972 }
7646 7973
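The new update_plane hook follows the driver's usual per-generation dispatch: pick the implementation once at init time, call through the pointer on every mode set thereafter. A minimal standalone analogue of that shape (names illustrative):

#include <stdio.h>

struct display_ops {
	int (*update_plane)(int plane, int x, int y);
};

static int i9xx_like(int plane, int x, int y)
{
	printf("i9xx plane %d at %d,%d\n", plane, x, y);
	return 0;
}

static int ironlake_like(int plane, int x, int y)
{
	printf("ilk plane %d at %d,%d\n", plane, x, y);
	return 0;
}

int main(void)
{
	int has_pch_split = 1;			/* assumed: PCH-split hardware */
	struct display_ops ops = {
		.update_plane = has_pch_split ? ironlake_like : i9xx_like,
	};

	return ops.update_plane(0, 16, 32);
}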
7647 if (I915_HAS_FBC(dev)) { 7974 if (I915_HAS_FBC(dev)) {
@@ -7926,8 +8253,10 @@ void intel_modeset_init(struct drm_device *dev)
7926 intel_init_emon(dev); 8253 intel_init_emon(dev);
7927 } 8254 }
7928 8255
7929 if (IS_GEN6(dev)) 8256 if (IS_GEN6(dev) || IS_GEN7(dev)) {
7930 gen6_enable_rps(dev_priv); 8257 gen6_enable_rps(dev_priv);
8258 gen6_update_ring_freq(dev_priv);
8259 }
7931 8260
7932 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 8261 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7933 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 8262 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -7963,12 +8292,11 @@ void intel_modeset_cleanup(struct drm_device *dev)
7963 intel_increase_pllclock(crtc); 8292 intel_increase_pllclock(crtc);
7964 } 8293 }
7965 8294
7966 if (dev_priv->display.disable_fbc) 8295 intel_disable_fbc(dev);
7967 dev_priv->display.disable_fbc(dev);
7968 8296
7969 if (IS_IRONLAKE_M(dev)) 8297 if (IS_IRONLAKE_M(dev))
7970 ironlake_disable_drps(dev); 8298 ironlake_disable_drps(dev);
7971 if (IS_GEN6(dev)) 8299 if (IS_GEN6(dev) || IS_GEN7(dev))
7972 gen6_disable_rps(dev); 8300 gen6_disable_rps(dev);
7973 8301
7974 if (IS_IRONLAKE_M(dev)) 8302 if (IS_IRONLAKE_M(dev))
@@ -7981,6 +8309,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
7981 drm_irq_uninstall(dev); 8309 drm_irq_uninstall(dev);
7982 cancel_work_sync(&dev_priv->hotplug_work); 8310 cancel_work_sync(&dev_priv->hotplug_work);
7983 8311
8312 /* flush any delayed tasks or pending work */
8313 flush_scheduled_work();
8314
7984 /* Shut off idle work before the crtcs get freed. */ 8315 /* Shut off idle work before the crtcs get freed. */
7985 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 8316 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7986 intel_crtc = to_intel_crtc(crtc); 8317 intel_crtc = to_intel_crtc(crtc);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 391b55f1cc74..f797fb58ba9c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,7 +50,6 @@ struct intel_dp {
50 bool has_audio; 50 bool has_audio;
51 int force_audio; 51 int force_audio;
52 uint32_t color_range; 52 uint32_t color_range;
53 int dpms_mode;
54 uint8_t link_bw; 53 uint8_t link_bw;
55 uint8_t lane_count; 54 uint8_t lane_count;
56 uint8_t dpcd[4]; 55 uint8_t dpcd[4];
@@ -138,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
138{ 137{
139 int max_lane_count = 4; 138 int max_lane_count = 4;
140 139
141 if (intel_dp->dpcd[0] >= 0x11) { 140 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
142 max_lane_count = intel_dp->dpcd[2] & 0x1f; 141 max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
143 switch (max_lane_count) { 142 switch (max_lane_count) {
144 case 1: case 2: case 4: 143 case 1: case 2: case 4:
145 break; 144 break;
@@ -153,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
153static int 152static int
154intel_dp_max_link_bw(struct intel_dp *intel_dp) 153intel_dp_max_link_bw(struct intel_dp *intel_dp)
155{ 154{
156 int max_link_bw = intel_dp->dpcd[1]; 155 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
157 156
158 switch (max_link_bw) { 157 switch (max_link_bw) {
159 case DP_LINK_BW_1_62: 158 case DP_LINK_BW_1_62:
@@ -179,12 +178,14 @@ intel_dp_link_clock(uint8_t link_bw)
179static int 178static int
180intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) 179intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
181{ 180{
182 struct drm_i915_private *dev_priv = dev->dev_private; 181 struct drm_crtc *crtc = intel_dp->base.base.crtc;
182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
183 int bpp = 24;
183 184
184 if (is_edp(intel_dp)) 185 if (intel_crtc)
185 return (pixel_clock * dev_priv->edp.bpp + 7) / 8; 186 bpp = intel_crtc->bpp;
186 else 187
187 return pixel_clock * 3; 188 return (pixel_clock * bpp + 7) / 8;
188} 189}
189 190
190static int 191static int
@@ -682,7 +683,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
682 struct drm_encoder *encoder; 683 struct drm_encoder *encoder;
683 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct drm_i915_private *dev_priv = dev->dev_private;
684 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 685 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
685 int lane_count = 4, bpp = 24; 686 int lane_count = 4;
686 struct intel_dp_m_n m_n; 687 struct intel_dp_m_n m_n;
687 int pipe = intel_crtc->pipe; 688 int pipe = intel_crtc->pipe;
688 689
@@ -701,7 +702,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
701 break; 702 break;
702 } else if (is_edp(intel_dp)) { 703 } else if (is_edp(intel_dp)) {
703 lane_count = dev_priv->edp.lanes; 704 lane_count = dev_priv->edp.lanes;
704 bpp = dev_priv->edp.bpp;
705 break; 705 break;
706 } 706 }
707 } 707 }
@@ -711,7 +711,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
711 * the number of bytes_per_pixel post-LUT, which we always 711 * the number of bytes_per_pixel post-LUT, which we always
712 * set up for 8-bits of R/G/B, or 3 bytes total. 712 * set up for 8-bits of R/G/B, or 3 bytes total.
713 */ 713 */
714 intel_dp_compute_m_n(bpp, lane_count, 714 intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
715 mode->clock, adjusted_mode->clock, &m_n); 715 mode->clock, adjusted_mode->clock, &m_n);
716 716
717 if (HAS_PCH_SPLIT(dev)) { 717 if (HAS_PCH_SPLIT(dev)) {
@@ -774,7 +774,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
774 /* 774 /*
775 * Check for DPCD version > 1.1 and enhanced framing support 775 * Check for DPCD version > 1.1 and enhanced framing support
776 */ 776 */
777 if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) { 777 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
778 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
778 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 779 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
779 intel_dp->DP |= DP_ENHANCED_FRAMING; 780 intel_dp->DP |= DP_ENHANCED_FRAMING;
780 } 781 }
@@ -942,11 +943,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
942 udelay(200); 943 udelay(200);
943} 944}
944 945
946/* If the sink supports it, try to set the power state appropriately */
947static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
948{
949 int ret, i;
950
951 /* Should have a valid DPCD by this point */
952 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
953 return;
954
955 if (mode != DRM_MODE_DPMS_ON) {
956 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
957 DP_SET_POWER_D3);
958 if (ret != 1)
959 DRM_DEBUG_DRIVER("failed to write sink power state\n");
960 } else {
961 /*
962 * When turning on, we need to retry for 1ms to give the sink
963 * time to wake up.
964 */
965 for (i = 0; i < 3; i++) {
966 ret = intel_dp_aux_native_write_1(intel_dp,
967 DP_SET_POWER,
968 DP_SET_POWER_D0);
969 if (ret == 1)
970 break;
971 msleep(1);
972 }
973 }
974}
975
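Both the wake-up loop above and intel_dp_aux_native_read_retry() further down are the same bounded-retry shape: per the DisplayPort spec a sink must respond within 1ms of leaving D3, and up to three attempts are made. A runnable toy model of that shape (try_write() is a stand-in for the AUX transaction):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

static bool try_write(void)			/* stand-in for the AUX write */
{
	return ++attempts == 2;			/* pretend the sink acks on try 2 */
}

static bool write_with_retry(void)
{
	for (int i = 0; i < 3; i++) {
		if (try_write())
			return true;
		usleep(1000);			/* the msleep(1) between attempts */
	}
	return false;
}

int main(void)
{
	printf("acked=%d after %d attempt(s)\n", write_with_retry(), attempts);
	return 0;
}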
945static void intel_dp_prepare(struct drm_encoder *encoder) 976static void intel_dp_prepare(struct drm_encoder *encoder)
946{ 977{
947 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 978 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
948 struct drm_device *dev = encoder->dev; 979 struct drm_device *dev = encoder->dev;
949 980
981 /* Wake up the sink first */
982 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
983
950 if (is_edp(intel_dp)) { 984 if (is_edp(intel_dp)) {
951 ironlake_edp_backlight_off(dev); 985 ironlake_edp_backlight_off(dev);
952 ironlake_edp_panel_off(dev); 986 ironlake_edp_panel_off(dev);
@@ -990,6 +1024,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
990 if (mode != DRM_MODE_DPMS_ON) { 1024 if (mode != DRM_MODE_DPMS_ON) {
991 if (is_edp(intel_dp)) 1025 if (is_edp(intel_dp))
992 ironlake_edp_backlight_off(dev); 1026 ironlake_edp_backlight_off(dev);
1027 intel_dp_sink_dpms(intel_dp, mode);
993 intel_dp_link_down(intel_dp); 1028 intel_dp_link_down(intel_dp);
994 if (is_edp(intel_dp)) 1029 if (is_edp(intel_dp))
995 ironlake_edp_panel_off(dev); 1030 ironlake_edp_panel_off(dev);
@@ -998,6 +1033,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
998 } else { 1033 } else {
999 if (is_edp(intel_dp)) 1034 if (is_edp(intel_dp))
1000 ironlake_edp_panel_vdd_on(intel_dp); 1035 ironlake_edp_panel_vdd_on(intel_dp);
1036 intel_dp_sink_dpms(intel_dp, mode);
1001 if (!(dp_reg & DP_PORT_EN)) { 1037 if (!(dp_reg & DP_PORT_EN)) {
1002 intel_dp_start_link_train(intel_dp); 1038 intel_dp_start_link_train(intel_dp);
1003 if (is_edp(intel_dp)) { 1039 if (is_edp(intel_dp)) {
@@ -1009,7 +1045,31 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1009 if (is_edp(intel_dp)) 1045 if (is_edp(intel_dp))
1010 ironlake_edp_backlight_on(dev); 1046 ironlake_edp_backlight_on(dev);
1011 } 1047 }
1012 intel_dp->dpms_mode = mode; 1048}
1049
1050/*
1051 * Native read with retry for link status and receiver capability reads for
1052 * cases where the sink may still be asleep.
1053 */
1054static bool
1055intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1056 uint8_t *recv, int recv_bytes)
1057{
1058 int ret, i;
1059
1060 /*
1061 * Sinks are *supposed* to come up within 1ms from an off state,
1062 * but we're also supposed to retry 3 times per the spec.
1063 */
1064 for (i = 0; i < 3; i++) {
1065 ret = intel_dp_aux_native_read(intel_dp, address, recv,
1066 recv_bytes);
1067 if (ret == recv_bytes)
1068 return true;
1069 msleep(1);
1070 }
1071
1072 return false;
1013} 1073}
1014 1074
1015/* 1075/*
@@ -1019,14 +1079,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
1019static bool 1079static bool
1020intel_dp_get_link_status(struct intel_dp *intel_dp) 1080intel_dp_get_link_status(struct intel_dp *intel_dp)
1021{ 1081{
1022 int ret; 1082 return intel_dp_aux_native_read_retry(intel_dp,
1023 1083 DP_LANE0_1_STATUS,
1024 ret = intel_dp_aux_native_read(intel_dp, 1084 intel_dp->link_status,
1025 DP_LANE0_1_STATUS, 1085 DP_LINK_STATUS_SIZE);
1026 intel_dp->link_status, DP_LINK_STATUS_SIZE);
1027 if (ret != DP_LINK_STATUS_SIZE)
1028 return false;
1029 return true;
1030} 1086}
1031 1087
1032static uint8_t 1088static uint8_t
@@ -1515,6 +1571,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1515static void 1571static void
1516intel_dp_check_link_status(struct intel_dp *intel_dp) 1572intel_dp_check_link_status(struct intel_dp *intel_dp)
1517{ 1573{
1574 int ret;
1575
1518 if (!intel_dp->base.base.crtc) 1576 if (!intel_dp->base.base.crtc)
1519 return; 1577 return;
1520 1578
@@ -1523,6 +1581,15 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1523 return; 1581 return;
1524 } 1582 }
1525 1583
1584 /* Try to read receiver status if the link appears to be up */
1585 ret = intel_dp_aux_native_read(intel_dp,
1586 0x000, intel_dp->dpcd,
1587 sizeof (intel_dp->dpcd));
1588 if (ret != sizeof(intel_dp->dpcd)) {
1589 intel_dp_link_down(intel_dp);
1590 return;
1591 }
1592
1526 if (!intel_channel_eq_ok(intel_dp)) { 1593 if (!intel_channel_eq_ok(intel_dp)) {
1527 intel_dp_start_link_train(intel_dp); 1594 intel_dp_start_link_train(intel_dp);
1528 intel_dp_complete_link_train(intel_dp); 1595 intel_dp_complete_link_train(intel_dp);
@@ -1533,6 +1600,7 @@ static enum drm_connector_status
1533ironlake_dp_detect(struct intel_dp *intel_dp) 1600ironlake_dp_detect(struct intel_dp *intel_dp)
1534{ 1601{
1535 enum drm_connector_status status; 1602 enum drm_connector_status status;
1603 bool ret;
1536 1604
1537 /* Can't disconnect eDP, but you can close the lid... */ 1605 /* Can't disconnect eDP, but you can close the lid... */
1538 if (is_edp(intel_dp)) { 1606 if (is_edp(intel_dp)) {
@@ -1543,13 +1611,11 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
1543 } 1611 }
1544 1612
1545 status = connector_status_disconnected; 1613 status = connector_status_disconnected;
1546 if (intel_dp_aux_native_read(intel_dp, 1614 ret = intel_dp_aux_native_read_retry(intel_dp,
1547 0x000, intel_dp->dpcd, 1615 0x000, intel_dp->dpcd,
1548 sizeof (intel_dp->dpcd)) 1616 sizeof (intel_dp->dpcd));
1549 == sizeof(intel_dp->dpcd)) { 1617 if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
1550 if (intel_dp->dpcd[0] != 0) 1618 status = connector_status_connected;
1551 status = connector_status_connected;
1552 }
1553 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], 1619 DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
1554 intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); 1620 intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
1555 return status; 1621 return status;
@@ -1586,7 +1652,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
1586 if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, 1652 if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
1587 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) 1653 sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
1588 { 1654 {
1589 if (intel_dp->dpcd[0] != 0) 1655 if (intel_dp->dpcd[DP_DPCD_REV] != 0)
1590 status = connector_status_connected; 1656 status = connector_status_connected;
1591 } 1657 }
1592 1658
@@ -1790,8 +1856,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1790{ 1856{
1791 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 1857 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
1792 1858
1793 if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON) 1859 intel_dp_check_link_status(intel_dp);
1794 intel_dp_check_link_status(intel_dp);
1795} 1860}
1796 1861
1797/* Return which DP Port should be selected for Transcoder DP control */ 1862/* Return which DP Port should be selected for Transcoder DP control */
@@ -1859,7 +1924,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1859 return; 1924 return;
1860 1925
1861 intel_dp->output_reg = output_reg; 1926 intel_dp->output_reg = output_reg;
1862 intel_dp->dpms_mode = -1;
1863 1927
1864 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1928 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1865 if (!intel_connector) { 1929 if (!intel_connector) {
@@ -1954,8 +2018,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1954 sizeof(intel_dp->dpcd)); 2018 sizeof(intel_dp->dpcd));
1955 ironlake_edp_panel_vdd_off(intel_dp); 2019 ironlake_edp_panel_vdd_off(intel_dp);
1956 if (ret == sizeof(intel_dp->dpcd)) { 2020 if (ret == sizeof(intel_dp->dpcd)) {
1957 if (intel_dp->dpcd[0] >= 0x11) 2021 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
1958 dev_priv->no_aux_handshake = intel_dp->dpcd[3] & 2022 dev_priv->no_aux_handshake =
2023 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
1959 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2024 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
1960 } else { 2025 } else {
1961 /* if this fails, presume the device is a ghost */ 2026 /* if this fails, presume the device is a ghost */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9ffa61eb4d7e..6e990f9760ef 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -170,6 +170,7 @@ struct intel_crtc {
170 int16_t cursor_x, cursor_y; 170 int16_t cursor_x, cursor_y;
171 int16_t cursor_width, cursor_height; 171 int16_t cursor_width, cursor_height;
172 bool cursor_visible; 172 bool cursor_visible;
173 unsigned int bpp;
173}; 174};
174 175
175#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 176#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -233,6 +234,13 @@ struct intel_unpin_work {
233 bool enable_stall_check; 234 bool enable_stall_check;
234}; 235};
235 236
237struct intel_fbc_work {
238 struct delayed_work work;
239 struct drm_crtc *crtc;
240 struct drm_framebuffer *fb;
241 int interval;
242};
243
236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 244int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); 245extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
238 246
@@ -317,6 +325,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
317extern void ironlake_enable_drps(struct drm_device *dev); 325extern void ironlake_enable_drps(struct drm_device *dev);
318extern void ironlake_disable_drps(struct drm_device *dev); 326extern void ironlake_disable_drps(struct drm_device *dev);
319extern void gen6_enable_rps(struct drm_i915_private *dev_priv); 327extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
328extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
320extern void gen6_disable_rps(struct drm_device *dev); 329extern void gen6_disable_rps(struct drm_device *dev);
321extern void intel_init_emon(struct drm_device *dev); 330extern void intel_init_emon(struct drm_device *dev);
322 331
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index aa0a8e83142e..1ed8e6903915 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -124,12 +124,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
124 u32 sdvox; 124 u32 sdvox;
125 125
126 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; 126 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
127 sdvox |= intel_hdmi->color_range; 127 if (!HAS_PCH_SPLIT(dev))
128 sdvox |= intel_hdmi->color_range;
128 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 129 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
129 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 130 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
130 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 131 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
131 sdvox |= SDVO_HSYNC_ACTIVE_HIGH; 132 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
132 133
134 if (intel_crtc->bpp > 24)
135 sdvox |= COLOR_FORMAT_12bpc;
136 else
137 sdvox |= COLOR_FORMAT_8bpc;
138
133 /* Required on CPT */ 139 /* Required on CPT */
134 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) 140 if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
135 sdvox |= HDMI_MODE_SELECT; 141 sdvox |= HDMI_MODE_SELECT;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index d2c710422908..b7c5ddb564d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -297,19 +297,26 @@ static int intel_opregion_video_event(struct notifier_block *nb,
297 /* The only video events relevant to opregion are 0x80. These indicate 297 /* The only video events relevant to opregion are 0x80. These indicate
298 either a docking event, lid switch or display switch request. In 298 either a docking event, lid switch or display switch request. In
299 Linux, these are handled by the dock, button and video drivers. 299 Linux, these are handled by the dock, button and video drivers.
300 We might want to fix the video driver to be opregion-aware in 300 */
301 future, but right now we just indicate to the firmware that the
302 request has been handled */
303 301
304 struct opregion_acpi *acpi; 302 struct opregion_acpi *acpi;
303 struct acpi_bus_event *event = data;
304 int ret = NOTIFY_OK;
305
306 if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
307 return NOTIFY_DONE;
305 308
306 if (!system_opregion) 309 if (!system_opregion)
307 return NOTIFY_DONE; 310 return NOTIFY_DONE;
308 311
309 acpi = system_opregion->acpi; 312 acpi = system_opregion->acpi;
313
314 if (event->type == 0x80 && !(acpi->cevt & 0x1))
315 ret = NOTIFY_BAD;
316
310 acpi->csts = 0; 317 acpi->csts = 0;
311 318
312 return NOTIFY_OK; 319 return ret;
313} 320}
314 321
315static struct notifier_block intel_opregion_notifier = { 322static struct notifier_block intel_opregion_notifier = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9e2959bc91cd..d36038086826 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
773 if (ret != 0) 773 if (ret != 0)
774 return ret; 774 return ret;
775 775
776 ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); 776 ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
777 if (ret != 0) 777 if (ret != 0)
778 return ret; 778 return ret;
779 779
780 ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
781 if (ret != 0)
782 goto out_unpin;
783
784 ret = i915_gem_object_put_fence(new_bo); 780 ret = i915_gem_object_put_fence(new_bo);
785 if (ret) 781 if (ret)
786 goto out_unpin; 782 goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 95c4b1429935..e9615685a39c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
236 ret = -ENOMEM; 236 ret = -ENOMEM;
237 goto err; 237 goto err;
238 } 238 }
239 obj->cache_level = I915_CACHE_LLC; 239
240 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
240 241
241 ret = i915_gem_object_pin(obj, 4096, true); 242 ret = i915_gem_object_pin(obj, 4096, true);
242 if (ret) 243 if (ret)
@@ -776,7 +777,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
776 ret = -ENOMEM; 777 ret = -ENOMEM;
777 goto err; 778 goto err;
778 } 779 }
779 obj->cache_level = I915_CACHE_LLC; 780
781 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
780 782
781 ret = i915_gem_object_pin(obj, 4096, true); 783 ret = i915_gem_object_pin(obj, 4096, true);
782 if (ret != 0) { 784 if (ret != 0) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c0e0ee63fbf4..39ac2b634ae5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
165int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); 165int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
166static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring) 166static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
167{ 167{
168 return intel_wait_ring_buffer(ring, ring->space - 8); 168 return intel_wait_ring_buffer(ring, ring->size - 8);
169} 169}
170 170
171int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 171int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
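The one-line fix above matters because waiting for ring->space - 8 asks for eight bytes less than whatever is free right now, which is trivially satisfied; an idle ring is one whose free space has reached its maximum, ring->size - 8. A standalone model of the free-space arithmetic (simplified head/tail bookkeeping, assumed for illustration):

#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);		/* 8-byte guard gap */

	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	/* drained ring: head == tail, so space peaks at size - 8 */
	printf("idle space = %d\n", ring_space(0, 0, 4096));	/* 4088 */
	return 0;
}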
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 113e4e7264cd..210d570fd516 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1236,6 +1236,8 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
1236 struct drm_connector *connector) 1236 struct drm_connector *connector)
1237{ 1237{
1238 struct drm_encoder *encoder = &intel_tv->base.base; 1238 struct drm_encoder *encoder = &intel_tv->base.base;
1239 struct drm_crtc *crtc = encoder->crtc;
1240 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1239 struct drm_device *dev = encoder->dev; 1241 struct drm_device *dev = encoder->dev;
1240 struct drm_i915_private *dev_priv = dev->dev_private; 1242 struct drm_i915_private *dev_priv = dev->dev_private;
1241 unsigned long irqflags; 1243 unsigned long irqflags;
@@ -1258,6 +1260,10 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
1258 /* Poll for TV detection */ 1260 /* Poll for TV detection */
1259 tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK); 1261 tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
1260 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; 1262 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1263 if (intel_crtc->pipe == 1)
1264 tv_ctl |= TV_ENC_PIPEB_SELECT;
1265 else
1266 tv_ctl &= ~TV_ENC_PIPEB_SELECT;
1261 1267
1262 tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK); 1268 tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
1263 tv_dac |= (TVDAC_STATE_CHG_EN | 1269 tv_dac |= (TVDAC_STATE_CHG_EN |
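
The new intel_crtc lines in the first hunk exist so that load detection drives the TV encoder on whichever pipe the encoder's CRTC is actually using; previously the PIPEB select bit was left however the last writer set it. The hunk uses the usual read-modify-write idiom, which generalizes to a small helper; update_bits() here is hypothetical, while I915_READ/I915_WRITE are the driver's real register accessors:

/* Clear the field, then set the desired value (sketch). */
static void update_bits(struct drm_i915_private *dev_priv,
			u32 reg, u32 mask, u32 val)
{
	u32 tmp = I915_READ(reg);

	tmp &= ~mask;
	tmp |= val & mask;
	I915_WRITE(reg, tmp);
}

With it, the pipe routing above would read: update_bits(dev_priv, TV_CTL, TV_ENC_PIPEB_SELECT, intel_crtc->pipe == 1 ? TV_ENC_PIPEB_SELECT : 0);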
@@ -1277,26 +1283,26 @@ intel_tv_detect_type (struct intel_tv *intel_tv,
1277 to_intel_crtc(intel_tv->base.base.crtc)->pipe); 1283 to_intel_crtc(intel_tv->base.base.crtc)->pipe);
1278 1284
1279 type = -1; 1285 type = -1;
1280 if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) { 1286 tv_dac = I915_READ(TV_DAC);
1281 DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac); 1287 DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
1282 /* 1288 /*
1283 * A B C 1289 * A B C
1284 * 0 1 1 Composite 1290 * 0 1 1 Composite
1285 * 1 0 X svideo 1291 * 1 0 X svideo
1286 * 0 0 0 Component 1292 * 0 0 0 Component
1287 */ 1293 */
1288 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { 1294 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
1289 DRM_DEBUG_KMS("Detected Composite TV connection\n"); 1295 DRM_DEBUG_KMS("Detected Composite TV connection\n");
1290 type = DRM_MODE_CONNECTOR_Composite; 1296 type = DRM_MODE_CONNECTOR_Composite;
1291 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { 1297 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
1292 DRM_DEBUG_KMS("Detected S-Video TV connection\n"); 1298 DRM_DEBUG_KMS("Detected S-Video TV connection\n");
1293 type = DRM_MODE_CONNECTOR_SVIDEO; 1299 type = DRM_MODE_CONNECTOR_SVIDEO;
1294 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { 1300 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
1295 DRM_DEBUG_KMS("Detected Component TV connection\n"); 1301 DRM_DEBUG_KMS("Detected Component TV connection\n");
1296 type = DRM_MODE_CONNECTOR_Component; 1302 type = DRM_MODE_CONNECTOR_Component;
1297 } else { 1303 } else {
1298 DRM_DEBUG_KMS("Unrecognised TV connection\n"); 1304 DRM_DEBUG_KMS("Unrecognised TV connection\n");
1299 } 1305 type = -1;
1300 } 1306 }
1301 1307
1302 I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN); 1308 I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
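
In this hunk the detection path reads TV_DAC directly instead of polling for TVDAC_STATE_CHG, and the unrecognised branch now resets type to -1 explicitly. The A/B/C truth table in the comment decodes as follows; this is a pure-function restatement of the visible logic, not new driver code:

/* Returns a DRM_MODE_CONNECTOR_* value, or -1 if unrecognised. */
static int decode_tv_sense(u32 tv_dac)
{
	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE))
		return DRM_MODE_CONNECTOR_Composite;	/* A=0 B=1 C=1 */
	if ((tv_dac & (TVDAC_A_SENSE | TVDAC_B_SENSE)) == TVDAC_A_SENSE)
		return DRM_MODE_CONNECTOR_SVIDEO;	/* A=1 B=0 */
	if ((tv_dac & TVDAC_SENSE_MASK) == 0)
		return DRM_MODE_CONNECTOR_Component;	/* A=B=C=0 */
	return -1;
}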
diff --git a/include/acpi/video.h b/include/acpi/video.h
index 0e98e679d3a7..61109f2609fc 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -5,6 +5,8 @@
5 5
6struct acpi_device; 6struct acpi_device;
7 7
8#define ACPI_VIDEO_CLASS "video"
9
8#define ACPI_VIDEO_DISPLAY_CRT 1 10#define ACPI_VIDEO_DISPLAY_CRT 1
9#define ACPI_VIDEO_DISPLAY_TV 2 11#define ACPI_VIDEO_DISPLAY_TV 2
10#define ACPI_VIDEO_DISPLAY_DVI 3 12#define ACPI_VIDEO_DISPLAY_DVI 3
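
Making ACPI_VIDEO_CLASS available from the shared header lets code outside drivers/acpi, such as the opregion notifier earlier in this diff, match video bus events by class string. A typical consumer pattern; handle_video_event() is a hypothetical handler:

#include <linux/string.h>
#include <acpi/acpi_bus.h>
#include <acpi/video.h>

static void filter_event(struct acpi_bus_event *event)
{
	/* compare against the now-shared class string */
	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) == 0)
		handle_video_event(event);	/* hypothetical */
}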
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 11be48e0d168..6216115c7789 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -324,11 +324,16 @@ static inline unsigned int cpufreq_get(unsigned int cpu)
324/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ 324/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
325#ifdef CONFIG_CPU_FREQ 325#ifdef CONFIG_CPU_FREQ
326unsigned int cpufreq_quick_get(unsigned int cpu); 326unsigned int cpufreq_quick_get(unsigned int cpu);
327unsigned int cpufreq_quick_get_max(unsigned int cpu);
327#else 328#else
328static inline unsigned int cpufreq_quick_get(unsigned int cpu) 329static inline unsigned int cpufreq_quick_get(unsigned int cpu)
329{ 330{
330 return 0; 331 return 0;
331} 332}
333static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
334{
335 return 0;
336}
332#endif 337#endif
333 338
334 339
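
As with cpufreq_quick_get(), the new declaration is paired with a CONFIG_CPU_FREQ=n stub that returns 0, so callers need no #ifdef of their own but must treat 0 as "unknown". A minimal caller sketch under that assumption:

#include <linux/cpufreq.h>
#include <linux/printk.h>

static void report_max_freq(unsigned int cpu)
{
	unsigned int khz = cpufreq_quick_get_max(cpu);

	if (khz)	/* 0: cpufreq disabled or no policy for this CPU */
		pr_info("cpu%u max frequency: %u kHz\n", cpu, khz);
}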