Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c        |  24
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c        |  66
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        |  29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        | 172
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c        |  58
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h        |  56
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c    |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h      |  36
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c   | 586
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c        | 143
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h       |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c      |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c      | 331
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c   |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c      |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c        |  12
17 files changed, 988 insertions(+), 577 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2305a1234f1e..f19ffe87af3c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -34,12 +34,15 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include <linux/pci.h>
 #include <linux/vgaarb.h>
 #include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
 
+extern int intel_max_stolen; /* from AGP driver */
+
 /**
  * Sets up the hardware status page for devices that need a physical address
  * in the register.
@@ -1256,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 		drm_mm_put_block(compressed_fb);
 	}
 
-	if (!IS_GM45(dev)) {
+	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
 		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
 						    4096, 0);
 		if (!compressed_llb) {
@@ -1282,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
 	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
-
-	if (IS_GM45(dev)) {
+	if (IS_IRONLAKE_M(dev))
+		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+	else if (IS_GM45(dev)) {
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
 	} else {
 		I915_WRITE(FBC_CFB_BASE, cfb_base);
@@ -1291,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 		dev_priv->compressed_llb = compressed_llb;
 	}
 
-	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
 		  ll_base, size >> 20);
 }
 
@@ -1354,7 +1358,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	int fb_bar = IS_I9XX(dev) ? 2 : 0;
 	int ret = 0;
 
-	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
+	dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
 		0xff000000;
 
 	/* Basic memrange allocator for stolen space (aka vram) */
@@ -2063,8 +2067,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	/* Add register map (needed for suspend/resume) */
 	mmio_bar = IS_I9XX(dev) ? 0 : 1;
-	base = drm_get_resource_start(dev, mmio_bar);
-	size = drm_get_resource_len(dev, mmio_bar);
+	base = pci_resource_start(dev->pdev, mmio_bar);
+	size = pci_resource_len(dev->pdev, mmio_bar);
 
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
@@ -2104,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_iomapfree;
 
+	if (prealloc_size > intel_max_stolen) {
+		DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+			 prealloc_size >> 20, intel_max_stolen >> 20);
+		prealloc_size = intel_max_stolen;
+	}
+
 	dev_priv->wq = create_singlethread_workqueue("i915");
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
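
The i915_dma.c hunks above swap the old drm_get_resource_start()/drm_get_resource_len() wrappers for the generic PCI accessors. A minimal sketch of that pattern, assuming a made-up map_mmio_bar() helper (illustrative only, not code from this commit):

	#include <linux/pci.h>
	#include <linux/io.h>

	/* Hypothetical helper: read a BAR's base and length with the
	 * generic PCI accessors and map the register window. */
	static void __iomem *map_mmio_bar(struct pci_dev *pdev, int bar)
	{
		resource_size_t base = pci_resource_start(pdev, bar);
		resource_size_t size = pci_resource_len(pdev, bar);

		return ioremap(base, size);	/* caller checks for NULL */
	}
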
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 423dc90c1e20..5044f653e8ea 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = {
 };
 
 static const struct intel_device_info intel_i965g_info = {
-	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+	.is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
-	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
@@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = {
 };
 
 static const struct intel_device_info intel_gm45_info = {
-	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1,
 	.has_hotplug = 1,
@@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
-	.need_gfx_hws = 1, .has_rc6 = 1,
+	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_hotplug = 1,
 };
 
@@ -148,33 +148,33 @@ static const struct intel_device_info intel_sandybridge_m_info = {
 	.has_hotplug = 1, .is_gen6 = 1,
 };
 
-static const struct pci_device_id pciidlist[] = {
-	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
-	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
-	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+static const struct pci_device_id pciidlist[] = {		/* aka */
+	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),		/* I830_M */
+	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),		/* 845_G */
+	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),		/* I855_GM */
 	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
-	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
-	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
-	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
-	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
-	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
-	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
-	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
-	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
-	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
-	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
-	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
-	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
-	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
-	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
-	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),		/* I865_G */
+	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),		/* I915_G */
+	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),		/* E7221_G */
+	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),		/* I915_GM */
+	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),		/* I945_G */
+	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),		/* I945_GM */
+	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),		/* I945_GME */
+	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),		/* I946_GZ */
+	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),		/* G35_G */
+	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),		/* I965_Q */
+	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),		/* I965_G */
+	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),		/* Q35_G */
+	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),		/* G33_G */
+	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),		/* Q33_G */
+	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),		/* I965_GM */
+	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),		/* I965_GME */
+	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),		/* GM45_G */
+	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),		/* IGD_E_G */
+	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),		/* Q45_G */
+	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),		/* G45_G */
+	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),		/* G41_G */
+	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),		/* B43_G */
 	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
 	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+	i915_gem_retire_requests(dev);
 
 	if (need_display)
 		i915_save_display(dev);
@@ -413,7 +413,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	return drm_get_dev(pdev, ent, &driver);
+	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
 static void
@@ -482,7 +482,7 @@ static int i915_pm_poweroff(struct device *dev)
 	return i915_drm_freeze(drm_dev);
 }
 
-const struct dev_pm_ops i915_pm_ops = {
+static const struct dev_pm_ops i915_pm_ops = {
 	.suspend = i915_pm_suspend,
 	.resume = i915_pm_resume,
 	.freeze = i915_pm_freeze,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2e1744d37ad5..906663b9929e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -176,7 +176,8 @@ struct drm_i915_display_funcs {
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	void (*update_wm)(struct drm_device *dev, int planea_clock,
-			  int planeb_clock, int sr_hdisplay, int pixel_size);
+			  int planeb_clock, int sr_hdisplay, int sr_htotal,
+			  int pixel_size);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
@@ -200,6 +201,8 @@ struct intel_device_info {
 	u8 need_gfx_hws : 1;
 	u8 is_g4x : 1;
 	u8 is_pineview : 1;
+	u8 is_broadwater : 1;
+	u8 is_crestline : 1;
 	u8 is_ironlake : 1;
 	u8 is_gen6 : 1;
 	u8 has_fbc : 1;
@@ -288,6 +291,8 @@ typedef struct drm_i915_private {
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	uint32_t last_acthd;
+	uint32_t last_instdone;
+	uint32_t last_instdone1;
 
 	struct drm_mm vram;
 
@@ -547,6 +552,14 @@ typedef struct drm_i915_private {
 	struct list_head fence_list;
 
 	/**
+	 * List of objects currently pending being freed.
+	 *
+	 * These objects are no longer in use, but due to a signal
+	 * we were prevented from freeing them at the appointed time.
+	 */
+	struct list_head deferred_free_list;
+
+	/**
 	 * We leave the user IRQ off as much as possible,
 	 * but this means that requests will finish and never
 	 * be retired once the system goes idle. Set a timer to
@@ -677,7 +690,7 @@ struct drm_i915_gem_object {
 	 *
 	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
 	 */
-	int fence_reg : 5;
+	signed int fence_reg : 5;
 
 	/**
 	 * Used for checking the object doesn't appear more than once
@@ -713,7 +726,7 @@ struct drm_i915_gem_object {
 	 *
 	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
 	 * bits with absolutely no headroom. So use 4 bits. */
-	int pin_count : 4;
+	unsigned int pin_count : 4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
 	/** AGP memory structure for our GTT binding. */
@@ -743,7 +756,7 @@ struct drm_i915_gem_object {
 	uint32_t stride;
 
 	/** Record of address bit 17 of each page at last unbind. */
-	long *bit_17;
+	unsigned long *bit_17;
 
 	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
 	uint32_t agp_type;
@@ -955,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -986,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -1046,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void ironlake_disable_fbc(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
@@ -1135,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define IS_I945GM(dev)	(INTEL_INFO(dev)->is_i945gm)
 #define IS_I965G(dev)	(INTEL_INFO(dev)->is_i965g)
 #define IS_I965GM(dev)	(INTEL_INFO(dev)->is_i965gm)
+#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
 #define IS_GM45(dev)	((dev)->pci_device == 0x2A42)
 #define IS_G4X(dev)	(INTEL_INFO(dev)->is_g4x)
 #define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
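
The bit-field changes above (fence_reg, pin_count, bit_17) are correctness fixes: C leaves the signedness of a plain "int" bit-field implementation-defined, so "int fence_reg : 5" may be unable to store the -1 used for FENCE_REG_NONE. A stand-alone sketch of the pitfall (not driver code):

	#include <stdio.h>

	struct fences {
		int plain : 5;			/* signedness is implementation-defined */
		signed int portable : 5;	/* guaranteed to represent -1 */
		unsigned int count : 4;		/* counters should be explicitly unsigned */
	};

	int main(void)
	{
		struct fences f = { .plain = -1, .portable = -1, .count = 15 };

		/* Where plain int bit-fields are unsigned, f.plain prints 31,
		 * not the -1 that FENCE_REG_NONE-style sentinels rely on. */
		printf("plain=%d portable=%d count=%u\n",
		       f.plain, f.portable, f.count);
		return 0;
	}
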
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5aa747fc25a9..2a4ed7ca8b4e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,7 +35,7 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
-static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
+static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
@@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -ENOMEM;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	drm_gem_object_handle_unreference_unlocked(obj);
-
+	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
 
@@ -496,10 +496,10 @@ fast_user_write(struct io_mapping *mapping,
 	char *vaddr_atomic;
 	unsigned long unwritten;
 
-	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
+	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
 						      user_data, length);
-	io_mapping_unmap_atomic(vaddr_atomic);
+	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
 	if (unwritten)
 		return -EFAULT;
 	return 0;
@@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev,
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-void
-i915_gem_retire_requests(struct drm_device *dev,
+static void
+i915_gem_retire_requests_ring(struct drm_device *dev,
 		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
@@ -1751,6 +1751,30 @@ i915_gem_retire_requests(struct drm_device *dev,
 }
 
 void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+		struct drm_i915_gem_object *obj_priv, *tmp;
+
+		/* We must be careful that during unbind() we do not
+		 * accidentally infinitely recurse into retire requests.
+		 * Currently:
+		 *     retire -> free -> unbind -> wait -> retire_ring
+		 */
+		list_for_each_entry_safe(obj_priv, tmp,
+					 &dev_priv->mm.deferred_free_list,
+					 list)
+			i915_gem_free_object_tail(&obj_priv->base);
+	}
+
+	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+}
+
+void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
@@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	dev = dev_priv->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-	if (HAS_BSD(dev))
-		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+	i915_gem_retire_requests(dev);
 
 	if (!dev_priv->mm.suspended &&
 	    (!list_empty(&dev_priv->render_ring.request_list) ||
@@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests(dev, ring);
+		i915_gem_retire_requests_ring(dev, ring);
 
 	return ret;
 }
@@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	 * before we unbind.
 	 */
 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("set_domain failed: %d\n", ret);
+	if (ret == -ERESTARTSYS)
 		return ret;
-	}
+	/* Continue on if we fail due to EIO, the GPU is hung so we
+	 * should be safe and we need to cleanup or else we might
+	 * cause memory corruption through use-after-free.
+	 */
 
 	BUG_ON(obj_priv->active);
 
@@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 
 	trace_i915_gem_object_unbind(obj);
 
-	return 0;
+	return ret;
 }
 
 static struct drm_gem_object *
@@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
 	for (;;) {
-		i915_gem_retire_requests(dev, render_ring);
-
-		if (HAS_BSD(dev))
-			i915_gem_retire_requests(dev, bsd_ring);
+		i915_gem_retire_requests(dev);
 
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
@@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 	if (!IS_I965G(dev)) {
 		int ret;
 
-		i915_gem_object_flush_gpu_write_domain(obj);
+		ret = i915_gem_object_flush_gpu_write_domain(obj);
+		if (ret != 0)
+			return ret;
+
 		ret = i915_gem_object_wait_rendering(obj);
 		if (ret != 0)
 			return ret;
@@ -2634,10 +2656,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	if (free_space != NULL) {
 		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
 						       alignment);
-		if (obj_priv->gtt_space != NULL) {
-			obj_priv->gtt_space->private = obj;
+		if (obj_priv->gtt_space != NULL)
 			obj_priv->gtt_offset = obj_priv->gtt_space->start;
-		}
 	}
 	if (obj_priv->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
@@ -2733,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
@@ -2741,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
-		return;
+		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	(void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
-	BUG_ON(obj->write_domain);
+	if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
+		return -ENOMEM;
 
 	trace_i915_gem_object_change_domain(obj,
 					    obj->read_domains,
 					    old_write_domain);
+	return 0;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2795,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 					    old_write_domain);
 }
 
-void
+int
 i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 {
+	int ret = 0;
+
 	switch (obj->write_domain) {
 	case I915_GEM_DOMAIN_GTT:
 		i915_gem_object_flush_gtt_write_domain(obj);
@@ -2806,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 		i915_gem_object_flush_cpu_write_domain(obj);
 		break;
 	default:
-		i915_gem_object_flush_gpu_write_domain(obj);
+		ret = i915_gem_object_flush_gpu_write_domain(obj);
 		break;
 	}
+
+	return ret;
 }
 
 /**
@@ -2828,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret != 0)
+		return ret;
+
 	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
 	if (ret != 0)
@@ -2878,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
 
 	/* Wait on any GPU rendering and flushing to occur. */
 	if (obj_priv->active) {
@@ -2926,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
 	if (ret != 0)
@@ -3216,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	if (offset == 0 && size == obj->size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
 	if (ret != 0)
@@ -3451,7 +3487,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 		reloc_offset = obj_priv->gtt_offset + reloc->offset;
 		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
 						      (reloc_offset &
-						       ~(PAGE_SIZE - 1)));
+						       ~(PAGE_SIZE - 1)),
+						      KM_USER0);
 		reloc_entry = (uint32_t __iomem *)(reloc_page +
 						   (reloc_offset & (PAGE_SIZE - 1)));
 		reloc_val = target_obj_priv->gtt_offset + reloc->delta;
@@ -3462,7 +3499,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			  readl(reloc_entry), reloc_val);
 #endif
 		writel(reloc_val, reloc_entry);
-		io_mapping_unmap_atomic(reloc_page);
+		io_mapping_unmap_atomic(reloc_page, KM_USER0);
 
 		/* The updated presumed offset for this entry will be
 		 * copied back out to the user.
@@ -4313,7 +4350,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
@@ -4328,10 +4364,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * actually unmasked, and our working set ends up being larger than
 	 * required.
 	 */
-	i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-	if (HAS_BSD(dev))
-		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+	i915_gem_retire_requests(dev);
 
 	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
@@ -4441,20 +4474,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 	return 0;
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	int ret;
 
-	trace_i915_gem_object_destroy(obj);
-
-	while (obj_priv->pin_count > 0)
-		i915_gem_object_unpin(obj);
-
-	if (obj_priv->phys_obj)
-		i915_gem_detach_phys_object(dev, obj);
-
-	i915_gem_object_unbind(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == -ERESTARTSYS) {
+		list_move(&obj_priv->list,
+			  &dev_priv->mm.deferred_free_list);
+		return;
+	}
 
 	if (obj_priv->mmap_offset)
 		i915_gem_free_mmap_offset(obj);
@@ -4466,6 +4498,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	kfree(obj_priv);
 }
 
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+	trace_i915_gem_object_destroy(obj);
+
+	while (obj_priv->pin_count > 0)
+		i915_gem_object_unpin(obj);
+
+	if (obj_priv->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
+	i915_gem_free_object_tail(obj);
+}
+
 /** Unbinds all inactive objects. */
 static int
 i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4689,9 +4737,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
-	drm_irq_install(dev);
+	ret = drm_irq_install(dev);
+	if (ret)
+		goto cleanup_ringbuffer;
 
 	return 0;
+
+cleanup_ringbuffer:
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_cleanup_ringbuffer(dev);
+	dev_priv->mm.suspended = 1;
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
 }
 
 int
@@ -4729,6 +4787,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
 	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
 	if (HAS_BSD(dev)) {
@@ -5027,10 +5086,7 @@ rescan:
 			continue;
 
 		spin_unlock(&shrink_list_lock);
-		i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
-		if (HAS_BSD(dev))
-			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+		i915_gem_retire_requests(dev);
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
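
The deferred_free_list machinery above exists to break a re-entrancy chain (retire -> free -> unbind -> wait -> retire_ring) and to cope with unbind failing on a signal. A minimal stand-alone sketch of the same deferral pattern, with invented names (not the kernel code):

	#include <stdbool.h>
	#include <stddef.h>

	struct object {
		struct object *next;
		bool busy;	/* stands in for unbind returning -ERESTARTSYS */
	};

	static struct object *deferred_free_list;

	static void free_object_tail(struct object *obj)
	{
		if (obj->busy) {
			/* Cannot tear down right now: park the object
			 * instead of recursing back into the retire path. */
			obj->next = deferred_free_list;
			deferred_free_list = obj;
			return;
		}
		/* ...release backing storage here... */
	}

	static void retire_requests(void)
	{
		/* Drain objects whose teardown was deferred; by the time
		 * this runs again their blocking condition has cleared. */
		while (deferred_free_list) {
			struct object *obj = deferred_free_list;

			deferred_free_list = obj->next;
			obj->busy = false;
			free_object_tail(obj);
		}
	}
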
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4b7c49d4257d..155719e4d16f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		i915_gem_release_mmap(obj);
 
 		if (ret != 0) {
-			WARN(ret != -ERESTARTSYS,
-			     "failed to reset object for tiling switch");
 			args->tiling_mode = obj_priv->tiling_mode;
 			args->stride = obj_priv->stride;
 			goto err;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dba53d4b9fb3..85785a8844ed 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev)
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
 	else {
 		i915_enable_pipestat(dev_priv, 1,
-				     I915_LEGACY_BLC_EVENT_ENABLE);
+				     PIPE_LEGACY_BLC_EVENT_ENABLE);
 		if (IS_I965G(dev))
 			i915_enable_pipestat(dev_priv, 0,
-					     I915_LEGACY_BLC_EVENT_ENABLE);
+					     PIPE_LEGACY_BLC_EVENT_ENABLE);
 	}
 }
 
180 180
@@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
842 u32 iir, new_iir; 842 u32 iir, new_iir;
843 u32 pipea_stats, pipeb_stats; 843 u32 pipea_stats, pipeb_stats;
844 u32 vblank_status; 844 u32 vblank_status;
845 u32 vblank_enable;
846 int vblank = 0; 845 int vblank = 0;
847 unsigned long irqflags; 846 unsigned long irqflags;
848 int irq_received; 847 int irq_received;
@@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
 	iir = I915_READ(IIR);
 
-	if (IS_I965G(dev)) {
-		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
-		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
-	} else {
-		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
-		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
-	}
+	if (IS_I965G(dev))
+		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+	else
+		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
 
 	for (;;) {
 		irq_received = iir != 0;
@@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			intel_finish_page_flip(dev, 1);
 		}
 
-		if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
-		    (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
 		    (iir & I915_ASLE_INTERRUPT))
 			opregion_asle_intr(dev);
 
@@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t acthd;
+	uint32_t acthd, instdone, instdone1;
 
 	/* No reset support on this chip yet. */
 	if (IS_GEN6(dev))
 		return;
 
-	if (!IS_I965G(dev))
+	if (!IS_I965G(dev)) {
 		acthd = I915_READ(ACTHD);
-	else
+		instdone = I915_READ(INSTDONE);
+		instdone1 = 0;
+	} else {
 		acthd = I915_READ(ACTHD_I965);
+		instdone = I915_READ(INSTDONE_I965);
+		instdone1 = I915_READ(INSTDONE1);
+	}
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
 	if (list_empty(&dev_priv->render_ring.request_list) ||
@@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data)
 		return;
 	}
 
-	if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
-		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-		i915_handle_error(dev, true);
-		return;
-	}
+	if (dev_priv->last_acthd == acthd &&
+	    dev_priv->last_instdone == instdone &&
+	    dev_priv->last_instdone1 == instdone1) {
+		if (dev_priv->hangcheck_count++ > 1) {
+			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+			i915_handle_error(dev, true);
+			return;
+		}
+	} else {
+		dev_priv->hangcheck_count = 0;
+
+		dev_priv->last_acthd = acthd;
+		dev_priv->last_instdone = instdone;
+		dev_priv->last_instdone1 = instdone1;
+	}
 
 	/* Reset timer in case chip hangs without another request being added */
 	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-
-	if (acthd != dev_priv->last_acthd)
-		dev_priv->hangcheck_count = 0;
-	else
-		dev_priv->hangcheck_count++;
-
-	dev_priv->last_acthd = acthd;
 }
 
 /* drm_dma.h hooks
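
The reworked hangcheck above only fires after ACTHD and both INSTDONE registers have been identical for more than one consecutive timer tick, rather than after a single stale ACTHD sample. The logic in isolation, as a stand-alone sketch with invented names:

	#include <stdbool.h>
	#include <stdint.h>

	struct hang_state {
		uint32_t last[3];	/* acthd, instdone, instdone1 */
		int count;		/* consecutive ticks without progress */
	};

	/* Returns true when the timer handler should declare a hang. */
	static bool hangcheck_tick(struct hang_state *hs, const uint32_t sample[3])
	{
		if (hs->last[0] == sample[0] &&
		    hs->last[1] == sample[1] &&
		    hs->last[2] == sample[2])
			return hs->count++ > 1;	/* arm first, fire on the next stale tick */

		/* Some counter moved: remember it and restart the countdown. */
		hs->count = 0;
		hs->last[0] = sample[0];
		hs->last[1] = sample[1];
		hs->last[2] = sample[2];
		return false;
	}
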
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cf41c672defe..281db6e5403a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,7 +442,7 @@
 #define GEN6_RENDER_IMR				0x20a8
 #define   GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT		(1 << 8)
 #define   GEN6_RENDER_PPGTT_PAGE_FAULT			(1 << 7)
-#define   GEN6_RENDER TIMEOUT_COUNTER_EXPIRED		(1 << 6)
+#define   GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED		(1 << 6)
 #define   GEN6_RENDER_L3_PARITY_ERROR			(1 << 5)
 #define   GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT	(1 << 4)
 #define   GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR	(1 << 3)
@@ -530,6 +530,21 @@
 #define DPFC_CHICKEN		0x3224
 #define   DPFC_HT_MODIFY	(1<<31)
 
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE	0x43200
+#define ILK_DPFC_CONTROL	0x43208
+/* Bits 28:8 are reserved */
+#define   DPFC_RESERVED		(0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL	0x4320c
+#define ILK_DPFC_STATUS		0x43210
+#define ILK_DPFC_FENCE_YOFF	0x43218
+#define ILK_DPFC_CHICKEN	0x43224
+#define ILK_FBC_RT_BASE		0x2128
+#define   ILK_FBC_RT_VALID	(1<<0)
+
+#define ILK_DISPLAY_CHICKEN1	0x42000
+#define   ILK_FBCQ_DIS		(1<<22)
+
 /*
  * GPIO regs
  */
@@ -595,32 +610,6 @@
 #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
 
-#define I915_FIFO_UNDERRUN_STATUS		(1UL<<31)
-#define I915_CRC_ERROR_ENABLE			(1UL<<29)
-#define I915_CRC_DONE_ENABLE			(1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE			(1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
-#define I915_DPST_EVENT_ENABLE			(1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE		(1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
-#define I915_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18)	/* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE		(1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS		(1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS		(1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS		(1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS		(1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
-#define I915_DPST_EVENT_STATUS			(1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS		(1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS		(1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS	(1UL<<4)
-#define I915_START_VBLANK_INTERRUPT_STATUS	(1UL<<2)	/* 965 or later */
-#define I915_VBLANK_INTERRUPT_STATUS		(1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS		(1UL<<0)
-
 #define SRX_INDEX		0x3c4
 #define SRX_DATA		0x3c5
 #define SR01			1
@@ -2166,7 +2155,8 @@
 #define I830_FIFO_LINE_SIZE	32
 
 #define G4X_FIFO_SIZE	127
-#define I945_FIFO_SIZE	127 /* 945 & 965 */
+#define I965_FIFO_SIZE	512
+#define I945_FIFO_SIZE	127
 #define I915_FIFO_SIZE	95
 #define I855GM_FIFO_SIZE	127 /* In cachelines */
 #define I830_FIFO_SIZE	95
@@ -2185,6 +2175,9 @@
 #define PINEVIEW_CURSOR_DFT_WM	0
 #define PINEVIEW_CURSOR_GUARD_WM	5
 
+#define I965_CURSOR_FIFO	64
+#define I965_CURSOR_MAX_WM	32
+#define I965_CURSOR_DFT_WM	8
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK		0x45100
@@ -2212,6 +2205,9 @@
 #define ILK_DISPLAY_FIFO	128
 #define ILK_DISPLAY_MAXWM	64
 #define ILK_DISPLAY_DFTWM	8
+#define ILK_CURSOR_FIFO		32
+#define ILK_CURSOR_MAXWM	16
+#define ILK_CURSOR_DFTWM	8
 
 #define ILK_DISPLAY_SR_FIFO	512
 #define ILK_DISPLAY_MAX_SRWM	0x1ff
@@ -2510,6 +2506,10 @@
 #define   ILK_VSDPFD_FULL	(1<<21)
 #define ILK_DSPCLK_GATE	0x42020
 #define   ILK_DPARB_CLK_GATE	(1<<5)
+/* According to the spec, bits 7/8/9 of 0x42020 must be set to enable FBC */
+#define   ILK_CLK_FBC		(1<<7)
+#define   ILK_DPFC_DIS1		(1<<8)
+#define   ILK_DPFC_DIS2		(1<<9)
 
 #define DISP_ARB_CTL	0x45000
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a5800fba6e..6e2025274db5 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev)
 
 	/* Only save FBC state on the platform that supports FBC */
 	if (I915_HAS_FBC(dev)) {
-		if (IS_GM45(dev)) {
+		if (IS_IRONLAKE_M(dev)) {
+			dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+		} else if (IS_GM45(dev)) {
 			dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
 		} else {
 			dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
@@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev)
 
 	/* Only restore FBC info on the platform that supports FBC */
 	if (I915_HAS_FBC(dev)) {
-		if (IS_GM45(dev)) {
+		if (IS_IRONLAKE_M(dev)) {
+			ironlake_disable_fbc(dev);
+			I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+		} else if (IS_GM45(dev)) {
 			g4x_disable_fbc(dev);
 			I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
 		} else {
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fab21760dd57..fea97a21cc14 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -262,6 +262,42 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
 	    TP_ARGS(dev)
 );
 
+TRACE_EVENT(i915_flip_request,
+	    TP_PROTO(int plane, struct drm_gem_object *obj),
+
+	    TP_ARGS(plane, obj),
+
+	    TP_STRUCT__entry(
+		    __field(int, plane)
+		    __field(struct drm_gem_object *, obj)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->plane = plane;
+		    __entry->obj = obj;
+		    ),
+
+	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT(i915_flip_complete,
+	    TP_PROTO(int plane, struct drm_gem_object *obj),
+
+	    TP_ARGS(plane, obj),
+
+	    TP_STRUCT__entry(
+		    __field(int, plane)
+		    __field(struct drm_gem_object *, obj)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->plane = plane;
+		    __entry->obj = obj;
+		    ),
+
+	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
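
Each TRACE_EVENT() above generates an inline trace_<name>() helper that compiles to a no-op until the event is enabled through tracefs. A hedged sketch of call sites for the two flip tracepoints (the surrounding functions are illustrative, not taken from this commit):

	#include "i915_trace.h"

	static void example_queue_flip(int plane, struct drm_gem_object *obj)
	{
		trace_i915_flip_request(plane, obj);	/* flip handed to hardware */
	}

	static void example_flip_done_irq(int plane, struct drm_gem_object *obj)
	{
		trace_i915_flip_complete(plane, obj);	/* flip latched at vblank */
	}
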
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 714bf539918b..1e5e0d379fa9 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -33,6 +33,7 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_drm.h" 34#include "i915_drm.h"
35#include "i915_drv.h" 35#include "i915_drv.h"
36#include "i915_trace.h"
36#include "drm_dp_helper.h" 37#include "drm_dp_helper.h"
37 38
38#include "drm_crtc_helper.h" 39#include "drm_crtc_helper.h"
@@ -42,6 +43,7 @@
 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
 static void intel_update_watermarks(struct drm_device *dev);
 static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc);
 
 typedef struct {
     /* given values */
@@ -322,6 +324,9 @@ struct intel_limit {
 #define IRONLAKE_DP_P1_MIN		1
 #define IRONLAKE_DP_P1_MAX		2
 
+/* FDI */
+#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */
+
 static bool
 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
 		    int target, int refclk, intel_clock_t *best_clock);
@@ -1125,6 +1130,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
1125 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 1130 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1126} 1131}
1127 1132
1133static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1134{
1135 struct drm_device *dev = crtc->dev;
1136 struct drm_i915_private *dev_priv = dev->dev_private;
1137 struct drm_framebuffer *fb = crtc->fb;
1138 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1139 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1141 int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
1142 DPFC_CTL_PLANEB;
1143 unsigned long stall_watermark = 200;
1144 u32 dpfc_ctl;
1145
1146 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1147 dev_priv->cfb_fence = obj_priv->fence_reg;
1148 dev_priv->cfb_plane = intel_crtc->plane;
1149
1150 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1151 dpfc_ctl &= DPFC_RESERVED;
1152 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1153 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1154 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
1155 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1156 } else {
1157 I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
1158 }
1159
1160 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1161 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1162 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1163 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1164 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1165 I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
1166 /* enable it... */
1167 I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
1168 DPFC_CTL_EN);
1169
1170 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1171}
1172
1173void ironlake_disable_fbc(struct drm_device *dev)
1174{
1175 struct drm_i915_private *dev_priv = dev->dev_private;
1176 u32 dpfc_ctl;
1177
1178 /* Disable compression */
1179 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1180 dpfc_ctl &= ~DPFC_CTL_EN;
1181 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1182 intel_wait_for_vblank(dev);
1183
1184 DRM_DEBUG_KMS("disabled FBC\n");
1185}
1186
1187static bool ironlake_fbc_enabled(struct drm_device *dev)
1188{
1189 struct drm_i915_private *dev_priv = dev->dev_private;
1190
1191 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1192}
1193
1128bool intel_fbc_enabled(struct drm_device *dev) 1194bool intel_fbc_enabled(struct drm_device *dev)
1129{ 1195{
1130 struct drm_i915_private *dev_priv = dev->dev_private; 1196 struct drm_i915_private *dev_priv = dev->dev_private;
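
[editor's note] The Ironlake FBC hooks above follow the g4x shape: select the plane and compression limit in ILK_DPFC_CONTROL, point ILK_FBC_RT_BASE at the scanout object, and set the enable bit last. The pitch bookkeeping stores 64-byte units minus one (the / 64 - 1 above); a minimal sketch of that field value for a hypothetical 1920-wide 32 bpp scanout:

    #include <stdio.h>

    int main(void)
    {
        int pitch_bytes = 1920 * 4; /* hypothetical 32 bpp, 1920 px wide */

        /* the stored pitch is 64-byte units minus one, as above */
        printf("cfb_pitch field: %d\n", pitch_bytes / 64 - 1); /* 119 */
        return 0;
    }
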
@@ -1286,7 +1352,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1286 1352
1287 switch (obj_priv->tiling_mode) { 1353 switch (obj_priv->tiling_mode) {
1288 case I915_TILING_NONE: 1354 case I915_TILING_NONE:
1289 alignment = 64 * 1024; 1355 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1356 alignment = 128 * 1024;
1357 else if (IS_I965G(dev))
1358 alignment = 4 * 1024;
1359 else
1360 alignment = 64 * 1024;
1290 break; 1361 break;
1291 case I915_TILING_X: 1362 case I915_TILING_X:
1292 /* pin() will align the object as required by fence */ 1363 /* pin() will align the object as required by fence */
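
[editor's note] The untiled-scanout alignment now varies by generation: 128 KiB on Broadwater/Crestline, a single 4 KiB page on other 965-class parts, and 64 KiB elsewhere. Whatever the value, pin() applies it as the usual power-of-two round-up; a minimal standalone sketch (round_up_pot is an illustrative helper, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* round x up to the next multiple of the power-of-two a */
    static uint64_t round_up_pot(uint64_t x, uint64_t a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        /* e.g. an object at GTT offset 0x12345 pinned with 64 KiB alignment */
        printf("0x%llx\n",
               (unsigned long long)round_up_pot(0x12345, 64 * 1024)); /* 0x20000 */
        return 0;
    }
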
@@ -1653,6 +1724,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1653 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; 1724 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1654 u32 temp, tries = 0; 1725 u32 temp, tries = 0;
1655 1726
1727 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
1728 for train result */
1729 temp = I915_READ(fdi_rx_imr_reg);
1730 temp &= ~FDI_RX_SYMBOL_LOCK;
1731 temp &= ~FDI_RX_BIT_LOCK;
1732 I915_WRITE(fdi_rx_imr_reg, temp);
1733 I915_READ(fdi_rx_imr_reg);
1734 udelay(150);
1735
1656 /* enable CPU FDI TX and PCH FDI RX */ 1736 /* enable CPU FDI TX and PCH FDI RX */
1657 temp = I915_READ(fdi_tx_reg); 1737 temp = I915_READ(fdi_tx_reg);
1658 temp |= FDI_TX_ENABLE; 1738 temp |= FDI_TX_ENABLE;
@@ -1670,16 +1750,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1670 I915_READ(fdi_rx_reg); 1750 I915_READ(fdi_rx_reg);
1671 udelay(150); 1751 udelay(150);
1672 1752
1673 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit 1753 for (tries = 0; tries < 5; tries++) {
1674 for train result */
1675 temp = I915_READ(fdi_rx_imr_reg);
1676 temp &= ~FDI_RX_SYMBOL_LOCK;
1677 temp &= ~FDI_RX_BIT_LOCK;
1678 I915_WRITE(fdi_rx_imr_reg, temp);
1679 I915_READ(fdi_rx_imr_reg);
1680 udelay(150);
1681
1682 for (;;) {
1683 temp = I915_READ(fdi_rx_iir_reg); 1754 temp = I915_READ(fdi_rx_iir_reg);
1684 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1755 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1685 1756
@@ -1689,14 +1760,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1689 temp | FDI_RX_BIT_LOCK); 1760 temp | FDI_RX_BIT_LOCK);
1690 break; 1761 break;
1691 } 1762 }
1692
1693 tries++;
1694
1695 if (tries > 5) {
1696 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1697 break;
1698 }
1699 } 1763 }
1764 if (tries == 5)
1765 DRM_DEBUG_KMS("FDI train 1 fail!\n");
1700 1766
1701 /* Train 2 */ 1767 /* Train 2 */
1702 temp = I915_READ(fdi_tx_reg); 1768 temp = I915_READ(fdi_tx_reg);
@@ -1712,7 +1778,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1712 1778
1713 tries = 0; 1779 tries = 0;
1714 1780
1715 for (;;) { 1781 for (tries = 0; tries < 5; tries++) {
1716 temp = I915_READ(fdi_rx_iir_reg); 1782 temp = I915_READ(fdi_rx_iir_reg);
1717 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 1783 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1718 1784
@@ -1722,14 +1788,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1722 DRM_DEBUG_KMS("FDI train 2 done.\n"); 1788 DRM_DEBUG_KMS("FDI train 2 done.\n");
1723 break; 1789 break;
1724 } 1790 }
1725
1726 tries++;
1727
1728 if (tries > 5) {
1729 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1730 break;
1731 }
1732 } 1791 }
1792 if (tries == 5)
1793 DRM_DEBUG_KMS("FDI train 2 fail!\n");
1733 1794
1734 DRM_DEBUG_KMS("FDI train done\n"); 1795 DRM_DEBUG_KMS("FDI train done\n");
1735} 1796}
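
[editor's note] Both training phases now use the same bounded-poll shape: a counted loop that breaks on success, with the timeout diagnosed by testing the counter after the loop rather than inside it. The pattern in isolation, with a stub standing in for the FDI_RX_IIR poll:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_TRIES 5

    /* stub for polling FDI_RX_IIR for the bit-lock flag */
    static bool poll_bit_lock(int try)
    {
        return try == 3; /* pretend the link locks on the 4th attempt */
    }

    int main(void)
    {
        int tries;

        for (tries = 0; tries < MAX_TRIES; tries++)
            if (poll_bit_lock(tries))
                break;

        if (tries == MAX_TRIES)
            printf("FDI train fail!\n");
        else
            printf("locked after %d tries\n", tries + 1);
        return 0;
    }
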
@@ -1754,6 +1815,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1754 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR; 1815 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
1755 u32 temp, i; 1816 u32 temp, i;
1756 1817
1818 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
1819 for train result */
1820 temp = I915_READ(fdi_rx_imr_reg);
1821 temp &= ~FDI_RX_SYMBOL_LOCK;
1822 temp &= ~FDI_RX_BIT_LOCK;
1823 I915_WRITE(fdi_rx_imr_reg, temp);
1824 I915_READ(fdi_rx_imr_reg);
1825 udelay(150);
1826
1757 /* enable CPU FDI TX and PCH FDI RX */ 1827 /* enable CPU FDI TX and PCH FDI RX */
1758 temp = I915_READ(fdi_tx_reg); 1828 temp = I915_READ(fdi_tx_reg);
1759 temp |= FDI_TX_ENABLE; 1829 temp |= FDI_TX_ENABLE;
@@ -1779,15 +1849,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
1779 I915_READ(fdi_rx_reg); 1849 I915_READ(fdi_rx_reg);
1780 udelay(150); 1850 udelay(150);
1781 1851
1782 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
1783 for train result */
1784 temp = I915_READ(fdi_rx_imr_reg);
1785 temp &= ~FDI_RX_SYMBOL_LOCK;
1786 temp &= ~FDI_RX_BIT_LOCK;
1787 I915_WRITE(fdi_rx_imr_reg, temp);
1788 I915_READ(fdi_rx_imr_reg);
1789 udelay(150);
1790
1791 for (i = 0; i < 4; i++ ) { 1852 for (i = 0; i < 4; i++ ) {
1792 temp = I915_READ(fdi_tx_reg); 1853 temp = I915_READ(fdi_tx_reg);
1793 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 1854 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -1942,7 +2003,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1942 } 2003 }
1943 2004
1944 /* Enable panel fitting for LVDS */ 2005 /* Enable panel fitting for LVDS */
1945 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 2006 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
2007 || HAS_eDP || intel_pch_has_edp(crtc)) {
1946 temp = I915_READ(pf_ctl_reg); 2008 temp = I915_READ(pf_ctl_reg);
1947 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); 2009 I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
1948 2010
@@ -2037,9 +2099,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2037 reg = I915_READ(trans_dp_ctl); 2099 reg = I915_READ(trans_dp_ctl);
2038 reg &= ~TRANS_DP_PORT_SEL_MASK; 2100 reg &= ~TRANS_DP_PORT_SEL_MASK;
2039 reg = TRANS_DP_OUTPUT_ENABLE | 2101 reg = TRANS_DP_OUTPUT_ENABLE |
2040 TRANS_DP_ENH_FRAMING | 2102 TRANS_DP_ENH_FRAMING;
2041 TRANS_DP_VSYNC_ACTIVE_HIGH | 2103
2042 TRANS_DP_HSYNC_ACTIVE_HIGH; 2104 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2105 reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2106 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2107 reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2043 2108
2044 switch (intel_trans_dp_port_sel(crtc)) { 2109 switch (intel_trans_dp_port_sel(crtc)) {
2045 case PCH_DP_B: 2110 case PCH_DP_B:
@@ -2079,6 +2144,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2079 2144
2080 intel_crtc_load_lut(crtc); 2145 intel_crtc_load_lut(crtc);
2081 2146
2147 intel_update_fbc(crtc, &crtc->mode);
2148
2082 break; 2149 break;
2083 case DRM_MODE_DPMS_OFF: 2150 case DRM_MODE_DPMS_OFF:
2084 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); 2151 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
@@ -2093,6 +2160,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2093 I915_READ(dspbase_reg); 2160 I915_READ(dspbase_reg);
2094 } 2161 }
2095 2162
2163 if (dev_priv->cfb_plane == plane &&
2164 dev_priv->display.disable_fbc)
2165 dev_priv->display.disable_fbc(dev);
2166
2096 i915_disable_vga(dev); 2167 i915_disable_vga(dev);
2097 2168
2098 /* disable cpu pipe, disable after all planes disabled */ 2169 /* disable cpu pipe, disable after all planes disabled */
@@ -2472,8 +2543,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
2472 struct drm_device *dev = crtc->dev; 2543 struct drm_device *dev = crtc->dev;
2473 if (HAS_PCH_SPLIT(dev)) { 2544 if (HAS_PCH_SPLIT(dev)) {
2474 /* FDI link clock is fixed at 2.7G */ 2545 /* FDI link clock is fixed at 2.7G */
2475 if (mode->clock * 3 > 27000 * 4) 2546 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
2476 return MODE_CLOCK_HIGH; 2547 return false;
2477 } 2548 }
2478 return true; 2549 return true;
2479} 2550}
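
[editor's note] The two fixes above go together: mode_fixup() returns bool, so the old return MODE_CLOCK_HIGH (a nonzero drm_mode_status value) read as true and never actually rejected a mode, and the literal 27000 understated the FDI frequency by a factor of 100. With the corrected constant, a 1920x1200@60 mode (clock 193250 kHz) gives 193250 * 3 = 579,750 against IRONLAKE_FDI_FREQ * 4 = 10,800,000 and passes; only dotclocks above 3,600,000 kHz are now refused.
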
@@ -2655,6 +2726,20 @@ static struct intel_watermark_params g4x_wm_info = {
2655 2, 2726 2,
2656 G4X_FIFO_LINE_SIZE, 2727 G4X_FIFO_LINE_SIZE,
2657}; 2728};
2729static struct intel_watermark_params g4x_cursor_wm_info = {
2730 I965_CURSOR_FIFO,
2731 I965_CURSOR_MAX_WM,
2732 I965_CURSOR_DFT_WM,
2733 2,
2734 G4X_FIFO_LINE_SIZE,
2735};
2736static struct intel_watermark_params i965_cursor_wm_info = {
2737 I965_CURSOR_FIFO,
2738 I965_CURSOR_MAX_WM,
2739 I965_CURSOR_DFT_WM,
2740 2,
2741 I915_FIFO_LINE_SIZE,
2742};
2658static struct intel_watermark_params i945_wm_info = { 2743static struct intel_watermark_params i945_wm_info = {
2659 I945_FIFO_SIZE, 2744 I945_FIFO_SIZE,
2660 I915_MAX_WM, 2745 I915_MAX_WM,
@@ -2692,6 +2777,14 @@ static struct intel_watermark_params ironlake_display_wm_info = {
2692 ILK_FIFO_LINE_SIZE 2777 ILK_FIFO_LINE_SIZE
2693}; 2778};
2694 2779
2780static struct intel_watermark_params ironlake_cursor_wm_info = {
2781 ILK_CURSOR_FIFO,
2782 ILK_CURSOR_MAXWM,
2783 ILK_CURSOR_DFTWM,
2784 2,
2785 ILK_FIFO_LINE_SIZE
2786};
2787
2695static struct intel_watermark_params ironlake_display_srwm_info = { 2788static struct intel_watermark_params ironlake_display_srwm_info = {
2696 ILK_DISPLAY_SR_FIFO, 2789 ILK_DISPLAY_SR_FIFO,
2697 ILK_DISPLAY_MAX_SRWM, 2790 ILK_DISPLAY_MAX_SRWM,
@@ -2741,7 +2834,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2741 */ 2834 */
2742 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 2835 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
2743 1000; 2836 1000;
2744 entries_required /= wm->cacheline_size; 2837 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
2745 2838
2746 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); 2839 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
2747 2840
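
[editor's note] Rounding the entry count up matters because a partially filled cacheline still occupies a whole FIFO entry; truncating division understates the requirement. In the kernel, DIV_ROUND_UP(n, d) is (((n) + (d) - 1) / (d)); a worked comparison:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long bytes = 130, cacheline = 64;

        printf("truncated: %lu, rounded up: %lu\n",
               bytes / cacheline, DIV_ROUND_UP(bytes, cacheline));
        /* prints "truncated: 2, rounded up: 3" */
        return 0;
    }
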
@@ -2752,8 +2845,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2752 /* Don't promote wm_size to unsigned... */ 2845 /* Don't promote wm_size to unsigned... */
2753 if (wm_size > (long)wm->max_wm) 2846 if (wm_size > (long)wm->max_wm)
2754 wm_size = wm->max_wm; 2847 wm_size = wm->max_wm;
2755 if (wm_size <= 0) 2848 if (wm_size <= 0) {
2756 wm_size = wm->default_wm; 2849 wm_size = wm->default_wm;
2850 DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
2851 " entries required = %ld, available = %lu.\n",
2852 entries_required + wm->guard_size,
2853 wm->fifo_size);
2854 }
2855
2757 return wm_size; 2856 return wm_size;
2758} 2857}
2759 2858
@@ -2862,11 +2961,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2862 uint32_t dsparb = I915_READ(DSPARB); 2961 uint32_t dsparb = I915_READ(DSPARB);
2863 int size; 2962 int size;
2864 2963
2865 if (plane == 0) 2964 size = dsparb & 0x7f;
2866 size = dsparb & 0x7f; 2965 if (plane)
2867 else 2966 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
2868 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2869 (dsparb & 0x7f);
2870 2967
2871 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 2968 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2872 plane ? "B" : "A", size); 2969 plane ? "B" : "A", size);
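
[editor's note] DSPARB stores FIFO split points rather than sizes: the low 7 bits are plane A's allocation, and plane B's is the C-start pointer minus plane A's share — exactly the subtraction the simplified code performs. A standalone sketch with an illustrative (not measured) register value; the shift of 7 matches the i9xx DSPARB_CSTART_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    #define DSPARB_CSTART_SHIFT 7

    static int i9xx_fifo_size(uint32_t dsparb, int plane)
    {
        int size = dsparb & 0x7f; /* plane A entries */

        if (plane)
            size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
        return size;
    }

    int main(void)
    {
        uint32_t dsparb = (96 << DSPARB_CSTART_SHIFT) | 64; /* hypothetical split */

        printf("A: %d, B: %d\n",
               i9xx_fifo_size(dsparb, 0),  /* 64 */
               i9xx_fifo_size(dsparb, 1)); /* 32 */
        return 0;
    }
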
@@ -2880,11 +2977,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2880 uint32_t dsparb = I915_READ(DSPARB); 2977 uint32_t dsparb = I915_READ(DSPARB);
2881 int size; 2978 int size;
2882 2979
2883 if (plane == 0) 2980 size = dsparb & 0x1ff;
2884 size = dsparb & 0x1ff; 2981 if (plane)
2885 else 2982 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
2886 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
2887 (dsparb & 0x1ff);
2888 size >>= 1; /* Convert to cachelines */ 2983 size >>= 1; /* Convert to cachelines */
2889 2984
2890 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 2985 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
@@ -2925,7 +3020,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2925} 3020}
2926 3021
2927static void pineview_update_wm(struct drm_device *dev, int planea_clock, 3022static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2928 int planeb_clock, int sr_hdisplay, int pixel_size) 3023 int planeb_clock, int sr_hdisplay, int unused,
3024 int pixel_size)
2929{ 3025{
2930 struct drm_i915_private *dev_priv = dev->dev_private; 3026 struct drm_i915_private *dev_priv = dev->dev_private;
2931 u32 reg; 3027 u32 reg;
@@ -2990,7 +3086,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
2990} 3086}
2991 3087
2992static void g4x_update_wm(struct drm_device *dev, int planea_clock, 3088static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2993 int planeb_clock, int sr_hdisplay, int pixel_size) 3089 int planeb_clock, int sr_hdisplay, int sr_htotal,
3090 int pixel_size)
2994{ 3091{
2995 struct drm_i915_private *dev_priv = dev->dev_private; 3092 struct drm_i915_private *dev_priv = dev->dev_private;
2996 int total_size, cacheline_size; 3093 int total_size, cacheline_size;
@@ -3014,12 +3111,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
3014 */ 3111 */
3015 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / 3112 entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
3016 1000; 3113 1000;
3017 entries_required /= G4X_FIFO_LINE_SIZE; 3114 entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
3018 planea_wm = entries_required + planea_params.guard_size; 3115 planea_wm = entries_required + planea_params.guard_size;
3019 3116
3020 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / 3117 entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
3021 1000; 3118 1000;
3022 entries_required /= G4X_FIFO_LINE_SIZE; 3119 entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
3023 planeb_wm = entries_required + planeb_params.guard_size; 3120 planeb_wm = entries_required + planeb_params.guard_size;
3024 3121
3025 cursora_wm = cursorb_wm = 16; 3122 cursora_wm = cursorb_wm = 16;
@@ -3033,13 +3130,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
3033 static const int sr_latency_ns = 12000; 3130 static const int sr_latency_ns = 12000;
3034 3131
3035 sr_clock = planea_clock ? planea_clock : planeb_clock; 3132 sr_clock = planea_clock ? planea_clock : planeb_clock;
3036 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3133 line_time_us = ((sr_htotal * 1000) / sr_clock);
3037 3134
3038 /* Use ns/us then divide to preserve precision */ 3135 /* Use ns/us then divide to preserve precision */
3039 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 3136 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3040 pixel_size * sr_hdisplay) / 1000; 3137 pixel_size * sr_hdisplay;
3041 sr_entries = roundup(sr_entries / cacheline_size, 1); 3138 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
3042 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 3139
3140 entries_required = (((sr_latency_ns / line_time_us) +
3141 1000) / 1000) * pixel_size * 64;
3142 entries_required = DIV_ROUND_UP(entries_required,
3143 g4x_cursor_wm_info.cacheline_size);
3144 cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
3145
3146 if (cursor_sr > g4x_cursor_wm_info.max_wm)
3147 cursor_sr = g4x_cursor_wm_info.max_wm;
3148 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3149 "cursor %d\n", sr_entries, cursor_sr);
3150
3043 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 3151 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
3044 } else { 3152 } else {
3045 /* Turn off self refresh if both pipes are enabled */ 3153 /* Turn off self refresh if both pipes are enabled */
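
[editor's note] The self-refresh sizing converts latency into scanout lines and then into bytes: using htotal instead of hdisplay makes the line time include blanking, and the (x + 1000) / 1000 form rounds the line count up in integer arithmetic. Worked numbers under assumed (not measured) mode parameters:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* hypothetical single-pipe mode: 1920x1200, htotal 2080, 154 MHz */
        int sr_htotal = 2080, sr_clock = 154000;   /* kHz */
        int sr_hdisplay = 1920, pixel_size = 4;    /* bytes per pixel */
        int sr_latency_ns = 12000, cacheline = 64;

        int line_time_us = (sr_htotal * 1000) / sr_clock;            /* 13 us */
        int sr_entries = ((sr_latency_ns / line_time_us) + 1000) / 1000 *
                         pixel_size * sr_hdisplay;                   /* 7680 bytes */

        printf("%d cachelines\n", DIV_ROUND_UP(sr_entries, cacheline)); /* 120 */
        return 0;
    }
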
@@ -3064,11 +3172,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
3064} 3172}
3065 3173
3066static void i965_update_wm(struct drm_device *dev, int planea_clock, 3174static void i965_update_wm(struct drm_device *dev, int planea_clock,
3067 int planeb_clock, int sr_hdisplay, int pixel_size) 3175 int planeb_clock, int sr_hdisplay, int sr_htotal,
3176 int pixel_size)
3068{ 3177{
3069 struct drm_i915_private *dev_priv = dev->dev_private; 3178 struct drm_i915_private *dev_priv = dev->dev_private;
3070 unsigned long line_time_us; 3179 unsigned long line_time_us;
3071 int sr_clock, sr_entries, srwm = 1; 3180 int sr_clock, sr_entries, srwm = 1;
3181 int cursor_sr = 16;
3072 3182
3073 /* Calc sr entries for one plane configs */ 3183 /* Calc sr entries for one plane configs */
3074 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 3184 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
@@ -3076,17 +3186,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
3076 static const int sr_latency_ns = 12000; 3186 static const int sr_latency_ns = 12000;
3077 3187
3078 sr_clock = planea_clock ? planea_clock : planeb_clock; 3188 sr_clock = planea_clock ? planea_clock : planeb_clock;
3079 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3189 line_time_us = ((sr_htotal * 1000) / sr_clock);
3080 3190
3081 /* Use ns/us then divide to preserve precision */ 3191 /* Use ns/us then divide to preserve precision */
3082 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 3192 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3083 pixel_size * sr_hdisplay) / 1000; 3193 pixel_size * sr_hdisplay;
3084 sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); 3194 sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
3085 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 3195 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
3086 srwm = I945_FIFO_SIZE - sr_entries; 3196 srwm = I965_FIFO_SIZE - sr_entries;
3087 if (srwm < 0) 3197 if (srwm < 0)
3088 srwm = 1; 3198 srwm = 1;
3089 srwm &= 0x3f; 3199 srwm &= 0x1ff;
3200
3201 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3202 pixel_size * 64;
3203 sr_entries = DIV_ROUND_UP(sr_entries,
3204 i965_cursor_wm_info.cacheline_size);
3205 cursor_sr = i965_cursor_wm_info.fifo_size -
3206 (sr_entries + i965_cursor_wm_info.guard_size);
3207
3208 if (cursor_sr > i965_cursor_wm_info.max_wm)
3209 cursor_sr = i965_cursor_wm_info.max_wm;
3210
3211 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
3212 "cursor %d\n", srwm, cursor_sr);
3213
3090 if (IS_I965GM(dev)) 3214 if (IS_I965GM(dev))
3091 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 3215 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
3092 } else { 3216 } else {
@@ -3103,10 +3227,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
3103 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | 3227 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
3104 (8 << 0)); 3228 (8 << 0));
3105 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 3229 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
3230 /* update cursor SR watermark */
3231 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
3106} 3232}
3107 3233
3108static void i9xx_update_wm(struct drm_device *dev, int planea_clock, 3234static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3109 int planeb_clock, int sr_hdisplay, int pixel_size) 3235 int planeb_clock, int sr_hdisplay, int sr_htotal,
3236 int pixel_size)
3110{ 3237{
3111 struct drm_i915_private *dev_priv = dev->dev_private; 3238 struct drm_i915_private *dev_priv = dev->dev_private;
3112 uint32_t fwater_lo; 3239 uint32_t fwater_lo;
@@ -3151,12 +3278,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3151 static const int sr_latency_ns = 6000; 3278 static const int sr_latency_ns = 6000;
3152 3279
3153 sr_clock = planea_clock ? planea_clock : planeb_clock; 3280 sr_clock = planea_clock ? planea_clock : planeb_clock;
3154 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3281 line_time_us = ((sr_htotal * 1000) / sr_clock);
3155 3282
3156 /* Use ns/us then divide to preserve precision */ 3283 /* Use ns/us then divide to preserve precision */
3157 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 3284 sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
3158 pixel_size * sr_hdisplay) / 1000; 3285 pixel_size * sr_hdisplay;
3159 sr_entries = roundup(sr_entries / cacheline_size, 1); 3286 sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
3160 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); 3287 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
3161 srwm = total_size - sr_entries; 3288 srwm = total_size - sr_entries;
3162 if (srwm < 0) 3289 if (srwm < 0)
@@ -3194,7 +3321,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
3194} 3321}
3195 3322
3196static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, 3323static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3197 int unused2, int pixel_size) 3324 int unused2, int unused3, int pixel_size)
3198{ 3325{
3199 struct drm_i915_private *dev_priv = dev->dev_private; 3326 struct drm_i915_private *dev_priv = dev->dev_private;
3200 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; 3327 uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -3212,9 +3339,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3212} 3339}
3213 3340
3214#define ILK_LP0_PLANE_LATENCY 700 3341#define ILK_LP0_PLANE_LATENCY 700
3342#define ILK_LP0_CURSOR_LATENCY 1300
3215 3343
3216static void ironlake_update_wm(struct drm_device *dev, int planea_clock, 3344static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3217 int planeb_clock, int sr_hdisplay, int pixel_size) 3345 int planeb_clock, int sr_hdisplay, int sr_htotal,
3346 int pixel_size)
3218{ 3347{
3219 struct drm_i915_private *dev_priv = dev->dev_private; 3348 struct drm_i915_private *dev_priv = dev->dev_private;
3220 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 3349 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -3222,20 +3351,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3222 unsigned long line_time_us; 3351 unsigned long line_time_us;
3223 int sr_clock, entries_required; 3352 int sr_clock, entries_required;
3224 u32 reg_value; 3353 u32 reg_value;
3354 int line_count;
3355 int planea_htotal = 0, planeb_htotal = 0;
3356 struct drm_crtc *crtc;
3357 struct intel_crtc *intel_crtc;
3358
3359 /* Need htotal for all active display planes */
3360 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3361 intel_crtc = to_intel_crtc(crtc);
3362 if (crtc->enabled) {
3363 if (intel_crtc->plane == 0)
3364 planea_htotal = crtc->mode.htotal;
3365 else
3366 planeb_htotal = crtc->mode.htotal;
3367 }
3368 }
3225 3369
3226 /* Calculate and update the watermark for plane A */ 3370 /* Calculate and update the watermark for plane A */
3227 if (planea_clock) { 3371 if (planea_clock) {
3228 entries_required = ((planea_clock / 1000) * pixel_size * 3372 entries_required = ((planea_clock / 1000) * pixel_size *
3229 ILK_LP0_PLANE_LATENCY) / 1000; 3373 ILK_LP0_PLANE_LATENCY) / 1000;
3230 entries_required = DIV_ROUND_UP(entries_required, 3374 entries_required = DIV_ROUND_UP(entries_required,
3231 ironlake_display_wm_info.cacheline_size); 3375 ironlake_display_wm_info.cacheline_size);
3232 planea_wm = entries_required + 3376 planea_wm = entries_required +
3233 ironlake_display_wm_info.guard_size; 3377 ironlake_display_wm_info.guard_size;
3234 3378
3235 if (planea_wm > (int)ironlake_display_wm_info.max_wm) 3379 if (planea_wm > (int)ironlake_display_wm_info.max_wm)
3236 planea_wm = ironlake_display_wm_info.max_wm; 3380 planea_wm = ironlake_display_wm_info.max_wm;
3237 3381
3238 cursora_wm = 16; 3382 /* Use the large buffer method to calculate cursor watermark */
3383 line_time_us = (planea_htotal * 1000) / planea_clock;
3384
3385 /* Use ns/us then divide to preserve precision */
3386 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
3387
3388 /* calculate the cursor watermark for cursor A */
3389 entries_required = line_count * 64 * pixel_size;
3390 entries_required = DIV_ROUND_UP(entries_required,
3391 ironlake_cursor_wm_info.cacheline_size);
3392 cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
3393 if (cursora_wm > ironlake_cursor_wm_info.max_wm)
3394 cursora_wm = ironlake_cursor_wm_info.max_wm;
3395
3239 reg_value = I915_READ(WM0_PIPEA_ILK); 3396 reg_value = I915_READ(WM0_PIPEA_ILK);
3240 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 3397 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3241 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | 3398 reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3249,14 +3406,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3249 entries_required = ((planeb_clock / 1000) * pixel_size * 3406 entries_required = ((planeb_clock / 1000) * pixel_size *
3250 ILK_LP0_PLANE_LATENCY) / 1000; 3407 ILK_LP0_PLANE_LATENCY) / 1000;
3251 entries_required = DIV_ROUND_UP(entries_required, 3408 entries_required = DIV_ROUND_UP(entries_required,
3252 ironlake_display_wm_info.cacheline_size); 3409 ironlake_display_wm_info.cacheline_size);
3253 planeb_wm = entries_required + 3410 planeb_wm = entries_required +
3254 ironlake_display_wm_info.guard_size; 3411 ironlake_display_wm_info.guard_size;
3255 3412
3256 if (planeb_wm > (int)ironlake_display_wm_info.max_wm) 3413 if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
3257 planeb_wm = ironlake_display_wm_info.max_wm; 3414 planeb_wm = ironlake_display_wm_info.max_wm;
3258 3415
3259 cursorb_wm = 16; 3416 /* Use the large buffer method to calculate cursor watermark */
3417 line_time_us = (planeb_htotal * 1000) / planeb_clock;
3418
3419 /* Use ns/us then divide to preserve precision */
3420 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
3421
3422 /* calculate the cursor watermark for cursor B */
3423 entries_required = line_count * 64 * pixel_size;
3424 entries_required = DIV_ROUND_UP(entries_required,
3425 ironlake_cursor_wm_info.cacheline_size);
3426 cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
3427 if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
3428 cursorb_wm = ironlake_cursor_wm_info.max_wm;
3429
3260 reg_value = I915_READ(WM0_PIPEB_ILK); 3430 reg_value = I915_READ(WM0_PIPEB_ILK);
3261 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 3431 reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
3262 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | 3432 reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
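
[editor's note] The "large buffer method" used for both cursors converts the LP0 cursor latency into whole scanout lines, then sizes the buffer for a 64-pixel-wide cursor surface. With the 1300 ns ILK_LP0_CURSOR_LATENCY defined above and a hypothetical 13 us line time: line_count = (1300 / 13 + 1000) / 1000 = 1, so a 32 bpp cursor needs 1 * 64 * 4 = 256 bytes; assuming a 64-byte ILK_FIFO_LINE_SIZE that is 4 entries, plus the guard size of 2 from ironlake_cursor_wm_info, for a watermark of 6 rather than the old hardcoded 16.
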
@@ -3271,12 +3441,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3271 * display plane is used. 3441 * display plane is used.
3272 */ 3442 */
3273 if (!planea_clock || !planeb_clock) { 3443 if (!planea_clock || !planeb_clock) {
3274 int line_count; 3444
3275 /* Read the self-refresh latency. The unit is 0.5us */ 3445 /* Read the self-refresh latency. The unit is 0.5us */
3276 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; 3446 int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
3277 3447
3278 sr_clock = planea_clock ? planea_clock : planeb_clock; 3448 sr_clock = planea_clock ? planea_clock : planeb_clock;
3279 line_time_us = ((sr_hdisplay * 1000) / sr_clock); 3449 line_time_us = ((sr_htotal * 1000) / sr_clock);
3280 3450
3281 /* Use ns/us then divide to preserve precision */ 3451 /* Use ns/us then divide to preserve precision */
3282 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) 3452 line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
@@ -3285,14 +3455,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3285 /* calculate the self-refresh watermark for display plane */ 3455 /* calculate the self-refresh watermark for display plane */
3286 entries_required = line_count * sr_hdisplay * pixel_size; 3456 entries_required = line_count * sr_hdisplay * pixel_size;
3287 entries_required = DIV_ROUND_UP(entries_required, 3457 entries_required = DIV_ROUND_UP(entries_required,
3288 ironlake_display_srwm_info.cacheline_size); 3458 ironlake_display_srwm_info.cacheline_size);
3289 sr_wm = entries_required + 3459 sr_wm = entries_required +
3290 ironlake_display_srwm_info.guard_size; 3460 ironlake_display_srwm_info.guard_size;
3291 3461
3292 /* calculate the self-refresh watermark for display cursor */ 3462 /* calculate the self-refresh watermark for display cursor */
3293 entries_required = line_count * pixel_size * 64; 3463 entries_required = line_count * pixel_size * 64;
3294 entries_required = DIV_ROUND_UP(entries_required, 3464 entries_required = DIV_ROUND_UP(entries_required,
3295 ironlake_cursor_srwm_info.cacheline_size); 3465 ironlake_cursor_srwm_info.cacheline_size);
3296 cursor_wm = entries_required + 3466 cursor_wm = entries_required +
3297 ironlake_cursor_srwm_info.guard_size; 3467 ironlake_cursor_srwm_info.guard_size;
3298 3468
@@ -3336,6 +3506,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
3336 * bytes per pixel 3506 * bytes per pixel
3337 * where 3507 * where
3338 * line time = htotal / dotclock 3508 * line time = htotal / dotclock
3509 * surface width = hdisplay for normal plane and 64 for cursor
3339 * and latency is assumed to be high, as above. 3510 * and latency is assumed to be high, as above.
3340 * 3511 *
3341 * The final value programmed to the register should always be rounded up, 3512 * The final value programmed to the register should always be rounded up,
@@ -3352,6 +3523,7 @@ static void intel_update_watermarks(struct drm_device *dev)
3352 int sr_hdisplay = 0; 3523 int sr_hdisplay = 0;
3353 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; 3524 unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
3354 int enabled = 0, pixel_size = 0; 3525 int enabled = 0, pixel_size = 0;
3526 int sr_htotal = 0;
3355 3527
3356 if (!dev_priv->display.update_wm) 3528 if (!dev_priv->display.update_wm)
3357 return; 3529 return;
@@ -3372,6 +3544,7 @@ static void intel_update_watermarks(struct drm_device *dev)
3372 } 3544 }
3373 sr_hdisplay = crtc->mode.hdisplay; 3545 sr_hdisplay = crtc->mode.hdisplay;
3374 sr_clock = crtc->mode.clock; 3546 sr_clock = crtc->mode.clock;
3547 sr_htotal = crtc->mode.htotal;
3375 if (crtc->fb) 3548 if (crtc->fb)
3376 pixel_size = crtc->fb->bits_per_pixel / 8; 3549 pixel_size = crtc->fb->bits_per_pixel / 8;
3377 else 3550 else
@@ -3383,7 +3556,7 @@ static void intel_update_watermarks(struct drm_device *dev)
3383 return; 3556 return;
3384 3557
3385 dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 3558 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
3386 sr_hdisplay, pixel_size); 3559 sr_hdisplay, sr_htotal, pixel_size);
3387} 3560}
3388 3561
3389static int intel_crtc_mode_set(struct drm_crtc *crtc, 3562static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -3502,6 +3675,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3502 return -EINVAL; 3675 return -EINVAL;
3503 } 3676 }
3504 3677
3678 /* Ensure that the cursor is valid for the new mode before changing... */
3679 intel_crtc_update_cursor(crtc);
3680
3505 if (is_lvds && dev_priv->lvds_downclock_avail) { 3681 if (is_lvds && dev_priv->lvds_downclock_avail) {
3506 has_reduced_clock = limit->find_pll(limit, crtc, 3682 has_reduced_clock = limit->find_pll(limit, crtc,
3507 dev_priv->lvds_downclock, 3683 dev_priv->lvds_downclock,
@@ -3568,7 +3744,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3568 temp |= PIPE_8BPC; 3744 temp |= PIPE_8BPC;
3569 else 3745 else
3570 temp |= PIPE_6BPC; 3746 temp |= PIPE_6BPC;
3571 } else if (is_edp) { 3747 } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
3572 switch (dev_priv->edp_bpp/3) { 3748 switch (dev_priv->edp_bpp/3) {
3573 case 8: 3749 case 8:
3574 temp |= PIPE_8BPC; 3750 temp |= PIPE_8BPC;
@@ -3811,6 +3987,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3811 udelay(150); 3987 udelay(150);
3812 } 3988 }
3813 3989
3990 if (HAS_PCH_SPLIT(dev)) {
3991 pipeconf &= ~PIPE_ENABLE_DITHER;
3992 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3993 }
3994
3814 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 3995 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3815 * This is an exception to the general rule that mode_set doesn't turn 3996 * This is an exception to the general rule that mode_set doesn't turn
3816 * things on. 3997 * things on.
@@ -3853,16 +4034,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3853 if (dev_priv->lvds_dither) { 4034 if (dev_priv->lvds_dither) {
3854 if (HAS_PCH_SPLIT(dev)) { 4035 if (HAS_PCH_SPLIT(dev)) {
3855 pipeconf |= PIPE_ENABLE_DITHER; 4036 pipeconf |= PIPE_ENABLE_DITHER;
3856 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3857 pipeconf |= PIPE_DITHER_TYPE_ST01; 4037 pipeconf |= PIPE_DITHER_TYPE_ST01;
3858 } else 4038 } else
3859 lvds |= LVDS_ENABLE_DITHER; 4039 lvds |= LVDS_ENABLE_DITHER;
3860 } else { 4040 } else {
3861 if (HAS_PCH_SPLIT(dev)) { 4041 if (!HAS_PCH_SPLIT(dev)) {
3862 pipeconf &= ~PIPE_ENABLE_DITHER;
3863 pipeconf &= ~PIPE_DITHER_TYPE_MASK;
3864 } else
3865 lvds &= ~LVDS_ENABLE_DITHER; 4042 lvds &= ~LVDS_ENABLE_DITHER;
4043 }
3866 } 4044 }
3867 } 4045 }
3868 I915_WRITE(lvds_reg, lvds); 4046 I915_WRITE(lvds_reg, lvds);
@@ -4038,6 +4216,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
4038 } 4216 }
4039} 4217}
4040 4218
4219/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
4220static void intel_crtc_update_cursor(struct drm_crtc *crtc)
4221{
4222 struct drm_device *dev = crtc->dev;
4223 struct drm_i915_private *dev_priv = dev->dev_private;
4224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4225 int pipe = intel_crtc->pipe;
4226 int x = intel_crtc->cursor_x;
4227 int y = intel_crtc->cursor_y;
4228 uint32_t base, pos;
4229 bool visible;
4230
4231 pos = 0;
4232
4233 if (crtc->fb) {
4234 base = intel_crtc->cursor_addr;
4235 if (x > (int) crtc->fb->width)
4236 base = 0;
4237
4238 if (y > (int) crtc->fb->height)
4239 base = 0;
4240 } else
4241 base = 0;
4242
4243 if (x < 0) {
4244 if (x + intel_crtc->cursor_width < 0)
4245 base = 0;
4246
4247 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
4248 x = -x;
4249 }
4250 pos |= x << CURSOR_X_SHIFT;
4251
4252 if (y < 0) {
4253 if (y + intel_crtc->cursor_height < 0)
4254 base = 0;
4255
4256 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
4257 y = -y;
4258 }
4259 pos |= y << CURSOR_Y_SHIFT;
4260
4261 visible = base != 0;
4262 if (!visible && !intel_crtc->cursor_visble)
4263 return;
4264
4265 I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
4266 if (intel_crtc->cursor_visble != visible) {
4267 uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
4268 if (base) {
4269 /* Hooray for CUR*CNTR differences */
4270 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4271 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4272 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4273 cntl |= pipe << 28; /* Connect to correct pipe */
4274 } else {
4275 cntl &= ~(CURSOR_FORMAT_MASK);
4276 cntl |= CURSOR_ENABLE;
4277 cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
4278 }
4279 } else {
4280 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4281 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
4282 cntl |= CURSOR_MODE_DISABLE;
4283 } else {
4284 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
4285 }
4286 }
4287 I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
4288
4289 intel_crtc->cursor_visble = visible;
4290 }
4291 /* and commit changes on next vblank */
4292 I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
4293
4294 if (visible)
4295 intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
4296}
4297
4041static int intel_crtc_cursor_set(struct drm_crtc *crtc, 4298static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4042 struct drm_file *file_priv, 4299 struct drm_file *file_priv,
4043 uint32_t handle, 4300 uint32_t handle,
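
[editor's note] The position register uses sign-magnitude coordinates, which is why the helper negates x and y and sets a sign flag instead of writing two's-complement values; a wholly off-screen cursor (base = 0) is the condition the comment above warns can hang the GPU. The encoding in isolation (the sign flag and shifts mirror the CURSOR_POS_SIGN / CURSOR_X_SHIFT / CURSOR_Y_SHIFT usage above):

    #include <stdint.h>
    #include <stdio.h>

    #define CURSOR_POS_SIGN 0x8000u
    #define CURSOR_X_SHIFT  0
    #define CURSOR_Y_SHIFT  16

    static uint32_t encode_curpos(int x, int y)
    {
        uint32_t pos = 0;

        if (x < 0) {
            pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
            x = -x;
        }
        pos |= (uint32_t)x << CURSOR_X_SHIFT;

        if (y < 0) {
            pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
            y = -y;
        }
        pos |= (uint32_t)y << CURSOR_Y_SHIFT;

        return pos;
    }

    int main(void)
    {
        /* cursor hanging 10 px off the left edge at y = 100 */
        printf("0x%08x\n", encode_curpos(-10, 100)); /* 0x0064800a */
        return 0;
    }
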
@@ -4048,11 +4305,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4048 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4305 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4049 struct drm_gem_object *bo; 4306 struct drm_gem_object *bo;
4050 struct drm_i915_gem_object *obj_priv; 4307 struct drm_i915_gem_object *obj_priv;
4051 int pipe = intel_crtc->pipe; 4308 uint32_t addr;
4052 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
4053 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
4054 uint32_t temp = I915_READ(control);
4055 size_t addr;
4056 int ret; 4309 int ret;
4057 4310
4058 DRM_DEBUG_KMS("\n"); 4311 DRM_DEBUG_KMS("\n");
@@ -4060,12 +4313,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4060 /* if we want to turn off the cursor ignore width and height */ 4313 /* if we want to turn off the cursor ignore width and height */
4061 if (!handle) { 4314 if (!handle) {
4062 DRM_DEBUG_KMS("cursor off\n"); 4315 DRM_DEBUG_KMS("cursor off\n");
4063 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4064 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
4065 temp |= CURSOR_MODE_DISABLE;
4066 } else {
4067 temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
4068 }
4069 addr = 0; 4316 addr = 0;
4070 bo = NULL; 4317 bo = NULL;
4071 mutex_lock(&dev->struct_mutex); 4318 mutex_lock(&dev->struct_mutex);
@@ -4107,7 +4354,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4107 4354
4108 addr = obj_priv->gtt_offset; 4355 addr = obj_priv->gtt_offset;
4109 } else { 4356 } else {
4110 ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); 4357 ret = i915_gem_attach_phys_object(dev, bo,
4358 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
4111 if (ret) { 4359 if (ret) {
4112 DRM_ERROR("failed to attach phys object\n"); 4360 DRM_ERROR("failed to attach phys object\n");
4113 goto fail_locked; 4361 goto fail_locked;
@@ -4118,21 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4118 if (!IS_I9XX(dev)) 4366 if (!IS_I9XX(dev))
4119 I915_WRITE(CURSIZE, (height << 12) | width); 4367 I915_WRITE(CURSIZE, (height << 12) | width);
4120 4368
4121 /* Hooray for CUR*CNTR differences */
4122 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
4123 temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4124 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4125 temp |= (pipe << 28); /* Connect to correct pipe */
4126 } else {
4127 temp &= ~(CURSOR_FORMAT_MASK);
4128 temp |= CURSOR_ENABLE;
4129 temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
4130 }
4131
4132 finish: 4369 finish:
4133 I915_WRITE(control, temp);
4134 I915_WRITE(base, addr);
4135
4136 if (intel_crtc->cursor_bo) { 4370 if (intel_crtc->cursor_bo) {
4137 if (dev_priv->info->cursor_needs_physical) { 4371 if (dev_priv->info->cursor_needs_physical) {
4138 if (intel_crtc->cursor_bo != bo) 4372 if (intel_crtc->cursor_bo != bo)
@@ -4146,6 +4380,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4146 4380
4147 intel_crtc->cursor_addr = addr; 4381 intel_crtc->cursor_addr = addr;
4148 intel_crtc->cursor_bo = bo; 4382 intel_crtc->cursor_bo = bo;
4383 intel_crtc->cursor_width = width;
4384 intel_crtc->cursor_height = height;
4385
4386 intel_crtc_update_cursor(crtc);
4149 4387
4150 return 0; 4388 return 0;
4151fail_unpin: 4389fail_unpin:
@@ -4159,34 +4397,12 @@ fail:
4159 4397
4160static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 4398static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
4161{ 4399{
4162 struct drm_device *dev = crtc->dev;
4163 struct drm_i915_private *dev_priv = dev->dev_private;
4164 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4165 struct intel_framebuffer *intel_fb;
4166 int pipe = intel_crtc->pipe;
4167 uint32_t temp = 0;
4168 uint32_t adder;
4169 4401
4170 if (crtc->fb) { 4402 intel_crtc->cursor_x = x;
4171 intel_fb = to_intel_framebuffer(crtc->fb); 4403 intel_crtc->cursor_y = y;
4172 intel_mark_busy(dev, intel_fb->obj);
4173 }
4174 4404
4175 if (x < 0) { 4405 intel_crtc_update_cursor(crtc);
4176 temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
4177 x = -x;
4178 }
4179 if (y < 0) {
4180 temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
4181 y = -y;
4182 }
4183
4184 temp |= x << CURSOR_X_SHIFT;
4185 temp |= y << CURSOR_Y_SHIFT;
4186
4187 adder = intel_crtc->cursor_addr;
4188 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
4189 I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
4190 4406
4191 return 0; 4407 return 0;
4192} 4408}
@@ -4770,6 +4986,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
4770 atomic_dec_and_test(&obj_priv->pending_flip)) 4986 atomic_dec_and_test(&obj_priv->pending_flip))
4771 DRM_WAKEUP(&dev_priv->pending_flip_queue); 4987 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4772 schedule_work(&work->work); 4988 schedule_work(&work->work);
4989
4990 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
4773} 4991}
4774 4992
4775void intel_finish_page_flip(struct drm_device *dev, int pipe) 4993void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -4847,27 +5065,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4847 5065
4848 mutex_lock(&dev->struct_mutex); 5066 mutex_lock(&dev->struct_mutex);
4849 ret = intel_pin_and_fence_fb_obj(dev, obj); 5067 ret = intel_pin_and_fence_fb_obj(dev, obj);
4850 if (ret != 0) { 5068 if (ret)
4851 mutex_unlock(&dev->struct_mutex); 5069 goto cleanup_work;
4852
4853 spin_lock_irqsave(&dev->event_lock, flags);
4854 intel_crtc->unpin_work = NULL;
4855 spin_unlock_irqrestore(&dev->event_lock, flags);
4856
4857 kfree(work);
4858
4859 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4860 to_intel_bo(obj));
4861 return ret;
4862 }
4863 5070
4864 /* Reference the objects for the scheduled work. */ 5071 /* Reference the objects for the scheduled work. */
4865 drm_gem_object_reference(work->old_fb_obj); 5072 drm_gem_object_reference(work->old_fb_obj);
4866 drm_gem_object_reference(obj); 5073 drm_gem_object_reference(obj);
4867 5074
4868 crtc->fb = fb; 5075 crtc->fb = fb;
4869 i915_gem_object_flush_write_domain(obj); 5076 ret = i915_gem_object_flush_write_domain(obj);
4870 drm_vblank_get(dev, intel_crtc->pipe); 5077 if (ret)
5078 goto cleanup_objs;
5079
5080 ret = drm_vblank_get(dev, intel_crtc->pipe);
5081 if (ret)
5082 goto cleanup_objs;
5083
4871 obj_priv = to_intel_bo(obj); 5084 obj_priv = to_intel_bo(obj);
4872 atomic_inc(&obj_priv->pending_flip); 5085 atomic_inc(&obj_priv->pending_flip);
4873 work->pending_flip_obj = obj; 5086 work->pending_flip_obj = obj;
@@ -4905,7 +5118,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4905 5118
4906 mutex_unlock(&dev->struct_mutex); 5119 mutex_unlock(&dev->struct_mutex);
4907 5120
5121 trace_i915_flip_request(intel_crtc->plane, obj);
5122
4908 return 0; 5123 return 0;
5124
5125cleanup_objs:
5126 drm_gem_object_unreference(work->old_fb_obj);
5127 drm_gem_object_unreference(obj);
5128cleanup_work:
5129 mutex_unlock(&dev->struct_mutex);
5130
5131 spin_lock_irqsave(&dev->event_lock, flags);
5132 intel_crtc->unpin_work = NULL;
5133 spin_unlock_irqrestore(&dev->event_lock, flags);
5134
5135 kfree(work);
5136
5137 return ret;
4909} 5138}
4910 5139
4911static const struct drm_crtc_helper_funcs intel_helper_funcs = { 5140static const struct drm_crtc_helper_funcs intel_helper_funcs = {
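
[editor's note] The restructured error handling replaces the inline unwind (unlock, clear unpin_work, free, log, return) with the kernel's usual goto-label ladder: each failure jumps to the label that undoes exactly the steps completed so far, and the labels fall through in reverse setup order. The shape of the idiom, with stubs in place of the real pin/vblank steps:

    #include <stdio.h>

    /* stand-ins for the real setup/teardown steps */
    static int  pin_fb(void)     { return 0; }
    static void unpin_fb(void)   { }
    static int  get_vblank(void) { return -1; /* pretend this step fails */ }

    static int queue_flip(void)
    {
        int ret;

        ret = pin_fb();
        if (ret)
            goto cleanup_work;

        ret = get_vblank();
        if (ret)
            goto cleanup_objs;

        return 0;

    cleanup_objs:
        unpin_fb(); /* undo only what succeeded, in reverse order */
    cleanup_work:
        return ret;
    }

    int main(void)
    {
        printf("queue_flip: %d\n", queue_flip()); /* -1, after unwinding */
        return 0;
    }
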
@@ -5032,19 +5261,26 @@ static void intel_setup_outputs(struct drm_device *dev)
5032{ 5261{
5033 struct drm_i915_private *dev_priv = dev->dev_private; 5262 struct drm_i915_private *dev_priv = dev->dev_private;
5034 struct drm_encoder *encoder; 5263 struct drm_encoder *encoder;
5264 bool dpd_is_edp = false;
5035 5265
5036 intel_crt_init(dev);
5037
5038 /* Set up integrated LVDS */
5039 if (IS_MOBILE(dev) && !IS_I830(dev)) 5266 if (IS_MOBILE(dev) && !IS_I830(dev))
5040 intel_lvds_init(dev); 5267 intel_lvds_init(dev);
5041 5268
5042 if (HAS_PCH_SPLIT(dev)) { 5269 if (HAS_PCH_SPLIT(dev)) {
5043 int found; 5270 dpd_is_edp = intel_dpd_is_edp(dev);
5044 5271
5045 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 5272 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
5046 intel_dp_init(dev, DP_A); 5273 intel_dp_init(dev, DP_A);
5047 5274
5275 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
5276 intel_dp_init(dev, PCH_DP_D);
5277 }
5278
5279 intel_crt_init(dev);
5280
5281 if (HAS_PCH_SPLIT(dev)) {
5282 int found;
5283
5048 if (I915_READ(HDMIB) & PORT_DETECTED) { 5284 if (I915_READ(HDMIB) & PORT_DETECTED) {
5049 /* PCH SDVOB multiplex with HDMIB */ 5285 /* PCH SDVOB multiplex with HDMIB */
5050 found = intel_sdvo_init(dev, PCH_SDVOB); 5286 found = intel_sdvo_init(dev, PCH_SDVOB);
@@ -5063,7 +5299,7 @@ static void intel_setup_outputs(struct drm_device *dev)
5063 if (I915_READ(PCH_DP_C) & DP_DETECTED) 5299 if (I915_READ(PCH_DP_C) & DP_DETECTED)
5064 intel_dp_init(dev, PCH_DP_C); 5300 intel_dp_init(dev, PCH_DP_C);
5065 5301
5066 if (I915_READ(PCH_DP_D) & DP_DETECTED) 5302 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
5067 intel_dp_init(dev, PCH_DP_D); 5303 intel_dp_init(dev, PCH_DP_D);
5068 5304
5069 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 5305 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
@@ -5472,6 +5708,26 @@ void intel_init_clock_gating(struct drm_device *dev)
5472 (I915_READ(DISP_ARB_CTL) | 5708 (I915_READ(DISP_ARB_CTL) |
5473 DISP_FBC_WM_DIS)); 5709 DISP_FBC_WM_DIS));
5474 } 5710 }
5711 /*
5712 * According to the hardware documentation, the following bits
5713 * must be set unconditionally in order to enable FBC:
5714 * bit 22 of 0x42000,
5715 * bit 22 of 0x42004,
5716 * bits 7, 8 and 9 of 0x42020.
5717 */
5718 if (IS_IRONLAKE_M(dev)) {
5719 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5720 I915_READ(ILK_DISPLAY_CHICKEN1) |
5721 ILK_FBCQ_DIS);
5722 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5723 I915_READ(ILK_DISPLAY_CHICKEN2) |
5724 ILK_DPARB_GATE);
5725 I915_WRITE(ILK_DSPCLK_GATE,
5726 I915_READ(ILK_DSPCLK_GATE) |
5727 ILK_DPFC_DIS1 |
5728 ILK_DPFC_DIS2 |
5729 ILK_CLK_FBC);
5730 }
5475 return; 5731 return;
5476 } else if (IS_G4X(dev)) { 5732 } else if (IS_G4X(dev)) {
5477 uint32_t dspclk_gate; 5733 uint32_t dspclk_gate;
@@ -5550,7 +5806,11 @@ static void intel_init_display(struct drm_device *dev)
5550 dev_priv->display.dpms = i9xx_crtc_dpms; 5806 dev_priv->display.dpms = i9xx_crtc_dpms;
5551 5807
5552 if (I915_HAS_FBC(dev)) { 5808 if (I915_HAS_FBC(dev)) {
5553 if (IS_GM45(dev)) { 5809 if (IS_IRONLAKE_M(dev)) {
5810 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5811 dev_priv->display.enable_fbc = ironlake_enable_fbc;
5812 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5813 } else if (IS_GM45(dev)) {
5554 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 5814 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5555 dev_priv->display.enable_fbc = g4x_enable_fbc; 5815 dev_priv->display.enable_fbc = g4x_enable_fbc;
5556 dev_priv->display.disable_fbc = g4x_disable_fbc; 5816 dev_priv->display.disable_fbc = g4x_disable_fbc;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5dde80f9e652..40be1fa65be1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -43,6 +43,7 @@
43#define DP_LINK_CONFIGURATION_SIZE 9 43#define DP_LINK_CONFIGURATION_SIZE 9
44 44
45#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) 45#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
46#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
46 47
47struct intel_dp_priv { 48struct intel_dp_priv {
48 uint32_t output_reg; 49 uint32_t output_reg;
@@ -56,6 +57,7 @@ struct intel_dp_priv {
56 struct intel_encoder *intel_encoder; 57 struct intel_encoder *intel_encoder;
57 struct i2c_adapter adapter; 58 struct i2c_adapter adapter;
58 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp;
59}; 61};
60 62
61static void 63static void
@@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev,
128 struct intel_encoder *intel_encoder, int pixel_clock) 130 struct intel_encoder *intel_encoder, int pixel_clock)
129{ 131{
130 struct drm_i915_private *dev_priv = dev->dev_private; 132 struct drm_i915_private *dev_priv = dev->dev_private;
133 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
131 134
132 if (IS_eDP(intel_encoder)) 135 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
133 return (pixel_clock * dev_priv->edp_bpp) / 8; 136 return (pixel_clock * dev_priv->edp_bpp) / 8;
134 else 137 else
135 return pixel_clock * 3; 138 return pixel_clock * 3;
@@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector,
147{ 150{
148 struct drm_encoder *encoder = intel_attached_encoder(connector); 151 struct drm_encoder *encoder = intel_attached_encoder(connector);
149 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 152 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
153 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
154 struct drm_device *dev = connector->dev;
155 struct drm_i915_private *dev_priv = dev->dev_private;
150 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); 156 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
151 int max_lanes = intel_dp_max_lane_count(intel_encoder); 157 int max_lanes = intel_dp_max_lane_count(intel_encoder);
152 158
159 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
160 dev_priv->panel_fixed_mode) {
161 if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
162 return MODE_PANEL;
163
164 if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
165 return MODE_PANEL;
166 }
167
153 /* only refuse the mode on non-eDP since we have seen some weird eDP panels 168
154 which are outside spec tolerances but somehow work by magic */ 169 which are outside spec tolerances but somehow work by magic */
155 if (!IS_eDP(intel_encoder) && 170 if (!IS_eDP(intel_encoder) &&
@@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
508{ 523{
509 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 524 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
510 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; 525 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
526 struct drm_device *dev = encoder->dev;
527 struct drm_i915_private *dev_priv = dev->dev_private;
511 int lane_count, clock; 528 int lane_count, clock;
512 int max_lane_count = intel_dp_max_lane_count(intel_encoder); 529 int max_lane_count = intel_dp_max_lane_count(intel_encoder);
513 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0; 530 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
514 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 531 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
515 532
533 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
534 dev_priv->panel_fixed_mode) {
535 struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
536
537 adjusted_mode->hdisplay = fixed_mode->hdisplay;
538 adjusted_mode->hsync_start = fixed_mode->hsync_start;
539 adjusted_mode->hsync_end = fixed_mode->hsync_end;
540 adjusted_mode->htotal = fixed_mode->htotal;
541
542 adjusted_mode->vdisplay = fixed_mode->vdisplay;
543 adjusted_mode->vsync_start = fixed_mode->vsync_start;
544 adjusted_mode->vsync_end = fixed_mode->vsync_end;
545 adjusted_mode->vtotal = fixed_mode->vtotal;
546
547 adjusted_mode->clock = fixed_mode->clock;
548 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
549
550 /*
551 * mode->clock is used to calculate the Data & Link M/N
552 * of the pipe. For eDP the fixed panel clock should be used.
553 */
554 mode->clock = dev_priv->panel_fixed_mode->clock;
555 }
556
516 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 557 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
517 for (clock = 0; clock <= max_clock; clock++) { 558 for (clock = 0; clock <= max_clock; clock++) {
518 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 559 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
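
[editor's note] The fixed-mode override above feeds into the existing search loop, which walks lane counts (1, 2, 4) and link rates (1.62, 2.7 GHz) in increasing order and takes the first configuration whose usable payload covers the required rate. A standalone sketch of that search; max_data_rate here assumes the 8b/10b factor (* 8 / 10) applied by intel_dp_max_data_rate():

    #include <stdio.h>

    /* usable payload after 8b/10b encoding, an assumption noted above */
    static int max_data_rate(int link_clock_khz, int lanes)
    {
        return link_clock_khz * lanes * 8 / 10;
    }

    int main(void)
    {
        static const int bws[2] = { 162000, 270000 }; /* kHz */
        int required = 162000 * 3; /* 162 MHz dotclock, 3 bytes per pixel */
        int lanes, clock;

        for (lanes = 1; lanes <= 4; lanes <<= 1)
            for (clock = 0; clock < 2; clock++)
                if (max_data_rate(bws[clock], lanes) >= required) {
                    printf("%d lanes @ %d kHz\n", lanes, bws[clock]);
                    return 0; /* picks 4 lanes @ 162000 kHz */
                }
        printf("no link configuration found\n");
        return 1;
    }
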
@@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
531 } 572 }
532 } 573 }
533 574
534 if (IS_eDP(intel_encoder)) { 575 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
535 /* okay we failed just pick the highest */ 576 /* okay we failed just pick the highest */
536 dp_priv->lane_count = max_lane_count; 577 dp_priv->lane_count = max_lane_count;
537 dp_priv->link_bw = bws[max_clock]; 578 dp_priv->link_bw = bws[max_clock];
@@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den)
563} 604}
564 605
565static void 606static void
566intel_dp_compute_m_n(int bytes_per_pixel, 607intel_dp_compute_m_n(int bpp,
567 int nlanes, 608 int nlanes,
568 int pixel_clock, 609 int pixel_clock,
569 int link_clock, 610 int link_clock,
570 struct intel_dp_m_n *m_n) 611 struct intel_dp_m_n *m_n)
571{ 612{
572 m_n->tu = 64; 613 m_n->tu = 64;
573 m_n->gmch_m = pixel_clock * bytes_per_pixel; 614 m_n->gmch_m = (pixel_clock * bpp) >> 3;
574 m_n->gmch_n = link_clock * nlanes; 615 m_n->gmch_n = link_clock * nlanes;
575 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 616 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
576 m_n->link_m = pixel_clock; 617 m_n->link_m = pixel_clock;
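
[editor's note] Taking bpp in bits and shifting by 3 avoids a bytes-per-pixel truncation for the 18 bpp eDP case: with bpp = 24 the result is identical to the old * 3, while bpp = 18 now contributes clock * 18 / 8 rather than being rounded to a whole byte count. Worked numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pixel_clock = 162000; /* kHz */

        printf("24 bpp: %u, 18 bpp: %u\n",
               (pixel_clock * 24) >> 3,  /* 486000, same as clock * 3 */
               (pixel_clock * 18) >> 3); /* 364500, not a whole-byte multiple */
        return 0;
    }
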
@@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel,
578 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 619 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
579} 620}
580 621
622bool intel_pch_has_edp(struct drm_crtc *crtc)
623{
624 struct drm_device *dev = crtc->dev;
625 struct drm_mode_config *mode_config = &dev->mode_config;
626 struct drm_encoder *encoder;
627
628 list_for_each_entry(encoder, &mode_config->encoder_list, head) {
629 struct intel_encoder *intel_encoder;
630 struct intel_dp_priv *dp_priv;
631
632 if (!encoder || encoder->crtc != crtc)
633 continue;
634
635 intel_encoder = enc_to_intel_encoder(encoder);
636 dp_priv = intel_encoder->dev_priv;
637
638 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
639 return dp_priv->is_pch_edp;
640 }
641 return false;
642}
643
581void 644void
582intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 645intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
583 struct drm_display_mode *adjusted_mode) 646 struct drm_display_mode *adjusted_mode)
@@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
587 struct drm_encoder *encoder; 650 struct drm_encoder *encoder;
588 struct drm_i915_private *dev_priv = dev->dev_private; 651 struct drm_i915_private *dev_priv = dev->dev_private;
589 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 652 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
590 int lane_count = 4; 653 int lane_count = 4, bpp = 24;
591 struct intel_dp_m_n m_n; 654 struct intel_dp_m_n m_n;
592 655
593 /* 656 /*
@@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
605 668
606 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { 669 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
607 lane_count = dp_priv->lane_count; 670 lane_count = dp_priv->lane_count;
671 if (IS_PCH_eDP(dp_priv))
672 bpp = dev_priv->edp_bpp;
608 break; 673 break;
609 } 674 }
610 } 675 }
@@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
614 * the number of bytes_per_pixel post-LUT, which we always 679 * the number of bytes_per_pixel post-LUT, which we always
615 * set up for 8-bits of R/G/B, or 3 bytes total. 680 * set up for 8-bits of R/G/B, or 3 bytes total.
616 */ 681 */
617 intel_dp_compute_m_n(3, lane_count, 682 intel_dp_compute_m_n(bpp, lane_count,
618 mode->clock, adjusted_mode->clock, &m_n); 683 mode->clock, adjusted_mode->clock, &m_n);
619 684
620 if (HAS_PCH_SPLIT(dev)) { 685 if (HAS_PCH_SPLIT(dev)) {
@@ -796,7 +861,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
796 if (mode != DRM_MODE_DPMS_ON) { 861 if (mode != DRM_MODE_DPMS_ON) {
797 if (dp_reg & DP_PORT_EN) { 862 if (dp_reg & DP_PORT_EN) {
798 intel_dp_link_down(intel_encoder, dp_priv->DP); 863 intel_dp_link_down(intel_encoder, dp_priv->DP);
799 if (IS_eDP(intel_encoder)) { 864 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
800 ironlake_edp_backlight_off(dev); 865 ironlake_edp_backlight_off(dev);
801 ironlake_edp_panel_off(dev); 866 ironlake_edp_panel_off(dev);
802 } 867 }
@@ -804,7 +869,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
804 } else { 869 } else {
805 if (!(dp_reg & DP_PORT_EN)) { 870 if (!(dp_reg & DP_PORT_EN)) {
806 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); 871 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
807 if (IS_eDP(intel_encoder)) { 872 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
808 ironlake_edp_panel_on(dev); 873 ironlake_edp_panel_on(dev);
809 ironlake_edp_backlight_on(dev); 874 ironlake_edp_backlight_on(dev);
810 } 875 }
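The two DPMS hunks above encode a fixed power-sequencing order for eDP panels, now applied to PCH eDP as well. A runnable sketch of the ordering only; the stubs stand in for the driver's ironlake_edp_* helpers and are not its code:

  #include <stdio.h>

  static void panel_on(void)      { puts("panel power on"); }
  static void backlight_on(void)  { puts("backlight on"); }
  static void backlight_off(void) { puts("backlight off"); }
  static void panel_off(void)     { puts("panel power off"); }

  static void edp_dpms(int on)
  {
          if (on) {
                  panel_on();        /* panel must be powered first */
                  backlight_on();    /* then the backlight */
          } else {
                  backlight_off();   /* backlight down first */
                  panel_off();       /* then cut panel power */
          }
  }

  int main(void)
  {
          edp_dpms(1);
          edp_dpms(0);
          return 0;
  }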
@@ -1340,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1340 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 1405 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1341 struct drm_device *dev = intel_encoder->enc.dev; 1406 struct drm_device *dev = intel_encoder->enc.dev;
1342 struct drm_i915_private *dev_priv = dev->dev_private; 1407 struct drm_i915_private *dev_priv = dev->dev_private;
1408 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1343 int ret; 1409 int ret;
1344 1410
1345 /* We should parse the EDID data and find out if it has an audio sink 1411 /* We should parse the EDID data and find out if it has an audio sink
1346 */ 1412 */
1347 1413
1348 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); 1414 ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
1349 if (ret) 1415 if (ret) {
1416 if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
1417 !dev_priv->panel_fixed_mode) {
1418 struct drm_display_mode *newmode;
1419 list_for_each_entry(newmode, &connector->probed_modes,
1420 head) {
1421 if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
1422 dev_priv->panel_fixed_mode =
1423 drm_mode_duplicate(dev, newmode);
1424 break;
1425 }
1426 }
1427 }
1428
1350 return ret; 1429 return ret;
1430 }
1351 1431
1352 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 1432 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1353 if (IS_eDP(intel_encoder)) { 1433 if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
1354 if (dev_priv->panel_fixed_mode != NULL) { 1434 if (dev_priv->panel_fixed_mode != NULL) {
1355 struct drm_display_mode *mode; 1435 struct drm_display_mode *mode;
1356 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 1436 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1435,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
1435 return -1; 1515 return -1;
1436} 1516}
1437 1517
1518/* check the VBT to see whether the eDP is on DP-D port */
1519bool intel_dpd_is_edp(struct drm_device *dev)
1520{
1521 struct drm_i915_private *dev_priv = dev->dev_private;
1522 struct child_device_config *p_child;
1523 int i;
1524
1525 if (!dev_priv->child_dev_num)
1526 return false;
1527
1528 for (i = 0; i < dev_priv->child_dev_num; i++) {
1529 p_child = dev_priv->child_dev + i;
1530
1531 if (p_child->dvo_port == PORT_IDPD &&
1532 p_child->device_type == DEVICE_TYPE_eDP)
1533 return true;
1534 }
1535 return false;
1536}
1537
1438void 1538void
1439intel_dp_init(struct drm_device *dev, int output_reg) 1539intel_dp_init(struct drm_device *dev, int output_reg)
1440{ 1540{
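Taken together with the init-path change below, eDP detection now has two sources: DP-A is always eDP, and DP-D counts as eDP only when the VBT's child-device table says so (intel_dpd_is_edp() above). A condensed sketch of that decision; the enum values are illustrative stand-ins, since the driver keys off output_reg and dp_priv->is_pch_edp:

  #include <stdio.h>

  enum dp_port { DP_PORT_A, DP_PORT_B, DP_PORT_C, DP_PORT_D };

  static int port_is_edp(enum dp_port port, int vbt_marks_dpd_edp)
  {
          return port == DP_PORT_A ||
                 (port == DP_PORT_D && vbt_marks_dpd_edp);
  }

  int main(void)
  {
          printf("%d %d %d\n",
                 port_is_edp(DP_PORT_A, 0),   /* 1: CPU eDP */
                 port_is_edp(DP_PORT_D, 1),   /* 1: PCH eDP per VBT */
                 port_is_edp(DP_PORT_D, 0));  /* 0: plain DP-D */
          return 0;
  }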
@@ -1444,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1444 struct intel_connector *intel_connector; 1544 struct intel_connector *intel_connector;
1445 struct intel_dp_priv *dp_priv; 1545 struct intel_dp_priv *dp_priv;
1446 const char *name = NULL; 1546 const char *name = NULL;
1547 int type;
1447 1548
1448 intel_encoder = kcalloc(sizeof(struct intel_encoder) + 1549 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
1449 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1550 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
@@ -1458,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1458 1559
1459 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); 1560 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
1460 1561
1562 if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
1563 if (intel_dpd_is_edp(dev))
1564 dp_priv->is_pch_edp = true;
1565
1566 if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
1567 type = DRM_MODE_CONNECTOR_eDP;
1568 intel_encoder->type = INTEL_OUTPUT_EDP;
1569 } else {
1570 type = DRM_MODE_CONNECTOR_DisplayPort;
1571 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1572 }
1573
1461 connector = &intel_connector->base; 1574 connector = &intel_connector->base;
1462 drm_connector_init(dev, connector, &intel_dp_connector_funcs, 1575 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
1463 DRM_MODE_CONNECTOR_DisplayPort);
1464 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 1576 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1465 1577
1466 connector->polled = DRM_CONNECTOR_POLL_HPD; 1578 connector->polled = DRM_CONNECTOR_POLL_HPD;
1467 1579
1468 if (output_reg == DP_A)
1469 intel_encoder->type = INTEL_OUTPUT_EDP;
1470 else
1471 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1472
1473 if (output_reg == DP_B || output_reg == PCH_DP_B) 1580 if (output_reg == DP_B || output_reg == PCH_DP_B)
1474 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 1581 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1475 else if (output_reg == DP_C || output_reg == PCH_DP_C) 1582 else if (output_reg == DP_C || output_reg == PCH_DP_C)
@@ -1528,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1528 intel_encoder->ddc_bus = &dp_priv->adapter; 1635 intel_encoder->ddc_bus = &dp_priv->adapter;
1529 intel_encoder->hot_plug = intel_dp_hot_plug; 1636 intel_encoder->hot_plug = intel_dp_hot_plug;
1530 1637
1531 if (output_reg == DP_A) { 1638 if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
1532 /* initialize panel mode from VBT if available for eDP */ 1639 /* initialize panel mode from VBT if available for eDP */
1533 if (dev_priv->lfp_lvds_vbt_mode) { 1640 if (dev_priv->lfp_lvds_vbt_mode) {
1534 dev_priv->panel_fixed_mode = 1641 dev_priv->panel_fixed_mode =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2f7970be9051..b2190148703a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -143,8 +143,6 @@ struct intel_crtc {
143 struct drm_crtc base; 143 struct drm_crtc base;
144 enum pipe pipe; 144 enum pipe pipe;
145 enum plane plane; 145 enum plane plane;
146 struct drm_gem_object *cursor_bo;
147 uint32_t cursor_addr;
148 u8 lut_r[256], lut_g[256], lut_b[256]; 146 u8 lut_r[256], lut_g[256], lut_b[256];
149 int dpms_mode; 147 int dpms_mode;
150 bool busy; /* is scanout buffer being updated frequently? */ 148 bool busy; /* is scanout buffer being updated frequently? */
@@ -153,6 +151,12 @@ struct intel_crtc {
153 struct intel_overlay *overlay; 151 struct intel_overlay *overlay;
154 struct intel_unpin_work *unpin_work; 152 struct intel_unpin_work *unpin_work;
155 int fdi_lanes; 153 int fdi_lanes;
154
155 struct drm_gem_object *cursor_bo;
156 uint32_t cursor_addr;
157 int16_t cursor_x, cursor_y;
158 int16_t cursor_width, cursor_height;
 159 bool cursor_visible;
156}; 160};
157 161
158#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 162#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
179void 183void
180intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 184intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
181 struct drm_display_mode *adjusted_mode); 185 struct drm_display_mode *adjusted_mode);
186extern bool intel_pch_has_edp(struct drm_crtc *crtc);
187extern bool intel_dpd_is_edp(struct drm_device *dev);
182extern void intel_edp_link_config (struct intel_encoder *, int *, int *); 188extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
183 189
184 190
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 83bd764b000e..197887ed1823 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
54 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; 54 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
55 u32 sdvox; 55 u32 sdvox;
56 56
57 sdvox = SDVO_ENCODING_HDMI | 57 sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
58 SDVO_BORDER_ENABLE | 58 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
59 SDVO_VSYNC_ACTIVE_HIGH | 59 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
60 SDVO_HSYNC_ACTIVE_HIGH; 60 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
61 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
61 62
62 if (hdmi_priv->has_hdmi_sink) { 63 if (hdmi_priv->has_hdmi_sink) {
63 sdvox |= SDVO_AUDIO_ENABLE; 64 sdvox |= SDVO_AUDIO_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0eab8df5bf7e..0a2e60059fb3 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -156,31 +156,73 @@ static int intel_lvds_mode_valid(struct drm_connector *connector,
156 return MODE_OK; 156 return MODE_OK;
157} 157}
158 158
159static void
160centre_horizontally(struct drm_display_mode *mode,
161 int width)
162{
163 u32 border, sync_pos, blank_width, sync_width;
164
165 /* keep the hsync and hblank widths constant */
166 sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
167 blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
168 sync_pos = (blank_width - sync_width + 1) / 2;
169
170 border = (mode->hdisplay - width + 1) / 2;
171 border += border & 1; /* make the border even */
172
173 mode->crtc_hdisplay = width;
174 mode->crtc_hblank_start = width + border;
175 mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
176
177 mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
178 mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
179}
180
181static void
182centre_vertically(struct drm_display_mode *mode,
183 int height)
184{
185 u32 border, sync_pos, blank_width, sync_width;
186
187 /* keep the vsync and vblank widths constant */
188 sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
189 blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
190 sync_pos = (blank_width - sync_width + 1) / 2;
191
192 border = (mode->vdisplay - height + 1) / 2;
193
194 mode->crtc_vdisplay = height;
195 mode->crtc_vblank_start = height + border;
196 mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
197
198 mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
199 mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
200}
201
202static inline u32 panel_fitter_scaling(u32 source, u32 target)
203{
204 /*
205 * Floating point operation is not supported. So the FACTOR
206 * is defined, which can avoid the floating point computation
207 * when calculating the panel ratio.
208 */
209#define ACCURACY 12
210#define FACTOR (1 << ACCURACY)
211 u32 ratio = source * FACTOR / target;
212 return (FACTOR * ratio + FACTOR/2) / FACTOR;
213}
214
159static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, 215static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
160 struct drm_display_mode *mode, 216 struct drm_display_mode *mode,
161 struct drm_display_mode *adjusted_mode) 217 struct drm_display_mode *adjusted_mode)
162{ 218{
163 /*
164 * float point operation is not supported . So the PANEL_RATIO_FACTOR
165 * is defined, which can avoid the float point computation when
166 * calculating the panel ratio.
167 */
168#define PANEL_RATIO_FACTOR 8192
169 struct drm_device *dev = encoder->dev; 219 struct drm_device *dev = encoder->dev;
170 struct drm_i915_private *dev_priv = dev->dev_private; 220 struct drm_i915_private *dev_priv = dev->dev_private;
171 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 221 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
172 struct drm_encoder *tmp_encoder; 222 struct drm_encoder *tmp_encoder;
173 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); 223 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
174 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; 224 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
175 u32 pfit_control = 0, pfit_pgm_ratios = 0; 225 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
176 int left_border = 0, right_border = 0, top_border = 0;
177 int bottom_border = 0;
178 bool border = 0;
179 int panel_ratio, desired_ratio, vert_scale, horiz_scale;
180 int horiz_ratio, vert_ratio;
181 u32 hsync_width, vsync_width;
182 u32 hblank_width, vblank_width;
183 u32 hsync_pos, vsync_pos;
184 226
185 /* Should never happen!! */ 227 /* Should never happen!! */
186 if (!IS_I965G(dev) && intel_crtc->pipe == 0) { 228 if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
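The centre_horizontally()/centre_vertically() helpers above replace the long hand-rolled border arithmetic removed later in this patch: they shrink the active region while keeping the sync and blank widths constant, rounding the border up to an even pixel count. A worked example with sample timings (plain variables, not the kernel's drm_display_mode):

  #include <stdio.h>

  int main(void)
  {
          /* native 1280-wide timing: hblank 1280..1440, hsync 1296..1344 */
          unsigned int hdisplay = 1280;
          unsigned int hblank_start = 1280, hblank_end = 1440;
          unsigned int hsync_start = 1296, hsync_end = 1344;
          unsigned int width = 1024;            /* requested mode width */

          unsigned int sync_width = hsync_end - hsync_start;    /* 48, preserved */
          unsigned int blank_width = hblank_end - hblank_start; /* 160, preserved */
          unsigned int sync_pos = (blank_width - sync_width + 1) / 2; /* 56 */

          unsigned int border = (hdisplay - width + 1) / 2;     /* 128 */
          border += border & 1;                 /* keep the border even */

          printf("hdisplay=%u hblank=%u..%u hsync=%u..%u\n",
                 width,
                 width + border, width + border + blank_width,
                 width + border + sync_pos,
                 width + border + sync_pos + sync_width);
          /* prints: hdisplay=1024 hblank=1152..1312 hsync=1208..1256 */
          return 0;
  }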
@@ -200,27 +242,25 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
200 if (dev_priv->panel_fixed_mode == NULL) 242 if (dev_priv->panel_fixed_mode == NULL)
201 return true; 243 return true;
202 /* 244 /*
203 * If we have timings from the BIOS for the panel, put them in 245 * We have timings from the BIOS for the panel, put them in
204 * to the adjusted mode. The CRTC will be set up for this mode, 246 * to the adjusted mode. The CRTC will be set up for this mode,
205 * with the panel scaling set up to source from the H/VDisplay 247 * with the panel scaling set up to source from the H/VDisplay
206 * of the original mode. 248 * of the original mode.
207 */ 249 */
208 if (dev_priv->panel_fixed_mode != NULL) { 250 adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
209 adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; 251 adjusted_mode->hsync_start =
210 adjusted_mode->hsync_start = 252 dev_priv->panel_fixed_mode->hsync_start;
211 dev_priv->panel_fixed_mode->hsync_start; 253 adjusted_mode->hsync_end =
212 adjusted_mode->hsync_end = 254 dev_priv->panel_fixed_mode->hsync_end;
213 dev_priv->panel_fixed_mode->hsync_end; 255 adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
214 adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; 256 adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
215 adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; 257 adjusted_mode->vsync_start =
216 adjusted_mode->vsync_start = 258 dev_priv->panel_fixed_mode->vsync_start;
217 dev_priv->panel_fixed_mode->vsync_start; 259 adjusted_mode->vsync_end =
218 adjusted_mode->vsync_end = 260 dev_priv->panel_fixed_mode->vsync_end;
219 dev_priv->panel_fixed_mode->vsync_end; 261 adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
220 adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; 262 adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
221 adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; 263 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
222 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
223 }
224 264
225 /* Make sure pre-965s set dither correctly */ 265 /* Make sure pre-965s set dither correctly */
226 if (!IS_I965G(dev)) { 266 if (!IS_I965G(dev)) {
@@ -230,11 +270,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
230 270
231 /* Native modes don't need fitting */ 271 /* Native modes don't need fitting */
232 if (adjusted_mode->hdisplay == mode->hdisplay && 272 if (adjusted_mode->hdisplay == mode->hdisplay &&
233 adjusted_mode->vdisplay == mode->vdisplay) { 273 adjusted_mode->vdisplay == mode->vdisplay)
234 pfit_pgm_ratios = 0;
235 border = 0;
236 goto out; 274 goto out;
237 }
238 275
239 /* full screen scale for now */ 276 /* full screen scale for now */
240 if (HAS_PCH_SPLIT(dev)) 277 if (HAS_PCH_SPLIT(dev))
@@ -242,25 +279,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
242 279
243 /* 965+ wants fuzzy fitting */ 280 /* 965+ wants fuzzy fitting */
244 if (IS_I965G(dev)) 281 if (IS_I965G(dev))
245 pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | 282 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
246 PFIT_FILTER_FUZZY; 283 PFIT_FILTER_FUZZY);
247 284
248 hsync_width = adjusted_mode->crtc_hsync_end -
249 adjusted_mode->crtc_hsync_start;
250 vsync_width = adjusted_mode->crtc_vsync_end -
251 adjusted_mode->crtc_vsync_start;
252 hblank_width = adjusted_mode->crtc_hblank_end -
253 adjusted_mode->crtc_hblank_start;
254 vblank_width = adjusted_mode->crtc_vblank_end -
255 adjusted_mode->crtc_vblank_start;
256 /*
257 * Deal with panel fitting options. Figure out how to stretch the
258 * image based on its aspect ratio & the current panel fitting mode.
259 */
260 panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
261 adjusted_mode->vdisplay;
262 desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
263 mode->vdisplay;
264 /* 285 /*
265 * Enable automatic panel scaling for non-native modes so that they fill 286 * Enable automatic panel scaling for non-native modes so that they fill
266 * the screen. Should be enabled before the pipe is enabled, according 287 * the screen. Should be enabled before the pipe is enabled, according
@@ -278,170 +299,63 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
278 * For centered modes, we have to calculate border widths & 299 * For centered modes, we have to calculate border widths &
279 * heights and modify the values programmed into the CRTC. 300 * heights and modify the values programmed into the CRTC.
280 */ 301 */
281 left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; 302 centre_horizontally(adjusted_mode, mode->hdisplay);
282 right_border = left_border; 303 centre_vertically(adjusted_mode, mode->vdisplay);
283 if (mode->hdisplay & 1) 304 border = LVDS_BORDER_ENABLE;
284 right_border++;
285 top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
286 bottom_border = top_border;
287 if (mode->vdisplay & 1)
288 bottom_border++;
289 /* Set active & border values */
290 adjusted_mode->crtc_hdisplay = mode->hdisplay;
291 /* Keep the boder be even */
292 if (right_border & 1)
293 right_border++;
294 /* use the border directly instead of border minuse one */
295 adjusted_mode->crtc_hblank_start = mode->hdisplay +
296 right_border;
297 /* keep the blank width constant */
298 adjusted_mode->crtc_hblank_end =
299 adjusted_mode->crtc_hblank_start + hblank_width;
300 /* get the hsync pos relative to hblank start */
301 hsync_pos = (hblank_width - hsync_width) / 2;
302 /* keep the hsync pos be even */
303 if (hsync_pos & 1)
304 hsync_pos++;
305 adjusted_mode->crtc_hsync_start =
306 adjusted_mode->crtc_hblank_start + hsync_pos;
307 /* keep the hsync width constant */
308 adjusted_mode->crtc_hsync_end =
309 adjusted_mode->crtc_hsync_start + hsync_width;
310 adjusted_mode->crtc_vdisplay = mode->vdisplay;
311 /* use the border instead of border minus one */
312 adjusted_mode->crtc_vblank_start = mode->vdisplay +
313 bottom_border;
314 /* keep the vblank width constant */
315 adjusted_mode->crtc_vblank_end =
316 adjusted_mode->crtc_vblank_start + vblank_width;
317 /* get the vsync start postion relative to vblank start */
318 vsync_pos = (vblank_width - vsync_width) / 2;
319 adjusted_mode->crtc_vsync_start =
320 adjusted_mode->crtc_vblank_start + vsync_pos;
321 /* keep the vsync width constant */
322 adjusted_mode->crtc_vsync_end =
323 adjusted_mode->crtc_vsync_start + vsync_width;
324 border = 1;
325 break; 305 break;
306
326 case DRM_MODE_SCALE_ASPECT: 307 case DRM_MODE_SCALE_ASPECT:
327 /* Scale but preserve the spect ratio */ 308 /* Scale but preserve the aspect ratio */
328 pfit_control |= PFIT_ENABLE;
329 if (IS_I965G(dev)) { 309 if (IS_I965G(dev)) {
310 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
311 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
312
313 pfit_control |= PFIT_ENABLE;
330 /* 965+ is easy, it does everything in hw */ 314 /* 965+ is easy, it does everything in hw */
331 if (panel_ratio > desired_ratio) 315 if (scaled_width > scaled_height)
332 pfit_control |= PFIT_SCALING_PILLAR; 316 pfit_control |= PFIT_SCALING_PILLAR;
333 else if (panel_ratio < desired_ratio) 317 else if (scaled_width < scaled_height)
334 pfit_control |= PFIT_SCALING_LETTER; 318 pfit_control |= PFIT_SCALING_LETTER;
335 else 319 else
336 pfit_control |= PFIT_SCALING_AUTO; 320 pfit_control |= PFIT_SCALING_AUTO;
337 } else { 321 } else {
322 u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
323 u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
338 /* 324 /*
339 * For earlier chips we have to calculate the scaling 325 * For earlier chips we have to calculate the scaling
340 * ratio by hand and program it into the 326 * ratio by hand and program it into the
341 * PFIT_PGM_RATIO register 327 * PFIT_PGM_RATIO register
342 */ 328 */
343 u32 horiz_bits, vert_bits, bits = 12; 329 if (scaled_width > scaled_height) { /* pillar */
344 horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ 330 centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
345 adjusted_mode->hdisplay; 331
346 vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ 332 border = LVDS_BORDER_ENABLE;
347 adjusted_mode->vdisplay; 333 if (mode->vdisplay != adjusted_mode->vdisplay) {
348 horiz_scale = adjusted_mode->hdisplay * 334 u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
349 PANEL_RATIO_FACTOR / mode->hdisplay; 335 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
350 vert_scale = adjusted_mode->vdisplay * 336 bits << PFIT_VERT_SCALE_SHIFT);
351 PANEL_RATIO_FACTOR / mode->vdisplay; 337 pfit_control |= (PFIT_ENABLE |
352 338 VERT_INTERP_BILINEAR |
353 /* retain aspect ratio */ 339 HORIZ_INTERP_BILINEAR);
354 if (panel_ratio > desired_ratio) { /* Pillar */ 340 }
355 u32 scaled_width; 341 } else if (scaled_width < scaled_height) { /* letter */
356 scaled_width = mode->hdisplay * vert_scale / 342 centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
357 PANEL_RATIO_FACTOR; 343
358 horiz_ratio = vert_ratio; 344 border = LVDS_BORDER_ENABLE;
359 pfit_control |= (VERT_AUTO_SCALE | 345 if (mode->hdisplay != adjusted_mode->hdisplay) {
360 VERT_INTERP_BILINEAR | 346 u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
361 HORIZ_INTERP_BILINEAR); 347 pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
362 /* Pillar will have left/right borders */ 348 bits << PFIT_VERT_SCALE_SHIFT);
363 left_border = (adjusted_mode->hdisplay - 349 pfit_control |= (PFIT_ENABLE |
364 scaled_width) / 2; 350 VERT_INTERP_BILINEAR |
365 right_border = left_border; 351 HORIZ_INTERP_BILINEAR);
366 if (mode->hdisplay & 1) /* odd resolutions */ 352 }
367 right_border++; 353 } else
368 /* keep the border be even */ 354 /* Aspects match, Let hw scale both directions */
369 if (right_border & 1) 355 pfit_control |= (PFIT_ENABLE |
370 right_border++; 356 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
371 adjusted_mode->crtc_hdisplay = scaled_width;
372 /* use border instead of border minus one */
373 adjusted_mode->crtc_hblank_start =
374 scaled_width + right_border;
375 /* keep the hblank width constant */
376 adjusted_mode->crtc_hblank_end =
377 adjusted_mode->crtc_hblank_start +
378 hblank_width;
379 /*
380 * get the hsync start pos relative to
381 * hblank start
382 */
383 hsync_pos = (hblank_width - hsync_width) / 2;
384 /* keep the hsync_pos be even */
385 if (hsync_pos & 1)
386 hsync_pos++;
387 adjusted_mode->crtc_hsync_start =
388 adjusted_mode->crtc_hblank_start +
389 hsync_pos;
390 /* keept hsync width constant */
391 adjusted_mode->crtc_hsync_end =
392 adjusted_mode->crtc_hsync_start +
393 hsync_width;
394 border = 1;
395 } else if (panel_ratio < desired_ratio) { /* letter */
396 u32 scaled_height = mode->vdisplay *
397 horiz_scale / PANEL_RATIO_FACTOR;
398 vert_ratio = horiz_ratio;
399 pfit_control |= (HORIZ_AUTO_SCALE |
400 VERT_INTERP_BILINEAR |
401 HORIZ_INTERP_BILINEAR);
402 /* Letterbox will have top/bottom border */
403 top_border = (adjusted_mode->vdisplay -
404 scaled_height) / 2;
405 bottom_border = top_border;
406 if (mode->vdisplay & 1)
407 bottom_border++;
408 adjusted_mode->crtc_vdisplay = scaled_height;
409 /* use border instead of border minus one */
410 adjusted_mode->crtc_vblank_start =
411 scaled_height + bottom_border;
412 /* keep the vblank width constant */
413 adjusted_mode->crtc_vblank_end =
414 adjusted_mode->crtc_vblank_start +
415 vblank_width;
416 /*
417 * get the vsync start pos relative to
418 * vblank start
419 */
420 vsync_pos = (vblank_width - vsync_width) / 2;
421 adjusted_mode->crtc_vsync_start =
422 adjusted_mode->crtc_vblank_start +
423 vsync_pos;
424 /* keep the vsync width constant */
425 adjusted_mode->crtc_vsync_end =
426 adjusted_mode->crtc_vsync_start +
427 vsync_width;
428 border = 1;
429 } else {
430 /* Aspects match, Let hw scale both directions */
431 pfit_control |= (VERT_AUTO_SCALE |
432 HORIZ_AUTO_SCALE |
433 VERT_INTERP_BILINEAR | 357 VERT_INTERP_BILINEAR |
434 HORIZ_INTERP_BILINEAR); 358 HORIZ_INTERP_BILINEAR);
435 }
436 horiz_bits = (1 << bits) * horiz_ratio /
437 PANEL_RATIO_FACTOR;
438 vert_bits = (1 << bits) * vert_ratio /
439 PANEL_RATIO_FACTOR;
440 pfit_pgm_ratios =
441 ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
442 PFIT_VERT_SCALE_MASK) |
443 ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
444 PFIT_HORIZ_SCALE_MASK);
445 } 359 }
446 break; 360 break;
447 361
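In the DRM_MODE_SCALE_ASPECT branch above, the pillarbox/letterbox decision cross-multiplies the two aspect ratios (adjusted->hdisplay * mode->vdisplay versus mode->hdisplay * adjusted->vdisplay), so no integer division is needed, and panel_fitter_scaling() expresses the programmed ratio in 1<<12 fixed point. A worked example with illustrative numbers:

  #include <stdio.h>

  #define ACCURACY 12
  #define FACTOR (1u << ACCURACY)

  int main(void)
  {
          unsigned int panel_w = 1280, panel_h = 800; /* native (adjusted) mode */
          unsigned int mode_w = 1024, mode_h = 768;   /* requested mode */

          unsigned int scaled_width  = panel_w * mode_h; /* 983040 */
          unsigned int scaled_height = mode_w * panel_h; /* 819200 */

          if (scaled_width > scaled_height) {
                  /* panel relatively wider: scale to full height, pillarbox */
                  unsigned int image_w = scaled_height / mode_h;  /* 1066 */
                  unsigned int ratio = mode_h * FACTOR / panel_h; /* 0xf5c */
                  printf("pillarbox %ux%u, scale 0x%03x\n", image_w, panel_h, ratio);
          } else if (scaled_width < scaled_height) {
                  unsigned int image_h = scaled_width / mode_w;
                  unsigned int ratio = mode_w * FACTOR / panel_w;
                  printf("letterbox %ux%u, scale 0x%03x\n", panel_w, image_h, ratio);
          } else {
                  printf("aspects match, let hw auto-scale\n");
          }
          return 0;
  }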
@@ -458,6 +372,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
458 VERT_INTERP_BILINEAR | 372 VERT_INTERP_BILINEAR |
459 HORIZ_INTERP_BILINEAR); 373 HORIZ_INTERP_BILINEAR);
460 break; 374 break;
375
461 default: 376 default:
462 break; 377 break;
463 } 378 }
@@ -465,14 +380,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
465out: 380out:
466 lvds_priv->pfit_control = pfit_control; 381 lvds_priv->pfit_control = pfit_control;
467 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; 382 lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
468 /* 383 dev_priv->lvds_border_bits = border;
469 * When there exists the border, it means that the LVDS_BORDR 384
470 * should be enabled.
471 */
472 if (border)
473 dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
474 else
475 dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
476 /* 385 /*
477 * XXX: It would be nice to support lower refresh rates on the 386 * XXX: It would be nice to support lower refresh rates on the
478 * panels to reduce power consumption, and perhaps match the 387 * panels to reduce power consumption, and perhaps match the
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7ad5139d17c..d39aea24eabe 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -65,7 +65,7 @@
65#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ 65#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
66#define OCMD_TVSYNCFLIP_PARITY (0x1<<9) 66#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
67#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) 67#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
68#define OCMD_BUF_TYPE_MASK (Ox1<<5) 68#define OCMD_BUF_TYPE_MASK (0x1<<5)
69#define OCMD_BUF_TYPE_FRAME (0x0<<5) 69#define OCMD_BUF_TYPE_FRAME (0x0<<5)
70#define OCMD_BUF_TYPE_FIELD (0x1<<5) 70#define OCMD_BUF_TYPE_FIELD (0x1<<5)
71#define OCMD_TEST_MODE (0x1<<4) 71#define OCMD_TEST_MODE (0x1<<4)
@@ -185,7 +185,8 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
185 185
186 if (OVERLAY_NONPHYSICAL(overlay->dev)) { 186 if (OVERLAY_NONPHYSICAL(overlay->dev)) {
187 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 187 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
188 overlay->reg_bo->gtt_offset); 188 overlay->reg_bo->gtt_offset,
189 KM_USER0);
189 190
190 if (!regs) { 191 if (!regs) {
191 DRM_ERROR("failed to map overlay regs in GTT\n"); 192 DRM_ERROR("failed to map overlay regs in GTT\n");
@@ -200,7 +201,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) 201static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{ 202{
202 if (OVERLAY_NONPHYSICAL(overlay->dev)) 203 if (OVERLAY_NONPHYSICAL(overlay->dev))
203 io_mapping_unmap_atomic(overlay->virt_addr); 204 io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
204 205
205 overlay->virt_addr = NULL; 206 overlay->virt_addr = NULL;
206 207
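The two overlay hunks above track an io-mapping API change of this era: the atomic map and unmap now both name an explicit kmap slot, and the two calls must agree on it. A minimal usage sketch in kernel context; the function name, offset and error handling are illustrative:

  #include <linux/io-mapping.h>
  #include <asm/kmap_types.h>

  static void poke_regs(struct io_mapping *mapping, unsigned long offset)
  {
          void *regs;

          /* map and unmap must use the same atomic kmap slot */
          regs = io_mapping_map_atomic_wc(mapping, offset, KM_USER0);
          if (!regs)
                  return;
          /* ... access the write-combined mapping ... */
          io_mapping_unmap_atomic(regs, KM_USER0);
  }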
@@ -958,7 +959,7 @@ static int check_overlay_src(struct drm_device *dev,
958 || rec->src_width < N_HORIZ_Y_TAPS*4) 959 || rec->src_width < N_HORIZ_Y_TAPS*4)
959 return -EINVAL; 960 return -EINVAL;
960 961
961 /* check alingment constrains */ 962 /* check alignment constraints */
962 switch (rec->flags & I915_OVERLAY_TYPE_MASK) { 963 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
963 case I915_OVERLAY_RGB: 964 case I915_OVERLAY_RGB:
964 /* not implemented */ 965 /* not implemented */
@@ -990,7 +991,10 @@ static int check_overlay_src(struct drm_device *dev,
990 return -EINVAL; 991 return -EINVAL;
991 992
992 /* stride checking */ 993 /* stride checking */
993 stride_mask = 63; 994 if (IS_I830(dev) || IS_845G(dev))
995 stride_mask = 255;
996 else
997 stride_mask = 63;
994 998
995 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 999 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
996 return -EINVAL; 1000 return -EINVAL;
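The stride check above tests alignment with a mask: a stride passes when stride & mask is zero, i.e. when it is a multiple of mask+1. i830/845 therefore require 256-byte-aligned overlay strides, later parts 64-byte. A quick standalone illustration:

  #include <stdio.h>

  /* stride must be a multiple of align (a power of two) */
  static int stride_ok(unsigned int stride, unsigned int align)
  {
          return (stride & (align - 1)) == 0;
  }

  int main(void)
  {
          printf("%d\n", stride_ok(1920 * 2, 256)); /* 3840: OK even on i830 */
          printf("%d\n", stride_ok(1366 * 2, 64));  /* 2732: rejected */
          return 0;
  }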
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 03c231be2273..d9d4d51aa89e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1237,9 +1237,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1237 1237
1238 /* Set the SDVO control regs. */ 1238 /* Set the SDVO control regs. */
1239 if (IS_I965G(dev)) { 1239 if (IS_I965G(dev)) {
1240 sdvox |= SDVO_BORDER_ENABLE | 1240 sdvox |= SDVO_BORDER_ENABLE;
1241 SDVO_VSYNC_ACTIVE_HIGH | 1241 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1242 SDVO_HSYNC_ACTIVE_HIGH; 1242 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
1243 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1244 sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
1243 } else { 1245 } else {
1244 sdvox |= I915_READ(sdvo_priv->sdvo_reg); 1246 sdvox |= I915_READ(sdvo_priv->sdvo_reg);
1245 switch (sdvo_priv->sdvo_reg) { 1247 switch (sdvo_priv->sdvo_reg) {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2d4e4045ca9..cc3726a4a1cb 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -476,7 +476,7 @@ static const struct tv_mode tv_modes[] = {
476 .vi_end_f1 = 20, .vi_end_f2 = 21, 476 .vi_end_f1 = 20, .vi_end_f2 = 21,
477 .nbr_end = 240, 477 .nbr_end = 240,
478 478
479 .burst_ena = 8, 479 .burst_ena = true,
480 .hburst_start = 72, .hburst_len = 34, 480 .hburst_start = 72, .hburst_len = 34,
481 .vburst_start_f1 = 9, .vburst_end_f1 = 240, 481 .vburst_start_f1 = 9, .vburst_end_f1 = 240,
482 .vburst_start_f2 = 10, .vburst_end_f2 = 240, 482 .vburst_start_f2 = 10, .vburst_end_f2 = 240,
@@ -896,8 +896,6 @@ static const struct tv_mode tv_modes[] = {
896 }, 896 },
897}; 897};
898 898
899#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0])
900
901static void 899static void
902intel_tv_dpms(struct drm_encoder *encoder, int mode) 900intel_tv_dpms(struct drm_encoder *encoder, int mode)
903{ 901{
@@ -1512,7 +1510,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1512 tv_priv->margin[TV_MARGIN_BOTTOM] = val; 1510 tv_priv->margin[TV_MARGIN_BOTTOM] = val;
1513 changed = true; 1511 changed = true;
1514 } else if (property == dev->mode_config.tv_mode_property) { 1512 } else if (property == dev->mode_config.tv_mode_property) {
1515 if (val >= NUM_TV_MODES) { 1513 if (val >= ARRAY_SIZE(tv_modes)) {
1516 ret = -EINVAL; 1514 ret = -EINVAL;
1517 goto out; 1515 goto out;
1518 } 1516 }
@@ -1693,13 +1691,13 @@ intel_tv_init(struct drm_device *dev)
1693 connector->doublescan_allowed = false; 1691 connector->doublescan_allowed = false;
1694 1692
1695 /* Create TV properties then attach current values */ 1693 /* Create TV properties then attach current values */
1696 tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, 1694 tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
1697 GFP_KERNEL); 1695 GFP_KERNEL);
1698 if (!tv_format_names) 1696 if (!tv_format_names)
1699 goto out; 1697 goto out;
1700 for (i = 0; i < NUM_TV_MODES; i++) 1698 for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
1701 tv_format_names[i] = tv_modes[i].name; 1699 tv_format_names[i] = tv_modes[i].name;
1702 drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); 1700 drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
1703 1701
1704 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, 1702 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
1705 initial_mode); 1703 initial_mode);
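The tv_modes hunks replace the hand-rolled NUM_TV_MODES macro with the kernel's ARRAY_SIZE(). Beyond brevity, the open-coded macro had no outer parentheses, so uses inside a larger expression could parse wrongly, and the kernel's ARRAY_SIZE() additionally fails to build when handed a pointer rather than a true array. A userspace illustration of the parenthesisation hazard, with a made-up table:

  #include <stdio.h>

  static const int tbl[] = { 1, 2, 3, 4 };

  #define NUM_ENTRIES sizeof(tbl) / sizeof(tbl[0])   /* unparenthesised */
  #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) /* safe form */

  int main(void)
  {
          /* 64 / NUM_ENTRIES expands to 64 / sizeof(tbl) / sizeof(tbl[0]),
           * i.e. (64 / 16) / 4 = 1 with 4-byte ints, not the intended 16. */
          printf("%zu vs %zu\n",
                 (size_t)(64 / NUM_ENTRIES),
                 64 / ARRAY_SIZE(tbl));
          return 0;
  }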