author     Stefan Richter <stefanr@s5r6.in-berlin.de>	2010-02-24 14:31:04 -0500
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>	2010-02-24 14:33:45 -0500
commit     109d28152b6e9d5de64cd23e3bc08885ccb3d1ef (patch)
tree       b7b8863faa05254781acfb85cc41da3eef467c6b /drivers/gpu/drm/i915
parent     168cf9af699e87d5a6f44b684583714ecabb8e71 (diff)
parent     60b341b778cc2929df16c0a504c91621b3c6a4ad (diff)

Merge tag 'v2.6.33' for its firewire changes since last branch point

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>

Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c    |  33
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c        |  36
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c        | 270
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        | 139
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        | 390
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c |  46
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c        | 132
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h        |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c    |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c      |  35
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h      |  40
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c       |   5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c   | 732
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c        |  81
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c        |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c      |  55
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c      | 116
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c      |  94
18 files changed, 1347 insertions(+), 885 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b580..a894ade03093 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
 		mem = kmap_atomic(pages[page], KM_USER0);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
-		kunmap_atomic(pages[page], KM_USER0);
+		kunmap_atomic(mem, KM_USER0);
 	}
 }
 
@@ -290,7 +290,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			ret = i915_gem_object_get_pages(obj);
+			ret = i915_gem_object_get_pages(obj, 0);
 			if (ret) {
 				DRM_ERROR("Failed to get pages: %d\n", ret);
 				spin_unlock(&dev_priv->mm.active_list_lock);
@@ -386,34 +386,6 @@ out:
 	return 0;
 }
 
-static int i915_registers_info(struct seq_file *m, void *data) {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t reg;
-
-#define DUMP_RANGE(start, end) \
-	for (reg=start; reg < end; reg += 4) \
-		seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
-	DUMP_RANGE(0x00000, 0x00fff);   /* VGA registers */
-	DUMP_RANGE(0x02000, 0x02fff);   /* instruction, memory, interrupt control registers */
-	DUMP_RANGE(0x03000, 0x031ff);   /* FENCE and PPGTT control registers */
-	DUMP_RANGE(0x03200, 0x03fff);   /* frame buffer compression registers */
-	DUMP_RANGE(0x05000, 0x05fff);   /* I/O control registers */
-	DUMP_RANGE(0x06000, 0x06fff);   /* clock control registers */
-	DUMP_RANGE(0x07000, 0x07fff);   /* 3D internal debug registers */
-	DUMP_RANGE(0x07400, 0x088ff);   /* GPE debug registers */
-	DUMP_RANGE(0x0a000, 0x0afff);   /* display palette registers */
-	DUMP_RANGE(0x10000, 0x13fff);   /* MMIO MCHBAR */
-	DUMP_RANGE(0x30000, 0x3ffff);   /* overlay registers */
-	DUMP_RANGE(0x60000, 0x6ffff);   /* display engine pipeline registers */
-	DUMP_RANGE(0x70000, 0x72fff);   /* display and cursor registers */
-	DUMP_RANGE(0x73000, 0x73fff);   /* performance counters */
-
-	return 0;
-}
-
 static int
 i915_wedged_open(struct inode *inode,
 		 struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 }
 
 static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_regs", i915_registers_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfeac7f57..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
-		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 
 	if (!dev_priv->status_page_dmah) {
 		DRM_ERROR("Can not allocate hardware status page\n");
@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
 
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 
+	if (IS_I965G(dev))
+		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
+					     0xf0;
+
 	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 	return 0;
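Note: the HWS_PGA hunk above appears to pair with the drm_pci_alloc() change in the first hunk, which drops the 32-bit address mask argument and so allows status pages above 4 GiB. Reading the expression: with HWS_PGA holding the page address in bits 31:12, (addr >> 28) & 0xf0 places bits 35:32 of the physical address into register bits 7:4. A standalone model of the fold (interpretation hedged from the hunk itself, not from hardware documentation):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold bits 35:32 of a wide physical address into bits 7:4 of the
     * 32-bit register value, mirroring the IS_I965G() branch above. */
    static uint32_t hws_pga(uint64_t page_addr)
    {
        uint32_t reg = (uint32_t)page_addr;             /* bits 31:12 */

        reg |= (uint32_t)((page_addr >> 28) & 0xf0);    /* 35:32 -> 7:4 */
        return reg;
    }

    int main(void)
    {
        printf("0x%08x\n", hws_pga(0x1fffff000ull));    /* page above 4 GiB */
        return 0;
    }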
@@ -731,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	if (cmdbuf->num_cliprects) {
 		cliprects = kcalloc(cmdbuf->num_cliprects,
 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
-		if (cliprects == NULL)
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
 			goto fail_batch_free;
+		}
 
 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
 				     cmdbuf->num_cliprects *
@@ -813,9 +819,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_PAGEFLIPPING:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXECBUF2:
+		/* depends on GEM */
+		value = dev_priv->has_gem;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
 		return -EINVAL;
 	}
 
@@ -1117,7 +1127,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mm_node *compressed_fb, *compressed_llb;
-	unsigned long cfb_base, ll_base;
+	unsigned long cfb_base;
+	unsigned long ll_base = 0;
 
 	/* Leave 1M for line length buffer & misc. */
 	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1211,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
 		0xff000000;
 
-	if (IS_MOBILE(dev) || IS_I9XX(dev))
-		dev_priv->cursor_needs_physical = true;
-	else
-		dev_priv->cursor_needs_physical = false;
-
-	if (IS_I965G(dev) || IS_G33(dev))
-		dev_priv->cursor_needs_physical = false;
-
 	/* Basic memrange allocator for stolen space (aka vram) */
 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
 	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1260,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	if (ret)
 		goto destroy_ringbuffer;
 
+	intel_modeset_init(dev);
+
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto destroy_ringbuffer;
@@ -1271,8 +1276,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
 
 	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
 
-	intel_modeset_init(dev);
-
 	drm_helper_initial_config(dev);
 
 	return 0;
@@ -1360,7 +1363,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	resource_size_t base, size;
-	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+	int ret = 0, mmio_bar;
 	uint32_t agp_size, prealloc_size, prealloc_start;
 
 	/* i915 has 4 more counters */
@@ -1376,8 +1379,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->dev_private = (void *)dev_priv;
 	dev_priv->dev = dev;
+	dev_priv->info = (struct intel_device_info *) flags;
 
 	/* Add register map (needed for suspend/resume) */
+	mmio_bar = IS_I9XX(dev) ? 0 : 1;
 	base = drm_get_resource_start(dev, mmio_bar);
 	size = drm_get_resource_len(dev, mmio_bar);
 
@@ -1652,6 +1657,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
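Note: together, the I915_PARAM_HAS_EXECBUF2 getparam case and the DRM_I915_GEM_EXECBUFFER2 ioctl entry above give userspace a way to probe for the new execbuffer2 path before using it. A hedged userspace-side sketch with libdrm (error handling trimmed; fd is an open DRM device descriptor):

    #include <xf86drm.h>
    #include <i915_drm.h>

    /* Returns nonzero if the running kernel advertises execbuf2.
     * On older kernels the unknown param makes GETPARAM fail with
     * EINVAL, so a failed ioctl is treated as "not supported". */
    static int has_execbuf2(int fd)
    {
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_EXECBUF2;
        gp.value = &value;
        if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
            return 0;
        return value;
    }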
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 24286ca168fc..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#include "drm_pciids.h"
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
 
@@ -46,36 +45,149 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0400);
 
+unsigned int i915_lvds_downclock = 0;
+module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
+
 static struct drm_driver driver;
 
-static struct pci_device_id pciidlist[] = {
-	i915_PCI_IDS
+#define INTEL_VGA_DEVICE(id, info) {		\
+	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
+	.class_mask = 0xffff00,			\
+	.vendor = 0x8086,			\
+	.device = id,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+const static struct intel_device_info intel_i830_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_845g_info = {
+	.is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i85x_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i865g_info = {
+	.is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i915g_info = {
+	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i915gm_info = {
+	.is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945g_info = {
+	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945gm_info = {
+	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i965g_info = {
+	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_i965gm_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g33_info = {
+	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g45_info = {
+	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_gm45_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_pineview_info = {
+	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_d_info = {
+	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_m_info = {
+	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+};
+
+const static struct pci_device_id pciidlist[] = {
+	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+	{0, 0, 0}
 };
 
 #if defined(CONFIG_DRM_I915_KMS)
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!dev || !dev_priv) {
-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
-		return -ENODEV;
-	}
-
-	if (state.event == PM_EVENT_PRETHAW)
-		return 0;
-
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (i915_gem_idle(dev))
+		int error = i915_gem_idle(dev);
+		if (error) {
 			dev_err(&dev->pdev->dev,
-				"GEM idle failed, resume may fail\n");
+				"GEM idle failed, resume might fail\n");
+			return error;
+		}
 		drm_irq_uninstall(dev);
 	}
 
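Note: the INTEL_VGA_DEVICE() entries stash a pointer to each chip's intel_device_info in pci_device_id.driver_data. DRM's PCI probe path forwards that value to the driver's load hook, where i915_driver_load() (see the i915_dma.c hunk above) casts it back via dev_priv->info = (struct intel_device_info *) flags. A hypothetical helper showing just the cast, not the driver's literal probe code:

    /* Recover the per-chip info stored by INTEL_VGA_DEVICE(). */
    static const struct intel_device_info *
    info_from_id(const struct pci_device_id *ent)
    {
        return (const struct intel_device_info *)ent->driver_data;
    }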
@@ -83,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	intel_opregion_free(dev, 1);
 
+	/* Modeset on resume, not lid events */
+	dev_priv->modeset_on_lid = 0;
+
+	return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	int error;
+
+	if (!dev || !dev->dev_private) {
+		DRM_ERROR("dev: %p\n", dev);
+		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	error = i915_drm_freeze(dev);
+	if (error)
+		return error;
+
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
 
-	/* Modeset on resume, not lid events */
-	dev_priv->modeset_on_lid = 0;
-
 	return 0;
 }
 
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (pci_enable_device(dev->pdev))
-		return -1;
-	pci_set_master(dev->pdev);
+	int error = 0;
 
 	i915_restore_state(dev);
 
@@ -113,21 +241,28 @@
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
 
-		ret = i915_gem_init_ringbuffer(dev);
-		if (ret != 0)
-			ret = -1;
+		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
 		drm_irq_install(dev);
-	}
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 	}
 
 	dev_priv->modeset_on_lid = 0;
 
-	return ret;
+	return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	if (pci_enable_device(dev->pdev))
+		return -EIO;
+
+	pci_set_master(dev->pdev);
+
+	return i915_drm_thaw(dev);
 }
 
 /**
@@ -268,22 +403,73 @@ i915_pci_remove(struct pci_dev *pdev)
 	drm_put_dev(dev);
 }
 
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+	int error;
 
-	return i915_suspend(dev, state);
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	error = i915_drm_freeze(drm_dev);
+	if (error)
+		return error;
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
 }
 
-static int
-i915_pci_resume(struct pci_dev *pdev)
+static int i915_pm_resume(struct device *dev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
-	return i915_resume(dev);
+	return i915_resume(drm_dev);
 }
 
+static int i915_pm_freeze(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	if (!drm_dev || !drm_dev->dev_private) {
+		dev_err(dev, "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	return i915_drm_freeze(drm_dev);
+}
+
+static int i915_pm_thaw(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_thaw(drm_dev);
+}
+
+static int i915_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+	return i915_drm_freeze(drm_dev);
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+	.suspend = i915_pm_suspend,
+	.resume = i915_pm_resume,
+	.freeze = i915_pm_freeze,
+	.thaw = i915_pm_thaw,
+	.poweroff = i915_pm_poweroff,
+	.restore = i915_pm_resume,
+};
+
 static struct vm_operations_struct i915_gem_vm_ops = {
 	.fault = i915_gem_fault,
 	.open = drm_gem_vm_open,
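Note, for readers who have not used dev_pm_ops: .suspend/.resume serve suspend-to-RAM; .freeze/.thaw bracket hibernation image creation, including its abort path; .poweroff is the final power-down when entering hibernation; and .restore runs when booting back from the image. That mapping is why .freeze and .poweroff can share i915_drm_freeze(), why .restore can reuse i915_pm_resume(), and why only i915_pm_suspend() touches the PCI power state directly.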
@@ -303,8 +489,11 @@ static struct drm_driver driver = {
 	.lastclose = i915_driver_lastclose,
 	.preclose = i915_driver_preclose,
 	.postclose = i915_driver_postclose,
+
+	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
 	.suspend = i915_suspend,
 	.resume = i915_resume,
+
 	.device_is_agp = i915_driver_device_is_agp,
 	.enable_vblank = i915_enable_vblank,
 	.disable_vblank = i915_disable_vblank,
@@ -344,10 +533,7 @@
 		.id_table = pciidlist,
 		.probe = i915_pci_probe,
 		.remove = i915_pci_remove,
-#ifdef CONFIG_PM
-		.resume = i915_pci_resume,
-		.suspend = i915_pci_suspend,
-#endif
+		.driver.pm = &i915_pm_ops,
 	},
 
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac72f5bb..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
 
 struct intel_overlay;
 
+struct intel_device_info {
+	u8 is_mobile : 1;
+	u8 is_i8xx : 1;
+	u8 is_i915g : 1;
+	u8 is_i9xx : 1;
+	u8 is_i945gm : 1;
+	u8 is_i965g : 1;
+	u8 is_i965gm : 1;
+	u8 is_g33 : 1;
+	u8 need_gfx_hws : 1;
+	u8 is_g4x : 1;
+	u8 is_pineview : 1;
+	u8 is_ironlake : 1;
+	u8 has_fbc : 1;
+	u8 has_rc6 : 1;
+	u8 has_pipe_cxsr : 1;
+	u8 has_hotplug : 1;
+	u8 cursor_needs_physical : 1;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
+	const struct intel_device_info *info;
+
 	int has_gem;
 
 	void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
 	int hangcheck_count;
 	uint32_t last_acthd;
 
-	bool cursor_needs_physical;
-
 	struct drm_mm vram;
 
 	unsigned long cfb_size;
@@ -263,6 +283,7 @@ typedef struct drm_i915_private {
 	unsigned int lvds_use_ssc:1;
 	unsigned int edp_support:1;
 	int lvds_ssc_freq;
+	int edp_bpp;
 
 	struct notifier_block lid_notifier;
 
@@ -287,8 +308,6 @@ typedef struct drm_i915_private {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveRENDERSTANDBY;
-	u32 savePWRCTXA;
 	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
@@ -474,6 +493,15 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -561,6 +589,7 @@ typedef struct drm_i915_private {
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
+	struct drm_connector *int_lvds_connector;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -572,6 +601,8 @@ struct drm_i915_gem_object {
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head list;
+	/** This object's place on GPU write list */
+	struct list_head gpu_write_list;
 
 	/** This object's place on the fenced object LRU */
 	struct list_head fence_list;
@@ -703,6 +734,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
 extern unsigned int i915_powersave;
+extern unsigned int i915_lvds_downclock;
 
 extern void i915_save_display(struct drm_device *dev);
 extern void i915_restore_display(struct drm_device *dev);
@@ -794,6 +826,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -843,12 +877,13 @@ int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptib
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_gem_object *obj, int id);
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj);
+int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
@@ -860,6 +895,9 @@ void i915_gem_shrinker_exit(void);
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
 void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+		    int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
 
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1020,33 @@
 extern int i915_wrap_ring(struct drm_device * dev);
 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
-
-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
-			(dev)->pci_device == 0x27AE)
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
-		       (dev)->pci_device == 0x2982 || \
-		       (dev)->pci_device == 0x2992 || \
-		       (dev)->pci_device == 0x29A2 || \
-		       (dev)->pci_device == 0x2A02 || \
-		       (dev)->pci_device == 0x2A12 || \
-		       (dev)->pci_device == 0x2A42 || \
-		       (dev)->pci_device == 0x2E02 || \
-		       (dev)->pci_device == 0x2E12 || \
-		       (dev)->pci_device == 0x2E22 || \
-		       (dev)->pci_device == 0x2E32 || \
-		       (dev)->pci_device == 0x2E42 || \
-		       (dev)->pci_device == 0x0042 || \
-		       (dev)->pci_device == 0x0046)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
-			(dev)->pci_device == 0x2A12)
-
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
-		     (dev)->pci_device == 0x2E12 || \
-		     (dev)->pci_device == 0x2E22 || \
-		     (dev)->pci_device == 0x2E32 || \
-		     (dev)->pci_device == 0x2E42 || \
-		     IS_GM45(dev))
-
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
-
-#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
-			(dev)->pci_device == 0x29B2 || \
-			(dev)->pci_device == 0x29D2 || \
-			(IS_PINEVIEW(dev)))
-
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev)		((dev)->pci_device == 0x3577)
+#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I85X(dev)		((dev)->pci_device == 0x3582)
+#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev)		(INTEL_INFO(dev)->is_i8xx)
+#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
+#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
+#define IS_I965G(dev)		(INTEL_INFO(dev)->is_i965g)
+#define IS_I965GM(dev)		(INTEL_INFO(dev)->is_i965gm)
+#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-		      IS_IRONLAKE(dev))
+#define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
+#define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
+#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
+#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
-				IS_IRONLAKE(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
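Note: the net effect of this block is that chip checks become single bitfield reads against the const info struct selected at probe time, instead of ever-growing PCI-ID comparison chains. A standalone model of the pattern (userspace C, hypothetical names, compiles as-is):

    #include <stdio.h>

    struct device_info {
        unsigned has_fbc : 1;
        unsigned has_hotplug : 1;
        unsigned is_mobile : 1;
    };

    /* One const table entry per chip generation, chosen once at probe. */
    static const struct device_info i915gm_like = {
        .has_fbc = 1, .is_mobile = 1,
    };

    /* Feature "macros" collapse to a field read. */
    #define HAS_FBC(info)   ((info)->has_fbc)

    int main(void)
    {
        const struct device_info *info = &i915gm_like;

        printf("fbc=%d hotplug=%d mobile=%d\n",
               HAS_FBC(info), info->has_hotplug, info->is_mobile);
        return 0;
    }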
@@ -1054,17 +1058,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define SUPPORTS_EDP(dev)	(IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)	(IS_I9XX(dev) && IS_MOBILE(dev) && \
 				 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev)	(INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
 #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
-			   (IS_I9XX(dev) || IS_GM45(dev)) && \
-			   !IS_PINEVIEW(dev) && \
-			   !IS_IRONLAKE(dev))
-#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
+#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev)	(INTEL_INFO(dev)->has_fbc)
+#define I915_HAS_RC6(dev)	(INTEL_INFO(dev)->has_rc6)
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050a..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 
 	mutex_lock(&dev->struct_mutex);
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, 0);
 	if (ret != 0)
 		goto fail_unlock;
 
@@ -321,40 +321,24 @@
 	return ret;
 }
 
-static inline gfp_t
-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
-{
-	return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
-}
-
-static inline void
-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
-{
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
-}
-
 static int
 i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_object_get_pages(obj);
+	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
 
 	/* If we've insufficient memory to map in the pages, attempt
 	 * to make some space by throwing out some old buffers.
 	 */
 	if (ret == -ENOMEM) {
 		struct drm_device *dev = obj->dev;
-		gfp_t gfp;
 
 		ret = i915_gem_evict_something(dev, obj->size);
 		if (ret)
 			return ret;
 
-		gfp = i915_gem_object_get_page_gfp_mask(obj);
-		i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
-		ret = i915_gem_object_get_pages(obj);
-		i915_gem_object_set_page_gfp_mask (obj, gfp);
+		ret = i915_gem_object_get_pages(obj, 0);
 	}
 
 	return ret;
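Note: get_pages_or_evict() now encodes a two-pass allocation policy through the new gfpmask argument: pass 1 asks the allocator to fail fast (__GFP_NORETRY | __GFP_NOWARN), and only after evicting old buffers does pass 2 retry with the mapping's default flags (gfpmask 0), letting reclaim apply full pressure. A standalone model of the control flow (userspace C, hypothetical names):

    #include <errno.h>
    #include <stdio.h>

    static int memory_tight = 1;

    /* Stand-in for i915_gem_object_get_pages(): the fail-fast pass
     * gives up under pressure instead of triggering heavy reclaim. */
    static int get_pages(int fail_fast)
    {
        if (memory_tight && fail_fast)
            return -ENOMEM;
        return 0;
    }

    /* Stand-in for i915_gem_evict_something(). */
    static int evict_something(void)
    {
        memory_tight = 0;       /* pretend eviction freed memory */
        return 0;
    }

    static int get_pages_or_evict(void)
    {
        int ret = get_pages(1);         /* pass 1: NORETRY | NOWARN */

        if (ret == -ENOMEM) {
            ret = evict_something();
            if (ret)
                return ret;
            ret = get_pages(0);         /* pass 2: default gfp mask */
        }
        return ret;
    }

    int main(void)
    {
        printf("result: %d\n", get_pages_or_evict());
        return 0;
    }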
@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, | |||
790 | 774 | ||
791 | mutex_lock(&dev->struct_mutex); | 775 | mutex_lock(&dev->struct_mutex); |
792 | 776 | ||
793 | ret = i915_gem_object_get_pages(obj); | 777 | ret = i915_gem_object_get_pages(obj, 0); |
794 | if (ret != 0) | 778 | if (ret != 0) |
795 | goto fail_unlock; | 779 | goto fail_unlock; |
796 | 780 | ||
@@ -1568,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | |||
1568 | else | 1552 | else |
1569 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | 1553 | list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); |
1570 | 1554 | ||
1555 | BUG_ON(!list_empty(&obj_priv->gpu_write_list)); | ||
1556 | |||
1571 | obj_priv->last_rendering_seqno = 0; | 1557 | obj_priv->last_rendering_seqno = 0; |
1572 | if (obj_priv->active) { | 1558 | if (obj_priv->active) { |
1573 | obj_priv->active = 0; | 1559 | obj_priv->active = 0; |
@@ -1638,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1638 | struct drm_i915_gem_object *obj_priv, *next; | 1624 | struct drm_i915_gem_object *obj_priv, *next; |
1639 | 1625 | ||
1640 | list_for_each_entry_safe(obj_priv, next, | 1626 | list_for_each_entry_safe(obj_priv, next, |
1641 | &dev_priv->mm.flushing_list, list) { | 1627 | &dev_priv->mm.gpu_write_list, |
1628 | gpu_write_list) { | ||
1642 | struct drm_gem_object *obj = obj_priv->obj; | 1629 | struct drm_gem_object *obj = obj_priv->obj; |
1643 | 1630 | ||
1644 | if ((obj->write_domain & flush_domains) == | 1631 | if ((obj->write_domain & flush_domains) == |
@@ -1646,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, | |||
1646 | uint32_t old_write_domain = obj->write_domain; | 1633 | uint32_t old_write_domain = obj->write_domain; |
1647 | 1634 | ||
1648 | obj->write_domain = 0; | 1635 | obj->write_domain = 0; |
1636 | list_del_init(&obj_priv->gpu_write_list); | ||
1649 | i915_gem_object_move_to_active(obj, seqno); | 1637 | i915_gem_object_move_to_active(obj, seqno); |
1650 | 1638 | ||
1651 | trace_i915_gem_object_change_domain(obj, | 1639 | trace_i915_gem_object_change_domain(obj, |
@@ -2021,9 +2009,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2021 | /* blow away mappings if mapped through GTT */ | 2009 | /* blow away mappings if mapped through GTT */ |
2022 | i915_gem_release_mmap(obj); | 2010 | i915_gem_release_mmap(obj); |
2023 | 2011 | ||
2024 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2025 | i915_gem_clear_fence_reg(obj); | ||
2026 | |||
2027 | /* Move the object to the CPU domain to ensure that | 2012 | /* Move the object to the CPU domain to ensure that |
2028 | * any possible CPU writes while it's not in the GTT | 2013 | * any possible CPU writes while it's not in the GTT |
2029 | * are flushed when we go to remap it. This will | 2014 | * are flushed when we go to remap it. This will |
@@ -2039,6 +2024,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) | |||
2039 | 2024 | ||
2040 | BUG_ON(obj_priv->active); | 2025 | BUG_ON(obj_priv->active); |
2041 | 2026 | ||
2027 | /* release the fence reg _after_ flushing */ | ||
2028 | if (obj_priv->fence_reg != I915_FENCE_REG_NONE) | ||
2029 | i915_gem_clear_fence_reg(obj); | ||
2030 | |||
2042 | if (obj_priv->agp_mem != NULL) { | 2031 | if (obj_priv->agp_mem != NULL) { |
2043 | drm_unbind_agp(obj_priv->agp_mem); | 2032 | drm_unbind_agp(obj_priv->agp_mem); |
2044 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | 2033 | drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); |
@@ -2099,8 +2088,8 @@ static int | |||
2099 | i915_gem_evict_everything(struct drm_device *dev) | 2088 | i915_gem_evict_everything(struct drm_device *dev) |
2100 | { | 2089 | { |
2101 | drm_i915_private_t *dev_priv = dev->dev_private; | 2090 | drm_i915_private_t *dev_priv = dev->dev_private; |
2102 | uint32_t seqno; | ||
2103 | int ret; | 2091 | int ret; |
2092 | uint32_t seqno; | ||
2104 | bool lists_empty; | 2093 | bool lists_empty; |
2105 | 2094 | ||
2106 | spin_lock(&dev_priv->mm.active_list_lock); | 2095 | spin_lock(&dev_priv->mm.active_list_lock); |
@@ -2122,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
2122 | if (ret) | 2111 | if (ret) |
2123 | return ret; | 2112 | return ret; |
2124 | 2113 | ||
2114 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
2115 | |||
2125 | ret = i915_gem_evict_from_inactive_list(dev); | 2116 | ret = i915_gem_evict_from_inactive_list(dev); |
2126 | if (ret) | 2117 | if (ret) |
2127 | return ret; | 2118 | return ret; |
@@ -2229,7 +2220,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) | |||
2229 | } | 2220 | } |
2230 | 2221 | ||
2231 | int | 2222 | int |
2232 | i915_gem_object_get_pages(struct drm_gem_object *obj) | 2223 | i915_gem_object_get_pages(struct drm_gem_object *obj, |
2224 | gfp_t gfpmask) | ||
2233 | { | 2225 | { |
2234 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2226 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2235 | int page_count, i; | 2227 | int page_count, i; |
@@ -2255,7 +2247,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) | |||
2255 | inode = obj->filp->f_path.dentry->d_inode; | 2247 | inode = obj->filp->f_path.dentry->d_inode; |
2256 | mapping = inode->i_mapping; | 2248 | mapping = inode->i_mapping; |
2257 | for (i = 0; i < page_count; i++) { | 2249 | for (i = 0; i < page_count; i++) { |
2258 | page = read_mapping_page(mapping, i, NULL); | 2250 | page = read_cache_page_gfp(mapping, i, |
2251 | mapping_gfp_mask (mapping) | | ||
2252 | __GFP_COLD | | ||
2253 | gfpmask); | ||
2259 | if (IS_ERR(page)) { | 2254 | if (IS_ERR(page)) { |
2260 | ret = PTR_ERR(page); | 2255 | ret = PTR_ERR(page); |
2261 | i915_gem_object_put_pages(obj); | 2256 | i915_gem_object_put_pages(obj); |
@@ -2578,12 +2573,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2578 | drm_i915_private_t *dev_priv = dev->dev_private; | 2573 | drm_i915_private_t *dev_priv = dev->dev_private; |
2579 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 2574 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
2580 | struct drm_mm_node *free_space; | 2575 | struct drm_mm_node *free_space; |
2581 | bool retry_alloc = false; | 2576 | gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; |
2582 | int ret; | 2577 | int ret; |
2583 | 2578 | ||
2584 | if (dev_priv->mm.suspended) | ||
2585 | return -EBUSY; | ||
2586 | |||
2587 | if (obj_priv->madv != I915_MADV_WILLNEED) { | 2579 | if (obj_priv->madv != I915_MADV_WILLNEED) { |
2588 | DRM_ERROR("Attempting to bind a purgeable object\n"); | 2580 | DRM_ERROR("Attempting to bind a purgeable object\n"); |
2589 | return -EINVAL; | 2581 | return -EINVAL; |
@@ -2625,15 +2617,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2625 | DRM_INFO("Binding object of size %zd at 0x%08x\n", | 2617 | DRM_INFO("Binding object of size %zd at 0x%08x\n", |
2626 | obj->size, obj_priv->gtt_offset); | 2618 | obj->size, obj_priv->gtt_offset); |
2627 | #endif | 2619 | #endif |
2628 | if (retry_alloc) { | 2620 | ret = i915_gem_object_get_pages(obj, gfpmask); |
2629 | i915_gem_object_set_page_gfp_mask (obj, | ||
2630 | i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY); | ||
2631 | } | ||
2632 | ret = i915_gem_object_get_pages(obj); | ||
2633 | if (retry_alloc) { | ||
2634 | i915_gem_object_set_page_gfp_mask (obj, | ||
2635 | i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY); | ||
2636 | } | ||
2637 | if (ret) { | 2621 | if (ret) { |
2638 | drm_mm_put_block(obj_priv->gtt_space); | 2622 | drm_mm_put_block(obj_priv->gtt_space); |
2639 | obj_priv->gtt_space = NULL; | 2623 | obj_priv->gtt_space = NULL; |
@@ -2643,9 +2627,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | |||
2643 | ret = i915_gem_evict_something(dev, obj->size); | 2627 | ret = i915_gem_evict_something(dev, obj->size); |
2644 | if (ret) { | 2628 | if (ret) { |
2645 | /* now try to shrink everyone else */ | 2629 | /* now try to shrink everyone else */ |
2646 | if (! retry_alloc) { | 2630 | if (gfpmask) { |
2647 | retry_alloc = true; | 2631 | gfpmask = 0; |
2648 | goto search_free; | 2632 | goto search_free; |
2649 | } | 2633 | } |
2650 | 2634 | ||
2651 | return ret; | 2635 | return ret; |
@@ -2723,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) | |||
2723 | old_write_domain = obj->write_domain; | 2707 | old_write_domain = obj->write_domain; |
2724 | i915_gem_flush(dev, 0, obj->write_domain); | 2708 | i915_gem_flush(dev, 0, obj->write_domain); |
2725 | seqno = i915_add_request(dev, NULL, obj->write_domain); | 2709 | seqno = i915_add_request(dev, NULL, obj->write_domain); |
2726 | obj->write_domain = 0; | 2710 | BUG_ON(obj->write_domain); |
2727 | i915_gem_object_move_to_active(obj, seqno); | 2711 | i915_gem_object_move_to_active(obj, seqno); |
2728 | 2712 | ||
2729 | trace_i915_gem_object_change_domain(obj, | 2713 | trace_i915_gem_object_change_domain(obj, |
@@ -2839,6 +2823,57 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) | |||
2839 | return 0; | 2823 | return 0; |
2840 | } | 2824 | } |
2841 | 2825 | ||
2826 | /* | ||
2827 | * Prepare buffer for display plane. Use uninterruptible for possible flush | ||
2828 | * wait, as in modesetting process we're not supposed to be interrupted. | ||
2829 | */ | ||
2830 | int | ||
2831 | i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) | ||
2832 | { | ||
2833 | struct drm_device *dev = obj->dev; | ||
2834 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2835 | uint32_t old_write_domain, old_read_domains; | ||
2836 | int ret; | ||
2837 | |||
2838 | /* Not valid to be called on unbound objects. */ | ||
2839 | if (obj_priv->gtt_space == NULL) | ||
2840 | return -EINVAL; | ||
2841 | |||
2842 | i915_gem_object_flush_gpu_write_domain(obj); | ||
2843 | |||
2844 | /* Wait on any GPU rendering and flushing to occur. */ | ||
2845 | if (obj_priv->active) { | ||
2846 | #if WATCH_BUF | ||
2847 | DRM_INFO("%s: object %p wait for seqno %08x\n", | ||
2848 | __func__, obj, obj_priv->last_rendering_seqno); | ||
2849 | #endif | ||
2850 | ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0); | ||
2851 | if (ret != 0) | ||
2852 | return ret; | ||
2853 | } | ||
2854 | |||
2855 | old_write_domain = obj->write_domain; | ||
2856 | old_read_domains = obj->read_domains; | ||
2857 | |||
2858 | obj->read_domains &= I915_GEM_DOMAIN_GTT; | ||
2859 | |||
2860 | i915_gem_object_flush_cpu_write_domain(obj); | ||
2861 | |||
2862 | /* It should now be out of any other write domains, and we can update | ||
2863 | * the domain values for our changes. | ||
2864 | */ | ||
2865 | BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); | ||
2866 | obj->read_domains |= I915_GEM_DOMAIN_GTT; | ||
2867 | obj->write_domain = I915_GEM_DOMAIN_GTT; | ||
2868 | obj_priv->dirty = 1; | ||
2869 | |||
2870 | trace_i915_gem_object_change_domain(obj, | ||
2871 | old_read_domains, | ||
2872 | old_write_domain); | ||
2873 | |||
2874 | return 0; | ||
2875 | } | ||
2876 | |||
2842 | /** | 2877 | /** |
2843 | * Moves a single object to the CPU read, and possibly write domain. | 2878 | * Moves a single object to the CPU read, and possibly write domain. |
2844 | * | 2879 | * |
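The i915_gem_object_set_to_display_plane() helper added above follows the usual domain bookkeeping: flush pending GPU and CPU writes, wait for outstanding rendering, then fold the object into the GTT domain for both reads and writes. A compact sketch of just the bitmask transition (the domain bit values here are illustrative, not the real I915_GEM_DOMAIN_* encodings):

#include <stdio.h>

#define DOMAIN_CPU (1u << 0)    /* illustrative bits only */
#define DOMAIN_GTT (1u << 1)

struct object {
    unsigned int read_domains;
    unsigned int write_domain;
};

/* Mirror of the transition above: drop every read domain except GTT,
 * then make the object GTT-readable and GTT-writable. */
static void set_to_display_plane(struct object *obj)
{
    obj->read_domains &= DOMAIN_GTT;
    obj->read_domains |= DOMAIN_GTT;
    obj->write_domain  = DOMAIN_GTT;
}

int main(void)
{
    struct object o = { DOMAIN_CPU | DOMAIN_GTT, DOMAIN_CPU };
    set_to_display_plane(&o);
    printf("read=%#x write=%#x\n", o.read_domains, o.write_domain);
    return 0;
}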
@@ -3198,7 +3233,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, | |||
3198 | static int | 3233 | static int |
3199 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | 3234 | i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, |
3200 | struct drm_file *file_priv, | 3235 | struct drm_file *file_priv, |
3201 | struct drm_i915_gem_exec_object *entry, | 3236 | struct drm_i915_gem_exec_object2 *entry, |
3202 | struct drm_i915_gem_relocation_entry *relocs) | 3237 | struct drm_i915_gem_relocation_entry *relocs) |
3203 | { | 3238 | { |
3204 | struct drm_device *dev = obj->dev; | 3239 | struct drm_device *dev = obj->dev; |
@@ -3206,12 +3241,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3206 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | 3241 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
3207 | int i, ret; | 3242 | int i, ret; |
3208 | void __iomem *reloc_page; | 3243 | void __iomem *reloc_page; |
3244 | bool need_fence; | ||
3245 | |||
3246 | need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && | ||
3247 | obj_priv->tiling_mode != I915_TILING_NONE; | ||
3248 | |||
3249 | /* Check fence reg constraints and rebind if necessary */ | ||
3250 | if (need_fence && !i915_obj_fenceable(dev, obj)) | ||
3251 | i915_gem_object_unbind(obj); | ||
3209 | 3252 | ||
3210 | /* Choose the GTT offset for our buffer and put it there. */ | 3253 | /* Choose the GTT offset for our buffer and put it there. */ |
3211 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | 3254 | ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); |
3212 | if (ret) | 3255 | if (ret) |
3213 | return ret; | 3256 | return ret; |
3214 | 3257 | ||
3258 | /* | ||
3259 | * Pre-965 chips need a fence register set up in order to | ||
3260 | * properly handle blits to/from tiled surfaces. | ||
3261 | */ | ||
3262 | if (need_fence) { | ||
3263 | ret = i915_gem_object_get_fence_reg(obj); | ||
3264 | if (ret != 0) { | ||
3265 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3266 | DRM_ERROR("Failure to install fence: %d\n", | ||
3267 | ret); | ||
3268 | i915_gem_object_unpin(obj); | ||
3269 | return ret; | ||
3270 | } | ||
3271 | } | ||
3272 | |||
3215 | entry->offset = obj_priv->gtt_offset; | 3273 | entry->offset = obj_priv->gtt_offset; |
3216 | 3274 | ||
3217 | /* Apply the relocations, using the GTT aperture to avoid cache | 3275 | /* Apply the relocations, using the GTT aperture to avoid cache |
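Ordering is the point of the hunk above: fenceability is checked (and the object unbound) before anything else, the pin happens next, and the fence register is acquired only afterwards, with the pin rolled back if no fence can be installed. A hedged sketch of that acquire/rollback shape (pin(), unpin() and get_fence() are hypothetical stand-ins):

#include <stdio.h>

static int pin(void)        { return 0; }
static void unpin(void)     { }
static int get_fence(void)  { return -16; /* say, -EBUSY: none free */ }

static int pin_and_fence(int need_fence)
{
    int ret = pin();
    if (ret)
        return ret;
    if (need_fence) {
        ret = get_fence();
        if (ret) {
            unpin();    /* undo the pin before reporting failure */
            return ret;
        }
    }
    return 0;
}

int main(void)
{
    printf("pin_and_fence(1) = %d\n", pin_and_fence(1));
    return 0;
}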
@@ -3373,7 +3431,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | |||
3373 | */ | 3431 | */ |
3374 | static int | 3432 | static int |
3375 | i915_dispatch_gem_execbuffer(struct drm_device *dev, | 3433 | i915_dispatch_gem_execbuffer(struct drm_device *dev, |
3376 | struct drm_i915_gem_execbuffer *exec, | 3434 | struct drm_i915_gem_execbuffer2 *exec, |
3377 | struct drm_clip_rect *cliprects, | 3435 | struct drm_clip_rect *cliprects, |
3378 | uint64_t exec_offset) | 3436 | uint64_t exec_offset) |
3379 | { | 3437 | { |
@@ -3463,7 +3521,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | |||
3463 | } | 3521 | } |
3464 | 3522 | ||
3465 | static int | 3523 | static int |
3466 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | 3524 | i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, |
3467 | uint32_t buffer_count, | 3525 | uint32_t buffer_count, |
3468 | struct drm_i915_gem_relocation_entry **relocs) | 3526 | struct drm_i915_gem_relocation_entry **relocs) |
3469 | { | 3527 | { |
@@ -3478,8 +3536,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
3478 | } | 3536 | } |
3479 | 3537 | ||
3480 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); | 3538 | *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); |
3481 | if (*relocs == NULL) | 3539 | if (*relocs == NULL) { |
3540 | DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); | ||
3482 | return -ENOMEM; | 3541 | return -ENOMEM; |
3542 | } | ||
3483 | 3543 | ||
3484 | for (i = 0; i < buffer_count; i++) { | 3544 | for (i = 0; i < buffer_count; i++) { |
3485 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3545 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
@@ -3503,13 +3563,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, | |||
3503 | } | 3563 | } |
3504 | 3564 | ||
3505 | static int | 3565 | static int |
3506 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, | 3566 | i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, |
3507 | uint32_t buffer_count, | 3567 | uint32_t buffer_count, |
3508 | struct drm_i915_gem_relocation_entry *relocs) | 3568 | struct drm_i915_gem_relocation_entry *relocs) |
3509 | { | 3569 | { |
3510 | uint32_t reloc_count = 0, i; | 3570 | uint32_t reloc_count = 0, i; |
3511 | int ret = 0; | 3571 | int ret = 0; |
3512 | 3572 | ||
3573 | if (relocs == NULL) | ||
3574 | return 0; | ||
3575 | |||
3513 | for (i = 0; i < buffer_count; i++) { | 3576 | for (i = 0; i < buffer_count; i++) { |
3514 | struct drm_i915_gem_relocation_entry __user *user_relocs; | 3577 | struct drm_i915_gem_relocation_entry __user *user_relocs; |
3515 | int unwritten; | 3578 | int unwritten; |
@@ -3536,7 +3599,7 @@ err: | |||
3536 | } | 3599 | } |
3537 | 3600 | ||
3538 | static int | 3601 | static int |
3539 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec, | 3602 | i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, |
3540 | uint64_t exec_offset) | 3603 | uint64_t exec_offset) |
3541 | { | 3604 | { |
3542 | uint32_t exec_start, exec_len; | 3605 | uint32_t exec_start, exec_len; |
@@ -3589,18 +3652,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev, | |||
3589 | } | 3652 | } |
3590 | 3653 | ||
3591 | int | 3654 | int |
3592 | i915_gem_execbuffer(struct drm_device *dev, void *data, | 3655 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
3593 | struct drm_file *file_priv) | 3656 | struct drm_file *file_priv, |
3657 | struct drm_i915_gem_execbuffer2 *args, | ||
3658 | struct drm_i915_gem_exec_object2 *exec_list) | ||
3594 | { | 3659 | { |
3595 | drm_i915_private_t *dev_priv = dev->dev_private; | 3660 | drm_i915_private_t *dev_priv = dev->dev_private; |
3596 | struct drm_i915_gem_execbuffer *args = data; | ||
3597 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3598 | struct drm_gem_object **object_list = NULL; | 3661 | struct drm_gem_object **object_list = NULL; |
3599 | struct drm_gem_object *batch_obj; | 3662 | struct drm_gem_object *batch_obj; |
3600 | struct drm_i915_gem_object *obj_priv; | 3663 | struct drm_i915_gem_object *obj_priv; |
3601 | struct drm_clip_rect *cliprects = NULL; | 3664 | struct drm_clip_rect *cliprects = NULL; |
3602 | struct drm_i915_gem_relocation_entry *relocs; | 3665 | struct drm_i915_gem_relocation_entry *relocs = NULL; |
3603 | int ret, ret2, i, pinned = 0; | 3666 | int ret = 0, ret2, i, pinned = 0; |
3604 | uint64_t exec_offset; | 3667 | uint64_t exec_offset; |
3605 | uint32_t seqno, flush_domains, reloc_index; | 3668 | uint32_t seqno, flush_domains, reloc_index; |
3606 | int pin_tries, flips; | 3669 | int pin_tries, flips; |
@@ -3614,31 +3677,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3614 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | 3677 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); |
3615 | return -EINVAL; | 3678 | return -EINVAL; |
3616 | } | 3679 | } |
3617 | /* Copy in the exec list from userland */ | ||
3618 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
3619 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); | 3680 | object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); |
3620 | if (exec_list == NULL || object_list == NULL) { | 3681 | if (object_list == NULL) { |
3621 | DRM_ERROR("Failed to allocate exec or object list " | 3682 | DRM_ERROR("Failed to allocate object list for %d buffers\n", |
3622 | "for %d buffers\n", | ||
3623 | args->buffer_count); | 3683 | args->buffer_count); |
3624 | ret = -ENOMEM; | 3684 | ret = -ENOMEM; |
3625 | goto pre_mutex_err; | 3685 | goto pre_mutex_err; |
3626 | } | 3686 | } |
3627 | ret = copy_from_user(exec_list, | ||
3628 | (struct drm_i915_relocation_entry __user *) | ||
3629 | (uintptr_t) args->buffers_ptr, | ||
3630 | sizeof(*exec_list) * args->buffer_count); | ||
3631 | if (ret != 0) { | ||
3632 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
3633 | args->buffer_count, ret); | ||
3634 | goto pre_mutex_err; | ||
3635 | } | ||
3636 | 3687 | ||
3637 | if (args->num_cliprects != 0) { | 3688 | if (args->num_cliprects != 0) { |
3638 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), | 3689 | cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), |
3639 | GFP_KERNEL); | 3690 | GFP_KERNEL); |
3640 | if (cliprects == NULL) | 3691 | if (cliprects == NULL) { |
3692 | ret = -ENOMEM; | ||
3641 | goto pre_mutex_err; | 3693 | goto pre_mutex_err; |
3694 | } | ||
3642 | 3695 | ||
3643 | ret = copy_from_user(cliprects, | 3696 | ret = copy_from_user(cliprects, |
3644 | (struct drm_clip_rect __user *) | 3697 | (struct drm_clip_rect __user *) |
@@ -3680,6 +3733,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3680 | if (object_list[i] == NULL) { | 3733 | if (object_list[i] == NULL) { |
3681 | DRM_ERROR("Invalid object handle %d at index %d\n", | 3734 | DRM_ERROR("Invalid object handle %d at index %d\n", |
3682 | exec_list[i].handle, i); | 3735 | exec_list[i].handle, i); |
3736 | /* prevent error path from reading uninitialized data */ | ||
3737 | args->buffer_count = i + 1; | ||
3683 | ret = -EBADF; | 3738 | ret = -EBADF; |
3684 | goto err; | 3739 | goto err; |
3685 | } | 3740 | } |
@@ -3688,6 +3743,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3688 | if (obj_priv->in_execbuffer) { | 3743 | if (obj_priv->in_execbuffer) { |
3689 | DRM_ERROR("Object %p appears more than once in object list\n", | 3744 | DRM_ERROR("Object %p appears more than once in object list\n", |
3690 | object_list[i]); | 3745 | object_list[i]); |
3746 | /* prevent error path from reading uninitialized data */ | ||
3747 | args->buffer_count = i + 1; | ||
3691 | ret = -EBADF; | 3748 | ret = -EBADF; |
3692 | goto err; | 3749 | goto err; |
3693 | } | 3750 | } |
@@ -3801,16 +3858,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
3801 | i915_gem_flush(dev, | 3858 | i915_gem_flush(dev, |
3802 | dev->invalidate_domains, | 3859 | dev->invalidate_domains, |
3803 | dev->flush_domains); | 3860 | dev->flush_domains); |
3804 | if (dev->flush_domains) | 3861 | if (dev->flush_domains & I915_GEM_GPU_DOMAINS) |
3805 | (void)i915_add_request(dev, file_priv, | 3862 | (void)i915_add_request(dev, file_priv, |
3806 | dev->flush_domains); | 3863 | dev->flush_domains); |
3807 | } | 3864 | } |
3808 | 3865 | ||
3809 | for (i = 0; i < args->buffer_count; i++) { | 3866 | for (i = 0; i < args->buffer_count; i++) { |
3810 | struct drm_gem_object *obj = object_list[i]; | 3867 | struct drm_gem_object *obj = object_list[i]; |
3868 | struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3811 | uint32_t old_write_domain = obj->write_domain; | 3869 | uint32_t old_write_domain = obj->write_domain; |
3812 | 3870 | ||
3813 | obj->write_domain = obj->pending_write_domain; | 3871 | obj->write_domain = obj->pending_write_domain; |
3872 | if (obj->write_domain) | ||
3873 | list_move_tail(&obj_priv->gpu_write_list, | ||
3874 | &dev_priv->mm.gpu_write_list); | ||
3875 | else | ||
3876 | list_del_init(&obj_priv->gpu_write_list); | ||
3877 | |||
3814 | trace_i915_gem_object_change_domain(obj, | 3878 | trace_i915_gem_object_change_domain(obj, |
3815 | obj->read_domains, | 3879 | obj->read_domains, |
3816 | old_write_domain); | 3880 | old_write_domain); |
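The per-object gpu_write_list handling above is the standard kernel intrusive list idiom: an object with a pending GPU write is moved to the tail of the device-wide gpu_write_list, otherwise it is unlinked and reinitialised. A self-contained reimplementation of just that idiom, so the movement can be read in isolation:

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del_init(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
    list_init(e);
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
    e->prev = h->prev; e->next = h;
    h->prev->next = e; h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
    list_del_init(e);
    list_add_tail(e, h);
}

struct obj { struct list_head gpu_write_list; unsigned int write_domain; };

int main(void)
{
    struct list_head gpu_write_list;  /* per-device list of dirty objects */
    struct obj o;

    list_init(&gpu_write_list);
    list_init(&o.gpu_write_list);

    o.write_domain = 1;               /* picked up a pending GPU write */
    if (o.write_domain)
        list_move_tail(&o.gpu_write_list, &gpu_write_list);
    else
        list_del_init(&o.gpu_write_list);

    printf("on list: %d\n", gpu_write_list.next == &o.gpu_write_list);
    return 0;
}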
@@ -3884,8 +3948,101 @@ err: | |||
3884 | 3948 | ||
3885 | mutex_unlock(&dev->struct_mutex); | 3949 | mutex_unlock(&dev->struct_mutex); |
3886 | 3950 | ||
3951 | pre_mutex_err: | ||
3952 | /* Copy the updated relocations out regardless of current error | ||
3953 | * state. Failure to update the relocs would mean that the next | ||
3954 | * time userland calls execbuf, it would do so with presumed offset | ||
3955 | * state that didn't match the actual object state. | ||
3956 | */ | ||
3957 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3958 | relocs); | ||
3959 | if (ret2 != 0) { | ||
3960 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3961 | |||
3962 | if (ret == 0) | ||
3963 | ret = ret2; | ||
3964 | } | ||
3965 | |||
3966 | drm_free_large(object_list); | ||
3967 | kfree(cliprects); | ||
3968 | |||
3969 | return ret; | ||
3970 | } | ||
3971 | |||
3972 | /* | ||
3973 | * Legacy execbuffer just creates an exec2 list from the original exec object | ||
3974 | * list array and passes it to the real function. | ||
3975 | */ | ||
3976 | int | ||
3977 | i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
3978 | struct drm_file *file_priv) | ||
3979 | { | ||
3980 | struct drm_i915_gem_execbuffer *args = data; | ||
3981 | struct drm_i915_gem_execbuffer2 exec2; | ||
3982 | struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3983 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
3984 | int ret, i; | ||
3985 | |||
3986 | #if WATCH_EXEC | ||
3987 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
3988 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3989 | #endif | ||
3990 | |||
3991 | if (args->buffer_count < 1) { | ||
3992 | DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); | ||
3993 | return -EINVAL; | ||
3994 | } | ||
3995 | |||
3996 | /* Copy in the exec list from userland */ | ||
3997 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); | ||
3998 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); | ||
3999 | if (exec_list == NULL || exec2_list == NULL) { | ||
4000 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", | ||
4001 | args->buffer_count); | ||
4002 | drm_free_large(exec_list); | ||
4003 | drm_free_large(exec2_list); | ||
4004 | return -ENOMEM; | ||
4005 | } | ||
4006 | ret = copy_from_user(exec_list, | ||
4007 | (struct drm_i915_relocation_entry __user *) | ||
4008 | (uintptr_t) args->buffers_ptr, | ||
4009 | sizeof(*exec_list) * args->buffer_count); | ||
4010 | if (ret != 0) { | ||
4011 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4012 | args->buffer_count, ret); | ||
4013 | drm_free_large(exec_list); | ||
4014 | drm_free_large(exec2_list); | ||
4015 | return -EFAULT; | ||
4016 | } | ||
4017 | |||
4018 | for (i = 0; i < args->buffer_count; i++) { | ||
4019 | exec2_list[i].handle = exec_list[i].handle; | ||
4020 | exec2_list[i].relocation_count = exec_list[i].relocation_count; | ||
4021 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; | ||
4022 | exec2_list[i].alignment = exec_list[i].alignment; | ||
4023 | exec2_list[i].offset = exec_list[i].offset; | ||
4024 | if (!IS_I965G(dev)) | ||
4025 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; | ||
4026 | else | ||
4027 | exec2_list[i].flags = 0; | ||
4028 | } | ||
4029 | |||
4030 | exec2.buffers_ptr = args->buffers_ptr; | ||
4031 | exec2.buffer_count = args->buffer_count; | ||
4032 | exec2.batch_start_offset = args->batch_start_offset; | ||
4033 | exec2.batch_len = args->batch_len; | ||
4034 | exec2.DR1 = args->DR1; | ||
4035 | exec2.DR4 = args->DR4; | ||
4036 | exec2.num_cliprects = args->num_cliprects; | ||
4037 | exec2.cliprects_ptr = args->cliprects_ptr; | ||
4038 | exec2.flags = 0; | ||
4039 | |||
4040 | ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); | ||
3887 | if (!ret) { | 4041 | if (!ret) { |
3888 | /* Copy the new buffer offsets back to the user's exec list. */ | 4042 | /* Copy the new buffer offsets back to the user's exec list. */ |
4043 | for (i = 0; i < args->buffer_count; i++) | ||
4044 | exec_list[i].offset = exec2_list[i].offset; | ||
4045 | /* ... and back out to userspace */ | ||
3889 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | 4046 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) |
3890 | (uintptr_t) args->buffers_ptr, | 4047 | (uintptr_t) args->buffers_ptr, |
3891 | exec_list, | 4048 | exec_list, |
@@ -3898,25 +4055,62 @@ err: | |||
3898 | } | 4055 | } |
3899 | } | 4056 | } |
3900 | 4057 | ||
3901 | /* Copy the updated relocations out regardless of current error | 4058 | drm_free_large(exec_list); |
3902 | * state. Failure to update the relocs would mean that the next | 4059 | drm_free_large(exec2_list); |
3903 | * time userland calls execbuf, it would do so with presumed offset | 4060 | return ret; |
3904 | * state that didn't match the actual object state. | 4061 | } |
3905 | */ | ||
3906 | ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, | ||
3907 | relocs); | ||
3908 | if (ret2 != 0) { | ||
3909 | DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); | ||
3910 | 4062 | ||
3911 | if (ret == 0) | 4063 | int |
3912 | ret = ret2; | 4064 | i915_gem_execbuffer2(struct drm_device *dev, void *data, |
4065 | struct drm_file *file_priv) | ||
4066 | { | ||
4067 | struct drm_i915_gem_execbuffer2 *args = data; | ||
4068 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; | ||
4069 | int ret; | ||
4070 | |||
4071 | #if WATCH_EXEC | ||
4072 | DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
4073 | (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
4074 | #endif | ||
4075 | |||
4076 | if (args->buffer_count < 1) { | ||
4077 | DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); | ||
4078 | return -EINVAL; | ||
3913 | } | 4079 | } |
3914 | 4080 | ||
3915 | pre_mutex_err: | 4081 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); |
3916 | drm_free_large(object_list); | 4082 | if (exec2_list == NULL) { |
3917 | drm_free_large(exec_list); | 4083 | DRM_ERROR("Failed to allocate exec list for %d buffers\n", |
3918 | kfree(cliprects); | 4084 | args->buffer_count); |
4085 | return -ENOMEM; | ||
4086 | } | ||
4087 | ret = copy_from_user(exec2_list, | ||
4088 | (struct drm_i915_relocation_entry __user *) | ||
4089 | (uintptr_t) args->buffers_ptr, | ||
4090 | sizeof(*exec2_list) * args->buffer_count); | ||
4091 | if (ret != 0) { | ||
4092 | DRM_ERROR("copy %d exec entries failed %d\n", | ||
4093 | args->buffer_count, ret); | ||
4094 | drm_free_large(exec2_list); | ||
4095 | return -EFAULT; | ||
4096 | } | ||
4097 | |||
4098 | ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); | ||
4099 | if (!ret) { | ||
4100 | /* Copy the new buffer offsets back to the user's exec list. */ | ||
4101 | ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
4102 | (uintptr_t) args->buffers_ptr, | ||
4103 | exec2_list, | ||
4104 | sizeof(*exec2_list) * args->buffer_count); | ||
4105 | if (ret) { | ||
4106 | ret = -EFAULT; | ||
4107 | DRM_ERROR("failed to copy %d exec entries " | ||
4108 | "back to user (%d)\n", | ||
4109 | args->buffer_count, ret); | ||
4110 | } | ||
4111 | } | ||
3919 | 4112 | ||
4113 | drm_free_large(exec2_list); | ||
3920 | return ret; | 4114 | return ret; |
3921 | } | 4115 | } |
3922 | 4116 | ||
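i915_gem_execbuffer() above is now a pure compatibility shim: it widens each drm_i915_gem_exec_object into an exec_object2 (forcing EXEC_OBJECT_NEEDS_FENCE on pre-965 parts), fills in a drm_i915_gem_execbuffer2, calls the shared i915_gem_do_execbuffer(), and copies the assigned offsets back. A minimal sketch of that shim pattern, with hypothetical v1/v2 request layouts standing in for the real uapi structs:

#include <stdio.h>
#include <stdlib.h>

struct exec_v1 { unsigned handle, alignment; unsigned long long offset; };
struct exec_v2 { unsigned handle, alignment, flags; unsigned long long offset; };

#define NEEDS_FENCE 0x1

static int do_exec_v2(struct exec_v2 *list, int n)
{
    /* Shared implementation: pretend it assigns GTT offsets. */
    for (int i = 0; i < n; i++)
        list[i].offset = 0x1000ull * (i + 1);
    return 0;
}

/* Legacy entry point: widen v1 -> v2, run, copy offsets back out. */
static int exec_v1_shim(struct exec_v1 *list, int n, int is_i965)
{
    struct exec_v2 *list2 = calloc(n, sizeof(*list2));
    int ret, i;

    if (!list2)
        return -1;
    for (i = 0; i < n; i++) {
        list2[i].handle    = list[i].handle;
        list2[i].alignment = list[i].alignment;
        list2[i].offset    = list[i].offset;
        list2[i].flags     = is_i965 ? 0 : NEEDS_FENCE;
    }
    ret = do_exec_v2(list2, n);
    if (ret == 0)
        for (i = 0; i < n; i++)     /* propagate new offsets to caller */
            list[i].offset = list2[i].offset;
    free(list2);
    return ret;
}

int main(void)
{
    struct exec_v1 objs[2] = { { 1, 0, 0 }, { 2, 0, 0 } };
    exec_v1_shim(objs, 2, 0);
    printf("offsets: %#llx %#llx\n", objs[0].offset, objs[1].offset);
    return 0;
}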
@@ -3933,19 +4127,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | |||
3933 | if (ret) | 4127 | if (ret) |
3934 | return ret; | 4128 | return ret; |
3935 | } | 4129 | } |
3936 | /* | 4130 | |
3937 | * Pre-965 chips need a fence register set up in order to | ||
3938 | * properly handle tiled surfaces. | ||
3939 | */ | ||
3940 | if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) { | ||
3941 | ret = i915_gem_object_get_fence_reg(obj); | ||
3942 | if (ret != 0) { | ||
3943 | if (ret != -EBUSY && ret != -ERESTARTSYS) | ||
3944 | DRM_ERROR("Failure to install fence: %d\n", | ||
3945 | ret); | ||
3946 | return ret; | ||
3947 | } | ||
3948 | } | ||
3949 | obj_priv->pin_count++; | 4131 | obj_priv->pin_count++; |
3950 | 4132 | ||
3951 | /* If the object is not active and not pending a flush, | 4133 | /* If the object is not active and not pending a flush, |
@@ -4203,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj) | |||
4203 | obj_priv->obj = obj; | 4385 | obj_priv->obj = obj; |
4204 | obj_priv->fence_reg = I915_FENCE_REG_NONE; | 4386 | obj_priv->fence_reg = I915_FENCE_REG_NONE; |
4205 | INIT_LIST_HEAD(&obj_priv->list); | 4387 | INIT_LIST_HEAD(&obj_priv->list); |
4388 | INIT_LIST_HEAD(&obj_priv->gpu_write_list); | ||
4206 | INIT_LIST_HEAD(&obj_priv->fence_list); | 4389 | INIT_LIST_HEAD(&obj_priv->fence_list); |
4207 | obj_priv->madv = I915_MADV_WILLNEED; | 4390 | obj_priv->madv = I915_MADV_WILLNEED; |
4208 | 4391 | ||
@@ -4654,6 +4837,7 @@ i915_gem_load(struct drm_device *dev) | |||
4654 | spin_lock_init(&dev_priv->mm.active_list_lock); | 4837 | spin_lock_init(&dev_priv->mm.active_list_lock); |
4655 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 4838 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
4656 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | 4839 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
4840 | INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); | ||
4657 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4841 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4658 | INIT_LIST_HEAD(&dev_priv->mm.request_list); | 4842 | INIT_LIST_HEAD(&dev_priv->mm.request_list); |
4659 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4843 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
@@ -4708,7 +4892,7 @@ int i915_gem_init_phys_object(struct drm_device *dev, | |||
4708 | 4892 | ||
4709 | phys_obj->id = id; | 4893 | phys_obj->id = id; |
4710 | 4894 | ||
4711 | phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff); | 4895 | phys_obj->handle = drm_pci_alloc(dev, size, 0); |
4712 | if (!phys_obj->handle) { | 4896 | if (!phys_obj->handle) { |
4713 | ret = -ENOMEM; | 4897 | ret = -ENOMEM; |
4714 | goto kfree_obj; | 4898 | goto kfree_obj; |
@@ -4766,7 +4950,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
4766 | if (!obj_priv->phys_obj) | 4950 | if (!obj_priv->phys_obj) |
4767 | return; | 4951 | return; |
4768 | 4952 | ||
4769 | ret = i915_gem_object_get_pages(obj); | 4953 | ret = i915_gem_object_get_pages(obj, 0); |
4770 | if (ret) | 4954 | if (ret) |
4771 | goto out; | 4955 | goto out; |
4772 | 4956 | ||
@@ -4824,7 +5008,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, | |||
4824 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; | 5008 | obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
4825 | obj_priv->phys_obj->cur_obj = obj; | 5009 | obj_priv->phys_obj->cur_obj = obj; |
4826 | 5010 | ||
4827 | ret = i915_gem_object_get_pages(obj); | 5011 | ret = i915_gem_object_get_pages(obj, 0); |
4828 | if (ret) { | 5012 | if (ret) { |
4829 | DRM_ERROR("failed to get page list\n"); | 5013 | DRM_ERROR("failed to get page list\n"); |
4830 | goto out; | 5014 | goto out; |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 30d6af6c09bb..df278b2685bf 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
304 | 304 | ||
305 | 305 | ||
306 | /** | 306 | /** |
307 | * Returns the size of the fence for a tiled object of the given size. | 307 | * Returns whether an object is currently fenceable. If not, it may need |
308 | * to be unbound and have its pitch adjusted. | ||
308 | */ | 309 | */ |
309 | static int | 310 | bool |
310 | i915_get_fence_size(struct drm_device *dev, int size) | 311 | i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj) |
311 | { | 312 | { |
312 | int i; | 313 | struct drm_i915_gem_object *obj_priv = obj->driver_private; |
313 | int start; | ||
314 | 314 | ||
315 | if (IS_I965G(dev)) { | 315 | if (IS_I965G(dev)) { |
316 | /* The 965 can have fences at any page boundary. */ | 316 | /* The 965 can have fences at any page boundary. */ |
317 | return ALIGN(size, 4096); | 317 | if (obj->size & 4095) |
318 | return false; | ||
319 | return true; | ||
320 | } else if (IS_I9XX(dev)) { | ||
321 | if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) | ||
322 | return false; | ||
318 | } else { | 323 | } else { |
319 | /* Align the size to a power of two greater than the smallest | 324 | if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) |
320 | * fence size. | 325 | return false; |
321 | */ | 326 | } |
322 | if (IS_I9XX(dev)) | ||
323 | start = 1024 * 1024; | ||
324 | else | ||
325 | start = 512 * 1024; | ||
326 | 327 | ||
327 | for (i = start; i < size; i <<= 1) | 328 | /* Power of two sized... */ |
328 | ; | 329 | if (obj->size & (obj->size - 1)) |
330 | return false; | ||
329 | 331 | ||
330 | return i; | 332 | /* Objects must be size aligned as well */ |
331 | } | 333 | if (obj_priv->gtt_offset & (obj->size - 1)) |
334 | return false; | ||
335 | return true; | ||
332 | } | 336 | } |
333 | 337 | ||
334 | /* Check pitch constraints for all chips & tiling formats */ | 338 |
335 | static bool | 339 | bool |
336 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | 340 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
337 | { | 341 | { |
338 | int tile_width; | 342 | int tile_width; |
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
384 | if (stride & (stride - 1)) | 388 | if (stride & (stride - 1)) |
385 | return false; | 389 | return false; |
386 | 390 | ||
387 | /* We don't handle the aperture area covered by the fence being bigger | ||
388 | * than the object size. | ||
389 | */ | ||
390 | if (i915_get_fence_size(dev, size) != size) | ||
391 | return false; | ||
392 | |||
393 | return true; | 391 | return true; |
394 | } | 392 | } |
395 | 393 | ||
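The rewritten i915_obj_fenceable() above encodes the pre-965 fence constraints as two bit tricks: the object size must be a power of two (size & (size - 1) == 0) and the GTT offset must be aligned to that size (offset & (size - 1) == 0), on top of a chip-specific start mask. A standalone sketch of those two checks (the start-mask test is elided):

#include <stdio.h>
#include <stdbool.h>

/* A power of two has exactly one bit set, so x & (x - 1) == 0. */
static bool is_pow2(unsigned long x) { return x && !(x & (x - 1)); }

/* Fenceability test for pre-965 parts: power-of-two size, and the
 * offset aligned to the object size. */
static bool obj_fenceable(unsigned long offset, unsigned long size)
{
    if (!is_pow2(size))
        return false;
    if (offset & (size - 1))
        return false;
    return true;
}

int main(void)
{
    printf("%d\n", obj_fenceable(0x100000, 0x100000)); /* 1: aligned */
    printf("%d\n", obj_fenceable(0x080000, 0x100000)); /* 0: misaligned */
    printf("%d\n", obj_fenceable(0x100000, 0x180000)); /* 0: not pow2 */
    return 0;
}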
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 85f4c5de97e2..a17d6bdfe63e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -274,7 +274,6 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
274 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 274 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
275 | int ret = IRQ_NONE; | 275 | int ret = IRQ_NONE; |
276 | u32 de_iir, gt_iir, de_ier, pch_iir; | 276 | u32 de_iir, gt_iir, de_ier, pch_iir; |
277 | u32 new_de_iir, new_gt_iir, new_pch_iir; | ||
278 | struct drm_i915_master_private *master_priv; | 277 | struct drm_i915_master_private *master_priv; |
279 | 278 | ||
280 | /* disable master interrupt before clearing iir */ | 279 | /* disable master interrupt before clearing iir */ |
@@ -286,49 +285,58 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
286 | gt_iir = I915_READ(GTIIR); | 285 | gt_iir = I915_READ(GTIIR); |
287 | pch_iir = I915_READ(SDEIIR); | 286 | pch_iir = I915_READ(SDEIIR); |
288 | 287 | ||
289 | for (;;) { | 288 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) |
290 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) | 289 | goto done; |
291 | break; | ||
292 | 290 | ||
293 | ret = IRQ_HANDLED; | 291 | ret = IRQ_HANDLED; |
294 | 292 | ||
295 | /* should clear PCH hotplug event before clearing CPU irq */ | 293 | if (dev->primary->master) {
296 | I915_WRITE(SDEIIR, pch_iir); | 294 | master_priv = dev->primary->master->driver_priv; |
297 | new_pch_iir = I915_READ(SDEIIR); | 295 | if (master_priv->sarea_priv) |
296 | master_priv->sarea_priv->last_dispatch = | ||
297 | READ_BREADCRUMB(dev_priv); | ||
298 | } | ||
298 | 299 | ||
299 | I915_WRITE(DEIIR, de_iir); | 300 | if (gt_iir & GT_USER_INTERRUPT) { |
300 | new_de_iir = I915_READ(DEIIR); | 301 | u32 seqno = i915_get_gem_seqno(dev); |
301 | I915_WRITE(GTIIR, gt_iir); | 302 | dev_priv->mm.irq_gem_seqno = seqno; |
302 | new_gt_iir = I915_READ(GTIIR); | 303 | trace_i915_gem_request_complete(dev, seqno); |
304 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
305 | dev_priv->hangcheck_count = 0; | ||
306 | mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); | ||
307 | } | ||
303 | 308 | ||
304 | if (dev->primary->master) { | 309 | if (de_iir & DE_GSE) |
305 | master_priv = dev->primary->master->driver_priv; | 310 | ironlake_opregion_gse_intr(dev); |
306 | if (master_priv->sarea_priv) | ||
307 | master_priv->sarea_priv->last_dispatch = | ||
308 | READ_BREADCRUMB(dev_priv); | ||
309 | } | ||
310 | 311 | ||
311 | if (gt_iir & GT_USER_INTERRUPT) { | 312 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
312 | u32 seqno = i915_get_gem_seqno(dev); | 313 | intel_prepare_page_flip(dev, 0); |
313 | dev_priv->mm.irq_gem_seqno = seqno; | 314 | intel_finish_page_flip(dev, 0); |
314 | trace_i915_gem_request_complete(dev, seqno); | 315 | } |
315 | DRM_WAKEUP(&dev_priv->irq_queue); | ||
316 | } | ||
317 | 316 | ||
318 | if (de_iir & DE_GSE) | 317 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
319 | ironlake_opregion_gse_intr(dev); | 318 | intel_prepare_page_flip(dev, 1); |
319 | intel_finish_page_flip(dev, 1); | ||
320 | } | ||
320 | 321 | ||
321 | /* check event from PCH */ | 322 | if (de_iir & DE_PIPEA_VBLANK) |
322 | if ((de_iir & DE_PCH_EVENT) && | 323 | drm_handle_vblank(dev, 0); |
323 | (pch_iir & SDE_HOTPLUG_MASK)) { | ||
324 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
325 | } | ||
326 | 324 | ||
327 | de_iir = new_de_iir; | 325 | if (de_iir & DE_PIPEB_VBLANK) |
328 | gt_iir = new_gt_iir; | 326 | drm_handle_vblank(dev, 1); |
329 | pch_iir = new_pch_iir; | 327 | |
328 | /* check event from PCH */ | ||
329 | if ((de_iir & DE_PCH_EVENT) && | ||
330 | (pch_iir & SDE_HOTPLUG_MASK)) { | ||
331 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | ||
330 | } | 332 | } |
331 | 333 | ||
334 | /* should clear PCH hotplug event before clearing CPU irq */ ||
335 | I915_WRITE(SDEIIR, pch_iir); | ||
336 | I915_WRITE(GTIIR, gt_iir); | ||
337 | I915_WRITE(DEIIR, de_iir); | ||
338 | |||
339 | done: | ||
332 | I915_WRITE(DEIER, de_ier); | 340 | I915_WRITE(DEIER, de_ier); |
333 | (void)I915_READ(DEIER); | 341 | (void)I915_READ(DEIER); |
334 | 342 | ||
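The restructured ironlake_irq_handler() above drops the re-read loop: each IIR is sampled once, the recorded events are handled, and the sampled values are written back to acknowledge them, SDEIIR (the PCH side) before DEIIR. A toy model of that sample/handle/ack flow, using write-1-to-clear registers:

#include <stdio.h>

/* Fake interrupt-identity registers; writing the sampled bits back
 * clears them, mimicking write-1-to-clear IIR semantics. */
static unsigned de_iir_reg = 0x5, gt_iir_reg = 0x2, pch_iir_reg = 0x1;

static unsigned read_reg(unsigned *r) { return *r; }
static void write_reg(unsigned *r, unsigned v) { *r &= ~v; }

static int irq_handler(void)
{
    unsigned de  = read_reg(&de_iir_reg);
    unsigned gt  = read_reg(&gt_iir_reg);
    unsigned pch = read_reg(&pch_iir_reg);

    if (!de && !gt && !pch)
        return 0;                 /* IRQ_NONE */

    /* ... handle the events recorded in de/gt/pch here ... */

    /* Ack exactly what was sampled; PCH first, then the CPU side. */
    write_reg(&pch_iir_reg, pch);
    write_reg(&gt_iir_reg, gt);
    write_reg(&de_iir_reg, de);
    return 1;                     /* IRQ_HANDLED */
}

int main(void)
{
    printf("first pass:  %d\n", irq_handler());
    printf("second pass: %d\n", irq_handler());
    return 0;
}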
@@ -852,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) | |||
852 | if (!(pipeconf & PIPEACONF_ENABLE)) | 860 | if (!(pipeconf & PIPEACONF_ENABLE)) |
853 | return -EINVAL; | 861 | return -EINVAL; |
854 | 862 | ||
855 | if (IS_IRONLAKE(dev)) | ||
856 | return 0; | ||
857 | |||
858 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 863 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
859 | if (IS_I965G(dev)) | 864 | if (IS_IRONLAKE(dev)) |
865 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | ||
866 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); | ||
867 | else if (IS_I965G(dev)) | ||
860 | i915_enable_pipestat(dev_priv, pipe, | 868 | i915_enable_pipestat(dev_priv, pipe, |
861 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 869 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
862 | else | 870 | else |
@@ -874,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) | |||
874 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 882 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
875 | unsigned long irqflags; | 883 | unsigned long irqflags; |
876 | 884 | ||
877 | if (IS_IRONLAKE(dev)) | ||
878 | return; | ||
879 | |||
880 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); | 885 | spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); |
881 | i915_disable_pipestat(dev_priv, pipe, | 886 | if (IS_IRONLAKE(dev)) |
882 | PIPE_VBLANK_INTERRUPT_ENABLE | | 887 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
883 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | 888 | DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); |
889 | else | ||
890 | i915_disable_pipestat(dev_priv, pipe, | ||
891 | PIPE_VBLANK_INTERRUPT_ENABLE | | ||
892 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | ||
884 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); | 893 | spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); |
885 | } | 894 | } |
886 | 895 | ||
@@ -1023,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1023 | { | 1032 | { |
1024 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1033 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1025 | /* enable kind of interrupts always enabled */ | 1034 | /* enable kind of interrupts always enabled */ |
1026 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT; | 1035 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1036 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | ||
1027 | u32 render_mask = GT_USER_INTERRUPT; | 1037 | u32 render_mask = GT_USER_INTERRUPT; |
1028 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1038 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1029 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1039 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1030 | 1040 | ||
1031 | dev_priv->irq_mask_reg = ~display_mask; | 1041 | dev_priv->irq_mask_reg = ~display_mask; |
1032 | dev_priv->de_irq_enable_reg = display_mask; | 1042 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; |
1033 | 1043 | ||
1034 | /* should always be able to generate irq */ | 1044 |
1035 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 1045 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
@@ -1084,6 +1094,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev) | |||
1084 | (void) I915_READ(IER); | 1094 | (void) I915_READ(IER); |
1085 | } | 1095 | } |
1086 | 1096 | ||
1097 | /* | ||
1098 | * Must be called after intel_modeset_init or hotplug interrupts won't be | ||
1099 | * enabled correctly. | ||
1100 | */ | ||
1087 | int i915_driver_irq_postinstall(struct drm_device *dev) | 1101 | int i915_driver_irq_postinstall(struct drm_device *dev) |
1088 | { | 1102 | { |
1089 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1103 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -1106,19 +1120,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev) | |||
1106 | if (I915_HAS_HOTPLUG(dev)) { | 1120 | if (I915_HAS_HOTPLUG(dev)) { |
1107 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); | 1121 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
1108 | 1122 | ||
1109 | /* Leave other bits alone */ | 1123 | /* Note HDMI and DP share bits */ |
1110 | hotplug_en |= HOTPLUG_EN_MASK; | 1124 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
1125 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; | ||
1126 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) | ||
1127 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; | ||
1128 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) | ||
1129 | hotplug_en |= HDMID_HOTPLUG_INT_EN; | ||
1130 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) | ||
1131 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; | ||
1132 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) | ||
1133 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; | ||
1134 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) | ||
1135 | hotplug_en |= CRT_HOTPLUG_INT_EN; | ||
1136 | /* Ignore TV since it's buggy */ | ||
1137 | |||
1111 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | 1138 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
1112 | 1139 | ||
1113 | dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS | | ||
1114 | TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS | | ||
1115 | SDVOB_HOTPLUG_INT_STATUS; | ||
1116 | if (IS_G4X(dev)) { | ||
1117 | dev_priv->hotplug_supported_mask |= | ||
1118 | HDMIB_HOTPLUG_INT_STATUS | | ||
1119 | HDMIC_HOTPLUG_INT_STATUS | | ||
1120 | HDMID_HOTPLUG_INT_STATUS; | ||
1121 | } | ||
1122 | /* Enable in IER... */ | 1140 | /* Enable in IER... */ |
1123 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | 1141 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
1124 | /* and unmask in IMR */ | 1142 | /* and unmask in IMR */ |
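Instead of OR-ing in a blanket HOTPLUG_EN_MASK, the code above now derives the enable bits from dev_priv->hotplug_supported_mask, so detect logic is switched on only for connectors that actually registered. A table-driven sketch of that status-bit to enable-bit mapping (the bit positions here are hypothetical, not the real register layout):

#include <stdio.h>

/* Hypothetical status/enable bit pairs standing in for the
 * *_HOTPLUG_INT_STATUS / *_HOTPLUG_INT_EN register bits. */
static const struct { unsigned status, enable; const char *name; } map[] = {
    { 1u << 0, 1u << 16, "CRT"   },
    { 1u << 1, 1u << 17, "SDVOB" },
    { 1u << 2, 1u << 18, "HDMIB" },
};

static unsigned build_hotplug_en(unsigned supported_mask)
{
    unsigned en = 0;
    for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
        if (supported_mask & map[i].status)
            en |= map[i].enable;
    return en;
}

int main(void)
{
    /* Only CRT and HDMIB registered a connector. */
    unsigned supported = (1u << 0) | (1u << 2);
    printf("hotplug_en = %#x\n", build_hotplug_en(supported));
    return 0;
}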
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 974b3cf70618..ab1bd2d3d3b6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -338,6 +338,7 @@ | |||
338 | #define FBC_CTL_PERIODIC (1<<30) | 338 | #define FBC_CTL_PERIODIC (1<<30) |
339 | #define FBC_CTL_INTERVAL_SHIFT (16) | 339 | #define FBC_CTL_INTERVAL_SHIFT (16) |
340 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) | 340 | #define FBC_CTL_UNCOMPRESSIBLE (1<<14) |
341 | #define FBC_C3_IDLE (1<<13) | ||
341 | #define FBC_CTL_STRIDE_SHIFT (5) | 342 | #define FBC_CTL_STRIDE_SHIFT (5) |
342 | #define FBC_CTL_FENCENO (1<<0) | 343 | #define FBC_CTL_FENCENO (1<<0) |
343 | #define FBC_COMMAND 0x0320c | 344 | #define FBC_COMMAND 0x0320c |
@@ -879,13 +880,6 @@ | |||
879 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 880 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
880 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ | 881 | #define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */ |
881 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f | 882 | #define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f |
882 | #define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \ | ||
883 | HDMIC_HOTPLUG_INT_EN | \ | ||
884 | HDMID_HOTPLUG_INT_EN | \ | ||
885 | SDVOB_HOTPLUG_INT_EN | \ | ||
886 | SDVOC_HOTPLUG_INT_EN | \ | ||
887 | CRT_HOTPLUG_INT_EN) | ||
888 | |||
889 | 883 | ||
890 | #define PORT_HOTPLUG_STAT 0x61114 | 884 | #define PORT_HOTPLUG_STAT 0x61114 |
891 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) | 885 | #define HDMIB_HOTPLUG_INT_STATUS (1 << 29) |
@@ -982,6 +976,8 @@ | |||
982 | #define LVDS_PORT_EN (1 << 31) | 976 | #define LVDS_PORT_EN (1 << 31) |
983 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ | 977 | /* Selects pipe B for LVDS data. Must be set on pre-965. */ |
984 | #define LVDS_PIPEB_SELECT (1 << 30) | 978 | #define LVDS_PIPEB_SELECT (1 << 30) |
979 | /* LVDS dithering flag on 965/g4x platform */ | ||
980 | #define LVDS_ENABLE_DITHER (1 << 25) | ||
985 | /* Enable border for unscaled (or aspect-scaled) display */ | 981 | /* Enable border for unscaled (or aspect-scaled) display */ |
986 | #define LVDS_BORDER_ENABLE (1 << 15) | 982 | #define LVDS_BORDER_ENABLE (1 << 15) |
987 | /* | 983 | /* |
@@ -1751,6 +1747,8 @@ | |||
1751 | 1747 | ||
1752 | /* Display & cursor control */ | 1748 | /* Display & cursor control */ |
1753 | 1749 | ||
1750 | /* dithering flag on Ironlake */ | ||
1751 | #define PIPE_ENABLE_DITHER (1 << 4) | ||
1754 | /* Pipe A */ | 1752 | /* Pipe A */ |
1755 | #define PIPEADSL 0x70000 | 1753 | #define PIPEADSL 0x70000 |
1756 | #define PIPEACONF 0x70008 | 1754 | #define PIPEACONF 0x70008 |
@@ -1818,7 +1816,7 @@ | |||
1818 | #define DSPFW_PLANEB_SHIFT 8 | 1816 | #define DSPFW_PLANEB_SHIFT 8 |
1819 | #define DSPFW2 0x70038 | 1817 | #define DSPFW2 0x70038 |
1820 | #define DSPFW_CURSORA_MASK 0x00003f00 | 1818 | #define DSPFW_CURSORA_MASK 0x00003f00 |
1821 | #define DSPFW_CURSORA_SHIFT 16 | 1819 | #define DSPFW_CURSORA_SHIFT 8 |
1822 | #define DSPFW3 0x7003c | 1820 | #define DSPFW3 0x7003c |
1823 | #define DSPFW_HPLL_SR_EN (1<<31) | 1821 | #define DSPFW_HPLL_SR_EN (1<<31) |
1824 | #define DSPFW_CURSOR_SR_SHIFT 24 | 1822 | #define DSPFW_CURSOR_SR_SHIFT 24 |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index d5ebb00a9d49..a3b90c9561dc 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev) | |||
732 | 732 | ||
733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); | 733 | pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); |
734 | 734 | ||
735 | /* Render Standby */ | ||
736 | if (I915_HAS_RC6(dev)) { | ||
737 | dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); | ||
738 | dev_priv->savePWRCTXA = I915_READ(PWRCTXA); | ||
739 | } | ||
740 | |||
741 | /* Hardware status page */ | 735 | /* Hardware status page */ |
742 | dev_priv->saveHWS = I915_READ(HWS_PGA); | 736 | dev_priv->saveHWS = I915_READ(HWS_PGA); |
743 | 737 | ||
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev) | |||
793 | 787 | ||
794 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); | 788 | pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); |
795 | 789 | ||
796 | /* Render Standby */ | ||
797 | if (I915_HAS_RC6(dev)) { | ||
798 | I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); | ||
799 | I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA); | ||
800 | } | ||
801 | |||
802 | /* Hardware status page */ | 790 | /* Hardware status page */ |
803 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); | 791 | I915_WRITE(HWS_PGA, dev_priv->saveHWS); |
804 | 792 | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index f27567747580..15fbc1b5a83e 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #define SLAVE_ADDR1 0x70 | 33 | #define SLAVE_ADDR1 0x70 |
34 | #define SLAVE_ADDR2 0x72 | 34 | #define SLAVE_ADDR2 0x72 |
35 | 35 | ||
36 | static int panel_type; | ||
37 | |||
36 | static void * | 38 | static void * |
37 | find_section(struct bdb_header *bdb, int section_id) | 39 | find_section(struct bdb_header *bdb, int section_id) |
38 | { | 40 | { |
@@ -128,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
128 | dev_priv->lvds_dither = lvds_options->pixel_dither; | 130 | dev_priv->lvds_dither = lvds_options->pixel_dither; |
129 | if (lvds_options->panel_type == 0xff) | 131 | if (lvds_options->panel_type == 0xff) |
130 | return; | 132 | return; |
133 | panel_type = lvds_options->panel_type; | ||
131 | 134 | ||
132 | lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); | 135 | lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA); |
133 | if (!lvds_lfp_data) | 136 | if (!lvds_lfp_data) |
@@ -197,7 +200,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, | |||
197 | memset(temp_mode, 0, sizeof(*temp_mode)); | 200 | memset(temp_mode, 0, sizeof(*temp_mode)); |
198 | } | 201 | } |
199 | kfree(temp_mode); | 202 | kfree(temp_mode); |
200 | if (temp_downclock < panel_fixed_mode->clock) { | 203 | if (temp_downclock < panel_fixed_mode->clock && |
204 | i915_lvds_downclock) { | ||
201 | dev_priv->lvds_downclock_avail = 1; | 205 | dev_priv->lvds_downclock_avail = 1; |
202 | dev_priv->lvds_downclock = temp_downclock; | 206 | dev_priv->lvds_downclock = temp_downclock; |
203 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", | 207 | DRM_DEBUG_KMS("LVDS downclock is found in VBT. ", |
@@ -405,6 +409,34 @@ parse_driver_features(struct drm_i915_private *dev_priv, | |||
405 | } | 409 | } |
406 | 410 | ||
407 | static void | 411 | static void |
412 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | ||
413 | { | ||
414 | struct bdb_edp *edp; | ||
415 | |||
416 | edp = find_section(bdb, BDB_EDP); | ||
417 | if (!edp) { | ||
418 | if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) { | ||
419 | DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported,\ | ||
420 | assume 18bpp panel color depth.\n"); | ||
421 | dev_priv->edp_bpp = 18; | ||
422 | } | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | switch ((edp->color_depth >> (panel_type * 2)) & 3) { | ||
427 | case EDP_18BPP: | ||
428 | dev_priv->edp_bpp = 18; | ||
429 | break; | ||
430 | case EDP_24BPP: | ||
431 | dev_priv->edp_bpp = 24; | ||
432 | break; | ||
433 | case EDP_30BPP: | ||
434 | dev_priv->edp_bpp = 30; | ||
435 | break; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | static void | ||
408 | parse_device_mapping(struct drm_i915_private *dev_priv, | 440 | parse_device_mapping(struct drm_i915_private *dev_priv, |
409 | struct bdb_header *bdb) | 441 | struct bdb_header *bdb) |
410 | { | 442 | { |
@@ -521,6 +553,7 @@ intel_init_bios(struct drm_device *dev) | |||
521 | parse_sdvo_device_mapping(dev_priv, bdb); | 553 | parse_sdvo_device_mapping(dev_priv, bdb); |
522 | parse_device_mapping(dev_priv, bdb); | 554 | parse_device_mapping(dev_priv, bdb); |
523 | parse_driver_features(dev_priv, bdb); | 555 | parse_driver_features(dev_priv, bdb); |
556 | parse_edp(dev_priv, bdb); | ||
524 | 557 | ||
525 | pci_unmap_rom(pdev, bios); | 558 | pci_unmap_rom(pdev, bios); |
526 | 559 | ||
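parse_edp() above indexes a packed field: the VBT's color_depth word carries one 2-bit code per panel type, so panel N's code is (color_depth >> (N * 2)) & 3. A quick worked sketch of that extraction:

#include <stdio.h>

enum { EDP_18BPP = 0, EDP_24BPP = 1, EDP_30BPP = 2 };

/* Extract panel N's 2-bit depth code from the packed word. */
static int edp_bpp(unsigned color_depth, int panel_type)
{
    switch ((color_depth >> (panel_type * 2)) & 3) {
    case EDP_18BPP: return 18;
    case EDP_24BPP: return 24;
    case EDP_30BPP: return 30;
    }
    return 18;  /* unknown code: fall back to 18bpp */
}

int main(void)
{
    /* Panels 0..2 encoded as 18, 24, 30 bpp: 0b100100 = 0x24. */
    unsigned packed = 0x24;
    for (int p = 0; p < 3; p++)
        printf("panel %d -> %d bpp\n", p, edp_bpp(packed, p));
    return 0;
}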
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 425ac9d7f724..4c18514f6f80 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -98,6 +98,7 @@ struct vbios_data { | |||
98 | #define BDB_SDVO_LVDS_PNP_IDS 24 | 98 | #define BDB_SDVO_LVDS_PNP_IDS 24 |
99 | #define BDB_SDVO_LVDS_POWER_SEQ 25 | 99 | #define BDB_SDVO_LVDS_POWER_SEQ 25 |
100 | #define BDB_TV_OPTIONS 26 | 100 | #define BDB_TV_OPTIONS 26 |
101 | #define BDB_EDP 27 | ||
101 | #define BDB_LVDS_OPTIONS 40 | 102 | #define BDB_LVDS_OPTIONS 40 |
102 | #define BDB_LVDS_LFP_DATA_PTRS 41 | 103 | #define BDB_LVDS_LFP_DATA_PTRS 41 |
103 | #define BDB_LVDS_LFP_DATA 42 | 104 | #define BDB_LVDS_LFP_DATA 42 |
@@ -426,6 +427,45 @@ struct bdb_driver_features { | |||
426 | u8 custom_vbt_version; | 427 | u8 custom_vbt_version; |
427 | } __attribute__((packed)); | 428 | } __attribute__((packed)); |
428 | 429 | ||
430 | #define EDP_18BPP 0 | ||
431 | #define EDP_24BPP 1 | ||
432 | #define EDP_30BPP 2 | ||
433 | #define EDP_RATE_1_62 0 | ||
434 | #define EDP_RATE_2_7 1 | ||
435 | #define EDP_LANE_1 0 | ||
436 | #define EDP_LANE_2 1 | ||
437 | #define EDP_LANE_4 3 | ||
438 | #define EDP_PREEMPHASIS_NONE 0 | ||
439 | #define EDP_PREEMPHASIS_3_5dB 1 | ||
440 | #define EDP_PREEMPHASIS_6dB 2 | ||
441 | #define EDP_PREEMPHASIS_9_5dB 3 | ||
442 | #define EDP_VSWING_0_4V 0 | ||
443 | #define EDP_VSWING_0_6V 1 | ||
444 | #define EDP_VSWING_0_8V 2 | ||
445 | #define EDP_VSWING_1_2V 3 | ||
446 | |||
447 | struct edp_power_seq { | ||
448 | u16 t3; | ||
449 | u16 t7; | ||
450 | u16 t9; | ||
451 | u16 t10; | ||
452 | u16 t12; | ||
453 | } __attribute__ ((packed)); | ||
454 | |||
455 | struct edp_link_params { | ||
456 | u8 rate:4; | ||
457 | u8 lanes:4; | ||
458 | u8 preemphasis:4; | ||
459 | u8 vswing:4; | ||
460 | } __attribute__ ((packed)); | ||
461 | |||
462 | struct bdb_edp { | ||
463 | struct edp_power_seq power_seqs[16]; | ||
464 | u32 color_depth; | ||
465 | u32 sdrrs_msa_timing_delay; | ||
466 | struct edp_link_params link_params[16]; | ||
467 | } __attribute__ ((packed)); | ||
468 | |||
429 | bool intel_init_bios(struct drm_device *dev); | 469 | bool intel_init_bios(struct drm_device *dev); |
430 | 470 | ||
431 | /* | 471 | /* |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9f3d3e563414..79dd4026586f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
157 | adpa = I915_READ(PCH_ADPA); | 157 | adpa = I915_READ(PCH_ADPA); |
158 | 158 | ||
159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | 159 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
160 | /* disable HPD first */ | ||
161 | I915_WRITE(PCH_ADPA, adpa); | ||
162 | (void)I915_READ(PCH_ADPA); | ||
160 | 163 | ||
161 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | | 164 | adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | |
162 | ADPA_CRT_HOTPLUG_WARMUP_10MS | | 165 | ADPA_CRT_HOTPLUG_WARMUP_10MS | |
@@ -548,4 +551,6 @@ void intel_crt_init(struct drm_device *dev) | |||
548 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 551 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
549 | 552 | ||
550 | drm_sysfs_connector_add(connector); | 553 | drm_sysfs_connector_add(connector); |
554 | |||
555 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | ||
551 | } | 556 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 52cd9b006da2..b27202d23ebc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -70,8 +70,6 @@ struct intel_limit { | |||
70 | intel_p2_t p2; | 70 | intel_p2_t p2; |
71 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, | 71 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, |
72 | int, int, intel_clock_t *); | 72 | int, int, intel_clock_t *); |
73 | bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *, | ||
74 | int, int, intel_clock_t *); | ||
75 | }; | 73 | }; |
76 | 74 | ||
77 | #define I8XX_DOT_MIN 25000 | 75 | #define I8XX_DOT_MIN 25000 |
@@ -242,38 +240,93 @@ struct intel_limit { | |||
242 | #define IRONLAKE_DOT_MAX 350000 | 240 | #define IRONLAKE_DOT_MAX 350000 |
243 | #define IRONLAKE_VCO_MIN 1760000 | 241 | #define IRONLAKE_VCO_MIN 1760000 |
244 | #define IRONLAKE_VCO_MAX 3510000 | 242 | #define IRONLAKE_VCO_MAX 3510000 |
245 | #define IRONLAKE_N_MIN 1 | ||
246 | #define IRONLAKE_N_MAX 5 | ||
247 | #define IRONLAKE_M_MIN 79 | ||
248 | #define IRONLAKE_M_MAX 118 | ||
249 | #define IRONLAKE_M1_MIN 12 | 243 | #define IRONLAKE_M1_MIN 12 |
250 | #define IRONLAKE_M1_MAX 23 | 244 | #define IRONLAKE_M1_MAX 22 |
251 | #define IRONLAKE_M2_MIN 5 | 245 | #define IRONLAKE_M2_MIN 5 |
252 | #define IRONLAKE_M2_MAX 9 | 246 | #define IRONLAKE_M2_MAX 9 |
253 | #define IRONLAKE_P_SDVO_DAC_MIN 5 | ||
254 | #define IRONLAKE_P_SDVO_DAC_MAX 80 | ||
255 | #define IRONLAKE_P_LVDS_MIN 28 | ||
256 | #define IRONLAKE_P_LVDS_MAX 112 | ||
257 | #define IRONLAKE_P1_MIN 1 | ||
258 | #define IRONLAKE_P1_MAX 8 | ||
259 | #define IRONLAKE_P2_SDVO_DAC_SLOW 10 | ||
260 | #define IRONLAKE_P2_SDVO_DAC_FAST 5 | ||
261 | #define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */ | ||
262 | #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ | ||
263 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ | 247 | #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ |
264 | 248 | ||
249 | /* We have parameter ranges for different types of output. */ ||
250 | |||
251 | /* DAC & HDMI Refclk 120Mhz */ | ||
252 | #define IRONLAKE_DAC_N_MIN 1 | ||
253 | #define IRONLAKE_DAC_N_MAX 5 | ||
254 | #define IRONLAKE_DAC_M_MIN 79 | ||
255 | #define IRONLAKE_DAC_M_MAX 127 | ||
256 | #define IRONLAKE_DAC_P_MIN 5 | ||
257 | #define IRONLAKE_DAC_P_MAX 80 | ||
258 | #define IRONLAKE_DAC_P1_MIN 1 | ||
259 | #define IRONLAKE_DAC_P1_MAX 8 | ||
260 | #define IRONLAKE_DAC_P2_SLOW 10 | ||
261 | #define IRONLAKE_DAC_P2_FAST 5 | ||
262 | |||
263 | /* LVDS single-channel 120Mhz refclk */ | ||
264 | #define IRONLAKE_LVDS_S_N_MIN 1 | ||
265 | #define IRONLAKE_LVDS_S_N_MAX 3 | ||
266 | #define IRONLAKE_LVDS_S_M_MIN 79 | ||
267 | #define IRONLAKE_LVDS_S_M_MAX 118 | ||
268 | #define IRONLAKE_LVDS_S_P_MIN 28 | ||
269 | #define IRONLAKE_LVDS_S_P_MAX 112 | ||
270 | #define IRONLAKE_LVDS_S_P1_MIN 2 | ||
271 | #define IRONLAKE_LVDS_S_P1_MAX 8 | ||
272 | #define IRONLAKE_LVDS_S_P2_SLOW 14 | ||
273 | #define IRONLAKE_LVDS_S_P2_FAST 14 | ||
274 | |||
275 | /* LVDS dual-channel 120Mhz refclk */ | ||
276 | #define IRONLAKE_LVDS_D_N_MIN 1 | ||
277 | #define IRONLAKE_LVDS_D_N_MAX 3 | ||
278 | #define IRONLAKE_LVDS_D_M_MIN 79 | ||
279 | #define IRONLAKE_LVDS_D_M_MAX 127 | ||
280 | #define IRONLAKE_LVDS_D_P_MIN 14 | ||
281 | #define IRONLAKE_LVDS_D_P_MAX 56 | ||
282 | #define IRONLAKE_LVDS_D_P1_MIN 2 | ||
283 | #define IRONLAKE_LVDS_D_P1_MAX 8 | ||
284 | #define IRONLAKE_LVDS_D_P2_SLOW 7 | ||
285 | #define IRONLAKE_LVDS_D_P2_FAST 7 | ||
286 | |||
287 | /* LVDS single-channel 100Mhz refclk */ | ||
288 | #define IRONLAKE_LVDS_S_SSC_N_MIN 1 | ||
289 | #define IRONLAKE_LVDS_S_SSC_N_MAX 2 | ||
290 | #define IRONLAKE_LVDS_S_SSC_M_MIN 79 | ||
291 | #define IRONLAKE_LVDS_S_SSC_M_MAX 126 | ||
292 | #define IRONLAKE_LVDS_S_SSC_P_MIN 28 | ||
293 | #define IRONLAKE_LVDS_S_SSC_P_MAX 112 | ||
294 | #define IRONLAKE_LVDS_S_SSC_P1_MIN 2 | ||
295 | #define IRONLAKE_LVDS_S_SSC_P1_MAX 8 | ||
296 | #define IRONLAKE_LVDS_S_SSC_P2_SLOW 14 | ||
297 | #define IRONLAKE_LVDS_S_SSC_P2_FAST 14 | ||
298 | |||
299 | /* LVDS dual-channel 100Mhz refclk */ | ||
300 | #define IRONLAKE_LVDS_D_SSC_N_MIN 1 | ||
301 | #define IRONLAKE_LVDS_D_SSC_N_MAX 3 | ||
302 | #define IRONLAKE_LVDS_D_SSC_M_MIN 79 | ||
303 | #define IRONLAKE_LVDS_D_SSC_M_MAX 126 | ||
304 | #define IRONLAKE_LVDS_D_SSC_P_MIN 14 | ||
305 | #define IRONLAKE_LVDS_D_SSC_P_MAX 42 | ||
306 | #define IRONLAKE_LVDS_D_SSC_P1_MIN 2 | ||
307 | #define IRONLAKE_LVDS_D_SSC_P1_MAX 6 | ||
308 | #define IRONLAKE_LVDS_D_SSC_P2_SLOW 7 | ||
309 | #define IRONLAKE_LVDS_D_SSC_P2_FAST 7 | ||
310 | |||
311 | /* DisplayPort */ | ||
312 | #define IRONLAKE_DP_N_MIN 1 | ||
313 | #define IRONLAKE_DP_N_MAX 2 | ||
314 | #define IRONLAKE_DP_M_MIN 81 | ||
315 | #define IRONLAKE_DP_M_MAX 90 | ||
316 | #define IRONLAKE_DP_P_MIN 10 | ||
317 | #define IRONLAKE_DP_P_MAX 20 | ||
318 | #define IRONLAKE_DP_P2_FAST 10 | ||
319 | #define IRONLAKE_DP_P2_SLOW 10 | ||
320 | #define IRONLAKE_DP_P2_LIMIT 0 | ||
321 | #define IRONLAKE_DP_P1_MIN 1 | ||
322 | #define IRONLAKE_DP_P1_MAX 2 | ||
323 | |||
265 | static bool | 324 | static bool |
266 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 325 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
267 | int target, int refclk, intel_clock_t *best_clock); | 326 | int target, int refclk, intel_clock_t *best_clock); |
268 | static bool | 327 | static bool |
269 | intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
270 | int target, int refclk, intel_clock_t *best_clock); | ||
271 | static bool | ||
272 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 328 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
273 | int target, int refclk, intel_clock_t *best_clock); | 329 | int target, int refclk, intel_clock_t *best_clock); |
274 | static bool | ||
275 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
276 | int target, int refclk, intel_clock_t *best_clock); | ||
277 | 330 | ||
278 | static bool | 331 | static bool |
279 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | 332 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, |
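All of these limit tables constrain the same divider equation. In the i9xx-style encoding the driver computes, roughly, m = 5*m1 + m2, p = p1*p2, vco = refclk*m/(n+2) and dot = vco/p, and a candidate clock is valid only if every term falls inside the table's range; other generations encode m and n differently, so treat the sketch below as illustrative:

#include <stdio.h>

struct clock { int m1, m2, n, p1, p2; };

/* Sketch of the divider arithmetic behind these limit tables, using the
 * i9xx-style encoding. refclk is in kHz, so the result is in kHz too. */
static int dot_clock(int refclk, const struct clock *c)
{
    int m   = 5 * c->m1 + c->m2;
    int p   = c->p1 * c->p2;
    int vco = refclk * m / (c->n + 2);
    return vco / p;
}

int main(void)
{
    struct clock c = { .m1 = 12, .m2 = 9, .n = 3, .p1 = 2, .p2 = 10 };
    printf("dot = %d kHz\n", dot_clock(120000, &c));
    return 0;
}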
@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = { | |||
294 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 347 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
295 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, | 348 | .p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST }, |
296 | .find_pll = intel_find_best_PLL, | 349 | .find_pll = intel_find_best_PLL, |
297 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
298 | }; | 350 | }; |
299 | 351 | ||
300 | static const intel_limit_t intel_limits_i8xx_lvds = { | 352 | static const intel_limit_t intel_limits_i8xx_lvds = { |
@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = { | |||
309 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, | 361 | .p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT, |
310 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, | 362 | .p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST }, |
311 | .find_pll = intel_find_best_PLL, | 363 | .find_pll = intel_find_best_PLL, |
312 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
313 | }; | 364 | }; |
314 | 365 | ||
315 | static const intel_limit_t intel_limits_i9xx_sdvo = { | 366 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { | |||
324 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 375 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
325 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 376 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
326 | .find_pll = intel_find_best_PLL, | 377 | .find_pll = intel_find_best_PLL, |
327 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
328 | }; | 378 | }; |
329 | 379 | ||
330 | static const intel_limit_t intel_limits_i9xx_lvds = { | 380 | static const intel_limit_t intel_limits_i9xx_lvds = { |
@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = { | |||
342 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 392 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
343 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, | 393 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST }, |
344 | .find_pll = intel_find_best_PLL, | 394 | .find_pll = intel_find_best_PLL, |
345 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
346 | }; | 395 | }; |
347 | 396 | ||
348 | /* the parameters and functions below are for the G4X chipset family */ | 397 | /* the parameters and functions below are for the G4X chipset family */ |
@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = { | |||
360 | .p2_fast = G4X_P2_SDVO_FAST | 409 | .p2_fast = G4X_P2_SDVO_FAST |
361 | }, | 410 | }, |
362 | .find_pll = intel_g4x_find_best_PLL, | 411 | .find_pll = intel_g4x_find_best_PLL, |
363 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
364 | }; | 412 | }; |
365 | 413 | ||
366 | static const intel_limit_t intel_limits_g4x_hdmi = { | 414 | static const intel_limit_t intel_limits_g4x_hdmi = { |
@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = { | |||
377 | .p2_fast = G4X_P2_HDMI_DAC_FAST | 425 | .p2_fast = G4X_P2_HDMI_DAC_FAST |
378 | }, | 426 | }, |
379 | .find_pll = intel_g4x_find_best_PLL, | 427 | .find_pll = intel_g4x_find_best_PLL, |
380 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
381 | }; | 428 | }; |
382 | 429 | ||
383 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | 430 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { |
@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | |||
402 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST | 449 | .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST |
403 | }, | 450 | }, |
404 | .find_pll = intel_g4x_find_best_PLL, | 451 | .find_pll = intel_g4x_find_best_PLL, |
405 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
406 | }; | 452 | }; |
407 | 453 | ||
408 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | 454 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { |
@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | |||
427 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST | 473 | .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST |
428 | }, | 474 | }, |
429 | .find_pll = intel_g4x_find_best_PLL, | 475 | .find_pll = intel_g4x_find_best_PLL, |
430 | .find_reduced_pll = intel_g4x_find_best_PLL, | ||
431 | }; | 476 | }; |
432 | 477 | ||
433 | static const intel_limit_t intel_limits_g4x_display_port = { | 478 | static const intel_limit_t intel_limits_g4x_display_port = { |
@@ -465,7 +510,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = { | |||
465 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, | 510 | .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, |
466 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, | 511 | .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, |
467 | .find_pll = intel_find_best_PLL, | 512 | .find_pll = intel_find_best_PLL, |
468 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
469 | }; | 513 | }; |
470 | 514 | ||
471 | static const intel_limit_t intel_limits_pineview_lvds = { | 515 | static const intel_limit_t intel_limits_pineview_lvds = { |
@@ -481,46 +525,135 @@ static const intel_limit_t intel_limits_pineview_lvds = { | |||
481 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, | 525 | .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, |
482 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, | 526 | .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, |
483 | .find_pll = intel_find_best_PLL, | 527 | .find_pll = intel_find_best_PLL, |
484 | .find_reduced_pll = intel_find_best_reduced_PLL, | ||
485 | }; | 528 | }; |
486 | 529 | ||
487 | static const intel_limit_t intel_limits_ironlake_sdvo = { | 530 | static const intel_limit_t intel_limits_ironlake_dac = { |
531 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
532 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
533 | .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX }, | ||
534 | .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX }, | ||
535 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
536 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
537 | .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX }, | ||
538 | .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX }, | ||
539 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
540 | .p2_slow = IRONLAKE_DAC_P2_SLOW, | ||
541 | .p2_fast = IRONLAKE_DAC_P2_FAST }, | ||
542 | .find_pll = intel_g4x_find_best_PLL, | ||
543 | }; | ||
544 | |||
545 | static const intel_limit_t intel_limits_ironlake_single_lvds = { | ||
546 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
547 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
548 | .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX }, | ||
549 | .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX }, | ||
550 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
551 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
552 | .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX }, | ||
553 | .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX }, | ||
554 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
555 | .p2_slow = IRONLAKE_LVDS_S_P2_SLOW, | ||
556 | .p2_fast = IRONLAKE_LVDS_S_P2_FAST }, | ||
557 | .find_pll = intel_g4x_find_best_PLL, | ||
558 | }; | ||
559 | |||
560 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { | ||
488 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 561 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
489 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 562 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
490 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, | 563 | .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX }, |
491 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, | 564 | .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX }, |
492 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 565 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
493 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 566 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
494 | .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX }, | 567 | .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX }, |
495 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, | 568 | .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX }, |
496 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 569 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
497 | .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, | 570 | .p2_slow = IRONLAKE_LVDS_D_P2_SLOW, |
498 | .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, | 571 | .p2_fast = IRONLAKE_LVDS_D_P2_FAST }, |
499 | .find_pll = intel_ironlake_find_best_PLL, | 572 | .find_pll = intel_g4x_find_best_PLL, |
500 | }; | 573 | }; |
501 | 574 | ||
502 | static const intel_limit_t intel_limits_ironlake_lvds = { | 575 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { |
503 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | 576 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, |
504 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | 577 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, |
505 | .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX }, | 578 | .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX }, |
506 | .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX }, | 579 | .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX }, |
507 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | 580 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, |
508 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | 581 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, |
509 | .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX }, | 582 | .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX }, |
510 | .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX }, | 583 | .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX }, |
511 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | 584 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, |
512 | .p2_slow = IRONLAKE_P2_LVDS_SLOW, | 585 | .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW, |
513 | .p2_fast = IRONLAKE_P2_LVDS_FAST }, | 586 | .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST }, |
514 | .find_pll = intel_ironlake_find_best_PLL, | 587 | .find_pll = intel_g4x_find_best_PLL, |
588 | }; | ||
589 | |||
590 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { | ||
591 | .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX }, | ||
592 | .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX }, | ||
593 | .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX }, | ||
594 | .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX }, | ||
595 | .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX }, | ||
596 | .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX }, | ||
597 | .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX }, | ||
598 | .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX }, | ||
599 | .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, | ||
600 | .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW, | ||
601 | .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST }, | ||
602 | .find_pll = intel_g4x_find_best_PLL, | ||
603 | }; | ||
604 | |||
605 | static const intel_limit_t intel_limits_ironlake_display_port = { | ||
606 | .dot = { .min = IRONLAKE_DOT_MIN, | ||
607 | .max = IRONLAKE_DOT_MAX }, | ||
608 | .vco = { .min = IRONLAKE_VCO_MIN, | ||
609 | .max = IRONLAKE_VCO_MAX}, | ||
610 | .n = { .min = IRONLAKE_DP_N_MIN, | ||
611 | .max = IRONLAKE_DP_N_MAX }, | ||
612 | .m = { .min = IRONLAKE_DP_M_MIN, | ||
613 | .max = IRONLAKE_DP_M_MAX }, | ||
614 | .m1 = { .min = IRONLAKE_M1_MIN, | ||
615 | .max = IRONLAKE_M1_MAX }, | ||
616 | .m2 = { .min = IRONLAKE_M2_MIN, | ||
617 | .max = IRONLAKE_M2_MAX }, | ||
618 | .p = { .min = IRONLAKE_DP_P_MIN, | ||
619 | .max = IRONLAKE_DP_P_MAX }, | ||
620 | .p1 = { .min = IRONLAKE_DP_P1_MIN, | ||
621 | .max = IRONLAKE_DP_P1_MAX}, | ||
622 | .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT, | ||
623 | .p2_slow = IRONLAKE_DP_P2_SLOW, | ||
624 | .p2_fast = IRONLAKE_DP_P2_FAST }, | ||
625 | .find_pll = intel_find_pll_ironlake_dp, | ||
515 | }; | 626 | }; |
516 | 627 | ||
517 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) | 628 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) |
518 | { | 629 | { |
630 | struct drm_device *dev = crtc->dev; | ||
631 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
519 | const intel_limit_t *limit; | 632 | const intel_limit_t *limit; |
520 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 633 | int refclk = 120; |
521 | limit = &intel_limits_ironlake_lvds; | 634 | |
635 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
636 | if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) | ||
637 | refclk = 100; | ||
638 | |||
639 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | ||
640 | LVDS_CLKB_POWER_UP) { | ||
641 | /* LVDS dual channel */ | ||
642 | if (refclk == 100) | ||
643 | limit = &intel_limits_ironlake_dual_lvds_100m; | ||
644 | else | ||
645 | limit = &intel_limits_ironlake_dual_lvds; | ||
646 | } else { | ||
647 | if (refclk == 100) | ||
648 | limit = &intel_limits_ironlake_single_lvds_100m; | ||
649 | else | ||
650 | limit = &intel_limits_ironlake_single_lvds; | ||
651 | } | ||
652 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | ||
653 | HAS_eDP) | ||
654 | limit = &intel_limits_ironlake_display_port; | ||
522 | else | 655 | else |
523 | limit = &intel_limits_ironlake_sdvo; | 656 | limit = &intel_limits_ironlake_dac; |
524 | 657 | ||
525 | return limit; | 658 | return limit; |
526 | } | 659 | } |
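The selection logic in intel_ironlake_limit() above reduces to a small decision table: LVDS picks along two axes (dual channel, 100 MHz SSC refclk), DP and eDP share one fixed table, and everything else gets the DAC table. A condensed sketch, with an illustrative enum in place of intel_pipe_has_type():

	enum il_out { IL_LVDS, IL_DP_OR_EDP, IL_OTHER };

	static const char *il_limit_name(enum il_out out, int dual, int refclk)
	{
		if (out == IL_DP_OR_EDP)
			return "ironlake_display_port";
		if (out != IL_LVDS)
			return "ironlake_dac";
		if (dual)
			return refclk == 100 ? "ironlake_dual_lvds_100m"
					     : "ironlake_dual_lvds";
		return refclk == 100 ? "ironlake_single_lvds_100m"
				     : "ironlake_single_lvds";
	}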
@@ -737,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
737 | return (err != target); | 870 | return (err != target); |
738 | } | 871 | } |
739 | 872 | ||
740 | |||
741 | static bool | ||
742 | intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
743 | int target, int refclk, intel_clock_t *best_clock) | ||
744 | |||
745 | { | ||
746 | struct drm_device *dev = crtc->dev; | ||
747 | intel_clock_t clock; | ||
748 | int err = target; | ||
749 | bool found = false; | ||
750 | |||
751 | memcpy(&clock, best_clock, sizeof(intel_clock_t)); | ||
752 | |||
753 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { | ||
754 | for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { | ||
755 | /* m1 is always 0 in Pineview */ | ||
756 | if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) | ||
757 | break; | ||
758 | for (clock.n = limit->n.min; clock.n <= limit->n.max; | ||
759 | clock.n++) { | ||
760 | int this_err; | ||
761 | |||
762 | intel_clock(dev, refclk, &clock); | ||
763 | |||
764 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
765 | continue; | ||
766 | |||
767 | this_err = abs(clock.dot - target); | ||
768 | if (this_err < err) { | ||
769 | *best_clock = clock; | ||
770 | err = this_err; | ||
771 | found = true; | ||
772 | } | ||
773 | } | ||
774 | } | ||
775 | } | ||
776 | |||
777 | return found; | ||
778 | } | ||
779 | |||
780 | static bool | 873 | static bool |
781 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | 874 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, |
782 | int target, int refclk, intel_clock_t *best_clock) | 875 | int target, int refclk, intel_clock_t *best_clock) |
@@ -791,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
791 | found = false; | 884 | found = false; |
792 | 885 | ||
793 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | 886 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
794 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | 887 | int lvds_reg; |
888 | |||
889 | if (IS_IRONLAKE(dev)) | ||
890 | lvds_reg = PCH_LVDS; | ||
891 | else | ||
892 | lvds_reg = LVDS; | ||
893 | if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == | ||
795 | LVDS_CLKB_POWER_UP) | 894 | LVDS_CLKB_POWER_UP) |
796 | clock.p2 = limit->p2.p2_fast; | 895 | clock.p2 = limit->p2.p2_fast; |
797 | else | 896 | else |
@@ -839,6 +938,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
839 | { | 938 | { |
840 | struct drm_device *dev = crtc->dev; | 939 | struct drm_device *dev = crtc->dev; |
841 | intel_clock_t clock; | 940 | intel_clock_t clock; |
941 | |||
942 | /* return directly when the output is eDP */ | ||
943 | if (HAS_eDP) | ||
944 | return true; | ||
945 | |||
842 | if (target < 200000) { | 946 | if (target < 200000) { |
843 | clock.n = 1; | 947 | clock.n = 1; |
844 | clock.p1 = 2; | 948 | clock.p1 = 2; |
@@ -857,68 +961,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
857 | return true; | 961 | return true; |
858 | } | 962 | } |
859 | 963 | ||
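The DP path never searches: link clocks are fixed, so "finding" the PLL collapses to the threshold test visible above. A sketch of that rule (kHz units, threshold taken from the code):

	static int ironlake_dp_link_clock_khz(int target_khz)
	{
		return target_khz < 200000 ? 162000 : 270000;
	}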
860 | static bool | ||
861 | intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | ||
862 | int target, int refclk, intel_clock_t *best_clock) | ||
863 | { | ||
864 | struct drm_device *dev = crtc->dev; | ||
865 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
866 | intel_clock_t clock; | ||
867 | int err_most = 47; | ||
868 | int err_min = 10000; | ||
869 | |||
870 | /* eDP has only 2 clock choices, no n/m/p settings */ | ||
871 | if (HAS_eDP) | ||
872 | return true; | ||
873 | |||
874 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) | ||
875 | return intel_find_pll_ironlake_dp(limit, crtc, target, | ||
876 | refclk, best_clock); | ||
877 | |||
878 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | ||
879 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == | ||
880 | LVDS_CLKB_POWER_UP) | ||
881 | clock.p2 = limit->p2.p2_fast; | ||
882 | else | ||
883 | clock.p2 = limit->p2.p2_slow; | ||
884 | } else { | ||
885 | if (target < limit->p2.dot_limit) | ||
886 | clock.p2 = limit->p2.p2_slow; | ||
887 | else | ||
888 | clock.p2 = limit->p2.p2_fast; | ||
889 | } | ||
890 | |||
891 | memset(best_clock, 0, sizeof(*best_clock)); | ||
892 | for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { | ||
893 | /* based on hardware requirement, prefer smaller n for precision */ | ||
894 | for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { | ||
895 | /* based on hardware requirement, prefer larger m1, m2 */ | ||
896 | for (clock.m1 = limit->m1.max; | ||
897 | clock.m1 >= limit->m1.min; clock.m1--) { | ||
898 | for (clock.m2 = limit->m2.max; | ||
899 | clock.m2 >= limit->m2.min; clock.m2--) { | ||
900 | int this_err; | ||
901 | |||
902 | intel_clock(dev, refclk, &clock); | ||
903 | if (!intel_PLL_is_valid(crtc, &clock)) | ||
904 | continue; | ||
905 | this_err = abs((10000 - (target*10000/clock.dot))); | ||
906 | if (this_err < err_most) { | ||
907 | *best_clock = clock; | ||
908 | /* found on first matching */ | ||
909 | goto out; | ||
910 | } else if (this_err < err_min) { | ||
911 | *best_clock = clock; | ||
912 | err_min = this_err; | ||
913 | } | ||
914 | } | ||
915 | } | ||
916 | } | ||
917 | } | ||
918 | out: | ||
919 | return true; | ||
920 | } | ||
921 | |||
922 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ | 964 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
923 | static bool | 965 | static bool |
924 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | 966 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
@@ -989,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
989 | 1031 | ||
990 | /* enable it... */ | 1032 | /* enable it... */ |
991 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | 1033 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; |
1034 | if (IS_I945GM(dev)) | ||
1035 | fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */ | ||
992 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | 1036 | fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
993 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; | 1037 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
994 | if (obj_priv->tiling_mode != I915_TILING_NONE) | 1038 | if (obj_priv->tiling_mode != I915_TILING_NONE) |
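The hunk above assembles the FBC control word one field at a time, with the new 945GM-only idle bit ORed in before the stride and interval fields. A standalone sketch of that assembly; the bit positions are assumptions modeled on i915_reg.h, not verified here:

	#include <stdint.h>

	#define SK_FBC_CTL_EN			(1u << 31)
	#define SK_FBC_CTL_PERIODIC		(1u << 30)
	#define SK_FBC_C3_IDLE			(1u << 13)	/* 945GM only */
	#define SK_FBC_CTL_STRIDE_SHIFT		5
	#define SK_FBC_CTL_INTERVAL_SHIFT	16

	static uint32_t sk_fbc_ctl(int is_945gm, uint32_t cfb_pitch,
				   uint32_t interval)
	{
		uint32_t ctl = SK_FBC_CTL_EN | SK_FBC_CTL_PERIODIC;

		if (is_945gm)
			ctl |= SK_FBC_C3_IDLE;	/* 945 needs special SR handling */
		ctl |= (cfb_pitch & 0xff) << SK_FBC_CTL_STRIDE_SHIFT;
		ctl |= (interval & 0x2fff) << SK_FBC_CTL_INTERVAL_SHIFT;
		return ctl;
	}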
@@ -1282,7 +1326,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1282 | return ret; | 1326 | return ret; |
1283 | } | 1327 | } |
1284 | 1328 | ||
1285 | ret = i915_gem_object_set_to_gtt_domain(obj, 1); | 1329 | ret = i915_gem_object_set_to_display_plane(obj); |
1286 | if (ret != 0) { | 1330 | if (ret != 0) { |
1287 | i915_gem_object_unpin(obj); | 1331 | i915_gem_object_unpin(obj); |
1288 | mutex_unlock(&dev->struct_mutex); | 1332 | mutex_unlock(&dev->struct_mutex); |
@@ -1493,6 +1537,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1493 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; | 1537 | int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B; |
1494 | u32 temp; | 1538 | u32 temp; |
1495 | int tries = 5, j, n; | 1539 | int tries = 5, j, n; |
1540 | u32 pipe_bpc; | ||
1541 | |||
1542 | temp = I915_READ(pipeconf_reg); | ||
1543 | pipe_bpc = temp & PIPE_BPC_MASK; | ||
1496 | 1544 | ||
1497 | /* XXX: When our outputs are all unaware of DPMS modes other than off | 1545 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
1498 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | 1546 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. |
@@ -1524,6 +1572,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1524 | 1572 | ||
1525 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ | 1573 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
1526 | temp = I915_READ(fdi_rx_reg); | 1574 | temp = I915_READ(fdi_rx_reg); |
1575 | /* | ||
1576 | * keep the BPC in FDI Rx consistent with that in the | ||
1577 | * pipeconf reg. | ||
1578 | */ | ||
1579 | temp &= ~(0x7 << 16); | ||
1580 | temp |= (pipe_bpc << 11); | ||
1527 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | | 1581 | I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | |
1528 | FDI_SEL_PCDCLK | | 1582 | FDI_SEL_PCDCLK | |
1529 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ | 1583 | FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ |
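The pipe's 3-bit BPC field is moved between two register encodings here: the clear of ~(0x7 << 16) plus the "<< 11" imply PIPE_BPC_MASK covers bits 5-7 of PIPECONF while the FDI RX control keeps the field at bits 16-18. A standalone sketch of that field transplant, under those assumed offsets:

	#include <stdint.h>

	static uint32_t sk_fdi_rx_with_pipe_bpc(uint32_t pipeconf, uint32_t fdi_rx)
	{
		uint32_t pipe_bpc = pipeconf & (0x7u << 5);	/* PIPE_BPC_MASK */

		fdi_rx &= ~(0x7u << 16);	/* clear the FDI RX BPC field */
		fdi_rx |= pipe_bpc << 11;	/* bits 5-7 -> bits 16-18 */
		return fdi_rx;
	}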
@@ -1666,6 +1720,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1666 | 1720 | ||
1667 | /* enable PCH transcoder */ | 1721 | /* enable PCH transcoder */ |
1668 | temp = I915_READ(transconf_reg); | 1722 | temp = I915_READ(transconf_reg); |
1723 | /* | ||
1724 | * keep the BPC in the transcoder consistent with | ||
1725 | * that in the pipeconf reg. | ||
1726 | */ | ||
1727 | temp &= ~PIPE_BPC_MASK; | ||
1728 | temp |= pipe_bpc; | ||
1669 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); | 1729 | I915_WRITE(transconf_reg, temp | TRANS_ENABLE); |
1670 | I915_READ(transconf_reg); | 1730 | I915_READ(transconf_reg); |
1671 | 1731 | ||
@@ -1697,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1697 | case DRM_MODE_DPMS_OFF: | 1757 | case DRM_MODE_DPMS_OFF: |
1698 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); | 1758 | DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); |
1699 | 1759 | ||
1760 | drm_vblank_off(dev, pipe); | ||
1700 | /* Disable display plane */ | 1761 | /* Disable display plane */ |
1701 | temp = I915_READ(dspcntr_reg); | 1762 | temp = I915_READ(dspcntr_reg); |
1702 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { | 1763 | if ((temp & DISPLAY_PLANE_ENABLE) != 0) { |
@@ -1745,6 +1806,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1745 | I915_READ(fdi_tx_reg); | 1806 | I915_READ(fdi_tx_reg); |
1746 | 1807 | ||
1747 | temp = I915_READ(fdi_rx_reg); | 1808 | temp = I915_READ(fdi_rx_reg); |
1809 | /* BPC in FDI rx is consistent with that in pipeconf */ | ||
1810 | temp &= ~(0x07 << 16); | ||
1811 | temp |= (pipe_bpc << 11); | ||
1748 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); | 1812 | I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); |
1749 | I915_READ(fdi_rx_reg); | 1813 | I915_READ(fdi_rx_reg); |
1750 | 1814 | ||
@@ -1789,7 +1853,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
1789 | } | 1853 | } |
1790 | } | 1854 | } |
1791 | } | 1855 | } |
1792 | 1856 | temp = I915_READ(transconf_reg); | |
1857 | /* BPC in transcoder is consistent with that in pipeconf */ | ||
1858 | temp &= ~PIPE_BPC_MASK; | ||
1859 | temp |= pipe_bpc; | ||
1860 | I915_WRITE(transconf_reg, temp); | ||
1861 | I915_READ(transconf_reg); | ||
1793 | udelay(100); | 1862 | udelay(100); |
1794 | 1863 | ||
1795 | /* disable PCH DPLL */ | 1864 | /* disable PCH DPLL */ |
@@ -2448,7 +2517,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock, | |||
2448 | * A value of 5us seems to be a good balance; safe for very low end | 2517 | * A value of 5us seems to be a good balance; safe for very low end |
2449 | * platforms but not overly aggressive on lower latency configs. | 2518 | * platforms but not overly aggressive on lower latency configs. |
2450 | */ | 2519 | */ |
2451 | const static int latency_ns = 5000; | 2520 | static const int latency_ns = 5000; |
2452 | 2521 | ||
2453 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) | 2522 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
2454 | { | 2523 | { |
@@ -2559,7 +2628,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2559 | /* Calc sr entries for one plane configs */ | 2628 | /* Calc sr entries for one plane configs */ |
2560 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2629 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
2561 | /* self-refresh has much higher latency */ | 2630 | /* self-refresh has much higher latency */ |
2562 | const static int sr_latency_ns = 12000; | 2631 | static const int sr_latency_ns = 12000; |
2563 | 2632 | ||
2564 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2633 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2565 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2634 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2570,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, | |||
2570 | sr_entries = roundup(sr_entries / cacheline_size, 1); | 2639 | sr_entries = roundup(sr_entries / cacheline_size, 1); |
2571 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); | 2640 | DRM_DEBUG("self-refresh entries: %d\n", sr_entries); |
2572 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2641 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2642 | } else { | ||
2643 | /* Turn off self refresh if both pipes are enabled */ | ||
2644 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2645 | & ~FW_BLC_SELF_EN); | ||
2573 | } | 2646 | } |
2574 | 2647 | ||
2575 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", | 2648 | DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n", |
@@ -2598,7 +2671,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2598 | /* Calc sr entries for one plane configs */ | 2671 | /* Calc sr entries for one plane configs */ |
2599 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { | 2672 | if (sr_hdisplay && (!planea_clock || !planeb_clock)) { |
2600 | /* self-refresh has much higher latency */ | 2673 | /* self-refresh has much higher latency */ |
2601 | const static int sr_latency_ns = 12000; | 2674 | static const int sr_latency_ns = 12000; |
2602 | 2675 | ||
2603 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2676 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2604 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2677 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2613,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, | |||
2613 | srwm = 1; | 2686 | srwm = 1; |
2614 | srwm &= 0x3f; | 2687 | srwm &= 0x3f; |
2615 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); | 2688 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
2689 | } else { | ||
2690 | /* Turn off self refresh if both pipes are enabled */ | ||
2691 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2692 | & ~FW_BLC_SELF_EN); | ||
2616 | } | 2693 | } |
2617 | 2694 | ||
2618 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", | 2695 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
@@ -2667,7 +2744,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2667 | if (HAS_FW_BLC(dev) && sr_hdisplay && | 2744 | if (HAS_FW_BLC(dev) && sr_hdisplay && |
2668 | (!planea_clock || !planeb_clock)) { | 2745 | (!planea_clock || !planeb_clock)) { |
2669 | /* self-refresh has much higher latency */ | 2746 | /* self-refresh has much higher latency */ |
2670 | const static int sr_latency_ns = 6000; | 2747 | static const int sr_latency_ns = 6000; |
2671 | 2748 | ||
2672 | sr_clock = planea_clock ? planea_clock : planeb_clock; | 2749 | sr_clock = planea_clock ? planea_clock : planeb_clock; |
2673 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); | 2750 | line_time_us = ((sr_hdisplay * 1000) / sr_clock); |
@@ -2681,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, | |||
2681 | if (srwm < 0) | 2758 | if (srwm < 0) |
2682 | srwm = 1; | 2759 | srwm = 1; |
2683 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); | 2760 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); |
2761 | } else { | ||
2762 | /* Turn off self refresh if both pipes are enabled */ | ||
2763 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) | ||
2764 | & ~FW_BLC_SELF_EN); | ||
2684 | } | 2765 | } |
2685 | 2766 | ||
2686 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", | 2767 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
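All three watermark paths in these hunks (g4x, i965, i9xx) gain the same else branch: when both pipes are active, self refresh must be switched off with a read-modify-write of FW_BLC_SELF rather than left stale. A minimal sketch of that update; the bit position is an assumption for illustration:

	#include <stdint.h>

	#define SK_FW_BLC_SELF_EN	(1u << 15)	/* assumed position */

	static uint32_t sk_fw_blc_self(uint32_t reg, int single_pipe)
	{
		if (single_pipe)
			return reg | SK_FW_BLC_SELF_EN;	/* enable self refresh */
		return reg & ~SK_FW_BLC_SELF_EN;	/* both pipes: disable */
	}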
@@ -2906,10 +2987,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2906 | return -EINVAL; | 2987 | return -EINVAL; |
2907 | } | 2988 | } |
2908 | 2989 | ||
2909 | if (is_lvds && limit->find_reduced_pll && | 2990 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
2910 | dev_priv->lvds_downclock_avail) { | 2991 | has_reduced_clock = limit->find_pll(limit, crtc, |
2911 | memcpy(&reduced_clock, &clock, sizeof(intel_clock_t)); | ||
2912 | has_reduced_clock = limit->find_reduced_pll(limit, crtc, | ||
2913 | dev_priv->lvds_downclock, | 2992 | dev_priv->lvds_downclock, |
2914 | refclk, | 2993 | refclk, |
2915 | &reduced_clock); | 2994 | &reduced_clock); |
@@ -2969,6 +3048,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
2969 | 3048 | ||
2970 | /* determine panel color depth */ | 3049 | /* determine panel color depth */ |
2971 | temp = I915_READ(pipeconf_reg); | 3050 | temp = I915_READ(pipeconf_reg); |
3051 | temp &= ~PIPE_BPC_MASK; | ||
3052 | if (is_lvds) { | ||
3053 | int lvds_reg = I915_READ(PCH_LVDS); | ||
3054 | /* the BPC will be 6 if it is an 18-bit LVDS panel */ | ||
3055 | if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) | ||
3056 | temp |= PIPE_8BPC; | ||
3057 | else | ||
3058 | temp |= PIPE_6BPC; | ||
3059 | } else if (is_edp) { | ||
3060 | switch (dev_priv->edp_bpp/3) { | ||
3061 | case 8: | ||
3062 | temp |= PIPE_8BPC; | ||
3063 | break; | ||
3064 | case 10: | ||
3065 | temp |= PIPE_10BPC; | ||
3066 | break; | ||
3067 | case 6: | ||
3068 | temp |= PIPE_6BPC; | ||
3069 | break; | ||
3070 | case 12: | ||
3071 | temp |= PIPE_12BPC; | ||
3072 | break; | ||
3073 | } | ||
3074 | } else | ||
3075 | temp |= PIPE_8BPC; | ||
3076 | I915_WRITE(pipeconf_reg, temp); | ||
3077 | I915_READ(pipeconf_reg); | ||
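The eDP branch above derives per-component depth from total bits per pixel: edp_bpp / 3 for the three color components, so 24 bpp gives 8 bpc and 18 bpp gives 6 bpc. A sketch of the mapping, returning the bpc directly; the fallback case is an addition here, since the switch above has no default:

	static int sk_edp_bpc(int edp_bpp)
	{
		switch (edp_bpp / 3) {
		case 6:  return 6;
		case 8:  return 8;
		case 10: return 10;
		case 12: return 12;
		default: return 8;	/* assumed fallback, not in the driver */
		}
	}

With dev_priv->edp_bpp = 24, for example, the switch above selects PIPE_8BPC.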
2972 | 3078 | ||
2973 | switch (temp & PIPE_BPC_MASK) { | 3079 | switch (temp & PIPE_BPC_MASK) { |
2974 | case PIPE_8BPC: | 3080 | case PIPE_8BPC: |
@@ -3195,7 +3301,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3195 | * appropriately here, but we need to look more thoroughly into how | 3301 | * appropriately here, but we need to look more thoroughly into how |
3196 | * panels behave in the two modes. | 3302 | * panels behave in the two modes. |
3197 | */ | 3303 | */ |
3198 | 3304 | /* set the dithering flag */ | |
3305 | if (IS_I965G(dev)) { | ||
3306 | if (dev_priv->lvds_dither) { | ||
3307 | if (IS_IRONLAKE(dev)) | ||
3308 | pipeconf |= PIPE_ENABLE_DITHER; | ||
3309 | else | ||
3310 | lvds |= LVDS_ENABLE_DITHER; | ||
3311 | } else { | ||
3312 | if (IS_IRONLAKE(dev)) | ||
3313 | pipeconf &= ~PIPE_ENABLE_DITHER; | ||
3314 | else | ||
3315 | lvds &= ~LVDS_ENABLE_DITHER; | ||
3316 | } | ||
3317 | } | ||
3199 | I915_WRITE(lvds_reg, lvds); | 3318 | I915_WRITE(lvds_reg, lvds); |
3200 | I915_READ(lvds_reg); | 3319 | I915_READ(lvds_reg); |
3201 | } | 3320 | } |
@@ -3385,7 +3504,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3385 | 3504 | ||
3386 | /* we only need to pin inside GTT if cursor is non-phy */ | 3505 | /* we only need to pin inside GTT if cursor is non-phy */ |
3387 | mutex_lock(&dev->struct_mutex); | 3506 | mutex_lock(&dev->struct_mutex); |
3388 | if (!dev_priv->cursor_needs_physical) { | 3507 | if (!dev_priv->info->cursor_needs_physical) { |
3389 | ret = i915_gem_object_pin(bo, PAGE_SIZE); | 3508 | ret = i915_gem_object_pin(bo, PAGE_SIZE); |
3390 | if (ret) { | 3509 | if (ret) { |
3391 | DRM_ERROR("failed to pin cursor bo\n"); | 3510 | DRM_ERROR("failed to pin cursor bo\n"); |
@@ -3420,7 +3539,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, | |||
3420 | I915_WRITE(base, addr); | 3539 | I915_WRITE(base, addr); |
3421 | 3540 | ||
3422 | if (intel_crtc->cursor_bo) { | 3541 | if (intel_crtc->cursor_bo) { |
3423 | if (dev_priv->cursor_needs_physical) { | 3542 | if (dev_priv->info->cursor_needs_physical) { |
3424 | if (intel_crtc->cursor_bo != bo) | 3543 | if (intel_crtc->cursor_bo != bo) |
3425 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); | 3544 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
3426 | } else | 3545 | } else |
@@ -3779,125 +3898,6 @@ static void intel_gpu_idle_timer(unsigned long arg) | |||
3779 | queue_work(dev_priv->wq, &dev_priv->idle_work); | 3898 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
3780 | } | 3899 | } |
3781 | 3900 | ||
3782 | void intel_increase_renderclock(struct drm_device *dev, bool schedule) | ||
3783 | { | ||
3784 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3785 | |||
3786 | if (IS_IRONLAKE(dev)) | ||
3787 | return; | ||
3788 | |||
3789 | if (!dev_priv->render_reclock_avail) { | ||
3790 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
3791 | return; | ||
3792 | } | ||
3793 | |||
3794 | /* Restore render clock frequency to original value */ | ||
3795 | if (IS_G4X(dev) || IS_I9XX(dev)) | ||
3796 | pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); | ||
3797 | else if (IS_I85X(dev)) | ||
3798 | pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); | ||
3799 | DRM_DEBUG_DRIVER("increasing render clock frequency\n"); | ||
3800 | |||
3801 | /* Schedule downclock */ | ||
3802 | if (schedule) | ||
3803 | mod_timer(&dev_priv->idle_timer, jiffies + | ||
3804 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | ||
3805 | } | ||
3806 | |||
3807 | void intel_decrease_renderclock(struct drm_device *dev) | ||
3808 | { | ||
3809 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3810 | |||
3811 | if (IS_IRONLAKE(dev)) | ||
3812 | return; | ||
3813 | |||
3814 | if (!dev_priv->render_reclock_avail) { | ||
3815 | DRM_DEBUG_DRIVER("not reclocking render clock\n"); | ||
3816 | return; | ||
3817 | } | ||
3818 | |||
3819 | if (IS_G4X(dev)) { | ||
3820 | u16 gcfgc; | ||
3821 | |||
3822 | /* Adjust render clock... */ | ||
3823 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3824 | |||
3825 | /* Down to minimum... */ | ||
3826 | gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK; | ||
3827 | gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ; | ||
3828 | |||
3829 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3830 | } else if (IS_I965G(dev)) { | ||
3831 | u16 gcfgc; | ||
3832 | |||
3833 | /* Adjust render clock... */ | ||
3834 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3835 | |||
3836 | /* Down to minimum... */ | ||
3837 | gcfgc &= ~I965_GC_RENDER_CLOCK_MASK; | ||
3838 | gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ; | ||
3839 | |||
3840 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3841 | } else if (IS_I945G(dev) || IS_I945GM(dev)) { | ||
3842 | u16 gcfgc; | ||
3843 | |||
3844 | /* Adjust render clock... */ | ||
3845 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3846 | |||
3847 | /* Down to minimum... */ | ||
3848 | gcfgc &= ~I945_GC_RENDER_CLOCK_MASK; | ||
3849 | gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ; | ||
3850 | |||
3851 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3852 | } else if (IS_I915G(dev)) { | ||
3853 | u16 gcfgc; | ||
3854 | |||
3855 | /* Adjust render clock... */ | ||
3856 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3857 | |||
3858 | /* Down to minimum... */ | ||
3859 | gcfgc &= ~I915_GC_RENDER_CLOCK_MASK; | ||
3860 | gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ; | ||
3861 | |||
3862 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3863 | } else if (IS_I85X(dev)) { | ||
3864 | u16 hpllcc; | ||
3865 | |||
3866 | /* Adjust render clock... */ | ||
3867 | pci_read_config_word(dev->pdev, HPLLCC, &hpllcc); | ||
3868 | |||
3869 | /* Up to maximum... */ | ||
3870 | hpllcc &= ~GC_CLOCK_CONTROL_MASK; | ||
3871 | hpllcc |= GC_CLOCK_133_200; | ||
3872 | |||
3873 | pci_write_config_word(dev->pdev, HPLLCC, hpllcc); | ||
3874 | } | ||
3875 | DRM_DEBUG_DRIVER("decreasing render clock frequency\n"); | ||
3876 | } | ||
3877 | |||
3878 | /* Note that no increase function is needed for this - increase_renderclock() | ||
3879 | * will also rewrite these bits | ||
3880 | */ | ||
3881 | void intel_decrease_displayclock(struct drm_device *dev) | ||
3882 | { | ||
3883 | if (IS_IRONLAKE(dev)) | ||
3884 | return; | ||
3885 | |||
3886 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || | ||
3887 | IS_I915GM(dev)) { | ||
3888 | u16 gcfgc; | ||
3889 | |||
3890 | /* Adjust render clock... */ | ||
3891 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
3892 | |||
3893 | /* Down to minimum... */ | ||
3894 | gcfgc &= ~0xf0; | ||
3895 | gcfgc |= 0x80; | ||
3896 | |||
3897 | pci_write_config_word(dev->pdev, GCFGC, gcfgc); | ||
3898 | } | ||
3899 | } | ||
3900 | |||
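Every removed downclock path above is the same read-modify-write on a PCI config word: read GCFGC (or HPLLCC on 85x), clear that chipset's render-clock field, OR in the minimum setting, and write it back. A condensed sketch over a fake config space standing in for pci_{read,write}_config_word():

	#include <stdint.h>

	static uint16_t sk_pci_cfg[256];	/* stand-in for PCI config space */

	static void sk_set_render_clock(uint8_t reg, uint16_t mask, uint16_t val)
	{
		uint16_t w = sk_pci_cfg[reg];

		w &= ~mask;		/* clear the per-chipset clock field */
		w |= val;		/* select the target frequency */
		sk_pci_cfg[reg] = w;
	}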
3901 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ | 3901 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ |
3902 | 3902 | ||
3903 | static void intel_crtc_idle_timer(unsigned long arg) | 3903 | static void intel_crtc_idle_timer(unsigned long arg) |
@@ -4011,12 +4011,6 @@ static void intel_idle_update(struct work_struct *work) | |||
4011 | 4011 | ||
4012 | mutex_lock(&dev->struct_mutex); | 4012 | mutex_lock(&dev->struct_mutex); |
4013 | 4013 | ||
4014 | /* GPU isn't processing, downclock it. */ | ||
4015 | if (!dev_priv->busy) { | ||
4016 | intel_decrease_renderclock(dev); | ||
4017 | intel_decrease_displayclock(dev); | ||
4018 | } | ||
4019 | |||
4020 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4014 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
4021 | /* Skip inactive CRTCs */ | 4015 | /* Skip inactive CRTCs */ |
4022 | if (!crtc->fb) | 4016 | if (!crtc->fb) |
@@ -4050,13 +4044,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) | |||
4050 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4044 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4051 | return; | 4045 | return; |
4052 | 4046 | ||
4053 | if (!dev_priv->busy) { | 4047 | if (!dev_priv->busy) |
4054 | dev_priv->busy = true; | 4048 | dev_priv->busy = true; |
4055 | intel_increase_renderclock(dev, true); | 4049 | else |
4056 | } else { | ||
4057 | mod_timer(&dev_priv->idle_timer, jiffies + | 4050 | mod_timer(&dev_priv->idle_timer, jiffies + |
4058 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | 4051 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); |
4059 | } | ||
4060 | 4052 | ||
4061 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 4053 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
4062 | if (!crtc->fb) | 4054 | if (!crtc->fb) |
@@ -4089,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) | |||
4089 | struct intel_unpin_work { | 4081 | struct intel_unpin_work { |
4090 | struct work_struct work; | 4082 | struct work_struct work; |
4091 | struct drm_device *dev; | 4083 | struct drm_device *dev; |
4092 | struct drm_gem_object *obj; | 4084 | struct drm_gem_object *old_fb_obj; |
4085 | struct drm_gem_object *pending_flip_obj; | ||
4093 | struct drm_pending_vblank_event *event; | 4086 | struct drm_pending_vblank_event *event; |
4094 | int pending; | 4087 | int pending; |
4095 | }; | 4088 | }; |
@@ -4100,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
4100 | container_of(__work, struct intel_unpin_work, work); | 4093 | container_of(__work, struct intel_unpin_work, work); |
4101 | 4094 | ||
4102 | mutex_lock(&work->dev->struct_mutex); | 4095 | mutex_lock(&work->dev->struct_mutex); |
4103 | i915_gem_object_unpin(work->obj); | 4096 | i915_gem_object_unpin(work->old_fb_obj); |
4104 | drm_gem_object_unreference(work->obj); | 4097 | drm_gem_object_unreference(work->pending_flip_obj); |
4098 | drm_gem_object_unreference(work->old_fb_obj); | ||
4105 | mutex_unlock(&work->dev->struct_mutex); | 4099 | mutex_unlock(&work->dev->struct_mutex); |
4106 | kfree(work); | 4100 | kfree(work); |
4107 | } | 4101 | } |
@@ -4124,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4124 | spin_lock_irqsave(&dev->event_lock, flags); | 4118 | spin_lock_irqsave(&dev->event_lock, flags); |
4125 | work = intel_crtc->unpin_work; | 4119 | work = intel_crtc->unpin_work; |
4126 | if (work == NULL || !work->pending) { | 4120 | if (work == NULL || !work->pending) { |
4121 | if (work && !work->pending) { | ||
4122 | obj_priv = work->pending_flip_obj->driver_private; | ||
4123 | DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", | ||
4124 | obj_priv, | ||
4125 | atomic_read(&obj_priv->pending_flip)); | ||
4126 | } | ||
4127 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4127 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4128 | return; | 4128 | return; |
4129 | } | 4129 | } |
@@ -4144,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) | |||
4144 | 4144 | ||
4145 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4145 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4146 | 4146 | ||
4147 | obj_priv = work->obj->driver_private; | 4147 | obj_priv = work->pending_flip_obj->driver_private; |
4148 | if (atomic_dec_and_test(&obj_priv->pending_flip)) | 4148 | |
4149 | /* Initial scanout buffer will have a 0 pending flip count */ | ||
4150 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | ||
4151 | atomic_dec_and_test(&obj_priv->pending_flip)) | ||
4149 | DRM_WAKEUP(&dev_priv->pending_flip_queue); | 4152 | DRM_WAKEUP(&dev_priv->pending_flip_queue); |
4150 | schedule_work(&work->work); | 4153 | schedule_work(&work->work); |
4151 | } | 4154 | } |
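A sketch of the pending-flip accounting this hunk repairs: the count rises when a flip is queued and falls when the vblank handler retires it, but the initial scanout buffer never had a flip queued, so a count already at zero must also wake waiters. Illustrative only, with C11 atomics standing in for the kernel's atomic_t:

	#include <stdatomic.h>

	static atomic_int sk_pending_flip;

	static void sk_queue_flip(void)
	{
		atomic_fetch_add(&sk_pending_flip, 1);
	}

	/* returns nonzero when waiters on the flip queue should be woken */
	static int sk_finish_flip(void)
	{
		if (atomic_load(&sk_pending_flip) == 0)
			return 1;	/* initial scanout: nothing was queued */
		return atomic_fetch_sub(&sk_pending_flip, 1) == 1;
	}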
@@ -4158,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane) | |||
4158 | unsigned long flags; | 4161 | unsigned long flags; |
4159 | 4162 | ||
4160 | spin_lock_irqsave(&dev->event_lock, flags); | 4163 | spin_lock_irqsave(&dev->event_lock, flags); |
4161 | if (intel_crtc->unpin_work) | 4164 | if (intel_crtc->unpin_work) { |
4162 | intel_crtc->unpin_work->pending = 1; | 4165 | intel_crtc->unpin_work->pending = 1; |
4166 | } else { | ||
4167 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | ||
4168 | } | ||
4163 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4169 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4164 | } | 4170 | } |
4165 | 4171 | ||
@@ -4175,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4175 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4181 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4176 | struct intel_unpin_work *work; | 4182 | struct intel_unpin_work *work; |
4177 | unsigned long flags; | 4183 | unsigned long flags; |
4178 | int ret; | 4184 | int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC; |
4185 | int ret, pipesrc; | ||
4179 | RING_LOCALS; | 4186 | RING_LOCALS; |
4180 | 4187 | ||
4181 | work = kzalloc(sizeof *work, GFP_KERNEL); | 4188 | work = kzalloc(sizeof *work, GFP_KERNEL); |
@@ -4187,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4187 | work->event = event; | 4194 | work->event = event; |
4188 | work->dev = crtc->dev; | 4195 | work->dev = crtc->dev; |
4189 | intel_fb = to_intel_framebuffer(crtc->fb); | 4196 | intel_fb = to_intel_framebuffer(crtc->fb); |
4190 | work->obj = intel_fb->obj; | 4197 | work->old_fb_obj = intel_fb->obj; |
4191 | INIT_WORK(&work->work, intel_unpin_work_fn); | 4198 | INIT_WORK(&work->work, intel_unpin_work_fn); |
4192 | 4199 | ||
4193 | /* We borrow the event spin lock for protecting unpin_work */ | 4200 | /* We borrow the event spin lock for protecting unpin_work */ |
4194 | spin_lock_irqsave(&dev->event_lock, flags); | 4201 | spin_lock_irqsave(&dev->event_lock, flags); |
4195 | if (intel_crtc->unpin_work) { | 4202 | if (intel_crtc->unpin_work) { |
4203 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | ||
4196 | spin_unlock_irqrestore(&dev->event_lock, flags); | 4204 | spin_unlock_irqrestore(&dev->event_lock, flags); |
4197 | kfree(work); | 4205 | kfree(work); |
4198 | mutex_unlock(&dev->struct_mutex); | 4206 | mutex_unlock(&dev->struct_mutex); |
@@ -4206,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4206 | 4214 | ||
4207 | ret = intel_pin_and_fence_fb_obj(dev, obj); | 4215 | ret = intel_pin_and_fence_fb_obj(dev, obj); |
4208 | if (ret != 0) { | 4216 | if (ret != 0) { |
4217 | DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", | ||
4218 | obj->driver_private); | ||
4209 | kfree(work); | 4219 | kfree(work); |
4220 | intel_crtc->unpin_work = NULL; | ||
4210 | mutex_unlock(&dev->struct_mutex); | 4221 | mutex_unlock(&dev->struct_mutex); |
4211 | return ret; | 4222 | return ret; |
4212 | } | 4223 | } |
4213 | 4224 | ||
4214 | /* Reference the old fb object for the scheduled work. */ | 4225 | /* Reference the objects for the scheduled work. */ |
4215 | drm_gem_object_reference(work->obj); | 4226 | drm_gem_object_reference(work->old_fb_obj); |
4227 | drm_gem_object_reference(obj); | ||
4216 | 4228 | ||
4217 | crtc->fb = fb; | 4229 | crtc->fb = fb; |
4218 | i915_gem_object_flush_write_domain(obj); | 4230 | i915_gem_object_flush_write_domain(obj); |
4219 | drm_vblank_get(dev, intel_crtc->pipe); | 4231 | drm_vblank_get(dev, intel_crtc->pipe); |
4220 | obj_priv = obj->driver_private; | 4232 | obj_priv = obj->driver_private; |
4221 | atomic_inc(&obj_priv->pending_flip); | 4233 | atomic_inc(&obj_priv->pending_flip); |
4234 | work->pending_flip_obj = obj; | ||
4222 | 4235 | ||
4223 | BEGIN_LP_RING(4); | 4236 | BEGIN_LP_RING(4); |
4224 | OUT_RING(MI_DISPLAY_FLIP | | 4237 | OUT_RING(MI_DISPLAY_FLIP | |
@@ -4226,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
4226 | OUT_RING(fb->pitch); | 4239 | OUT_RING(fb->pitch); |
4227 | if (IS_I965G(dev)) { | 4240 | if (IS_I965G(dev)) { |
4228 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); | 4241 | OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); |
4229 | OUT_RING((fb->width << 16) | fb->height); | 4242 | pipesrc = I915_READ(pipesrc_reg); |
4243 | OUT_RING(pipesrc & 0x0fff0fff); | ||
4230 | } else { | 4244 | } else { |
4231 | OUT_RING(obj_priv->gtt_offset); | 4245 | OUT_RING(obj_priv->gtt_offset); |
4232 | OUT_RING(MI_NOOP); | 4246 | OUT_RING(MI_NOOP); |
@@ -4400,29 +4414,43 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
4400 | bool found = false; | 4414 | bool found = false; |
4401 | 4415 | ||
4402 | if (I915_READ(SDVOB) & SDVO_DETECTED) { | 4416 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
4417 | DRM_DEBUG_KMS("probing SDVOB\n"); | ||
4403 | found = intel_sdvo_init(dev, SDVOB); | 4418 | found = intel_sdvo_init(dev, SDVOB); |
4404 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) | 4419 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
4420 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | ||
4405 | intel_hdmi_init(dev, SDVOB); | 4421 | intel_hdmi_init(dev, SDVOB); |
4422 | } | ||
4406 | 4423 | ||
4407 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) | 4424 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
4425 | DRM_DEBUG_KMS("probing DP_B\n"); | ||
4408 | intel_dp_init(dev, DP_B); | 4426 | intel_dp_init(dev, DP_B); |
4427 | } | ||
4409 | } | 4428 | } |
4410 | 4429 | ||
4411 | /* Before G4X, SDVOC doesn't have its own detect register */ | 4430 | ||
4412 | 4431 | ||
4413 | if (I915_READ(SDVOB) & SDVO_DETECTED) | 4432 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
4433 | DRM_DEBUG_KMS("probing SDVOC\n"); | ||
4414 | found = intel_sdvo_init(dev, SDVOC); | 4434 | found = intel_sdvo_init(dev, SDVOC); |
4435 | } | ||
4415 | 4436 | ||
4416 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { | 4437 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { |
4417 | 4438 | ||
4418 | if (SUPPORTS_INTEGRATED_HDMI(dev)) | 4439 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
4440 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | ||
4419 | intel_hdmi_init(dev, SDVOC); | 4441 | intel_hdmi_init(dev, SDVOC); |
4420 | if (SUPPORTS_INTEGRATED_DP(dev)) | 4442 | } |
4443 | if (SUPPORTS_INTEGRATED_DP(dev)) { | ||
4444 | DRM_DEBUG_KMS("probing DP_C\n"); | ||
4421 | intel_dp_init(dev, DP_C); | 4445 | intel_dp_init(dev, DP_C); |
4446 | } | ||
4422 | } | 4447 | } |
4423 | 4448 | ||
4424 | if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) | 4449 | if (SUPPORTS_INTEGRATED_DP(dev) && |
4450 | (I915_READ(DP_D) & DP_DETECTED)) { | ||
4451 | DRM_DEBUG_KMS("probing DP_D\n"); | ||
4425 | intel_dp_init(dev, DP_D); | 4452 | intel_dp_init(dev, DP_D); |
4453 | } | ||
4426 | } else if (IS_I8XX(dev)) | 4454 | } else if (IS_I8XX(dev)) |
4427 | intel_dvo_init(dev); | 4455 | intel_dvo_init(dev); |
4428 | 4456 | ||
@@ -4527,6 +4555,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { | |||
4527 | .fb_changed = intelfb_probe, | 4555 | .fb_changed = intelfb_probe, |
4528 | }; | 4556 | }; |
4529 | 4557 | ||
4558 | static struct drm_gem_object * | ||
4559 | intel_alloc_power_context(struct drm_device *dev) | ||
4560 | { | ||
4561 | struct drm_gem_object *pwrctx; | ||
4562 | int ret; | ||
4563 | |||
4564 | pwrctx = drm_gem_object_alloc(dev, 4096); | ||
4565 | if (!pwrctx) { | ||
4566 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); | ||
4567 | return NULL; | ||
4568 | } | ||
4569 | |||
4570 | mutex_lock(&dev->struct_mutex); | ||
4571 | ret = i915_gem_object_pin(pwrctx, 4096); | ||
4572 | if (ret) { | ||
4573 | DRM_ERROR("failed to pin power context: %d\n", ret); | ||
4574 | goto err_unref; | ||
4575 | } | ||
4576 | |||
4577 | ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
4578 | if (ret) { | ||
4579 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | ||
4580 | goto err_unpin; | ||
4581 | } | ||
4582 | mutex_unlock(&dev->struct_mutex); | ||
4583 | |||
4584 | return pwrctx; | ||
4585 | |||
4586 | err_unpin: | ||
4587 | i915_gem_object_unpin(pwrctx); | ||
4588 | err_unref: | ||
4589 | drm_gem_object_unreference(pwrctx); | ||
4590 | mutex_unlock(&dev->struct_mutex); | ||
4591 | return NULL; | ||
4592 | } | ||
4593 | |||
4530 | void intel_init_clock_gating(struct drm_device *dev) | 4594 | void intel_init_clock_gating(struct drm_device *dev) |
4531 | { | 4595 | { |
4532 | struct drm_i915_private *dev_priv = dev->dev_private; | 4596 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4579,42 +4643,27 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4579 | * GPU can automatically power down the render unit if given a page | 4643 | * GPU can automatically power down the render unit if given a page |
4580 | * to save state. | 4644 | * to save state. |
4581 | */ | 4645 | */ |
4582 | if (I915_HAS_RC6(dev)) { | 4646 | if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { |
4583 | struct drm_gem_object *pwrctx; | 4647 | struct drm_i915_gem_object *obj_priv = NULL; |
4584 | struct drm_i915_gem_object *obj_priv; | ||
4585 | int ret; | ||
4586 | 4648 | ||
4587 | if (dev_priv->pwrctx) { | 4649 | if (dev_priv->pwrctx) { |
4588 | obj_priv = dev_priv->pwrctx->driver_private; | 4650 | obj_priv = dev_priv->pwrctx->driver_private; |
4589 | } else { | 4651 | } else { |
4590 | pwrctx = drm_gem_object_alloc(dev, 4096); | 4652 | struct drm_gem_object *pwrctx; |
4591 | if (!pwrctx) { | ||
4592 | DRM_DEBUG("failed to alloc power context, " | ||
4593 | "RC6 disabled\n"); | ||
4594 | goto out; | ||
4595 | } | ||
4596 | 4653 | ||
4597 | ret = i915_gem_object_pin(pwrctx, 4096); | 4654 | pwrctx = intel_alloc_power_context(dev); |
4598 | if (ret) { | 4655 | if (pwrctx) { |
4599 | DRM_ERROR("failed to pin power context: %d\n", | 4656 | dev_priv->pwrctx = pwrctx; |
4600 | ret); | 4657 | obj_priv = pwrctx->driver_private; |
4601 | drm_gem_object_unreference(pwrctx); | ||
4602 | goto out; | ||
4603 | } | 4658 | } |
4604 | |||
4605 | i915_gem_object_set_to_gtt_domain(pwrctx, 1); | ||
4606 | |||
4607 | dev_priv->pwrctx = pwrctx; | ||
4608 | obj_priv = pwrctx->driver_private; | ||
4609 | } | 4659 | } |
4610 | 4660 | ||
4611 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); | 4661 | if (obj_priv) { |
4612 | I915_WRITE(MCHBAR_RENDER_STANDBY, | 4662 | I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); |
4613 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | 4663 | I915_WRITE(MCHBAR_RENDER_STANDBY, |
4664 | I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); | ||
4665 | } | ||
4614 | } | 4666 | } |
4615 | |||
4616 | out: | ||
4617 | return; | ||
4618 | } | 4667 | } |
4619 | 4668 | ||
4620 | /* Set up chip specific display functions */ | 4669 | /* Set up chip specific display functions */ |
@@ -4770,7 +4819,6 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
4770 | del_timer_sync(&intel_crtc->idle_timer); | 4819 | del_timer_sync(&intel_crtc->idle_timer); |
4771 | } | 4820 | } |
4772 | 4821 | ||
4773 | intel_increase_renderclock(dev, false); | ||
4774 | del_timer_sync(&dev_priv->idle_timer); | 4822 | del_timer_sync(&dev_priv->idle_timer); |
4775 | 4823 | ||
4776 | if (dev_priv->display.disable_fbc) | 4824 | if (dev_priv->display.disable_fbc) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 4e7aa8b7b938..439506cefc14 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -125,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw) | |||
125 | 125 | ||
126 | /* I think this is a fiction */ | 126 | /* I think this is a fiction */ |
127 | static int | 127 | static int |
128 | intel_dp_link_required(int pixel_clock) | 128 | intel_dp_link_required(struct drm_device *dev, |
129 | struct intel_output *intel_output, int pixel_clock) | ||
129 | { | 130 | { |
130 | return pixel_clock * 3; | 131 | struct drm_i915_private *dev_priv = dev->dev_private; |
132 | |||
133 | if (IS_eDP(intel_output)) | ||
134 | return (pixel_clock * dev_priv->edp_bpp) / 8; | ||
135 | else | ||
136 | return pixel_clock * 3; | ||
131 | } | 137 | } |
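A worked check of the arithmetic above, in the driver's kHz-scaled units: an eDP panel at 18 bpp needs pixel_clock * 18 / 8 of link bandwidth, while the non-eDP path keeps the historical pixel_clock * 3; a mode fits when that requirement stays under link_clock * lanes, as the mode_valid hunk below tests. An illustrative helper combining the two:

	static int sk_dp_mode_fits(int pixel_clock, int bpp, int is_edp,
				   int link_clock, int lanes)
	{
		int required = is_edp ? pixel_clock * bpp / 8 : pixel_clock * 3;

		return required <= link_clock * lanes;
	}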
132 | 138 | ||
133 | static int | 139 | static int |
@@ -138,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
138 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); | 144 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); |
139 | int max_lanes = intel_dp_max_lane_count(intel_output); | 145 | int max_lanes = intel_dp_max_lane_count(intel_output); |
140 | 146 | ||
141 | if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes) | 147 | if (intel_dp_link_required(connector->dev, intel_output, mode->clock) |
148 | > max_link_clock * max_lanes) | ||
142 | return MODE_CLOCK_HIGH; | 149 | return MODE_CLOCK_HIGH; |
143 | 150 | ||
144 | if (mode->clock < 10000) | 151 | if (mode->clock < 10000) |
@@ -492,7 +499,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
492 | for (clock = 0; clock <= max_clock; clock++) { | 499 | for (clock = 0; clock <= max_clock; clock++) { |
493 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; | 500 | int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; |
494 | 501 | ||
495 | if (intel_dp_link_required(mode->clock) <= link_avail) { | 502 | if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) |
503 | <= link_avail) { | ||
496 | dp_priv->link_bw = bws[clock]; | 504 | dp_priv->link_bw = bws[clock]; |
497 | dp_priv->lane_count = lane_count; | 505 | dp_priv->lane_count = lane_count; |
498 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); | 506 | adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); |
@@ -1289,53 +1297,7 @@ intel_dp_hot_plug(struct intel_output *intel_output) | |||
1289 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) | 1297 | if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) |
1290 | intel_dp_check_link_status(intel_output); | 1298 | intel_dp_check_link_status(intel_output); |
1291 | } | 1299 | } |
1292 | /* | 1300 | |
1293 | * Enumerate the child dev array parsed from VBT to check whether | ||
1294 | * the given DP is present. | ||
1295 | * If it is present, return 1. | ||
1296 | * If it is not present, return 0. | ||
1297 | * If no child dev is parsed from VBT, it is assumed that the given | ||
1298 | * DP is present. | ||
1299 | */ | ||
1300 | static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg) | ||
1301 | { | ||
1302 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1303 | struct child_device_config *p_child; | ||
1304 | int i, dp_port, ret; | ||
1305 | |||
1306 | if (!dev_priv->child_dev_num) | ||
1307 | return 1; | ||
1308 | |||
1309 | dp_port = 0; | ||
1310 | if (dp_reg == DP_B || dp_reg == PCH_DP_B) | ||
1311 | dp_port = PORT_IDPB; | ||
1312 | else if (dp_reg == DP_C || dp_reg == PCH_DP_C) | ||
1313 | dp_port = PORT_IDPC; | ||
1314 | else if (dp_reg == DP_D || dp_reg == PCH_DP_D) | ||
1315 | dp_port = PORT_IDPD; | ||
1316 | |||
1317 | ret = 0; | ||
1318 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
1319 | p_child = dev_priv->child_dev + i; | ||
1320 | /* | ||
1321 | * If the device type is not DP, continue. | ||
1322 | */ | ||
1323 | if (p_child->device_type != DEVICE_TYPE_DP && | ||
1324 | p_child->device_type != DEVICE_TYPE_eDP) | ||
1325 | continue; | ||
1326 | /* Find the eDP port */ | ||
1327 | if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) { | ||
1328 | ret = 1; | ||
1329 | break; | ||
1330 | } | ||
1331 | /* Find the DP port */ | ||
1332 | if (p_child->dvo_port == dp_port) { | ||
1333 | ret = 1; | ||
1334 | break; | ||
1335 | } | ||
1336 | } | ||
1337 | return ret; | ||
1338 | } | ||
1339 | void | 1301 | void |
1340 | intel_dp_init(struct drm_device *dev, int output_reg) | 1302 | intel_dp_init(struct drm_device *dev, int output_reg) |
1341 | { | 1303 | { |
@@ -1345,10 +1307,6 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1345 | struct intel_dp_priv *dp_priv; | 1307 | struct intel_dp_priv *dp_priv; |
1346 | const char *name = NULL; | 1308 | const char *name = NULL; |
1347 | 1309 | ||
1348 | if (!dp_is_present_in_vbt(dev, output_reg)) { | ||
1349 | DRM_DEBUG_KMS("DP is not present. Ignore it\n"); | ||
1350 | return; | ||
1351 | } | ||
1352 | intel_output = kcalloc(sizeof(struct intel_output) + | 1310 | intel_output = kcalloc(sizeof(struct intel_output) + |
1353 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); | 1311 | sizeof(struct intel_dp_priv), 1, GFP_KERNEL); |
1354 | if (!intel_output) | 1312 | if (!intel_output) |
@@ -1373,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1373 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1331 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1374 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1332 | intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1375 | 1333 | ||
1376 | if (IS_eDP(intel_output)) { | 1334 | if (IS_eDP(intel_output)) |
1377 | intel_output->crtc_mask = (1 << 1); | ||
1378 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1335 | intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1379 | } else | 1336 | |
1380 | intel_output->crtc_mask = (1 << 0) | (1 << 1); | 1337 | intel_output->crtc_mask = (1 << 0) | (1 << 1); |
1381 | connector->interlace_allowed = true; | 1338 | connector->interlace_allowed = true; |
1382 | connector->doublescan_allowed = 0; | 1339 | connector->doublescan_allowed = 0; |
1383 | 1340 | ||
@@ -1402,14 +1359,20 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1402 | break; | 1359 | break; |
1403 | case DP_B: | 1360 | case DP_B: |
1404 | case PCH_DP_B: | 1361 | case PCH_DP_B: |
1362 | dev_priv->hotplug_supported_mask |= | ||
1363 | HDMIB_HOTPLUG_INT_STATUS; | ||
1405 | name = "DPDDC-B"; | 1364 | name = "DPDDC-B"; |
1406 | break; | 1365 | break; |
1407 | case DP_C: | 1366 | case DP_C: |
1408 | case PCH_DP_C: | 1367 | case PCH_DP_C: |
1368 | dev_priv->hotplug_supported_mask |= | ||
1369 | HDMIC_HOTPLUG_INT_STATUS; | ||
1409 | name = "DPDDC-C"; | 1370 | name = "DPDDC-C"; |
1410 | break; | 1371 | break; |
1411 | case DP_D: | 1372 | case DP_D: |
1412 | case PCH_DP_D: | 1373 | case PCH_DP_D: |
1374 | dev_priv->hotplug_supported_mask |= | ||
1375 | HDMID_HOTPLUG_INT_STATUS; | ||
1413 | name = "DPDDC-D"; | 1376 | name = "DPDDC-D"; |
1414 | break; | 1377 | break; |
1415 | } | 1378 | } |
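The hotplug hunks above (and the matching HDMI and SDVO ones below) all follow one pattern: as each output is registered, its hotplug status bit is ORed into dev_priv->hotplug_supported_mask, so interrupt setup later unmasks only ports that actually exist. A sketch with made-up bit values rather than the real register encoding:

    #include <stdio.h>

    /* Illustrative stand-ins for the *_HOTPLUG_INT_STATUS bits. */
    #define HDMIB_HOTPLUG (1u << 0)
    #define HDMIC_HOTPLUG (1u << 1)
    #define HDMID_HOTPLUG (1u << 2)

    int main(void)
    {
        unsigned int hotplug_supported_mask = 0;

        /* Each output's bit is accumulated as it is initialized. */
        hotplug_supported_mask |= HDMIB_HOTPLUG;   /* DP-B / HDMI-B */
        hotplug_supported_mask |= HDMIC_HOTPLUG;   /* DP-C / HDMI-C */

        printf("mask: 0x%x\n", hotplug_supported_mask);
        printf("HDMID enabled? %s\n",
               (hotplug_supported_mask & HDMID_HOTPLUG) ? "yes" : "no");
        return 0;
    }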
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 371d753e362b..aaabbcbe5905 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, | |||
148 | 148 | ||
149 | mutex_lock(&dev->struct_mutex); | 149 | mutex_lock(&dev->struct_mutex); |
150 | 150 | ||
151 | ret = i915_gem_object_pin(fbo, PAGE_SIZE); | 151 | ret = i915_gem_object_pin(fbo, 64*1024); |
152 | if (ret) { | 152 | if (ret) { |
153 | DRM_ERROR("failed to pin fb: %d\n", ret); | 153 | DRM_ERROR("failed to pin fb: %d\n", ret); |
154 | goto out_unref; | 154 | goto out_unref; |
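The fbcon change raises the framebuffer's GTT pin alignment from one 4 KiB page to 64 KiB, presumably to satisfy the stricter alignment display surfaces need. Rounding an offset up to a power-of-two boundary is one line of bit arithmetic, in the spirit of the kernel's ALIGN():

    #include <stdio.h>

    /* Round x up to a power-of-two boundary a. */
    static unsigned long align_up(unsigned long x, unsigned long a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main(void)
    {
        unsigned long offset = 0x2345;

        printf("page-aligned:  0x%lx\n", align_up(offset, 4096));
        printf("64KiB-aligned: 0x%lx\n", align_up(offset, 64 * 1024));
        return 0;
    }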
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f04dbbe7d400..0e268deed761 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -225,52 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | |||
225 | .destroy = intel_hdmi_enc_destroy, | 225 | .destroy = intel_hdmi_enc_destroy, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | /* | ||
229 | * Enumerate the child dev array parsed from VBT to check whether | ||
230 | * the given HDMI is present. | ||
231 | * If it is present, return 1. | ||
232 | * If it is not present, return 0. | ||
233 | * If no child dev is parsed from VBT, it is assumed that the given | ||
234 | * HDMI is present. | ||
235 | */ | ||
236 | static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg) | ||
237 | { | ||
238 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
239 | struct child_device_config *p_child; | ||
240 | int i, hdmi_port, ret; | ||
241 | |||
242 | if (!dev_priv->child_dev_num) | ||
243 | return 1; | ||
244 | |||
245 | if (hdmi_reg == SDVOB) | ||
246 | hdmi_port = DVO_B; | ||
247 | else if (hdmi_reg == SDVOC) | ||
248 | hdmi_port = DVO_C; | ||
249 | else if (hdmi_reg == HDMIB) | ||
250 | hdmi_port = DVO_B; | ||
251 | else if (hdmi_reg == HDMIC) | ||
252 | hdmi_port = DVO_C; | ||
253 | else if (hdmi_reg == HDMID) | ||
254 | hdmi_port = DVO_D; | ||
255 | else | ||
256 | return 0; | ||
257 | |||
258 | ret = 0; | ||
259 | for (i = 0; i < dev_priv->child_dev_num; i++) { | ||
260 | p_child = dev_priv->child_dev + i; | ||
261 | /* | ||
262 | * If the device type is not HDMI, continue. | ||
263 | */ | ||
264 | if (p_child->device_type != DEVICE_TYPE_HDMI) | ||
265 | continue; | ||
266 | /* Find the HDMI port */ | ||
267 | if (p_child->dvo_port == hdmi_port) { | ||
268 | ret = 1; | ||
269 | break; | ||
270 | } | ||
271 | } | ||
272 | return ret; | ||
273 | } | ||
274 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | 228 | void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) |
275 | { | 229 | { |
276 | struct drm_i915_private *dev_priv = dev->dev_private; | 230 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -278,10 +232,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
278 | struct intel_output *intel_output; | 232 | struct intel_output *intel_output; |
279 | struct intel_hdmi_priv *hdmi_priv; | 233 | struct intel_hdmi_priv *hdmi_priv; |
280 | 234 | ||
281 | if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) { | ||
282 | DRM_DEBUG_KMS("HDMI is not present. Ignoring it\n"); | ||
283 | return; | ||
284 | } | ||
285 | intel_output = kcalloc(sizeof(struct intel_output) + | 235 | intel_output = kcalloc(sizeof(struct intel_output) + |
286 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); | 236 | sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); |
287 | if (!intel_output) | 237 | if (!intel_output) |
@@ -303,21 +253,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
303 | if (sdvox_reg == SDVOB) { | 253 | if (sdvox_reg == SDVOB) { |
304 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); | 254 | intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); |
305 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); | 255 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); |
256 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
306 | } else if (sdvox_reg == SDVOC) { | 257 | } else if (sdvox_reg == SDVOC) { |
307 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); | 258 | intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); |
308 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); | 259 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); |
260 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
309 | } else if (sdvox_reg == HDMIB) { | 261 | } else if (sdvox_reg == HDMIB) { |
310 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); | 262 | intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); |
311 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, | 263 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, |
312 | "HDMIB"); | 264 | "HDMIB"); |
265 | dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; | ||
313 | } else if (sdvox_reg == HDMIC) { | 266 | } else if (sdvox_reg == HDMIC) { |
314 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); | 267 | intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); |
315 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, | 268 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, |
316 | "HDMIC"); | 269 | "HDMIC"); |
270 | dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; | ||
317 | } else if (sdvox_reg == HDMID) { | 271 | } else if (sdvox_reg == HDMID) { |
318 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); | 272 | intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); |
319 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, | 273 | intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, |
320 | "HDMID"); | 274 | "HDMID"); |
275 | dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; | ||
321 | } | 276 | } |
322 | if (!intel_output->ddc_bus) | 277 | if (!intel_output->ddc_bus) |
323 | goto err_connector; | 278 | goto err_connector; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 3118ce274e67..c2e8a45780d5 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -602,12 +602,47 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, | |||
602 | /* Some lid devices report incorrect lid status, assume they're connected */ | 602 | /* Some lid devices report incorrect lid status, assume they're connected */ |
603 | static const struct dmi_system_id bad_lid_status[] = { | 603 | static const struct dmi_system_id bad_lid_status[] = { |
604 | { | 604 | { |
605 | .ident = "Compaq nx9020", | ||
606 | .matches = { | ||
607 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
608 | DMI_MATCH(DMI_BOARD_NAME, "3084"), | ||
609 | }, | ||
610 | }, | ||
611 | { | ||
612 | .ident = "Samsung SX20S", | ||
613 | .matches = { | ||
614 | DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"), | ||
615 | DMI_MATCH(DMI_BOARD_NAME, "SX20S"), | ||
616 | }, | ||
617 | }, | ||
618 | { | ||
605 | .ident = "Aspire One", | 619 | .ident = "Aspire One", |
606 | .matches = { | 620 | .matches = { |
607 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 621 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
608 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), | 622 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), |
609 | }, | 623 | }, |
610 | }, | 624 | }, |
625 | { | ||
626 | .ident = "Aspire 1810T", | ||
627 | .matches = { | ||
628 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
629 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"), | ||
630 | }, | ||
631 | }, | ||
632 | { | ||
633 | .ident = "PC-81005", | ||
634 | .matches = { | ||
635 | DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), | ||
636 | DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), | ||
637 | }, | ||
638 | }, | ||
639 | { | ||
640 | .ident = "Clevo M5x0N", | ||
641 | .matches = { | ||
642 | DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), | ||
643 | DMI_MATCH(DMI_BOARD_NAME, "M5x0N"), | ||
644 | }, | ||
645 | }, | ||
611 | { } | 646 | { } |
612 | }; | 647 | }; |
613 | 648 | ||
@@ -622,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect | |||
622 | { | 657 | { |
623 | enum drm_connector_status status = connector_status_connected; | 658 | enum drm_connector_status status = connector_status_connected; |
624 | 659 | ||
625 | if (!acpi_lid_open() && !dmi_check_system(bad_lid_status)) | 660 | if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) |
626 | status = connector_status_disconnected; | 661 | status = connector_status_disconnected; |
627 | 662 | ||
628 | return status; | 663 | return status; |
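Note the swapped operands in detect: the DMI quirk lookup now short-circuits the expression, so acpi_lid_open() is never consulted on machines known to misreport lid state. A userspace sketch of the same short-circuit with a made-up quirk list:

    #include <stdio.h>
    #include <string.h>

    /* Made-up stand-in for the bad_lid_status DMI table. */
    static const char *bad_lid_boards[] = { "3084", "SX20S", "M5x0N", NULL };

    static int board_has_bad_lid(const char *board)
    {
        for (int i = 0; bad_lid_boards[i]; i++)
            if (!strcmp(board, bad_lid_boards[i]))
                return 1;
        return 0;
    }

    /* Pretend ACPI reports the lid shut. */
    static int acpi_lid_open_stub(void) { return 0; }

    int main(void)
    {
        const char *board = "SX20S";
        const char *status = "connected";

        /* Quirk check first: the lid reading is ignored entirely on
         * boards that misreport it. */
        if (!board_has_bad_lid(board) && !acpi_lid_open_stub())
            status = "disconnected";

        printf("LVDS: %s\n", status);
        return 0;
    }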
@@ -679,7 +714,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, | |||
679 | struct drm_i915_private *dev_priv = | 714 | struct drm_i915_private *dev_priv = |
680 | container_of(nb, struct drm_i915_private, lid_notifier); | 715 | container_of(nb, struct drm_i915_private, lid_notifier); |
681 | struct drm_device *dev = dev_priv->dev; | 716 | struct drm_device *dev = dev_priv->dev; |
717 | struct drm_connector *connector = dev_priv->int_lvds_connector; | ||
682 | 718 | ||
719 | /* | ||
720 | * Check and update the status of the LVDS connector after | ||
721 | * receiving the lid notification event. | ||
722 | */ | ||
723 | if (connector) | ||
724 | connector->status = connector->funcs->detect(connector); | ||
683 | if (!acpi_lid_open()) { | 725 | if (!acpi_lid_open()) { |
684 | dev_priv->modeset_on_lid = 1; | 726 | dev_priv->modeset_on_lid = 1; |
685 | return NOTIFY_OK; | 727 | return NOTIFY_OK; |
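The lid notifier is handed only the embedded notifier_block and recovers the owning drm_i915_private with container_of() before refreshing the connector status. The same pointer arithmetic in a standalone sketch:

    #include <stdio.h>
    #include <stddef.h>

    /* Userspace rendition of the kernel's container_of(). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct notifier_block { int priority; };

    struct dev_priv {
        int modeset_on_lid;
        struct notifier_block lid_notifier;   /* embedded member */
    };

    static int lid_notify(struct notifier_block *nb)
    {
        /* Walk back from the member to the containing structure. */
        struct dev_priv *priv =
            container_of(nb, struct dev_priv, lid_notifier);
        priv->modeset_on_lid = 1;
        return 0;
    }

    int main(void)
    {
        struct dev_priv priv = { 0, { 0 } };
        lid_notify(&priv.lid_notifier);
        printf("modeset_on_lid = %d\n", priv.modeset_on_lid);
        return 0;
    }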
@@ -854,65 +896,6 @@ static const struct dmi_system_id intel_no_lvds[] = { | |||
854 | { } /* terminating entry */ | 896 | { } /* terminating entry */ |
855 | }; | 897 | }; |
856 | 898 | ||
857 | #ifdef CONFIG_ACPI | ||
858 | /* | ||
859 | * check_lid_device -- check whether @handle is an ACPI LID device. | ||
860 | * @handle: ACPI device handle | ||
861 | * @level : depth in the ACPI namespace tree | ||
862 | * @context: the number of LID device when we find the device | ||
863 | * @rv: a return value to fill if desired (Not use) | ||
864 | */ | ||
865 | static acpi_status | ||
866 | check_lid_device(acpi_handle handle, u32 level, void *context, | ||
867 | void **return_value) | ||
868 | { | ||
869 | struct acpi_device *acpi_dev; | ||
870 | int *lid_present = context; | ||
871 | |||
872 | acpi_dev = NULL; | ||
873 | /* Get the acpi device for device handle */ | ||
874 | if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { | ||
875 | /* If there is no ACPI device for handle, return */ | ||
876 | return AE_OK; | ||
877 | } | ||
878 | |||
879 | if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) | ||
880 | *lid_present = 1; | ||
881 | |||
882 | return AE_OK; | ||
883 | } | ||
884 | |||
885 | /** | ||
886 | * check whether there exists the ACPI LID device by enumerating the ACPI | ||
887 | * device tree. | ||
888 | */ | ||
889 | static int intel_lid_present(void) | ||
890 | { | ||
891 | int lid_present = 0; | ||
892 | |||
893 | if (acpi_disabled) { | ||
894 | /* If ACPI is disabled, there is no ACPI device tree to | ||
895 | * check, so assume the LID device would have been present. | ||
896 | */ | ||
897 | return 1; | ||
898 | } | ||
899 | |||
900 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, | ||
901 | ACPI_UINT32_MAX, | ||
902 | check_lid_device, NULL, &lid_present, NULL); | ||
903 | |||
904 | return lid_present; | ||
905 | } | ||
906 | #else | ||
907 | static int intel_lid_present(void) | ||
908 | { | ||
909 | /* In the absence of ACPI built in, assume that the LID device would | ||
910 | * have been present. | ||
911 | */ | ||
912 | return 1; | ||
913 | } | ||
914 | #endif | ||
915 | |||
916 | /** | 899 | /** |
917 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID | 900 | * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID |
918 | * @dev: drm device | 901 | * @dev: drm device |
@@ -957,7 +940,8 @@ static void intel_find_lvds_downclock(struct drm_device *dev, | |||
957 | } | 940 | } |
958 | } | 941 | } |
959 | mutex_unlock(&dev->mode_config.mutex); | 942 | mutex_unlock(&dev->mode_config.mutex); |
960 | if (temp_downclock < panel_fixed_mode->clock) { | 943 | if (temp_downclock < panel_fixed_mode->clock && |
944 | i915_lvds_downclock) { | ||
961 | /* We found the downclock for LVDS. */ | 945 | /* We found the downclock for LVDS. */ |
962 | dev_priv->lvds_downclock_avail = 1; | 946 | dev_priv->lvds_downclock_avail = 1; |
963 | dev_priv->lvds_downclock = temp_downclock; | 947 | dev_priv->lvds_downclock = temp_downclock; |
@@ -1031,12 +1015,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
1031 | if (dmi_check_system(intel_no_lvds)) | 1015 | if (dmi_check_system(intel_no_lvds)) |
1032 | return; | 1016 | return; |
1033 | 1017 | ||
1034 | /* | 1018 | if (!lvds_is_present_in_vbt(dev)) { |
1035 | * Assume LVDS is present if there's an ACPI lid device or if the | 1019 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
1036 | * device is present in the VBT. | ||
1037 | */ | ||
1038 | if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) { | ||
1039 | DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n"); | ||
1040 | return; | 1020 | return; |
1041 | } | 1021 | } |
1042 | 1022 | ||
@@ -1180,6 +1160,8 @@ out: | |||
1180 | DRM_DEBUG_KMS("lid notifier registration failed\n"); | 1160 | DRM_DEBUG_KMS("lid notifier registration failed\n"); |
1181 | dev_priv->lid_notifier.notifier_call = NULL; | 1161 | dev_priv->lid_notifier.notifier_call = NULL; |
1182 | } | 1162 | } |
1163 | /* keep the LVDS connector */ | ||
1164 | dev_priv->int_lvds_connector = connector; | ||
1183 | drm_sysfs_connector_add(connector); | 1165 | drm_sysfs_connector_add(connector); |
1184 | return; | 1166 | return; |
1185 | 1167 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 24a3dc99716c..82678d30ab06 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -462,14 +462,63 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) | |||
462 | } | 462 | } |
463 | 463 | ||
464 | /** | 464 | /** |
465 | * Don't check status code from this as it switches the bus back to the | 465 | * Try to read the response after issuing the DDC switch command. Note |
466 | * SDVO chips which defeats the purpose of doing a bus switch in the first | 466 | * that the command must be issued and its response read within a single |
467 | * place. | 467 | * I2C transaction; otherwise, when another I2C transaction starts after |
468 | * the DDC bus switch, the bus switches back to the internal SDVO | ||
469 | * register. | ||
468 | */ | 470 | */ |
469 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, | 471 | static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, |
470 | u8 target) | 472 | u8 target) |
471 | { | 473 | { |
472 | intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, &target, 1); | 474 | struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; |
475 | u8 out_buf[2], cmd_buf[2], ret_value[2], ret; | ||
476 | struct i2c_msg msgs[] = { | ||
477 | { | ||
478 | .addr = sdvo_priv->slave_addr >> 1, | ||
479 | .flags = 0, | ||
480 | .len = 2, | ||
481 | .buf = out_buf, | ||
482 | }, | ||
483 | /* the following two are to read the response */ | ||
484 | { | ||
485 | .addr = sdvo_priv->slave_addr >> 1, | ||
486 | .flags = 0, | ||
487 | .len = 1, | ||
488 | .buf = cmd_buf, | ||
489 | }, | ||
490 | { | ||
491 | .addr = sdvo_priv->slave_addr >> 1, | ||
492 | .flags = I2C_M_RD, | ||
493 | .len = 1, | ||
494 | .buf = ret_value, | ||
495 | }, | ||
496 | }; | ||
497 | |||
498 | intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, | ||
499 | &target, 1); | ||
500 | /* write the DDC switch command argument */ | ||
501 | intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); | ||
502 | |||
503 | out_buf[0] = SDVO_I2C_OPCODE; | ||
504 | out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; | ||
505 | cmd_buf[0] = SDVO_I2C_CMD_STATUS; | ||
506 | cmd_buf[1] = 0; | ||
507 | ret_value[0] = 0; | ||
508 | ret_value[1] = 0; | ||
509 | |||
510 | ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); | ||
511 | if (ret != 3) { | ||
512 | /* failure in I2C transfer */ | ||
513 | DRM_DEBUG_KMS("I2C transfer returned %d\n", ret); | ||
514 | return; | ||
515 | } | ||
516 | if (ret_value[0] != SDVO_CMD_STATUS_SUCCESS) { | ||
517 | DRM_DEBUG_KMS("DDC switch command returns response %d\n", | ||
518 | ret_value[0]); | ||
519 | return; | ||
520 | } | ||
521 | return; | ||
473 | } | 522 | } |
474 | 523 | ||
475 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) | 524 | static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) |
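The rewritten bus switch packs three messages into a single i2c_transfer() call: the command write, the status-register select, and the status read. Because they form one transaction, no intervening traffic can flip the mux back to the internal SDVO registers. A compile-only sketch of that message layout using stub types; the register byte values are illustrative, not taken from a datasheet:

    #include <stdio.h>
    #include <stdint.h>

    /* Minimal stand-ins for the kernel's i2c types. */
    #define I2C_M_RD 0x01
    struct i2c_msg { uint16_t addr, flags, len; uint8_t *buf; };

    int main(void)
    {
        uint8_t slave = 0x70;                 /* example 8-bit address */
        uint8_t out_buf[2]   = { 0x07, 0x7a }; /* opcode reg, switch cmd */
        uint8_t cmd_buf[1]   = { 0x09 };       /* status reg */
        uint8_t ret_value[1] = { 0 };

        /* One transaction: write the command, select the status
         * register, read the status byte. */
        struct i2c_msg msgs[] = {
            { slave >> 1, 0,        2, out_buf   },
            { slave >> 1, 0,        1, cmd_buf   },
            { slave >> 1, I2C_M_RD, 1, ret_value },
        };

        for (unsigned int i = 0; i < 3; i++)
            printf("msg %u: addr=0x%02x %s len=%u\n", i,
                   (unsigned)msgs[i].addr,
                   (msgs[i].flags & I2C_M_RD) ? "rd" : "wr",
                   (unsigned)msgs[i].len);
        return 0;
    }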
@@ -1579,6 +1628,32 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response) | |||
1579 | edid = drm_get_edid(&intel_output->base, | 1628 | edid = drm_get_edid(&intel_output->base, |
1580 | intel_output->ddc_bus); | 1629 | intel_output->ddc_bus); |
1581 | 1630 | ||
1631 | /* This is only applied to SDVO cards with multiple outputs */ | ||
1632 | if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) { | ||
1633 | uint8_t saved_ddc, temp_ddc; | ||
1634 | saved_ddc = sdvo_priv->ddc_bus; | ||
1635 | temp_ddc = sdvo_priv->ddc_bus >> 1; | ||
1636 | /* | ||
1637 | * Don't use 1 as the argument of the DDC bus switch to get | ||
1638 | * the EDID; that bus is used for the SDVO SPD ROM. | ||
1639 | */ | ||
1640 | while (temp_ddc > 1) { | ||
1641 | sdvo_priv->ddc_bus = temp_ddc; | ||
1642 | edid = drm_get_edid(&intel_output->base, | ||
1643 | intel_output->ddc_bus); | ||
1644 | if (edid) { | ||
1645 | /* | ||
1646 | * If we can read an EDID, this is likely the | ||
1647 | * correct DDC bus; keep it. | ||
1648 | */ | ||
1649 | sdvo_priv->ddc_bus = temp_ddc; | ||
1650 | break; | ||
1651 | } | ||
1652 | temp_ddc >>= 1; | ||
1653 | } | ||
1654 | if (edid == NULL) | ||
1655 | sdvo_priv->ddc_bus = saved_ddc; | ||
1656 | } | ||
1582 | /* when there is no EDID and no monitor is connected to the VGA | 1657 | /* when there is no EDID and no monitor is connected to the VGA |
1583 | * port, try the CRT DDC to read the EDID for the DVI connector | 1658 | * port, try the CRT DDC to read the EDID for the DVI connector |
1584 | */ | 1659 | */ |
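The fallback probe treats ddc_bus as a one-hot mask and walks it toward the low bits, stopping above mask 1 since that bus is reserved for the SDVO SPD ROM. The probe order is easy to see in isolation; the responding bus here is made up:

    #include <stdio.h>

    /* Pretend only the bus at mask 0x04 answers with an EDID. */
    static int probe_edid(unsigned int ddc_mask) { return ddc_mask == 0x04; }

    int main(void)
    {
        unsigned int saved_ddc = 0x10;        /* current (failing) bus */
        unsigned int temp_ddc = saved_ddc >> 1;
        unsigned int found = 0;

        /* Stop above 1: mask 0x01 is the SPD ROM bus. */
        while (temp_ddc > 1) {
            printf("trying DDC mask 0x%02x\n", temp_ddc);
            if (probe_edid(temp_ddc)) {
                found = temp_ddc;
                break;
            }
            temp_ddc >>= 1;
        }
        if (found)
            printf("using DDC mask 0x%02x\n", found);
        else
            printf("no EDID, restoring 0x%02x\n", saved_ddc);
        return 0;
    }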
@@ -2270,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) | |||
2270 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2345 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2271 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2346 | intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
2272 | (1 << INTEL_ANALOG_CLONE_BIT); | 2347 | (1 << INTEL_ANALOG_CLONE_BIT); |
2348 | } else if (flags & SDVO_OUTPUT_CVBS0) { | ||
2349 | |||
2350 | sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0; | ||
2351 | encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; | ||
2352 | connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; | ||
2353 | sdvo_priv->is_tv = true; | ||
2354 | intel_output->needs_tv_clock = true; | ||
2355 | intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; | ||
2273 | } else if (flags & SDVO_OUTPUT_LVDS0) { | 2356 | } else if (flags & SDVO_OUTPUT_LVDS0) { |
2274 | 2357 | ||
2275 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; | 2358 | sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; |
@@ -2662,6 +2745,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector) | |||
2662 | 2745 | ||
2663 | bool intel_sdvo_init(struct drm_device *dev, int output_device) | 2746 | bool intel_sdvo_init(struct drm_device *dev, int output_device) |
2664 | { | 2747 | { |
2748 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2665 | struct drm_connector *connector; | 2749 | struct drm_connector *connector; |
2666 | struct intel_output *intel_output; | 2750 | struct intel_output *intel_output; |
2667 | struct intel_sdvo_priv *sdvo_priv; | 2751 | struct intel_sdvo_priv *sdvo_priv; |
@@ -2708,10 +2792,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device) | |||
2708 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); | 2792 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); |
2709 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2793 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2710 | "SDVOB/VGA DDC BUS"); | 2794 | "SDVOB/VGA DDC BUS"); |
2795 | dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; | ||
2711 | } else { | 2796 | } else { |
2712 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); | 2797 | intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); |
2713 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, | 2798 | sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, |
2714 | "SDVOC/VGA DDC BUS"); | 2799 | "SDVOC/VGA DDC BUS"); |
2800 | dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; | ||
2715 | } | 2801 | } |
2716 | 2802 | ||
2717 | if (intel_output->ddc_bus == NULL) | 2803 | if (intel_output->ddc_bus == NULL) |