author    Dave Airlie <airlied@redhat.com>  2010-05-18 19:35:51 -0400
committer Dave Airlie <airlied@redhat.com>  2010-05-18 19:35:51 -0400
commit    05ea893c46805b2981ea8ba6df881e3d65edd63b
tree      ea381e22d99f49bd2c95238f88491d48b797a17b  /drivers/gpu/drm/i915
parent    26481fb15644b5fd85d4cea020f74a234cdf6803
parent    a7c542782e92f9487c62a571565637be3d6b0ffd
Merge remote branch 'anholt/drm-intel-next' into drm-next
* anholt/drm-intel-next: (515 commits)
  drm/i915: Fix out of tree builds
  drm/i915: move fence lru to struct drm_i915_fence_reg
  drm/i915: don't allow tiling changes on pinned buffers v2
  drm/i915: Be extra careful about A/D matching for multifunction SDVO
  drm/i915: Fix DDC bus selection for multifunction SDVO
  drm/i915: cleanup mode setting before unmapping registers
  drm/i915: Make fbc control wrapper functions
  drm/i915: Wait for the GPU whilst shrinking, if truly desperate.
  drm/i915: Use spatio-temporal dithering on PCH
  [MTD] Remove zero-length files mtdbdi.c and internal.ho
  pata_pcmcia / ide-cs: Fix bad hashes for Transcend and kingston IDs
  libata: Fix several inaccuracies in developer's guide
  slub: Fix bad boundary check in init_kmem_cache_nodes()
  raid6: fix recovery performance regression
  KEYS: call_sbin_request_key() must write lock keyrings before modifying them
  KEYS: Use RCU dereference wrappers in keyring key type code
  KEYS: find_keyring_by_name() can gain access to a freed keyring
  ALSA: hda: Fix 0 dB for Packard Bell models using Conexant CX20549 (Venice)
  ALSA: hda - Add quirk for Dell Inspiron 19T using a Conexant CX20582
  ALSA: take tu->qlock with irqs disabled
  ...
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile           |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c     |   13
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |   21
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         |   20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c         |  229
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c  |   27
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c         |    8
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c    |   54
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h         |   18
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h       |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c       |    1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c    |   87
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c       |   86
14 files changed, 416 insertions(+), 157 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9929f84ec3e1..95639017bdbe 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,3 +33,5 @@ i915-$(CONFIG_ACPI) += i915_opregion.o
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
 obj-$(CONFIG_DRM_I915) += i915.o
+
+CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 213aa3f67314..322070c0c631 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -566,23 +566,14 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_crtc *crtc;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	bool fbc_enabled = false;
 
-	if (!dev_priv->display.fbc_enabled) {
+	if (!I915_HAS_FBC(dev)) {
 		seq_printf(m, "FBC unsupported on this chipset\n");
 		return 0;
 	}
 
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		if (!crtc->enabled)
-			continue;
-		if (dev_priv->display.fbc_enabled(crtc))
-			fbc_enabled = true;
-	}
-
-	if (fbc_enabled) {
+	if (intel_fbc_enabled(dev)) {
 		seq_printf(m, "FBC enabled\n");
 	} else {
 		seq_printf(m, "FBC disabled: ");
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 8fe66ac4e1a5..2a6b5de5ae5d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1357,19 +1357,30 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
 	dev_priv->cfb_size = size;
 
+	intel_disable_fbc(dev);
+	dev_priv->compressed_fb = compressed_fb;
+
 	if (IS_GM45(dev)) {
-		g4x_disable_fbc(dev);
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
 	} else {
-		i8xx_disable_fbc(dev);
 		I915_WRITE(FBC_CFB_BASE, cfb_base);
 		I915_WRITE(FBC_LL_BASE, ll_base);
+		dev_priv->compressed_llb = compressed_llb;
 	}
 
 	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
 		  ll_base, size >> 20);
 }
 
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	drm_mm_put_block(dev_priv->compressed_fb);
+	if (!IS_GM45(dev))
+		drm_mm_put_block(dev_priv->compressed_llb);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1759,6 +1770,8 @@ int i915_driver_unload(struct drm_device *dev)
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_modeset_cleanup(dev);
+
 		/*
 		 * free the memory space allocated for the child device
 		 * config parsed from VBT
@@ -1782,13 +1795,13 @@ int i915_driver_unload(struct drm_device *dev)
 	intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_modeset_cleanup(dev);
-
 		i915_gem_free_all_phys_object(dev);
 
 		mutex_lock(&dev->struct_mutex);
 		i915_gem_cleanup_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
+		if (I915_HAS_FBC(dev) && i915_powersave)
+			i915_cleanup_compression(dev);
 		drm_mm_takedown(&dev_priv->vram);
 		i915_gem_lastclose(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 01e91ea5bdea..5c51e45ab68d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
 };
 
 const static struct intel_device_info intel_i85x_info = {
-	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+	.cursor_needs_physical = 1,
 };
 
 const static struct intel_device_info intel_i865g_info = {
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
 	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
 	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
 	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
 	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
 	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
 	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 242993bedab3..7f797ef1ab39 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -128,6 +128,7 @@ struct drm_i915_master_private {
 
 struct drm_i915_fence_reg {
 	struct drm_gem_object *obj;
+	struct list_head lru_list;
 };
 
 struct sdvo_device_mapping {
@@ -135,6 +136,7 @@ struct sdvo_device_mapping {
 	u8 slave_addr;
 	u8 dvo_wiring;
 	u8 initialized;
+	u8 ddc_pin;
 };
 
 struct drm_i915_error_state {
@@ -175,7 +177,7 @@ struct drm_i915_error_state {
 
 struct drm_i915_display_funcs {
 	void (*dpms)(struct drm_crtc *crtc, int mode);
-	bool (*fbc_enabled)(struct drm_crtc *crtc);
+	bool (*fbc_enabled)(struct drm_device *dev);
 	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
 	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
@@ -195,6 +197,7 @@ struct intel_overlay;
 struct intel_device_info {
 	u8 is_mobile : 1;
 	u8 is_i8xx : 1;
+	u8 is_i85x : 1;
 	u8 is_i915g : 1;
 	u8 is_i9xx : 1;
 	u8 is_i945gm : 1;
@@ -242,11 +245,14 @@ typedef struct drm_i915_private {
 
 	drm_dma_handle_t *status_page_dmah;
 	void *hw_status_page;
+	void *seqno_page;
 	dma_addr_t dma_status_page;
 	uint32_t counter;
 	unsigned int status_gfx_addr;
+	unsigned int seqno_gfx_addr;
 	drm_local_map_t hws_map;
 	struct drm_gem_object *hws_obj;
+	struct drm_gem_object *seqno_obj;
 	struct drm_gem_object *pwrctx;
 
 	struct resource mch_res;
@@ -641,6 +647,9 @@ typedef struct drm_i915_private {
 
 	enum no_fbc_reason no_fbc_reason;
 
+	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node *compressed_llb;
+
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
@@ -657,9 +666,6 @@ struct drm_i915_gem_object {
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
 
-	/** This object's place on the fenced object LRU */
-	struct list_head fence_list;
-
 	/**
 	 * This is set if the object is on the active or flushing lists
 	 * (has pending rendering), and is not set if it's on inactive (ready
@@ -1006,6 +1012,9 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
+extern void intel_disable_fbc(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern bool intel_fbc_enabled(struct drm_device *dev);
 
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1088,7 +1097,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
 #define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
@@ -1154,6 +1163,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
 			   IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 47c46ed384f1..112699f71fa4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1051,7 +1051,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * about to occur.
 	 */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-		list_move_tail(&obj_priv->fence_list,
+		struct drm_i915_fence_reg *reg =
+			&dev_priv->fence_regs[obj_priv->fence_reg];
+		list_move_tail(&reg->lru_list,
 			       &dev_priv->mm.fence_list);
 	}
 
@@ -1577,9 +1579,12 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 		i915_gem_object_move_to_active(obj, seqno);
 
 		/* update the fence lru list */
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			list_move_tail(&obj_priv->fence_list,
+		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+			struct drm_i915_fence_reg *reg =
+				&dev_priv->fence_regs[obj_priv->fence_reg];
+			list_move_tail(&reg->lru_list,
 				       &dev_priv->mm.fence_list);
+		}
 
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
@@ -1588,6 +1593,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	}
 }
 
+#define PIPE_CONTROL_FLUSH(addr)					\
+	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
+		 PIPE_CONTROL_DEPTH_STALL);				\
+	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
+	OUT_RING(0);							\
+	OUT_RING(0);							\
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1622,13 +1634,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	if (dev_priv->mm.next_gem_seqno == 0)
 		dev_priv->mm.next_gem_seqno++;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(seqno);
+	if (HAS_PIPE_CONTROL(dev)) {
+		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
-	OUT_RING(MI_USER_INTERRUPT);
-	ADVANCE_LP_RING();
+		/*
+		 * Workaround qword write incoherence by flushing the
+		 * PIPE_NOTIFY buffers out to memory before requesting
+		 * an interrupt.
+		 */
+		BEGIN_LP_RING(32);
+		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		OUT_RING(seqno);
+		OUT_RING(0);
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128; /* write to separate cachelines */
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			 PIPE_CONTROL_NOTIFY);
+		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		OUT_RING(seqno);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	} else {
+		BEGIN_LP_RING(4);
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(seqno);
+
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
 
 	DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1752,7 +1798,10 @@ i915_get_gem_seqno(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+	if (HAS_PIPE_CONTROL(dev))
+		return ((volatile u32 *)(dev_priv->seqno_page))[0];
+	else
+		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
 }
 
 /**
@@ -2362,6 +2411,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
 		pitch_val = obj_priv->stride / tile_width;
 	pitch_val = ffs(pitch_val) - 1;
 
+	if (obj_priv->tiling_mode == I915_TILING_Y &&
+	    HAS_128_BYTE_Y_TILING(dev))
+		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+	else
+		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
 	val = obj_priv->gtt_offset;
 	if (obj_priv->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2435,9 +2490,10 @@ static int i915_find_fence_reg(struct drm_device *dev)
 
 	/* None available, try to steal one or wait for a user to finish */
 	i = I915_FENCE_REG_NONE;
-	list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
-			    fence_list) {
-		obj = &obj_priv->base;
+	list_for_each_entry(reg, &dev_priv->mm.fence_list,
+			    lru_list) {
+		obj = reg->obj;
+		obj_priv = to_intel_bo(obj);
 
 		if (obj_priv->pin_count)
 			continue;
@@ -2486,7 +2542,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
 	/* Just update our place in the LRU if our fence is getting used. */
 	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-		list_move_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 		return 0;
 	}
 
@@ -2516,7 +2573,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 
 	obj_priv->fence_reg = ret;
 	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-	list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
+	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 
 	reg->obj = obj;
 
@@ -2548,6 +2605,8 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_fence_reg *reg =
+		&dev_priv->fence_regs[obj_priv->fence_reg];
 
 	if (IS_GEN6(dev)) {
 		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2566,9 +2625,9 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 		I915_WRITE(fence_reg, 0);
 	}
 
-	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
+	reg->obj = NULL;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
-	list_del_init(&obj_priv->fence_list);
+	list_del_init(&reg->lru_list);
 }
 
 /**
@@ -4439,12 +4498,10 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
 	obj->agp_type = AGP_USER_MEMORY;
-
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->list);
 	INIT_LIST_HEAD(&obj->gpu_write_list);
-	INIT_LIST_HEAD(&obj->fence_list);
 	obj->madv = I915_MADV_WILLNEED;
 
 	trace_i915_gem_object_create(&obj->base);
@@ -4554,6 +4611,49 @@ i915_gem_idle(struct drm_device *dev)
 	return 0;
 }
 
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+static int
+i915_gem_init_pipe_control(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	obj = i915_gem_alloc_object(dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate seqno page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	obj_priv = to_intel_bo(obj);
+	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096);
+	if (ret)
+		goto err_unref;
+
+	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
+	dev_priv->seqno_page = kmap(obj_priv->pages[0]);
+	if (dev_priv->seqno_page == NULL)
+		goto err_unpin;
+
+	dev_priv->seqno_obj = obj;
+	memset(dev_priv->seqno_page, 0, PAGE_SIZE);
+
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(obj);
+err:
+	return ret;
+}
+
 static int
 i915_gem_init_hws(struct drm_device *dev)
 {
@@ -4571,7 +4671,8 @@ i915_gem_init_hws(struct drm_device *dev)
 	obj = i915_gem_alloc_object(dev, 4096);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate status page\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err;
 	}
 	obj_priv = to_intel_bo(obj);
 	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
@@ -4579,7 +4680,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
-		return ret;
+		goto err_unref;
 	}
 
 	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4588,10 +4689,16 @@ i915_gem_init_hws(struct drm_device *dev)
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_unpin;
 	}
+
+	if (HAS_PIPE_CONTROL(dev)) {
+		ret = i915_gem_init_pipe_control(dev);
+		if (ret)
+			goto err_unpin;
+	}
+
 	dev_priv->hws_obj = obj;
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	if (IS_GEN6(dev)) {
@@ -4604,6 +4711,30 @@ i915_gem_init_hws(struct drm_device *dev)
 	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(obj);
+err:
+	return 0;
+}
+
+static void
+i915_gem_cleanup_pipe_control(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	obj = dev_priv->seqno_obj;
+	obj_priv = to_intel_bo(obj);
+	kunmap(obj_priv->pages[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	dev_priv->seqno_obj = NULL;
+
+	dev_priv->seqno_page = NULL;
 }
 
 static void
@@ -4627,6 +4758,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 	dev_priv->hw_status_page = NULL;
 
+	if (HAS_PIPE_CONTROL(dev))
+		i915_gem_cleanup_pipe_control(dev);
+
 	/* Write high address into HWS_PGA when disabling. */
 	I915_WRITE(HWS_PGA, 0x1ffff000);
 }
@@ -4838,6 +4972,8 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	for (i = 0; i < 16; i++)
+		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
@@ -5066,6 +5202,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 }
 
 static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int lists_empty;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+		      list_empty(&dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return !lists_empty;
+}
+
+static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
 	drm_i915_private_t *dev_priv, *next_dev;
@@ -5094,6 +5244,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 
 	spin_lock(&shrink_list_lock);
 
+rescan:
 	/* first scan for clean buffers */
 	list_for_each_entry_safe(dev_priv, next_dev,
 				 &shrink_list, mm.shrink_list) {
@@ -5151,6 +5302,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 		would_deadlock = 0;
 	}
 
+	if (nr_to_scan) {
+		int active = 0;
+
+		/*
+		 * We are desperate for pages, so as a last resort, wait
+		 * for the GPU to finish and discard whatever we can.
+		 * This has a dramatic impact to reduce the number of
+		 * OOM-killer events whilst running the GPU aggressively.
+		 */
+		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+			struct drm_device *dev = dev_priv->dev;
+
+			if (!mutex_trylock(&dev->struct_mutex))
+				continue;
+
+			spin_unlock(&shrink_list_lock);
+
+			if (i915_gpu_is_active(dev)) {
+				i915_gpu_idle(dev);
+				active++;
+			}
+
+			spin_lock(&shrink_list_lock);
+			mutex_unlock(&dev->struct_mutex);
+		}
+
+		if (active)
+			goto rescan;
+	}
+
 	spin_unlock(&shrink_list_lock);
 
 	if (would_deadlock)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 449157f71610..4b7c49d4257d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 		 * reg, so dont bother to check the size */
 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
 			return false;
-	} else if (IS_I9XX(dev)) {
-		uint32_t pitch_val = ffs(stride / tile_width) - 1;
-
-		/* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
-		 * instead of 4 (2KB) on 945s.
-		 */
-		if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
-		    size > (I830_FENCE_MAX_SIZE_VAL << 20))
+	} else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+		if (stride > 8192)
 			return false;
-	} else {
-		uint32_t pitch_val = ffs(stride / tile_width) - 1;
 
-		if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
-		    size > (I830_FENCE_MAX_SIZE_VAL << 19))
-			return false;
+		if (IS_GEN3(dev)) {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+				return false;
+		} else {
+			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+				return false;
+		}
 	}
 
 	/* 965+ just needs multiples of tile width */
@@ -287,6 +283,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (obj_priv->pin_count) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EBUSY;
+	}
+
 	if (args->tiling_mode == I915_TILING_NONE) {
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
 		args->stride = 0;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b034ea36731c..26792af7e1a1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -353,7 +353,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		READ_BREADCRUMB(dev_priv);
 	}
 
-	if (gt_iir & GT_USER_INTERRUPT) {
+	if (gt_iir & GT_PIPE_NOTIFY) {
 		u32 seqno = i915_get_gem_seqno(dev);
 		dev_priv->mm.irq_gem_seqno = seqno;
 		trace_i915_gem_request_complete(dev, seqno);
@@ -1010,7 +1010,7 @@ void i915_user_irq_get(struct drm_device *dev)
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -1026,7 +1026,7 @@ void i915_user_irq_put(struct drm_device *dev)
 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
 		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -1310,7 +1310,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-	u32 render_mask = GT_USER_INTERRUPT;
+	u32 render_mask = GT_PIPE_NOTIFY;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410239cb..8fcc75c1aa28 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	struct drm_connector *connector;
+	acpi_handle handle;
+	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
+	unsigned long long device_id;
+	acpi_status status;
 	int i = 0;
 
+	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
+		return;
+
+	if (acpi_is_video_device(acpi_dev))
+		acpi_video_bus = acpi_dev;
+	else {
+		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
+			if (acpi_is_video_device(acpi_cdev)) {
+				acpi_video_bus = acpi_cdev;
+				break;
+			}
+		}
+	}
+
+	if (!acpi_video_bus) {
+		printk(KERN_WARNING "No ACPI video bus found\n");
+		return;
+	}
+
+	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
+		if (i >= 8) {
+			dev_printk (KERN_ERR, &dev->pdev->dev,
+				    "More than 8 outputs detected\n");
+			return;
+		}
+		status =
+			acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
+						NULL, &device_id);
+		if (ACPI_SUCCESS(status)) {
+			if (!device_id)
+				goto blind_set;
+			opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
+			i++;
+		}
+	}
+
+end:
+	/* If fewer than 8 outputs, the list must be null terminated */
+	if (i < 8)
+		opregion->acpi->didl[i] = 0;
+	return;
+
+blind_set:
+	i = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		int output_type = ACPI_OTHER_OUTPUT;
 		if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 		opregion->acpi->didl[i] |= (1<<31) | output_type | i;
 		i++;
 	}
-
-	/* If fewer than 8 outputs, the list must be null terminated */
-	if (i < 8)
-		opregion->acpi->didl[i] = 0;
+	goto end;
 }
 
 int intel_opregion_init(struct drm_device *dev, int resume)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 527d30aecda2..f3e39cc46f0d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -230,6 +230,16 @@
 #define ASYNC_FLIP (1<<22)
 #define DISPLAY_PLANE_A (0<<20)
 #define DISPLAY_PLANE_B (1<<20)
+#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
+#define PIPE_CONTROL_QW_WRITE (1<<14)
+#define PIPE_CONTROL_DEPTH_STALL (1<<13)
+#define PIPE_CONTROL_WC_FLUSH (1<<12)
+#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
+#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
+#define PIPE_CONTROL_ISP_DIS (1<<9)
+#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
+#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
 
 /*
  * Fence registers
@@ -241,7 +251,7 @@
 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
 #define I830_FENCE_PITCH_SHIFT 4
 #define I830_FENCE_REG_VALID (1<<0)
-#define I915_FENCE_MAX_PITCH_VAL 0x10
+#define I915_FENCE_MAX_PITCH_VAL 4
 #define I830_FENCE_MAX_PITCH_VAL 6
 #define I830_FENCE_MAX_SIZE_VAL (1<<8)
 
@@ -1922,7 +1932,10 @@
 /* Display & cursor control */
 
 /* dithering flag on Ironlake */
 #define PIPE_ENABLE_DITHER (1 << 4)
+#define PIPE_DITHER_TYPE_MASK (3 << 2)
+#define PIPE_DITHER_TYPE_SPATIAL (0 << 2)
+#define PIPE_DITHER_TYPE_ST01 (1 << 2)
 /* Pipe A */
 #define PIPEADSL 0x70000
 #define PIPEACONF 0x70008
@@ -2339,6 +2352,7 @@
 #define DEIER 0x4400c
 
 /* GT interrupt */
+#define GT_PIPE_NOTIFY (1 << 4)
 #define GT_SYNC_STATUS (1 << 2)
 #define GT_USER_INTERRUPT (1 << 0)
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 303815321c79..9e4c45f68d6e 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -276,5 +276,5 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
 
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
+#define TRACE_INCLUDE_PATH .
 #include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f9ba452f0cbf..4c748d8f73d6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -366,6 +366,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 		p_mapping->dvo_port = p_child->dvo_port;
 		p_mapping->slave_addr = p_child->slave_addr;
 		p_mapping->dvo_wiring = p_child->dvo_wiring;
+		p_mapping->ddc_pin = p_child->ddc_pin;
 		p_mapping->initialized = 1;
 	} else {
 		DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4d739a1b13ca..8c668e3122a5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1048,9 +1048,8 @@ void i8xx_disable_fbc(struct drm_device *dev)
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+static bool i8xx_fbc_enabled(struct drm_device *dev)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
@@ -1107,14 +1106,43 @@ void g4x_disable_fbc(struct drm_device *dev)
 	DRM_DEBUG_KMS("disabled FBC\n");
 }
 
-static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+static bool g4x_fbc_enabled(struct drm_device *dev)
 {
-	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+bool intel_fbc_enabled(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->display.fbc_enabled)
+		return false;
+
+	return dev_priv->display.fbc_enabled(dev);
+}
+
+void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+
+	if (!dev_priv->display.enable_fbc)
+		return;
+
+	dev_priv->display.enable_fbc(crtc, interval);
+}
+
+void intel_disable_fbc(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!dev_priv->display.disable_fbc)
+		return;
+
+	dev_priv->display.disable_fbc(dev);
+}
+
 /**
  * intel_update_fbc - enable/disable FBC as needed
  * @crtc: CRTC to point the compressor at
@@ -1149,9 +1177,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 	if (!i915_powersave)
 		return;
 
-	if (!dev_priv->display.fbc_enabled ||
-	    !dev_priv->display.enable_fbc ||
-	    !dev_priv->display.disable_fbc)
+	if (!I915_HAS_FBC(dev))
 		return;
 
 	if (!crtc->fb)
@@ -1198,28 +1224,25 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 		goto out_disable;
 	}
 
-	if (dev_priv->display.fbc_enabled(crtc)) {
+	if (intel_fbc_enabled(dev)) {
 		/* We can re-enable it in this case, but need to update pitch */
-		if (fb->pitch > dev_priv->cfb_pitch)
-			dev_priv->display.disable_fbc(dev);
-		if (obj_priv->fence_reg != dev_priv->cfb_fence)
-			dev_priv->display.disable_fbc(dev);
-		if (plane != dev_priv->cfb_plane)
-			dev_priv->display.disable_fbc(dev);
+		if ((fb->pitch > dev_priv->cfb_pitch) ||
+		    (obj_priv->fence_reg != dev_priv->cfb_fence) ||
+		    (plane != dev_priv->cfb_plane))
+			intel_disable_fbc(dev);
 	}
 
-	if (!dev_priv->display.fbc_enabled(crtc)) {
-		/* Now try to turn it back on if possible */
-		dev_priv->display.enable_fbc(crtc, 500);
-	}
+	/* Now try to turn it back on if possible */
+	if (!intel_fbc_enabled(dev))
+		intel_enable_fbc(crtc, 500);
 
 	return;
 
 out_disable:
 	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
 	/* Multiple disables should be harmless */
-	if (dev_priv->display.fbc_enabled(crtc))
-		dev_priv->display.disable_fbc(dev);
+	if (intel_fbc_enabled(dev))
+		intel_disable_fbc(dev);
 }
 
 static int
@@ -3677,14 +3700,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 	/* set the dithering flag */
 	if (IS_I965G(dev)) {
 		if (dev_priv->lvds_dither) {
-			if (HAS_PCH_SPLIT(dev))
+			if (HAS_PCH_SPLIT(dev)) {
 				pipeconf |= PIPE_ENABLE_DITHER;
-			else
+				pipeconf |= PIPE_DITHER_TYPE_ST01;
+			} else
 				lvds |= LVDS_ENABLE_DITHER;
 		} else {
-			if (HAS_PCH_SPLIT(dev))
+			if (HAS_PCH_SPLIT(dev)) {
 				pipeconf &= ~PIPE_ENABLE_DITHER;
-			else
+				pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+			} else
 				lvds &= ~LVDS_ENABLE_DITHER;
 		}
 	}
@@ -5202,8 +5227,7 @@ static void intel_init_display(struct drm_device *dev)
 	else
 		dev_priv->display.dpms = i9xx_crtc_dpms;
 
-	/* Only mobile has FBC, leave pointers NULL for other chips */
-	if (IS_MOBILE(dev)) {
+	if (I915_HAS_FBC(dev)) {
 		if (IS_GM45(dev)) {
 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
 			dev_priv->display.enable_fbc = g4x_enable_fbc;
@@ -5268,17 +5292,18 @@ static void intel_init_display(struct drm_device *dev)
 		dev_priv->display.update_wm = g4x_update_wm;
 	else if (IS_I965G(dev))
 		dev_priv->display.update_wm = i965_update_wm;
-	else if (IS_I9XX(dev) || IS_MOBILE(dev)) {
+	else if (IS_I9XX(dev)) {
 		dev_priv->display.update_wm = i9xx_update_wm;
 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+	} else if (IS_I85X(dev)) {
+		dev_priv->display.update_wm = i9xx_update_wm;
+		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
 	} else {
-		if (IS_I85X(dev))
-			dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-		else if (IS_845G(dev))
+		dev_priv->display.update_wm = i830_update_wm;
+		if (IS_845G(dev))
 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
 		else
 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
-		dev_priv->display.update_wm = i830_update_wm;
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index ca372abc36cd..aba72c489a2f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1504,16 +1504,17 @@ intel_analog_is_connected(struct drm_device *dev)
 }
 
 enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = intel_attached_encoder(connector);
 	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
 	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_sdvo_connector *sdvo_connector = intel_connector->dev_priv;
 	enum drm_connector_status status = connector_status_connected;
 	struct edid *edid = NULL;
 
-	edid = drm_get_edid(connector,
-			    intel_encoder->ddc_bus);
+	edid = drm_get_edid(connector, intel_encoder->ddc_bus);
 
 	/* This is only applied to SDVO cards with multiple outputs */
 	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
@@ -1526,8 +1527,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 		 */
 		while(temp_ddc > 1) {
 			sdvo_priv->ddc_bus = temp_ddc;
-			edid = drm_get_edid(connector,
-					    intel_encoder->ddc_bus);
+			edid = drm_get_edid(connector, intel_encoder->ddc_bus);
 			if (edid) {
 				/*
 				 * When we can get the EDID, maybe it is the
@@ -1544,28 +1544,25 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 	/* when there is no edid and no monitor is connected with VGA
 	 * port, try to use the CRT ddc to read the EDID for DVI-connector
 	 */
-	if (edid == NULL &&
-	    sdvo_priv->analog_ddc_bus &&
+	if (edid == NULL && sdvo_priv->analog_ddc_bus &&
 	    !intel_analog_is_connected(connector->dev))
-		edid = drm_get_edid(connector,
-				    sdvo_priv->analog_ddc_bus);
+		edid = drm_get_edid(connector, sdvo_priv->analog_ddc_bus);
+
 	if (edid != NULL) {
-		/* Don't report the output as connected if it's a DVI-I
-		 * connector with a non-digital EDID coming out.
-		 */
-		if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
-			if (edid->input & DRM_EDID_INPUT_DIGITAL)
-				sdvo_priv->is_hdmi =
-					drm_detect_hdmi_monitor(edid);
-			else
-				status = connector_status_disconnected;
-		}
+		bool is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool need_digital = !!(sdvo_connector->output_flag & SDVO_TMDS_MASK);
 
-		kfree(edid);
-		connector->display_info.raw_edid = NULL;
+		/* DDC bus is shared, match EDID to connector type */
+		if (is_digital && need_digital)
+			sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+		else if (is_digital != need_digital)
+			status = connector_status_disconnected;
 
-	} else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
+		connector->display_info.raw_edid = NULL;
+	} else
 		status = connector_status_disconnected;
+
+	kfree(edid);
 
 	return status;
 }
@@ -1601,8 +1598,8 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 
 	if ((sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
-	else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
-		ret = intel_sdvo_hdmi_sink_detect(connector, response);
+	else if (response & SDVO_TMDS_MASK)
+		ret = intel_sdvo_hdmi_sink_detect(connector);
 	else
 		ret = connector_status_connected;
 
@@ -2054,40 +2051,17 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
  * outputs, then LVDS outputs.
  */
 static void
-intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
+intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
+			  struct intel_sdvo_priv *sdvo, u32 reg)
 {
-	uint16_t mask = 0;
-	unsigned int num_bits;
+	struct sdvo_device_mapping *mapping;
 
-	/* Make a mask of outputs less than or equal to our own priority in the
-	 * list.
-	 */
-	switch (dev_priv->controlled_output) {
-	case SDVO_OUTPUT_LVDS1:
-		mask |= SDVO_OUTPUT_LVDS1;
-	case SDVO_OUTPUT_LVDS0:
-		mask |= SDVO_OUTPUT_LVDS0;
-	case SDVO_OUTPUT_TMDS1:
-		mask |= SDVO_OUTPUT_TMDS1;
-	case SDVO_OUTPUT_TMDS0:
-		mask |= SDVO_OUTPUT_TMDS0;
-	case SDVO_OUTPUT_RGB1:
-		mask |= SDVO_OUTPUT_RGB1;
-	case SDVO_OUTPUT_RGB0:
-		mask |= SDVO_OUTPUT_RGB0;
-		break;
-	}
-
-	/* Count bits to find what number we are in the priority list. */
-	mask &= dev_priv->caps.output_flags;
-	num_bits = hweight16(mask);
-	if (num_bits > 3) {
-		/* if more than 3 outputs, default to DDC bus 3 for now */
-		num_bits = 3;
-	}
+	if (IS_SDVOB(reg))
+		mapping = &(dev_priv->sdvo_mappings[0]);
+	else
+		mapping = &(dev_priv->sdvo_mappings[1]);
 
-	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
-	dev_priv->ddc_bus = 1 << num_bits;
+	sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
 }
 
 static bool
@@ -2866,7 +2840,7 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 		goto err_i2c;
 	}
 
-	intel_sdvo_select_ddc_bus(sdvo_priv);
+	intel_sdvo_select_ddc_bus(dev_priv, sdvo_priv, sdvo_reg);
 
 	/* Set the input timing to the screen. Assume always input 0. */
 	intel_sdvo_set_target_input(intel_encoder, true, false);