author     Dave Airlie <airlied@redhat.com>    2012-04-12 05:27:01 -0400
committer  Dave Airlie <airlied@redhat.com>    2012-04-12 05:27:01 -0400
commit     effbc4fd8e37e41d6f2bb6bcc611c14b4fbdcf9b (patch)
tree       8bc2a6a2116f1031b0033bf1a8f9fbe92201c5c1 /drivers
parent     6a7068b4ef17dfb9de3191321f1adc91fa1659ca (diff)
parent     ec34a01de31128e5c08e5f05c47f4a787f45a33c (diff)
Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next
Daniel Vetter wrote:

"First pull request for 3.5-next, slightly larger than usual because new things kept coming in since the last pull for 3.4.

Highlights:
- first batch of hw enablement for vlv (Jesse et al) and hsw (Eugeni). pci ids are not yet added, and there are still quite a few patches to merge (mostly modesetting). To make QA easier I've decided to merge this stuff in pieces.
- loads of cleanups and prep patches spurred by the above. Especially vlv is a real frankenstein chip, but hsw is also stretching our driver's code design. Expect more to come in this area for 3.5.
- more gmbus fixes, cleanups and improvements by Daniel Kurtz. Again, there are more patches needed (and some already queued up), but I wanted to split this a bit for better testing.
- pwrite/pread rework and retuning. This series has been in the works for a few months already and a lot of i-g-t tests have been created for it. Now it's finally ready to be merged. Note that one patch in this series touches include/pagemap.h; that patch is acked-by akpm.
- reduced mappable pressure and relocation throughput improvements from Chris.
- mmap offset exhaustion mitigation by Chris Wilson.
- a start at figuring out which codepaths in our messy dri1/ums+gem/kms driver we actually need to support, by bailing out of unsupported cases. The driver now refuses to load without kms on gen6+ and disallows a few ioctls that userspace never used in certain cases. More of this will definitely come.
- more decoupling of global gtt and ppgtt.
- improved dual-link lvds detection by Takashi Iwai.
- shut up the compiler, plus fix the fallout (Ben).
- inverted panel brightness handling (mostly Acer manages to break things in this way).
- small fixlets, adjustments and some minor things to help debugging.

Regression-wise QA reported quite a few issues on ivb, but all of them turned out to be hw stability issues which are already fixed in drm-intel-fixes (QA runs the nightly regression tests on -next alone, without -fixes automatically merged in). There's still one issue open on snb; it looks like occlusion query writes are not quite as cache coherent as we've expected. With some of the pwrite adjustments we can now reliably hit this. A kernel workaround for it is in the works."

* 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits)
  drm/i915: VCS is not the last ring
  drm/i915: Add a dual link lvds quirk for MacBook Pro 8,2
  drm/i915: make quirks more verbose
  drm/i915: dump the DMA fetch addr register on pre-gen6
  drm/i915/sdvo: Include YRPB as an additional TV output type
  drm/i915: disallow gem init ioctl on ilk
  drm/i915: refuse to load on gen6+ without kms
  drm/i915: extract gt interrupt handler
  drm/i915: use render gen to switch ring irq functions
  drm/i915: rip out old HWSTAM missed irq WA for vlv
  drm/i915: open code gen6+ ring irqs
  drm/i915: ring irq cleanups
  drm/i915: add SFUSE_STRAP registers for digital port detection
  drm/i915: add WM_LINETIME registers
  drm/i915: add WRPLL clocks
  drm/i915: add LCPLL control registers
  drm/i915: add SSC offsets for SBI access
  drm/i915: add port clock selection support for HSW
  drm/i915: add S PLL control
  drm/i915: add PIXCLK_GATE register
  ...

Conflicts:
	drivers/char/agp/intel-agp.h
	drivers/char/agp/intel-gtt.c
	drivers/gpu/drm/i915/i915_debugfs.c
Diffstat (limited to 'drivers')
 drivers/char/agp/intel-agp.c               |   1
 drivers/char/agp/intel-agp.h               |  14
 drivers/char/agp/intel-gtt.c               |  45
 drivers/gpu/drm/drm_cache.c                |  23
 drivers/gpu/drm/i915/i915_debugfs.c        |  91
 drivers/gpu/drm/i915/i915_dma.c            |  85
 drivers/gpu/drm/i915/i915_drv.c            |  88
 drivers/gpu/drm/i915/i915_drv.h            |  74
 drivers/gpu/drm/i915/i915_gem.c            | 824
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  94
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  69
 drivers/gpu/drm/i915/i915_irq.c            | 450
 drivers/gpu/drm/i915/i915_reg.h            | 374
 drivers/gpu/drm/i915/intel_acpi.c          |   2
 drivers/gpu/drm/i915/intel_bios.c          |  45
 drivers/gpu/drm/i915/intel_crt.c           |  16
 drivers/gpu/drm/i915/intel_display.c       | 784
 drivers/gpu/drm/i915/intel_drv.h           |  17
 drivers/gpu/drm/i915/intel_dvo.c           |   6
 drivers/gpu/drm/i915/intel_hdmi.c          |  46
 drivers/gpu/drm/i915/intel_i2c.c           | 295
 drivers/gpu/drm/i915/intel_lvds.c          |  11
 drivers/gpu/drm/i915/intel_modes.c         |   3
 drivers/gpu/drm/i915/intel_opregion.c      |   4
 drivers/gpu/drm/i915/intel_panel.c         |  28
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  92
 drivers/gpu/drm/i915/intel_ringbuffer.h    |   1
 drivers/gpu/drm/i915/intel_sdvo.c          |  51
 drivers/gpu/drm/i915/intel_tv.c            |   9
 29 files changed, 2508 insertions(+), 1134 deletions(-)
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 962e75dc4781..74c2d9274c53 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -907,6 +907,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
+	ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
 	{ }
 };
 
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 7ea18a5fe71c..c0091753a0d1 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -96,6 +96,7 @@
 #define G4x_GMCH_SIZE_VT_2M	(G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL		0x2170 /* 915+ */
+#define GFX_FLSH_CNTL_VLV	0x101008
 
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
@@ -235,6 +236,19 @@
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB	0x0158	/* Server */
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG	0x015A
 #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG	0x016A
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB	0x0F00 /* VLV1 */
+#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG	0x0F30
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB		0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG	0x0402
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG	0x0412
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB	0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG	0x0406
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG	0x0416
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB	0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG	0x040a
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG	0x041a
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV		0x0c16 /* SDV */
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB	0x0c04
 
 int intel_gmch_probe(struct pci_dev *pdev,
 		     struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 7f025fb620de..1237e7575c3f 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1179,6 +1179,20 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
+static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
+				   unsigned int flags)
+{
+	u32 pte_flags;
+
+	pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+
+	/* gen6 has bit11-4 for physical addr bit39-32 */
+	addr |= (addr >> 28) & 0xff0;
+	writel(addr | pte_flags, intel_private.gtt + entry);
+
+	writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
+}
+
 static void gen6_cleanup(void)
 {
 }
@@ -1205,12 +1219,16 @@ static inline int needs_idle_maps(void)
 static int i9xx_setup(void)
 {
 	u32 reg_addr;
+	int size = KB(512);
 
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
 	reg_addr &= 0xfff80000;
 
-	intel_private.registers = ioremap(reg_addr, 128 * 4096);
+	if (INTEL_GTT_GEN >= 7)
+		size = MB(2);
+
+	intel_private.registers = ioremap(reg_addr, size);
 	if (!intel_private.registers)
 		return -ENOMEM;
 
@@ -1354,6 +1372,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
 	.check_flags = gen6_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
 };
+static const struct intel_gtt_driver valleyview_gtt_driver = {
+	.gen = 7,
+	.setup = i9xx_setup,
+	.cleanup = gen6_cleanup,
+	.write_entry = valleyview_write_entry,
+	.dma_mask_size = 40,
+	.check_flags = gen6_check_flags,
+	.chipset_flush = i9xx_chipset_flush,
+};
 
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1460,6 +1487,22 @@ static const struct intel_gtt_driver_description {
1460 "Ivybridge", &sandybridge_gtt_driver }, 1487 "Ivybridge", &sandybridge_gtt_driver },
1461 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG, 1488 { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
1462 "Ivybridge", &sandybridge_gtt_driver }, 1489 "Ivybridge", &sandybridge_gtt_driver },
1490 { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
1491 "ValleyView", &valleyview_gtt_driver },
1492 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
1493 "Haswell", &sandybridge_gtt_driver },
1494 { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
1495 "Haswell", &sandybridge_gtt_driver },
1496 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
1497 "Haswell", &sandybridge_gtt_driver },
1498 { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
1499 "Haswell", &sandybridge_gtt_driver },
1500 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
1501 "Haswell", &sandybridge_gtt_driver },
1502 { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
1503 "Haswell", &sandybridge_gtt_driver },
1504 { PCI_DEVICE_ID_INTEL_HASWELL_SDV,
1505 "Haswell", &sandybridge_gtt_driver },
1463 { 0, NULL, NULL } 1506 { 0, NULL, NULL }
1464}; 1507};
1465 1508
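For reference, the address packing that valleyview_write_entry() inherits from the gen6 GTT PTE layout can be sketched on its own. This is illustrative only and not part of the patch; the EXAMPLE_* names are invented, and their values mirror (but are here only assumed to match) the GEN6_PTE_UNCACHED and I810_PTE_VALID definitions used above.

    #include <stdint.h>

    /* Sketch: fold a 40-bit physical address into a 32-bit gen6/VLV GTT PTE.
     * Address bits 31:12 stay in place (the page is 4K aligned), bits 39:32
     * are stored in PTE bits 11:4, and the low bits carry the flags. */
    #define EXAMPLE_PTE_UNCACHED (1u << 1)	/* assumed value of GEN6_PTE_UNCACHED */
    #define EXAMPLE_PTE_VALID    (1u << 0)	/* assumed value of I810_PTE_VALID */

    static uint32_t example_gen6_pte(uint64_t addr)
    {
    	uint32_t pte = (uint32_t)addr;			/* addr[31:12] */

    	pte |= (uint32_t)((addr >> 28) & 0xff0);	/* addr[39:32] -> pte[11:4] */
    	return pte | EXAMPLE_PTE_UNCACHED | EXAMPLE_PTE_VALID;
    }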
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 4b8653b932f9..08758e061478 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -98,3 +98,26 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_pages);
+
+void
+drm_clflush_virt_range(char *addr, unsigned long length)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		char *end = addr + length;
+		mb();
+		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
+			clflush(addr);
+		clflush(end - 1);
+		mb();
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_virt_range);
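The new export is what the shmem pread/pwrite rework further down uses: after kmapping a backing page, the caller flushes only the cachelines covering the bytes it is about to copy when the object is not CPU-coherent. A minimal usage sketch, not part of the patch (the example_* name is invented; the pattern mirrors shmem_pread_fast() below):

    /* Sketch: flush just the range we will read out of a kmap'd page,
     * then copy it to userspace without sleeping. */
    static int example_read_cached_range(struct page *page, unsigned int offset,
    				     unsigned int len, char __user *user_data,
    				     bool needs_clflush)
    {
    	char *vaddr = kmap_atomic(page);
    	int ret;

    	if (needs_clflush)
    		drm_clflush_virt_range(vaddr + offset, len);
    	ret = __copy_to_user_inatomic(user_data, vaddr + offset, len);
    	kunmap_atomic(vaddr);

    	return ret ? -EFAULT : 0;
    }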
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b505b70dba05..967fb928c577 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -468,7 +468,45 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	if (!HAS_PCH_SPLIT(dev)) {
+	if (IS_VALLEYVIEW(dev)) {
+		seq_printf(m, "Display IER:\t%08x\n",
+			   I915_READ(VLV_IER));
+		seq_printf(m, "Display IIR:\t%08x\n",
+			   I915_READ(VLV_IIR));
+		seq_printf(m, "Display IIR_RW:\t%08x\n",
+			   I915_READ(VLV_IIR_RW));
+		seq_printf(m, "Display IMR:\t%08x\n",
+			   I915_READ(VLV_IMR));
+		for_each_pipe(pipe)
+			seq_printf(m, "Pipe %c stat:\t%08x\n",
+				   pipe_name(pipe),
+				   I915_READ(PIPESTAT(pipe)));
+
+		seq_printf(m, "Master IER:\t%08x\n",
+			   I915_READ(VLV_MASTER_IER));
+
+		seq_printf(m, "Render IER:\t%08x\n",
+			   I915_READ(GTIER));
+		seq_printf(m, "Render IIR:\t%08x\n",
+			   I915_READ(GTIIR));
+		seq_printf(m, "Render IMR:\t%08x\n",
+			   I915_READ(GTIMR));
+
+		seq_printf(m, "PM IER:\t\t%08x\n",
+			   I915_READ(GEN6_PMIER));
+		seq_printf(m, "PM IIR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIIR));
+		seq_printf(m, "PM IMR:\t\t%08x\n",
+			   I915_READ(GEN6_PMIMR));
+
+		seq_printf(m, "Port hotplug:\t%08x\n",
+			   I915_READ(PORT_HOTPLUG_EN));
+		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
+			   I915_READ(VLV_DPFLIPSTAT));
+		seq_printf(m, "DPINVGTT:\t%08x\n",
+			   I915_READ(DPINVGTT));
+
+	} else if (!HAS_PCH_SPLIT(dev)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity: %08x\n",
@@ -704,6 +742,7 @@ static void i915_ring_error_state(struct seq_file *m,
 			    struct drm_i915_error_state *error,
 			    unsigned ring)
 {
+	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
 	seq_printf(m, "%s command stream:\n", ring_str(ring));
 	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
 	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
@@ -718,8 +757,8 @@ static void i915_ring_error_state(struct seq_file *m,
718 if (INTEL_INFO(dev)->gen >= 4) 757 if (INTEL_INFO(dev)->gen >= 4)
719 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); 758 seq_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
720 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); 759 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
760 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
721 if (INTEL_INFO(dev)->gen >= 6) { 761 if (INTEL_INFO(dev)->gen >= 6) {
722 seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
723 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 762 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
724 seq_printf(m, " SYNC_0: 0x%08x\n", 763 seq_printf(m, " SYNC_0: 0x%08x\n",
725 error->semaphore_mboxes[ring][0]); 764 error->semaphore_mboxes[ring][0]);
@@ -1502,6 +1541,53 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_dpio_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+
+	if (!IS_VALLEYVIEW(dev)) {
+		seq_printf(m, "unsupported\n");
+		return 0;
+	}
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
+
+	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
+	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_DIV_B));
+
+	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
+	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));
+
+	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+
+	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
+	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
+		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));
+
+	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
+		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
 static ssize_t
 i915_wedged_read(struct file *filp,
 		 char __user *ubuf,
@@ -1836,6 +1922,7 @@ static struct drm_info_list i915_debugfs_list[] = {
1836 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 1922 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
1837 {"i915_swizzle_info", i915_swizzle_info, 0}, 1923 {"i915_swizzle_info", i915_swizzle_info, 0},
1838 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 1924 {"i915_ppgtt_info", i915_ppgtt_info, 0},
1925 {"i915_dpio", i915_dpio_info, 0},
1839}; 1926};
1840#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1927#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
1841 1928
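The i915_dpio entry added above follows the standard drm debugfs pattern: a seq_file show callback plus a drm_info_list entry that the driver's debugfs init registers. A minimal sketch of that pattern, not part of the patch (the example_* names are invented):

    /* Sketch of the drm_info_list pattern used by i915_dpio_info() above. */
    static int example_info(struct seq_file *m, void *data)
    {
    	struct drm_info_node *node = (struct drm_info_node *) m->private;
    	struct drm_device *dev = node->minor->dev;

    	seq_printf(m, "drm_device: %p\n", dev);
    	return 0;
    }

    static struct drm_info_list example_debugfs_list[] = {
    	{"example_info", example_info, 0},
    };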
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 785f67f963ef..652f43f00ef2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -26,6 +26,8 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "drmP.h" 31#include "drmP.h"
30#include "drm.h" 32#include "drm.h"
31#include "drm_crtc_helper.h" 33#include "drm_crtc_helper.h"
@@ -43,6 +45,7 @@
43#include <linux/slab.h> 45#include <linux/slab.h>
44#include <linux/module.h> 46#include <linux/module.h>
45#include <acpi/video.h> 47#include <acpi/video.h>
48#include <asm/pat.h>
46 49
47static void i915_write_hws_pga(struct drm_device *dev) 50static void i915_write_hws_pga(struct drm_device *dev)
48{ 51{
@@ -787,6 +790,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
787 case I915_PARAM_HAS_LLC: 790 case I915_PARAM_HAS_LLC:
788 value = HAS_LLC(dev); 791 value = HAS_LLC(dev);
789 break; 792 break;
793 case I915_PARAM_HAS_ALIASING_PPGTT:
794 value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
795 break;
790 default: 796 default:
791 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 797 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
792 param->param); 798 param->param);
@@ -1158,14 +1164,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1158 struct drm_device *dev = pci_get_drvdata(pdev); 1164 struct drm_device *dev = pci_get_drvdata(pdev);
1159 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 1165 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1160 if (state == VGA_SWITCHEROO_ON) { 1166 if (state == VGA_SWITCHEROO_ON) {
1161 printk(KERN_INFO "i915: switched on\n"); 1167 pr_info("switched on\n");
1162 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1168 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1163 /* i915 resume handler doesn't set to D0 */ 1169 /* i915 resume handler doesn't set to D0 */
1164 pci_set_power_state(dev->pdev, PCI_D0); 1170 pci_set_power_state(dev->pdev, PCI_D0);
1165 i915_resume(dev); 1171 i915_resume(dev);
1166 dev->switch_power_state = DRM_SWITCH_POWER_ON; 1172 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1167 } else { 1173 } else {
1168 printk(KERN_ERR "i915: switched off\n"); 1174 pr_err("switched off\n");
1169 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 1175 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1170 i915_suspend(dev, pmm); 1176 i915_suspend(dev, pmm);
1171 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 1177 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
@@ -1216,10 +1222,8 @@ static int i915_load_gem_init(struct drm_device *dev)
1216 /* PPGTT pdes are stolen from global gtt ptes, so shrink the 1222 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
1217 * aperture accordingly when using aliasing ppgtt. */ 1223 * aperture accordingly when using aliasing ppgtt. */
1218 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; 1224 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
1219 /* For paranoia keep the guard page in between. */
1220 gtt_size -= PAGE_SIZE;
1221 1225
1222 i915_gem_do_init(dev, 0, mappable_size, gtt_size); 1226 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
1223 1227
1224 ret = i915_gem_init_aliasing_ppgtt(dev); 1228 ret = i915_gem_init_aliasing_ppgtt(dev);
1225 if (ret) { 1229 if (ret) {
@@ -1237,7 +1241,8 @@ static int i915_load_gem_init(struct drm_device *dev)
1237 * should be enough to keep any prefetching inside of the 1241 * should be enough to keep any prefetching inside of the
1238 * aperture. 1242 * aperture.
1239 */ 1243 */
1240 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); 1244 i915_gem_init_global_gtt(dev, 0, mappable_size,
1245 gtt_size);
1241 } 1246 }
1242 1247
1243 ret = i915_gem_init_hw(dev); 1248 ret = i915_gem_init_hw(dev);
@@ -1931,6 +1936,29 @@ ips_ping_for_i915_load(void)
1931 } 1936 }
1932} 1937}
1933 1938
1939static void
1940i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
1941 unsigned long size)
1942{
1943 dev_priv->mm.gtt_mtrr = -1;
1944
1945#if defined(CONFIG_X86_PAT)
1946 if (cpu_has_pat)
1947 return;
1948#endif
1949
1950 /* Set up a WC MTRR for non-PAT systems. This is more common than
1951 * one would think, because the kernel disables PAT on first
1952 * generation Core chips because WC PAT gets overridden by a UC
1953 * MTRR if present. Even if a UC MTRR isn't present.
1954 */
1955 dev_priv->mm.gtt_mtrr = mtrr_add(base, size, MTRR_TYPE_WRCOMB, 1);
1956 if (dev_priv->mm.gtt_mtrr < 0) {
1957 DRM_INFO("MTRR allocation failed. Graphics "
1958 "performance may suffer.\n");
1959 }
1960}
1961
1934/** 1962/**
1935 * i915_driver_load - setup chip and create an initial config 1963 * i915_driver_load - setup chip and create an initial config
1936 * @dev: DRM device 1964 * @dev: DRM device
@@ -1945,8 +1973,16 @@ ips_ping_for_i915_load(void)
1945int i915_driver_load(struct drm_device *dev, unsigned long flags) 1973int i915_driver_load(struct drm_device *dev, unsigned long flags)
1946{ 1974{
1947 struct drm_i915_private *dev_priv; 1975 struct drm_i915_private *dev_priv;
1976 struct intel_device_info *info;
1948 int ret = 0, mmio_bar; 1977 int ret = 0, mmio_bar;
1949 uint32_t agp_size; 1978 uint32_t aperture_size;
1979
1980 info = (struct intel_device_info *) flags;
1981
1982 /* Refuse to load on gen6+ without kms enabled. */
1983 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1984 return -ENODEV;
1985
1950 1986
1951 /* i915 has 4 more counters */ 1987 /* i915 has 4 more counters */
1952 dev->counters += 4; 1988 dev->counters += 4;
@@ -1961,7 +1997,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1961 1997
1962 dev->dev_private = (void *)dev_priv; 1998 dev->dev_private = (void *)dev_priv;
1963 dev_priv->dev = dev; 1999 dev_priv->dev = dev;
1964 dev_priv->info = (struct intel_device_info *) flags; 2000 dev_priv->info = info;
1965 2001
1966 if (i915_get_bridge_dev(dev)) { 2002 if (i915_get_bridge_dev(dev)) {
1967 ret = -EIO; 2003 ret = -EIO;
@@ -2000,27 +2036,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2000 goto out_rmmap; 2036 goto out_rmmap;
2001 } 2037 }
2002 2038
2003 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 2039 aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
2004 2040
2005 dev_priv->mm.gtt_mapping = 2041 dev_priv->mm.gtt_mapping =
2006 io_mapping_create_wc(dev->agp->base, agp_size); 2042 io_mapping_create_wc(dev->agp->base, aperture_size);
2007 if (dev_priv->mm.gtt_mapping == NULL) { 2043 if (dev_priv->mm.gtt_mapping == NULL) {
2008 ret = -EIO; 2044 ret = -EIO;
2009 goto out_rmmap; 2045 goto out_rmmap;
2010 } 2046 }
2011 2047
2012 /* Set up a WC MTRR for non-PAT systems. This is more common than 2048 i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
2013 * one would think, because the kernel disables PAT on first
2014 * generation Core chips because WC PAT gets overridden by a UC
2015 * MTRR if present. Even if a UC MTRR isn't present.
2016 */
2017 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
2018 agp_size,
2019 MTRR_TYPE_WRCOMB, 1);
2020 if (dev_priv->mm.gtt_mtrr < 0) {
2021 DRM_INFO("MTRR allocation failed. Graphics "
2022 "performance may suffer.\n");
2023 }
2024 2049
2025 /* The i915 workqueue is primarily used for batched retirement of 2050 /* The i915 workqueue is primarily used for batched retirement of
2026 * requests (and thus managing bo) once the task has been completed 2051 * requests (and thus managing bo) once the task has been completed
@@ -2272,7 +2297,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
2272 * mode setting case, we want to restore the kernel's initial mode (just 2297 * mode setting case, we want to restore the kernel's initial mode (just
2273 * in case the last client left us in a bad state). 2298 * in case the last client left us in a bad state).
2274 * 2299 *
2275 * Additionally, in the non-mode setting case, we'll tear down the AGP 2300 * Additionally, in the non-mode setting case, we'll tear down the GTT
2276 * and DMA structures, since the kernel won't be using them, and clea 2301 * and DMA structures, since the kernel won't be using them, and clea
2277 * up any GEM state. 2302 * up any GEM state.
2278 */ 2303 */
@@ -2350,16 +2375,10 @@ struct drm_ioctl_desc i915_ioctls[] = {
2350 2375
2351int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); 2376int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
2352 2377
2353/** 2378/*
2354 * Determine if the device really is AGP or not. 2379 * This is really ugly: Because old userspace abused the linux agp interface to
2355 * 2380 * manage the gtt, we need to claim that all intel devices are agp. For
2356 * All Intel graphics chipsets are treated as AGP, even if they are really 2381 * otherwise the drm core refuses to initialize the agp support code.
2357 * PCI-e.
2358 *
2359 * \param dev The device to be tested.
2360 *
2361 * \returns
2362 * A value of 1 is always retured to indictate every i9x5 is AGP.
2363 */ 2382 */
2364int i915_driver_device_is_agp(struct drm_device * dev) 2383int i915_driver_device_is_agp(struct drm_device * dev)
2365{ 2384{
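The pr_fmt() define added at the top of i915_dma.c is what lets the switcheroo messages drop their hand-written "i915: " prefix: pr_info() and friends paste pr_fmt(fmt) in front of the format string at compile time. A sketch of the mechanism, not part of the patch (the example_* name is invented):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the printk include */
    #include <linux/printk.h>

    static void example_switcheroo_message(void)
    {
    	/* Expands to printk(KERN_INFO KBUILD_MODNAME ": " "switched on\n"),
    	 * i.e. "i915: switched on" when built into i915.ko. */
    	pr_info("switched on\n");
    }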
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index dfa55e7478fb..c33b0a41a73d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -84,6 +84,12 @@ MODULE_PARM_DESC(lvds_downclock,
84 "Use panel (LVDS/eDP) downclocking for power savings " 84 "Use panel (LVDS/eDP) downclocking for power savings "
85 "(default: false)"); 85 "(default: false)");
86 86
87int i915_lvds_channel_mode __read_mostly;
88module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
89MODULE_PARM_DESC(lvds_channel_mode,
90 "Specify LVDS channel mode "
91 "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
92
87int i915_panel_use_ssc __read_mostly = -1; 93int i915_panel_use_ssc __read_mostly = -1;
88module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 94module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
89MODULE_PARM_DESC(lvds_use_ssc, 95MODULE_PARM_DESC(lvds_use_ssc,
@@ -93,8 +99,8 @@ MODULE_PARM_DESC(lvds_use_ssc,
93int i915_vbt_sdvo_panel_type __read_mostly = -1; 99int i915_vbt_sdvo_panel_type __read_mostly = -1;
94module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); 100module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
95MODULE_PARM_DESC(vbt_sdvo_panel_type, 101MODULE_PARM_DESC(vbt_sdvo_panel_type,
96 "Override selection of SDVO panel mode in the VBT " 102 "Override/Ignore selection of SDVO panel mode in the VBT "
97 "(default: auto)"); 103 "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
98 104
99static bool i915_try_reset __read_mostly = true; 105static bool i915_try_reset __read_mostly = true;
100module_param_named(reset, i915_try_reset, bool, 0600); 106module_param_named(reset, i915_try_reset, bool, 0600);
@@ -209,6 +215,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
209 .gen = 5, 215 .gen = 5,
210 .need_gfx_hws = 1, .has_hotplug = 1, 216 .need_gfx_hws = 1, .has_hotplug = 1,
211 .has_bsd_ring = 1, 217 .has_bsd_ring = 1,
218 .has_pch_split = 1,
212}; 219};
213 220
214static const struct intel_device_info intel_ironlake_m_info = { 221static const struct intel_device_info intel_ironlake_m_info = {
@@ -216,6 +223,7 @@ static const struct intel_device_info intel_ironlake_m_info = {
216 .need_gfx_hws = 1, .has_hotplug = 1, 223 .need_gfx_hws = 1, .has_hotplug = 1,
217 .has_fbc = 1, 224 .has_fbc = 1,
218 .has_bsd_ring = 1, 225 .has_bsd_ring = 1,
226 .has_pch_split = 1,
219}; 227};
220 228
221static const struct intel_device_info intel_sandybridge_d_info = { 229static const struct intel_device_info intel_sandybridge_d_info = {
@@ -224,6 +232,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
224 .has_bsd_ring = 1, 232 .has_bsd_ring = 1,
225 .has_blt_ring = 1, 233 .has_blt_ring = 1,
226 .has_llc = 1, 234 .has_llc = 1,
235 .has_pch_split = 1,
227}; 236};
228 237
229static const struct intel_device_info intel_sandybridge_m_info = { 238static const struct intel_device_info intel_sandybridge_m_info = {
@@ -233,6 +242,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
233 .has_bsd_ring = 1, 242 .has_bsd_ring = 1,
234 .has_blt_ring = 1, 243 .has_blt_ring = 1,
235 .has_llc = 1, 244 .has_llc = 1,
245 .has_pch_split = 1,
236}; 246};
237 247
238static const struct intel_device_info intel_ivybridge_d_info = { 248static const struct intel_device_info intel_ivybridge_d_info = {
@@ -241,6 +251,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
241 .has_bsd_ring = 1, 251 .has_bsd_ring = 1,
242 .has_blt_ring = 1, 252 .has_blt_ring = 1,
243 .has_llc = 1, 253 .has_llc = 1,
254 .has_pch_split = 1,
244}; 255};
245 256
246static const struct intel_device_info intel_ivybridge_m_info = { 257static const struct intel_device_info intel_ivybridge_m_info = {
@@ -250,6 +261,43 @@ static const struct intel_device_info intel_ivybridge_m_info = {
250 .has_bsd_ring = 1, 261 .has_bsd_ring = 1,
251 .has_blt_ring = 1, 262 .has_blt_ring = 1,
252 .has_llc = 1, 263 .has_llc = 1,
264 .has_pch_split = 1,
265};
266
267static const struct intel_device_info intel_valleyview_m_info = {
268 .gen = 7, .is_mobile = 1,
269 .need_gfx_hws = 1, .has_hotplug = 1,
270 .has_fbc = 0,
271 .has_bsd_ring = 1,
272 .has_blt_ring = 1,
273 .is_valleyview = 1,
274};
275
276static const struct intel_device_info intel_valleyview_d_info = {
277 .gen = 7,
278 .need_gfx_hws = 1, .has_hotplug = 1,
279 .has_fbc = 0,
280 .has_bsd_ring = 1,
281 .has_blt_ring = 1,
282 .is_valleyview = 1,
283};
284
285static const struct intel_device_info intel_haswell_d_info = {
286 .is_haswell = 1, .gen = 7,
287 .need_gfx_hws = 1, .has_hotplug = 1,
288 .has_bsd_ring = 1,
289 .has_blt_ring = 1,
290 .has_llc = 1,
291 .has_pch_split = 1,
292};
293
294static const struct intel_device_info intel_haswell_m_info = {
295 .is_haswell = 1, .gen = 7, .is_mobile = 1,
296 .need_gfx_hws = 1, .has_hotplug = 1,
297 .has_bsd_ring = 1,
298 .has_blt_ring = 1,
299 .has_llc = 1,
300 .has_pch_split = 1,
253}; 301};
254 302
255static const struct pci_device_id pciidlist[] = { /* aka */ 303static const struct pci_device_id pciidlist[] = { /* aka */
@@ -308,6 +356,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
308#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 356#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
309#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 357#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
310#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 358#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
359#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
311 360
312void intel_detect_pch(struct drm_device *dev) 361void intel_detect_pch(struct drm_device *dev)
313{ 362{
@@ -336,6 +385,9 @@ void intel_detect_pch(struct drm_device *dev)
336 /* PantherPoint is CPT compatible */ 385 /* PantherPoint is CPT compatible */
337 dev_priv->pch_type = PCH_CPT; 386 dev_priv->pch_type = PCH_CPT;
338 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 387 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
388 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
389 dev_priv->pch_type = PCH_LPT;
390 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
339 } 391 }
340 } 392 }
341 pci_dev_put(pch); 393 pci_dev_put(pch);
@@ -446,6 +498,31 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
446 return ret; 498 return ret;
447} 499}
448 500
501void vlv_force_wake_get(struct drm_i915_private *dev_priv)
502{
503 int count;
504
505 count = 0;
506
507 /* Already awake? */
508 if ((I915_READ(0x130094) & 0xa1) == 0xa1)
509 return;
510
511 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
512 POSTING_READ(FORCEWAKE_VLV);
513
514 count = 0;
515 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
516 udelay(10);
517}
518
519void vlv_force_wake_put(struct drm_i915_private *dev_priv)
520{
521 I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
522 /* FIXME: confirm VLV behavior with Punit folks */
523 POSTING_READ(FORCEWAKE_VLV);
524}
525
449static int i915_drm_freeze(struct drm_device *dev) 526static int i915_drm_freeze(struct drm_device *dev)
450{ 527{
451 struct drm_i915_private *dev_priv = dev->dev_private; 528 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -993,6 +1070,13 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
993MODULE_DESCRIPTION(DRIVER_DESC); 1070MODULE_DESCRIPTION(DRIVER_DESC);
994MODULE_LICENSE("GPL and additional rights"); 1071MODULE_LICENSE("GPL and additional rights");
995 1072
1073/* We give fast paths for the really cool registers */
1074#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1075 (((dev_priv)->info->gen >= 6) && \
1076 ((reg) < 0x40000) && \
1077 ((reg) != FORCEWAKE)) && \
1078 (!IS_VALLEYVIEW((dev_priv)->dev))
1079
996#define __i915_read(x, y) \ 1080#define __i915_read(x, y) \
997u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1081u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
998 u##x val = 0; \ 1082 u##x val = 0; \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5fabc6c31fec..92e496afc6f4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -63,6 +63,16 @@ enum plane {
63}; 63};
64#define plane_name(p) ((p) + 'A') 64#define plane_name(p) ((p) + 'A')
65 65
66enum port {
67 PORT_A = 0,
68 PORT_B,
69 PORT_C,
70 PORT_D,
71 PORT_E,
72 I915_MAX_PORTS
73};
74#define port_name(p) ((p) + 'A')
75
66#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 76#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
67 77
68#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 78#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
@@ -255,6 +265,9 @@ struct intel_device_info {
255 u8 is_broadwater:1; 265 u8 is_broadwater:1;
256 u8 is_crestline:1; 266 u8 is_crestline:1;
257 u8 is_ivybridge:1; 267 u8 is_ivybridge:1;
268 u8 is_valleyview:1;
269 u8 has_pch_split:1;
270 u8 is_haswell:1;
258 u8 has_fbc:1; 271 u8 has_fbc:1;
259 u8 has_pipe_cxsr:1; 272 u8 has_pipe_cxsr:1;
260 u8 has_hotplug:1; 273 u8 has_hotplug:1;
@@ -291,10 +304,12 @@ enum no_fbc_reason {
291enum intel_pch { 304enum intel_pch {
292 PCH_IBX, /* Ibexpeak PCH */ 305 PCH_IBX, /* Ibexpeak PCH */
293 PCH_CPT, /* Cougarpoint PCH */ 306 PCH_CPT, /* Cougarpoint PCH */
307 PCH_LPT, /* Lynxpoint PCH */
294}; 308};
295 309
296#define QUIRK_PIPEA_FORCE (1<<0) 310#define QUIRK_PIPEA_FORCE (1<<0)
297#define QUIRK_LVDS_SSC_DISABLE (1<<1) 311#define QUIRK_LVDS_SSC_DISABLE (1<<1)
312#define QUIRK_INVERT_BRIGHTNESS (1<<2)
298 313
299struct intel_fbdev; 314struct intel_fbdev;
300struct intel_fbc_work; 315struct intel_fbc_work;
@@ -302,7 +317,6 @@ struct intel_fbc_work;
302struct intel_gmbus { 317struct intel_gmbus {
303 struct i2c_adapter adapter; 318 struct i2c_adapter adapter;
304 bool force_bit; 319 bool force_bit;
305 bool has_gpio;
306 u32 reg0; 320 u32 reg0;
307 u32 gpio_reg; 321 u32 gpio_reg;
308 struct i2c_algo_bit_data bit_algo; 322 struct i2c_algo_bit_data bit_algo;
@@ -326,12 +340,17 @@ typedef struct drm_i915_private {
326 /** gt_lock is also taken in irq contexts. */ 340 /** gt_lock is also taken in irq contexts. */
327 struct spinlock gt_lock; 341 struct spinlock gt_lock;
328 342
329 struct intel_gmbus *gmbus; 343 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
330 344
331 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 345 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
332 * controller on different i2c buses. */ 346 * controller on different i2c buses. */
333 struct mutex gmbus_mutex; 347 struct mutex gmbus_mutex;
334 348
349 /**
350 * Base address of the gmbus and gpio block.
351 */
352 uint32_t gpio_mmio_base;
353
335 struct pci_dev *bridge_dev; 354 struct pci_dev *bridge_dev;
336 struct intel_ring_buffer ring[I915_NUM_RINGS]; 355 struct intel_ring_buffer ring[I915_NUM_RINGS];
337 uint32_t next_seqno; 356 uint32_t next_seqno;
@@ -354,6 +373,10 @@ typedef struct drm_i915_private {
354 373
355 /* protects the irq masks */ 374 /* protects the irq masks */
356 spinlock_t irq_lock; 375 spinlock_t irq_lock;
376
377 /* DPIO indirect register protection */
378 spinlock_t dpio_lock;
379
357 /** Cached value of IMR to avoid reads in updating the bitfield */ 380 /** Cached value of IMR to avoid reads in updating the bitfield */
358 u32 pipestat[2]; 381 u32 pipestat[2];
359 u32 irq_mask; 382 u32 irq_mask;
@@ -405,6 +428,8 @@ typedef struct drm_i915_private {
405 unsigned int lvds_use_ssc:1; 428 unsigned int lvds_use_ssc:1;
406 unsigned int display_clock_mode:1; 429 unsigned int display_clock_mode:1;
407 int lvds_ssc_freq; 430 int lvds_ssc_freq;
431 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
432 unsigned int lvds_val; /* used for checking LVDS channel mode */
408 struct { 433 struct {
409 int rate; 434 int rate;
410 int lanes; 435 int lanes;
@@ -881,6 +906,7 @@ struct drm_i915_gem_object {
881 unsigned int cache_level:2; 906 unsigned int cache_level:2;
882 907
883 unsigned int has_aliasing_ppgtt_mapping:1; 908 unsigned int has_aliasing_ppgtt_mapping:1;
909 unsigned int has_global_gtt_mapping:1;
884 910
885 struct page **pages; 911 struct page **pages;
886 912
@@ -918,13 +944,6 @@ struct drm_i915_gem_object {
918 /** Record of address bit 17 of each page at last unbind. */ 944 /** Record of address bit 17 of each page at last unbind. */
919 unsigned long *bit_17; 945 unsigned long *bit_17;
920 946
921
922 /**
923 * If present, while GEM_DOMAIN_CPU is in the read domain this array
924 * flags which individual pages are valid.
925 */
926 uint8_t *page_cpu_valid;
927
928 /** User space pin count and filp owning the pin */ 947 /** User space pin count and filp owning the pin */
929 uint32_t user_pin_count; 948 uint32_t user_pin_count;
930 struct drm_file *pin_filp; 949 struct drm_file *pin_filp;
@@ -1001,6 +1020,8 @@ struct drm_i915_file_private {
1001#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1020#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1002#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1021#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1003#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1022#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1023#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1024#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1004#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1025#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1005 1026
1006/* 1027/*
@@ -1044,10 +1065,11 @@ struct drm_i915_file_private {
1044#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1065#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1045#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1066#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1046 1067
1047#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) 1068#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
1048#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1069#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1049 1070
1050#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1071#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1072#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1051#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1073#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1052#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1074#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1053 1075
@@ -1081,6 +1103,7 @@ extern int i915_panel_ignore_lid __read_mostly;
1081extern unsigned int i915_powersave __read_mostly; 1103extern unsigned int i915_powersave __read_mostly;
1082extern int i915_semaphores __read_mostly; 1104extern int i915_semaphores __read_mostly;
1083extern unsigned int i915_lvds_downclock __read_mostly; 1105extern unsigned int i915_lvds_downclock __read_mostly;
1106extern int i915_lvds_channel_mode __read_mostly;
1084extern int i915_panel_use_ssc __read_mostly; 1107extern int i915_panel_use_ssc __read_mostly;
1085extern int i915_vbt_sdvo_panel_type __read_mostly; 1108extern int i915_vbt_sdvo_panel_type __read_mostly;
1086extern int i915_enable_rc6 __read_mostly; 1109extern int i915_enable_rc6 __read_mostly;
@@ -1264,10 +1287,6 @@ int __must_check i915_gem_init_hw(struct drm_device *dev);
1264void i915_gem_init_swizzling(struct drm_device *dev); 1287void i915_gem_init_swizzling(struct drm_device *dev);
1265void i915_gem_init_ppgtt(struct drm_device *dev); 1288void i915_gem_init_ppgtt(struct drm_device *dev);
1266void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1289void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1267void i915_gem_do_init(struct drm_device *dev,
1268 unsigned long start,
1269 unsigned long mappable_end,
1270 unsigned long end);
1271int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire); 1290int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
1272int __must_check i915_gem_idle(struct drm_device *dev); 1291int __must_check i915_gem_idle(struct drm_device *dev);
1273int __must_check i915_add_request(struct intel_ring_buffer *ring, 1292int __must_check i915_add_request(struct intel_ring_buffer *ring,
@@ -1281,6 +1300,8 @@ int __must_check
1281i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1300i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1282 bool write); 1301 bool write);
1283int __must_check 1302int __must_check
1303i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
1304int __must_check
1284i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 1305i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1285 u32 alignment, 1306 u32 alignment,
1286 struct intel_ring_buffer *pipelined); 1307 struct intel_ring_buffer *pipelined);
@@ -1311,10 +1332,15 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
1311 struct drm_i915_gem_object *obj); 1332 struct drm_i915_gem_object *obj);
1312 1333
1313void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1334void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1314int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); 1335int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
1315void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, 1336void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
1316 enum i915_cache_level cache_level); 1337 enum i915_cache_level cache_level);
1317void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1338void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1339void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
1340void i915_gem_init_global_gtt(struct drm_device *dev,
1341 unsigned long start,
1342 unsigned long mappable_end,
1343 unsigned long end);
1318 1344
1319/* i915_gem_evict.c */ 1345/* i915_gem_evict.c */
1320int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1346int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1357,6 +1383,13 @@ extern int i915_restore_state(struct drm_device *dev);
1357/* intel_i2c.c */ 1383/* intel_i2c.c */
1358extern int intel_setup_gmbus(struct drm_device *dev); 1384extern int intel_setup_gmbus(struct drm_device *dev);
1359extern void intel_teardown_gmbus(struct drm_device *dev); 1385extern void intel_teardown_gmbus(struct drm_device *dev);
1386extern inline bool intel_gmbus_is_port_valid(unsigned port)
1387{
1388 return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
1389}
1390
1391extern struct i2c_adapter *intel_gmbus_get_adapter(
1392 struct drm_i915_private *dev_priv, unsigned port);
1360extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); 1393extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
1361extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); 1394extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
1362extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) 1395extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
@@ -1409,6 +1442,9 @@ extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
1409extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1442extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1410extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); 1443extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
1411 1444
1445extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
1446extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
1447
1412/* overlay */ 1448/* overlay */
1413#ifdef CONFIG_DEBUG_FS 1449#ifdef CONFIG_DEBUG_FS
1414extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1450extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1450,12 +1486,6 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1450void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1486void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1451int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1487int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1452 1488
1453/* We give fast paths for the really cool registers */
1454#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1455 (((dev_priv)->info->gen >= 6) && \
1456 ((reg) < 0x40000) && \
1457 ((reg) != FORCEWAKE))
1458
1459#define __i915_read(x, y) \ 1489#define __i915_read(x, y) \
1460 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1490 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
1461 1491
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c65c639f772..b851bd34ca18 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -39,12 +39,6 @@
39static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj); 39static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); 40static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); 41static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43 bool write);
44static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45 uint64_t offset,
46 uint64_t size);
47static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
48static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 42static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
49 unsigned alignment, 43 unsigned alignment,
50 bool map_and_fenceable); 44 bool map_and_fenceable);
@@ -125,25 +119,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
125 return obj->gtt_space && !obj->active && obj->pin_count == 0; 119 return obj->gtt_space && !obj->active && obj->pin_count == 0;
126} 120}
127 121
128void i915_gem_do_init(struct drm_device *dev,
129 unsigned long start,
130 unsigned long mappable_end,
131 unsigned long end)
132{
133 drm_i915_private_t *dev_priv = dev->dev_private;
134
135 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
136
137 dev_priv->mm.gtt_start = start;
138 dev_priv->mm.gtt_mappable_end = mappable_end;
139 dev_priv->mm.gtt_end = end;
140 dev_priv->mm.gtt_total = end - start;
141 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
142
143 /* Take over this portion of the GTT */
144 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
145}
146
147int 122int
148i915_gem_init_ioctl(struct drm_device *dev, void *data, 123i915_gem_init_ioctl(struct drm_device *dev, void *data,
149 struct drm_file *file) 124 struct drm_file *file)
@@ -154,8 +129,13 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
154 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) 129 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
155 return -EINVAL; 130 return -EINVAL;
156 131
132 /* GEM with user mode setting was never supported on ilk and later. */
133 if (INTEL_INFO(dev)->gen >= 5)
134 return -ENODEV;
135
157 mutex_lock(&dev->struct_mutex); 136 mutex_lock(&dev->struct_mutex);
158 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); 137 i915_gem_init_global_gtt(dev, args->gtt_start,
138 args->gtt_end, args->gtt_end);
159 mutex_unlock(&dev->struct_mutex); 139 mutex_unlock(&dev->struct_mutex);
160 140
161 return 0; 141 return 0;
@@ -259,66 +239,6 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
259 obj->tiling_mode != I915_TILING_NONE; 239 obj->tiling_mode != I915_TILING_NONE;
260} 240}
261 241
262/**
263 * This is the fast shmem pread path, which attempts to copy_from_user directly
264 * from the backing pages of the object to the user's address space. On a
265 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
266 */
267static int
268i915_gem_shmem_pread_fast(struct drm_device *dev,
269 struct drm_i915_gem_object *obj,
270 struct drm_i915_gem_pread *args,
271 struct drm_file *file)
272{
273 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
274 ssize_t remain;
275 loff_t offset;
276 char __user *user_data;
277 int page_offset, page_length;
278
279 user_data = (char __user *) (uintptr_t) args->data_ptr;
280 remain = args->size;
281
282 offset = args->offset;
283
284 while (remain > 0) {
285 struct page *page;
286 char *vaddr;
287 int ret;
288
289 /* Operation in this page
290 *
291 * page_offset = offset within page
292 * page_length = bytes to copy for this page
293 */
294 page_offset = offset_in_page(offset);
295 page_length = remain;
296 if ((page_offset + remain) > PAGE_SIZE)
297 page_length = PAGE_SIZE - page_offset;
298
299 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
300 if (IS_ERR(page))
301 return PTR_ERR(page);
302
303 vaddr = kmap_atomic(page);
304 ret = __copy_to_user_inatomic(user_data,
305 vaddr + page_offset,
306 page_length);
307 kunmap_atomic(vaddr);
308
309 mark_page_accessed(page);
310 page_cache_release(page);
311 if (ret)
312 return -EFAULT;
313
314 remain -= page_length;
315 user_data += page_length;
316 offset += page_length;
317 }
318
319 return 0;
320}
321
322static inline int 242static inline int
323__copy_to_user_swizzled(char __user *cpu_vaddr, 243__copy_to_user_swizzled(char __user *cpu_vaddr,
324 const char *gpu_vaddr, int gpu_offset, 244 const char *gpu_vaddr, int gpu_offset,
@@ -371,37 +291,121 @@ __copy_from_user_swizzled(char __user *gpu_vaddr, int gpu_offset,
371 return 0; 291 return 0;
372} 292}
373 293
374/** 294/* Per-page copy function for the shmem pread fastpath.
375 * This is the fallback shmem pread path, which allocates temporary storage 295 * Flushes invalid cachelines before reading the target if
376 * in kernel space to copy_to_user into outside of the struct_mutex, so we 296 * needs_clflush is set. */
377 * can copy out of the object's backing pages while holding the struct mutex
378 * and not take page faults.
379 */
380static int 297static int
381i915_gem_shmem_pread_slow(struct drm_device *dev, 298shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
382 struct drm_i915_gem_object *obj, 299 char __user *user_data,
383 struct drm_i915_gem_pread *args, 300 bool page_do_bit17_swizzling, bool needs_clflush)
384 struct drm_file *file) 301{
302 char *vaddr;
303 int ret;
304
305 if (unlikely(page_do_bit17_swizzling))
306 return -EINVAL;
307
308 vaddr = kmap_atomic(page);
309 if (needs_clflush)
310 drm_clflush_virt_range(vaddr + shmem_page_offset,
311 page_length);
312 ret = __copy_to_user_inatomic(user_data,
313 vaddr + shmem_page_offset,
314 page_length);
315 kunmap_atomic(vaddr);
316
317 return ret;
318}
319
320static void
321shmem_clflush_swizzled_range(char *addr, unsigned long length,
322 bool swizzled)
323{
324 if (unlikely(swizzled)) {
325 unsigned long start = (unsigned long) addr;
326 unsigned long end = (unsigned long) addr + length;
327
328 /* For swizzling simply ensure that we always flush both
329 * channels. Lame, but simple and it works. Swizzled
330 * pwrite/pread is far from a hotpath - current userspace
331 * doesn't use it at all. */
332 start = round_down(start, 128);
333 end = round_up(end, 128);
334
335 drm_clflush_virt_range((void *)start, end - start);
336 } else {
337 drm_clflush_virt_range(addr, length);
338 }
339
340}
341
342/* Only difference to the fast-path function is that this can handle bit17
343 * and uses non-atomic copy and kmap functions. */
344static int
345shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
346 char __user *user_data,
347 bool page_do_bit17_swizzling, bool needs_clflush)
348{
349 char *vaddr;
350 int ret;
351
352 vaddr = kmap(page);
353 if (needs_clflush)
354 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
355 page_length,
356 page_do_bit17_swizzling);
357
358 if (page_do_bit17_swizzling)
359 ret = __copy_to_user_swizzled(user_data,
360 vaddr, shmem_page_offset,
361 page_length);
362 else
363 ret = __copy_to_user(user_data,
364 vaddr + shmem_page_offset,
365 page_length);
366 kunmap(page);
367
368 return ret;
369}
370
371static int
372i915_gem_shmem_pread(struct drm_device *dev,
373 struct drm_i915_gem_object *obj,
374 struct drm_i915_gem_pread *args,
375 struct drm_file *file)
385{ 376{
386 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 377 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
387 char __user *user_data; 378 char __user *user_data;
388 ssize_t remain; 379 ssize_t remain;
389 loff_t offset; 380 loff_t offset;
390 int shmem_page_offset, page_length, ret; 381 int shmem_page_offset, page_length, ret = 0;
391 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 382 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
383 int hit_slowpath = 0;
384 int prefaulted = 0;
385 int needs_clflush = 0;
386 int release_page;
392 387
393 user_data = (char __user *) (uintptr_t) args->data_ptr; 388 user_data = (char __user *) (uintptr_t) args->data_ptr;
394 remain = args->size; 389 remain = args->size;
395 390
396 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 391 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
397 392
398 offset = args->offset; 393 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
394 /* If we're not in the cpu read domain, set ourself into the gtt
395 * read domain and manually flush cachelines (if required). This
396 * optimizes for the case when the gpu will dirty the data
397 * anyway again before the next pread happens. */
398 if (obj->cache_level == I915_CACHE_NONE)
399 needs_clflush = 1;
400 ret = i915_gem_object_set_to_gtt_domain(obj, false);
401 if (ret)
402 return ret;
403 }
399 404
400 mutex_unlock(&dev->struct_mutex); 405 offset = args->offset;
401 406
402 while (remain > 0) { 407 while (remain > 0) {
403 struct page *page; 408 struct page *page;
404 char *vaddr;
405 409
406 /* Operation in this page 410 /* Operation in this page
407 * 411 *
@@ -413,28 +417,51 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
413 if ((shmem_page_offset + page_length) > PAGE_SIZE) 417 if ((shmem_page_offset + page_length) > PAGE_SIZE)
414 page_length = PAGE_SIZE - shmem_page_offset; 418 page_length = PAGE_SIZE - shmem_page_offset;
415 419
416 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 420 if (obj->pages) {
417 if (IS_ERR(page)) { 421 page = obj->pages[offset >> PAGE_SHIFT];
418 ret = PTR_ERR(page); 422 release_page = 0;
419 goto out; 423 } else {
424 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
425 if (IS_ERR(page)) {
426 ret = PTR_ERR(page);
427 goto out;
428 }
429 release_page = 1;
420 } 430 }
421 431
422 page_do_bit17_swizzling = obj_do_bit17_swizzling && 432 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
423 (page_to_phys(page) & (1 << 17)) != 0; 433 (page_to_phys(page) & (1 << 17)) != 0;
424 434
425 vaddr = kmap(page); 435 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
426 if (page_do_bit17_swizzling) 436 user_data, page_do_bit17_swizzling,
427 ret = __copy_to_user_swizzled(user_data, 437 needs_clflush);
428 vaddr, shmem_page_offset, 438 if (ret == 0)
429 page_length); 439 goto next_page;
430 else
431 ret = __copy_to_user(user_data,
432 vaddr + shmem_page_offset,
433 page_length);
434 kunmap(page);
435 440
436 mark_page_accessed(page); 441 hit_slowpath = 1;
442 page_cache_get(page);
443 mutex_unlock(&dev->struct_mutex);
444
445 if (!prefaulted) {
446 ret = fault_in_multipages_writeable(user_data, remain);
447 /* Userspace is tricking us, but we've already clobbered
448 * its pages with the prefault and promised to write the
449 * data up to the first fault. Hence ignore any errors
450 * and just continue. */
451 (void)ret;
452 prefaulted = 1;
453 }
454
455 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
456 user_data, page_do_bit17_swizzling,
457 needs_clflush);
458
459 mutex_lock(&dev->struct_mutex);
437 page_cache_release(page); 460 page_cache_release(page);
461next_page:
462 mark_page_accessed(page);
463 if (release_page)
464 page_cache_release(page);
438 465
439 if (ret) { 466 if (ret) {
440 ret = -EFAULT; 467 ret = -EFAULT;
@@ -447,10 +474,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
447 } 474 }
448 475
449out: 476out:
450 mutex_lock(&dev->struct_mutex); 477 if (hit_slowpath) {
451 /* Fixup: Kill any reinstated backing storage pages */ 478 /* Fixup: Kill any reinstated backing storage pages */
452 if (obj->madv == __I915_MADV_PURGED) 479 if (obj->madv == __I915_MADV_PURGED)
453 i915_gem_object_truncate(obj); 480 i915_gem_object_truncate(obj);
481 }
454 482
455 return ret; 483 return ret;
456} 484}
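The restructured pread above keeps a per-page fast path that copies with kmap_atomic()/__copy_to_user_inatomic() while struct_mutex is held; only when that non-faulting copy fails does it take the slow path: drop the lock, prefault the remaining user buffer once (any error from the prefault is deliberately ignored), and redo the page with a copy that may sleep. A minimal user-space sketch of that control flow, assuming hypothetical try_copy_atomic() and copy_may_fault() helpers in place of the kernel copy routines:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for __copy_to_user_inatomic(): it must not fault, so it is
 * allowed to fail.  Here the very first page pretends to fail so the
 * slow path gets exercised. */
static bool try_copy_atomic(char *dst, const char *src, size_t len)
{
    static bool first = true;

    if (first) {
        first = false;
        return false;
    }
    memcpy(dst, src, len);
    return true;
}

/* Stand-in for __copy_to_user(): may fault, so the lock must be dropped. */
static void copy_may_fault(char *dst, const char *src, size_t len)
{
    memcpy(dst, src, len);
}

static void pread_like(char *user, const char *obj, size_t size)
{
    bool prefaulted = false;
    size_t done = 0;

    pthread_mutex_lock(&obj_lock);
    while (done < size) {
        size_t len = size - done;

        if (len > PAGE_SIZE)
            len = PAGE_SIZE;

        if (try_copy_atomic(user + done, obj + done, len)) {
            done += len;            /* fast path, lock never dropped */
            continue;
        }

        /* Slow path: release the lock so page faults can be serviced. */
        pthread_mutex_unlock(&obj_lock);
        if (!prefaulted) {
            size_t i;

            /* Touch the rest of the destination once; errors would be
             * ignored, as with fault_in_multipages_writeable(). */
            for (i = done; i < size; i += PAGE_SIZE)
                user[i] = 0;
            prefaulted = true;
        }
        copy_may_fault(user + done, obj + done, len);
        pthread_mutex_lock(&obj_lock);
        done += len;
    }
    pthread_mutex_unlock(&obj_lock);
}

int main(void)
{
    size_t size = 3 * PAGE_SIZE;
    char *src = malloc(size), *dst = malloc(size);

    memset(src, 0xaa, size);
    pread_like(dst, src, size);
    printf("copies match: %d\n", memcmp(src, dst, size) == 0);
    free(src);
    free(dst);
    return 0;
}

The same hit_slowpath bookkeeping lets the real function skip the purged-pages fixup entirely when every page went through the fast path.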
@@ -476,11 +504,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
476 args->size)) 504 args->size))
477 return -EFAULT; 505 return -EFAULT;
478 506
479 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
480 args->size);
481 if (ret)
482 return -EFAULT;
483
484 ret = i915_mutex_lock_interruptible(dev); 507 ret = i915_mutex_lock_interruptible(dev);
485 if (ret) 508 if (ret)
486 return ret; 509 return ret;
@@ -500,17 +523,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
500 523
501 trace_i915_gem_object_pread(obj, args->offset, args->size); 524 trace_i915_gem_object_pread(obj, args->offset, args->size);
502 525
503 ret = i915_gem_object_set_cpu_read_domain_range(obj, 526 ret = i915_gem_shmem_pread(dev, obj, args, file);
504 args->offset,
505 args->size);
506 if (ret)
507 goto out;
508
509 ret = -EFAULT;
510 if (!i915_gem_object_needs_bit17_swizzle(obj))
511 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
512 if (ret == -EFAULT)
513 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
514 527
515out: 528out:
516 drm_gem_object_unreference(&obj->base); 529 drm_gem_object_unreference(&obj->base);
@@ -539,30 +552,6 @@ fast_user_write(struct io_mapping *mapping,
539 return unwritten; 552 return unwritten;
540} 553}
541 554
542/* Here's the write path which can sleep for
543 * page faults
544 */
545
546static inline void
547slow_kernel_write(struct io_mapping *mapping,
548 loff_t gtt_base, int gtt_offset,
549 struct page *user_page, int user_offset,
550 int length)
551{
552 char __iomem *dst_vaddr;
553 char *src_vaddr;
554
555 dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
556 src_vaddr = kmap(user_page);
557
558 memcpy_toio(dst_vaddr + gtt_offset,
559 src_vaddr + user_offset,
560 length);
561
562 kunmap(user_page);
563 io_mapping_unmap(dst_vaddr);
564}
565
566/** 555/**
567 * This is the fast pwrite path, where we copy the data directly from the 556 * This is the fast pwrite path, where we copy the data directly from the
568 * user into the GTT, uncached. 557 * user into the GTT, uncached.
@@ -577,7 +566,19 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
577 ssize_t remain; 566 ssize_t remain;
578 loff_t offset, page_base; 567 loff_t offset, page_base;
579 char __user *user_data; 568 char __user *user_data;
580 int page_offset, page_length; 569 int page_offset, page_length, ret;
570
571 ret = i915_gem_object_pin(obj, 0, true);
572 if (ret)
573 goto out;
574
575 ret = i915_gem_object_set_to_gtt_domain(obj, true);
576 if (ret)
577 goto out_unpin;
578
579 ret = i915_gem_object_put_fence(obj);
580 if (ret)
581 goto out_unpin;
581 582
582 user_data = (char __user *) (uintptr_t) args->data_ptr; 583 user_data = (char __user *) (uintptr_t) args->data_ptr;
583 remain = args->size; 584 remain = args->size;
@@ -602,214 +603,133 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
602 * retry in the slow path. 603 * retry in the slow path.
603 */ 604 */
604 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 605 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
605 page_offset, user_data, page_length)) 606 page_offset, user_data, page_length)) {
606 return -EFAULT; 607 ret = -EFAULT;
608 goto out_unpin;
609 }
607 610
608 remain -= page_length; 611 remain -= page_length;
609 user_data += page_length; 612 user_data += page_length;
610 offset += page_length; 613 offset += page_length;
611 } 614 }
612 615
613 return 0; 616out_unpin:
617 i915_gem_object_unpin(obj);
618out:
619 return ret;
614} 620}
615 621
616/** 622/* Per-page copy function for the shmem pwrite fastpath.
617 * This is the fallback GTT pwrite path, which uses get_user_pages to pin 623 * Flushes invalid cachelines before writing to the target if
618 * the memory and maps it using kmap_atomic for copying. 624 * needs_clflush_before is set and flushes out any written cachelines after
619 * 625 * writing if needs_clflush is set. */
620 * This code resulted in x11perf -rgb10text consuming about 10% more CPU
621 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
622 */
623static int 626static int
624i915_gem_gtt_pwrite_slow(struct drm_device *dev, 627shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
625 struct drm_i915_gem_object *obj, 628 char __user *user_data,
626 struct drm_i915_gem_pwrite *args, 629 bool page_do_bit17_swizzling,
627 struct drm_file *file) 630 bool needs_clflush_before,
631 bool needs_clflush_after)
628{ 632{
629 drm_i915_private_t *dev_priv = dev->dev_private; 633 char *vaddr;
630 ssize_t remain;
631 loff_t gtt_page_base, offset;
632 loff_t first_data_page, last_data_page, num_pages;
633 loff_t pinned_pages, i;
634 struct page **user_pages;
635 struct mm_struct *mm = current->mm;
636 int gtt_page_offset, data_page_offset, data_page_index, page_length;
637 int ret; 634 int ret;
638 uint64_t data_ptr = args->data_ptr;
639
640 remain = args->size;
641
642 /* Pin the user pages containing the data. We can't fault while
643 * holding the struct mutex, and all of the pwrite implementations
644 * want to hold it while dereferencing the user data.
645 */
646 first_data_page = data_ptr / PAGE_SIZE;
647 last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
648 num_pages = last_data_page - first_data_page + 1;
649
650 user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
651 if (user_pages == NULL)
652 return -ENOMEM;
653
654 mutex_unlock(&dev->struct_mutex);
655 down_read(&mm->mmap_sem);
656 pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
657 num_pages, 0, 0, user_pages, NULL);
658 up_read(&mm->mmap_sem);
659 mutex_lock(&dev->struct_mutex);
660 if (pinned_pages < num_pages) {
661 ret = -EFAULT;
662 goto out_unpin_pages;
663 }
664
665 ret = i915_gem_object_set_to_gtt_domain(obj, true);
666 if (ret)
667 goto out_unpin_pages;
668
669 ret = i915_gem_object_put_fence(obj);
670 if (ret)
671 goto out_unpin_pages;
672
673 offset = obj->gtt_offset + args->offset;
674 635
675 while (remain > 0) { 636 if (unlikely(page_do_bit17_swizzling))
676 /* Operation in this page 637 return -EINVAL;
677 *
678 * gtt_page_base = page offset within aperture
679 * gtt_page_offset = offset within page in aperture
680 * data_page_index = page number in get_user_pages return
681 * data_page_offset = offset with data_page_index page.
682 * page_length = bytes to copy for this page
683 */
684 gtt_page_base = offset & PAGE_MASK;
685 gtt_page_offset = offset_in_page(offset);
686 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
687 data_page_offset = offset_in_page(data_ptr);
688
689 page_length = remain;
690 if ((gtt_page_offset + page_length) > PAGE_SIZE)
691 page_length = PAGE_SIZE - gtt_page_offset;
692 if ((data_page_offset + page_length) > PAGE_SIZE)
693 page_length = PAGE_SIZE - data_page_offset;
694
695 slow_kernel_write(dev_priv->mm.gtt_mapping,
696 gtt_page_base, gtt_page_offset,
697 user_pages[data_page_index],
698 data_page_offset,
699 page_length);
700
701 remain -= page_length;
702 offset += page_length;
703 data_ptr += page_length;
704 }
705 638
706out_unpin_pages: 639 vaddr = kmap_atomic(page);
707 for (i = 0; i < pinned_pages; i++) 640 if (needs_clflush_before)
708 page_cache_release(user_pages[i]); 641 drm_clflush_virt_range(vaddr + shmem_page_offset,
709 drm_free_large(user_pages); 642 page_length);
643 ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
644 user_data,
645 page_length);
646 if (needs_clflush_after)
647 drm_clflush_virt_range(vaddr + shmem_page_offset,
648 page_length);
649 kunmap_atomic(vaddr);
710 650
711 return ret; 651 return ret;
712} 652}
713 653
714/** 654/* Only difference to the fast-path function is that this can handle bit17
715 * This is the fast shmem pwrite path, which attempts to directly 655 * and uses non-atomic copy and kmap functions. */
716 * copy_from_user into the kmapped pages backing the object.
717 */
718static int 656static int
719i915_gem_shmem_pwrite_fast(struct drm_device *dev, 657shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
720 struct drm_i915_gem_object *obj, 658 char __user *user_data,
721 struct drm_i915_gem_pwrite *args, 659 bool page_do_bit17_swizzling,
722 struct drm_file *file) 660 bool needs_clflush_before,
661 bool needs_clflush_after)
723{ 662{
724 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 663 char *vaddr;
725 ssize_t remain; 664 int ret;
726 loff_t offset;
727 char __user *user_data;
728 int page_offset, page_length;
729
730 user_data = (char __user *) (uintptr_t) args->data_ptr;
731 remain = args->size;
732
733 offset = args->offset;
734 obj->dirty = 1;
735
736 while (remain > 0) {
737 struct page *page;
738 char *vaddr;
739 int ret;
740
741 /* Operation in this page
742 *
743 * page_offset = offset within page
744 * page_length = bytes to copy for this page
745 */
746 page_offset = offset_in_page(offset);
747 page_length = remain;
748 if ((page_offset + remain) > PAGE_SIZE)
749 page_length = PAGE_SIZE - page_offset;
750
751 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
752 if (IS_ERR(page))
753 return PTR_ERR(page);
754 665
755 vaddr = kmap_atomic(page); 666 vaddr = kmap(page);
756 ret = __copy_from_user_inatomic(vaddr + page_offset, 667 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
668 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
669 page_length,
670 page_do_bit17_swizzling);
671 if (page_do_bit17_swizzling)
672 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
757 user_data, 673 user_data,
758 page_length); 674 page_length);
759 kunmap_atomic(vaddr); 675 else
760 676 ret = __copy_from_user(vaddr + shmem_page_offset,
761 set_page_dirty(page); 677 user_data,
762 mark_page_accessed(page); 678 page_length);
763 page_cache_release(page); 679 if (needs_clflush_after)
764 680 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
765 /* If we get a fault while copying data, then (presumably) our 681 page_length,
766 * source page isn't available. Return the error and we'll 682 page_do_bit17_swizzling);
767 * retry in the slow path. 683 kunmap(page);
768 */
769 if (ret)
770 return -EFAULT;
771
772 remain -= page_length;
773 user_data += page_length;
774 offset += page_length;
775 }
776 684
777 return 0; 685 return ret;
778} 686}
779 687
780/**
781 * This is the fallback shmem pwrite path, which uses get_user_pages to pin
782 * the memory and maps it using kmap_atomic for copying.
783 *
784 * This avoids taking mmap_sem for faulting on the user's address while the
785 * struct_mutex is held.
786 */
787static int 688static int
788i915_gem_shmem_pwrite_slow(struct drm_device *dev, 689i915_gem_shmem_pwrite(struct drm_device *dev,
789 struct drm_i915_gem_object *obj, 690 struct drm_i915_gem_object *obj,
790 struct drm_i915_gem_pwrite *args, 691 struct drm_i915_gem_pwrite *args,
791 struct drm_file *file) 692 struct drm_file *file)
792{ 693{
793 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 694 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
794 ssize_t remain; 695 ssize_t remain;
795 loff_t offset; 696 loff_t offset;
796 char __user *user_data; 697 char __user *user_data;
797 int shmem_page_offset, page_length, ret; 698 int shmem_page_offset, page_length, ret = 0;
798 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 699 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
700 int hit_slowpath = 0;
701 int needs_clflush_after = 0;
702 int needs_clflush_before = 0;
703 int release_page;
799 704
800 user_data = (char __user *) (uintptr_t) args->data_ptr; 705 user_data = (char __user *) (uintptr_t) args->data_ptr;
801 remain = args->size; 706 remain = args->size;
802 707
803 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 708 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
804 709
710 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
711 /* If we're not in the cpu write domain, set ourself into the gtt
712 * write domain and manually flush cachelines (if required). This
713 * optimizes for the case when the gpu will use the data
714 * right away and we therefore have to clflush anyway. */
715 if (obj->cache_level == I915_CACHE_NONE)
716 needs_clflush_after = 1;
717 ret = i915_gem_object_set_to_gtt_domain(obj, true);
718 if (ret)
719 return ret;
720 }
721 /* Same trick applies for invalidate partially written cachelines before
722 * writing. */
723 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
724 && obj->cache_level == I915_CACHE_NONE)
725 needs_clflush_before = 1;
726
805 offset = args->offset; 727 offset = args->offset;
806 obj->dirty = 1; 728 obj->dirty = 1;
807 729
808 mutex_unlock(&dev->struct_mutex);
809
810 while (remain > 0) { 730 while (remain > 0) {
811 struct page *page; 731 struct page *page;
812 char *vaddr; 732 int partial_cacheline_write;
813 733
814 /* Operation in this page 734 /* Operation in this page
815 * 735 *
@@ -822,29 +742,51 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
822 if ((shmem_page_offset + page_length) > PAGE_SIZE) 742 if ((shmem_page_offset + page_length) > PAGE_SIZE)
823 page_length = PAGE_SIZE - shmem_page_offset; 743 page_length = PAGE_SIZE - shmem_page_offset;
824 744
825 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); 745 /* If we don't overwrite a cacheline completely we need to be
826 if (IS_ERR(page)) { 746 * careful to have up-to-date data by first clflushing. Don't
827 ret = PTR_ERR(page); 747 * overcomplicate things and flush the entire patch. */
828 goto out; 748 partial_cacheline_write = needs_clflush_before &&
749 ((shmem_page_offset | page_length)
750 & (boot_cpu_data.x86_clflush_size - 1));
751
752 if (obj->pages) {
753 page = obj->pages[offset >> PAGE_SHIFT];
754 release_page = 0;
755 } else {
756 page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
757 if (IS_ERR(page)) {
758 ret = PTR_ERR(page);
759 goto out;
760 }
761 release_page = 1;
829 } 762 }
830 763
831 page_do_bit17_swizzling = obj_do_bit17_swizzling && 764 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
832 (page_to_phys(page) & (1 << 17)) != 0; 765 (page_to_phys(page) & (1 << 17)) != 0;
833 766
834 vaddr = kmap(page); 767 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
835 if (page_do_bit17_swizzling) 768 user_data, page_do_bit17_swizzling,
836 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset, 769 partial_cacheline_write,
837 user_data, 770 needs_clflush_after);
838 page_length); 771 if (ret == 0)
839 else 772 goto next_page;
840 ret = __copy_from_user(vaddr + shmem_page_offset,
841 user_data,
842 page_length);
843 kunmap(page);
844 773
774 hit_slowpath = 1;
775 page_cache_get(page);
776 mutex_unlock(&dev->struct_mutex);
777
778 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
779 user_data, page_do_bit17_swizzling,
780 partial_cacheline_write,
781 needs_clflush_after);
782
783 mutex_lock(&dev->struct_mutex);
784 page_cache_release(page);
785next_page:
845 set_page_dirty(page); 786 set_page_dirty(page);
846 mark_page_accessed(page); 787 mark_page_accessed(page);
847 page_cache_release(page); 788 if (release_page)
789 page_cache_release(page);
848 790
849 if (ret) { 791 if (ret) {
850 ret = -EFAULT; 792 ret = -EFAULT;
@@ -857,17 +799,21 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
857 } 799 }
858 800
859out: 801out:
860 mutex_lock(&dev->struct_mutex); 802 if (hit_slowpath) {
861 /* Fixup: Kill any reinstated backing storage pages */ 803 /* Fixup: Kill any reinstated backing storage pages */
862 if (obj->madv == __I915_MADV_PURGED) 804 if (obj->madv == __I915_MADV_PURGED)
863 i915_gem_object_truncate(obj); 805 i915_gem_object_truncate(obj);
864 /* and flush dirty cachelines in case the object isn't in the cpu write 806 /* and flush dirty cachelines in case the object isn't in the cpu write
865 * domain anymore. */ 807 * domain anymore. */
866 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 808 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
867 i915_gem_clflush_object(obj); 809 i915_gem_clflush_object(obj);
868 intel_gtt_chipset_flush(); 810 intel_gtt_chipset_flush();
811 }
869 } 812 }
870 813
814 if (needs_clflush_after)
815 intel_gtt_chipset_flush();
816
871 return ret; 817 return ret;
872} 818}
873 819
@@ -892,8 +838,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
892 args->size)) 838 args->size))
893 return -EFAULT; 839 return -EFAULT;
894 840
895 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, 841 ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
896 args->size); 842 args->size);
897 if (ret) 843 if (ret)
898 return -EFAULT; 844 return -EFAULT;
899 845
@@ -916,6 +862,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
916 862
917 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 863 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
918 864
865 ret = -EFAULT;
919 /* We can only do the GTT pwrite on untiled buffers, as otherwise 866 /* We can only do the GTT pwrite on untiled buffers, as otherwise
920 * it would end up going through the fenced access, and we'll get 867 * it would end up going through the fenced access, and we'll get
921 * different detiling behavior between reading and writing. 868 * different detiling behavior between reading and writing.
@@ -928,42 +875,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
928 } 875 }
929 876
930 if (obj->gtt_space && 877 if (obj->gtt_space &&
878 obj->cache_level == I915_CACHE_NONE &&
879 obj->map_and_fenceable &&
931 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 880 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
932 ret = i915_gem_object_pin(obj, 0, true);
933 if (ret)
934 goto out;
935
936 ret = i915_gem_object_set_to_gtt_domain(obj, true);
937 if (ret)
938 goto out_unpin;
939
940 ret = i915_gem_object_put_fence(obj);
941 if (ret)
942 goto out_unpin;
943
944 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 881 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
945 if (ret == -EFAULT) 882 /* Note that the gtt paths might fail with non-page-backed user
946 ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); 883 * pointers (e.g. gtt mappings when moving data between
947 884 * textures). Fallback to the shmem path in that case. */
948out_unpin:
949 i915_gem_object_unpin(obj);
950
951 if (ret != -EFAULT)
952 goto out;
953 /* Fall through to the shmfs paths because the gtt paths might
954 * fail with non-page-backed user pointers (e.g. gtt mappings
955 * when moving data between textures). */
956 } 885 }
957 886
958 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
959 if (ret)
960 goto out;
961
962 ret = -EFAULT;
963 if (!i915_gem_object_needs_bit17_swizzle(obj))
964 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
965 if (ret == -EFAULT) 887 if (ret == -EFAULT)
966 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); 888 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
967 889
968out: 890out:
969 drm_gem_object_unreference(&obj->base); 891 drm_gem_object_unreference(&obj->base);
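With pinning, the GTT-domain flush and the fence release folded into i915_gem_gtt_pwrite_fast() itself, the ioctl is left with just the routing decision shown in the hunk above. A reduced sketch of that routing, with the checked object state collapsed into plain booleans (field and function names here are illustrative, not the driver's):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Boolean stand-ins for the object state tested in the hunk above. */
struct obj {
    bool bound;             /* obj->gtt_space != NULL               */
    bool uncached;          /* obj->cache_level == I915_CACHE_NONE  */
    bool mappable;          /* obj->map_and_fenceable               */
    bool cpu_write_domain;  /* write_domain == I915_GEM_DOMAIN_CPU  */
};

/* Pretend the GTT path hits a non-page-backed source and faults. */
static int gtt_pwrite_fast(const struct obj *o)
{
    (void)o;
    return -EFAULT;
}

static int shmem_pwrite(const struct obj *o)
{
    (void)o;
    return 0;
}

static int route_pwrite(const struct obj *o)
{
    int ret = -EFAULT;

    if (o->bound && o->uncached && o->mappable && !o->cpu_write_domain)
        ret = gtt_pwrite_fast(o);

    /* The shmem path works on any object and may sleep on faults. */
    if (ret == -EFAULT)
        ret = shmem_pwrite(o);

    return ret;
}

int main(void)
{
    struct obj o = { .bound = true, .uncached = true,
                     .mappable = true, .cpu_write_domain = false };

    printf("pwrite returned %d\n", route_pwrite(&o));
    return 0;
}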
@@ -1153,6 +1075,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1153 goto unlock; 1075 goto unlock;
1154 } 1076 }
1155 1077
1078 if (!obj->has_global_gtt_mapping)
1079 i915_gem_gtt_bind_object(obj, obj->cache_level);
1080
1156 if (obj->tiling_mode == I915_TILING_NONE) 1081 if (obj->tiling_mode == I915_TILING_NONE)
1157 ret = i915_gem_object_put_fence(obj); 1082 ret = i915_gem_object_put_fence(obj);
1158 else 1083 else
@@ -1546,6 +1471,9 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1546 inode = obj->base.filp->f_path.dentry->d_inode; 1471 inode = obj->base.filp->f_path.dentry->d_inode;
1547 shmem_truncate_range(inode, 0, (loff_t)-1); 1472 shmem_truncate_range(inode, 0, (loff_t)-1);
1548 1473
1474 if (obj->base.map_list.map)
1475 drm_gem_free_mmap_offset(&obj->base);
1476
1549 obj->madv = __I915_MADV_PURGED; 1477 obj->madv = __I915_MADV_PURGED;
1550} 1478}
1551 1479
@@ -1954,6 +1882,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
1954 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { 1882 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
1955 if (HAS_PCH_SPLIT(ring->dev)) 1883 if (HAS_PCH_SPLIT(ring->dev))
1956 ier = I915_READ(DEIER) | I915_READ(GTIER); 1884 ier = I915_READ(DEIER) | I915_READ(GTIER);
1885 else if (IS_VALLEYVIEW(ring->dev))
1886 ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1957 else 1887 else
1958 ier = I915_READ(IER); 1888 ier = I915_READ(IER);
1959 if (!ier) { 1889 if (!ier) {
@@ -2100,11 +2030,13 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2100 2030
2101 trace_i915_gem_object_unbind(obj); 2031 trace_i915_gem_object_unbind(obj);
2102 2032
2103 i915_gem_gtt_unbind_object(obj); 2033 if (obj->has_global_gtt_mapping)
2034 i915_gem_gtt_unbind_object(obj);
2104 if (obj->has_aliasing_ppgtt_mapping) { 2035 if (obj->has_aliasing_ppgtt_mapping) {
2105 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj); 2036 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2106 obj->has_aliasing_ppgtt_mapping = 0; 2037 obj->has_aliasing_ppgtt_mapping = 0;
2107 } 2038 }
2039 i915_gem_gtt_finish_object(obj);
2108 2040
2109 i915_gem_object_put_pages_gtt(obj); 2041 i915_gem_object_put_pages_gtt(obj);
2110 2042
@@ -2749,7 +2681,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2749 return ret; 2681 return ret;
2750 } 2682 }
2751 2683
2752 ret = i915_gem_gtt_bind_object(obj); 2684 ret = i915_gem_gtt_prepare_object(obj);
2753 if (ret) { 2685 if (ret) {
2754 i915_gem_object_put_pages_gtt(obj); 2686 i915_gem_object_put_pages_gtt(obj);
2755 drm_mm_put_block(obj->gtt_space); 2687 drm_mm_put_block(obj->gtt_space);
@@ -2761,6 +2693,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2761 goto search_free; 2693 goto search_free;
2762 } 2694 }
2763 2695
2696 if (!dev_priv->mm.aliasing_ppgtt)
2697 i915_gem_gtt_bind_object(obj, obj->cache_level);
2698
2764 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); 2699 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2765 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2700 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2766 2701
@@ -2953,7 +2888,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2953 return ret; 2888 return ret;
2954 } 2889 }
2955 2890
2956 i915_gem_gtt_rebind_object(obj, cache_level); 2891 if (obj->has_global_gtt_mapping)
2892 i915_gem_gtt_bind_object(obj, cache_level);
2957 if (obj->has_aliasing_ppgtt_mapping) 2893 if (obj->has_aliasing_ppgtt_mapping)
2958 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, 2894 i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
2959 obj, cache_level); 2895 obj, cache_level);
@@ -3082,7 +3018,7 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3082 * This function returns when the move is complete, including waiting on 3018 * This function returns when the move is complete, including waiting on
3083 * flushes to occur. 3019 * flushes to occur.
3084 */ 3020 */
3085static int 3021int
3086i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 3022i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3087{ 3023{
3088 uint32_t old_write_domain, old_read_domains; 3024 uint32_t old_write_domain, old_read_domains;
@@ -3101,11 +3037,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3101 3037
3102 i915_gem_object_flush_gtt_write_domain(obj); 3038 i915_gem_object_flush_gtt_write_domain(obj);
3103 3039
3104 /* If we have a partially-valid cache of the object in the CPU,
3105 * finish invalidating it and free the per-page flags.
3106 */
3107 i915_gem_object_set_to_full_cpu_read_domain(obj);
3108
3109 old_write_domain = obj->base.write_domain; 3040 old_write_domain = obj->base.write_domain;
3110 old_read_domains = obj->base.read_domains; 3041 old_read_domains = obj->base.read_domains;
3111 3042
@@ -3136,113 +3067,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3136 return 0; 3067 return 0;
3137} 3068}
3138 3069
3139/**
3140 * Moves the object from a partially CPU read to a full one.
3141 *
3142 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
3143 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
3144 */
3145static void
3146i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
3147{
3148 if (!obj->page_cpu_valid)
3149 return;
3150
3151 /* If we're partially in the CPU read domain, finish moving it in.
3152 */
3153 if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
3154 int i;
3155
3156 for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
3157 if (obj->page_cpu_valid[i])
3158 continue;
3159 drm_clflush_pages(obj->pages + i, 1);
3160 }
3161 }
3162
3163 /* Free the page_cpu_valid mappings which are now stale, whether
3164 * or not we've got I915_GEM_DOMAIN_CPU.
3165 */
3166 kfree(obj->page_cpu_valid);
3167 obj->page_cpu_valid = NULL;
3168}
3169
3170/**
3171 * Set the CPU read domain on a range of the object.
3172 *
3173 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
3174 * not entirely valid. The page_cpu_valid member of the object flags which
3175 * pages have been flushed, and will be respected by
3176 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
3177 * of the whole object.
3178 *
3179 * This function returns when the move is complete, including waiting on
3180 * flushes to occur.
3181 */
3182static int
3183i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
3184 uint64_t offset, uint64_t size)
3185{
3186 uint32_t old_read_domains;
3187 int i, ret;
3188
3189 if (offset == 0 && size == obj->base.size)
3190 return i915_gem_object_set_to_cpu_domain(obj, 0);
3191
3192 ret = i915_gem_object_flush_gpu_write_domain(obj);
3193 if (ret)
3194 return ret;
3195
3196 ret = i915_gem_object_wait_rendering(obj);
3197 if (ret)
3198 return ret;
3199
3200 i915_gem_object_flush_gtt_write_domain(obj);
3201
3202 /* If we're already fully in the CPU read domain, we're done. */
3203 if (obj->page_cpu_valid == NULL &&
3204 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
3205 return 0;
3206
3207 /* Otherwise, create/clear the per-page CPU read domain flag if we're
3208 * newly adding I915_GEM_DOMAIN_CPU
3209 */
3210 if (obj->page_cpu_valid == NULL) {
3211 obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
3212 GFP_KERNEL);
3213 if (obj->page_cpu_valid == NULL)
3214 return -ENOMEM;
3215 } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
3216 memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
3217
3218 /* Flush the cache on any pages that are still invalid from the CPU's
3219 * perspective.
3220 */
3221 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
3222 i++) {
3223 if (obj->page_cpu_valid[i])
3224 continue;
3225
3226 drm_clflush_pages(obj->pages + i, 1);
3227
3228 obj->page_cpu_valid[i] = 1;
3229 }
3230
3231 /* It should now be out of any other write domains, and we can update
3232 * the domain values for our changes.
3233 */
3234 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3235
3236 old_read_domains = obj->base.read_domains;
3237 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3238
3239 trace_i915_gem_object_change_domain(obj,
3240 old_read_domains,
3241 obj->base.write_domain);
3242
3243 return 0;
3244}
3245
3246/* Throttle our rendering by waiting until the ring has completed our requests 3070/* Throttle our rendering by waiting until the ring has completed our requests
3247 * emitted over 20 msec ago. 3071 * emitted over 20 msec ago.
3248 * 3072 *
@@ -3343,6 +3167,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3343 return ret; 3167 return ret;
3344 } 3168 }
3345 3169
3170 if (!obj->has_global_gtt_mapping && map_and_fenceable)
3171 i915_gem_gtt_bind_object(obj, obj->cache_level);
3172
3346 if (obj->pin_count++ == 0) { 3173 if (obj->pin_count++ == 0) {
3347 if (!obj->active) 3174 if (!obj->active)
3348 list_move_tail(&obj->mm_list, 3175 list_move_tail(&obj->mm_list,
@@ -3664,7 +3491,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
3664 drm_gem_object_release(&obj->base); 3491 drm_gem_object_release(&obj->base);
3665 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3492 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3666 3493
3667 kfree(obj->page_cpu_valid);
3668 kfree(obj->bit_17); 3494 kfree(obj->bit_17);
3669 kfree(obj); 3495 kfree(obj);
3670} 3496}
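One helper from the pread/pwrite rework above deserves a worked example: shmem_clflush_swizzled_range() makes no attempt to follow the bit-17 swizzle pattern, it simply widens the flush to 128-byte boundaries so both channels are always covered. A small user-space sketch of just that rounding, with flush_range() as a hypothetical stand-in for drm_clflush_virt_range():

#include <stdint.h>
#include <stdio.h>

#define round_down(x, y) ((x) & ~((uintptr_t)(y) - 1))
#define round_up(x, y)   round_down((x) + (y) - 1, (y))

/* Hypothetical stand-in for drm_clflush_virt_range(). */
static void flush_range(uintptr_t start, size_t len)
{
    printf("flush [%#lx, %#lx)\n",
           (unsigned long)start, (unsigned long)(start + len));
}

static void clflush_swizzled_range(uintptr_t addr, size_t length, int swizzled)
{
    if (swizzled) {
        /* Widen to 128-byte boundaries so both channels get flushed. */
        uintptr_t start = round_down(addr, 128);
        uintptr_t end = round_up(addr + length, 128);

        flush_range(start, end - start);
    } else {
        flush_range(addr, length);
    }
}

int main(void)
{
    clflush_swizzled_range(0x1040, 32, 1);  /* widened to [0x1000, 0x1080) */
    clflush_swizzled_range(0x1040, 32, 0);  /* exact      [0x1040, 0x1060) */
    return 0;
}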
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f51a696486cb..254e2f6ac4f0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -266,6 +266,12 @@ eb_destroy(struct eb_objects *eb)
266 kfree(eb); 266 kfree(eb);
267} 267}
268 268
269static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
270{
271 return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
272 obj->cache_level != I915_CACHE_NONE);
273}
274
269static int 275static int
270i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 276i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
271 struct eb_objects *eb, 277 struct eb_objects *eb,
@@ -273,6 +279,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
273{ 279{
274 struct drm_device *dev = obj->base.dev; 280 struct drm_device *dev = obj->base.dev;
275 struct drm_gem_object *target_obj; 281 struct drm_gem_object *target_obj;
282 struct drm_i915_gem_object *target_i915_obj;
276 uint32_t target_offset; 283 uint32_t target_offset;
277 int ret = -EINVAL; 284 int ret = -EINVAL;
278 285
@@ -281,7 +288,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
281 if (unlikely(target_obj == NULL)) 288 if (unlikely(target_obj == NULL))
282 return -ENOENT; 289 return -ENOENT;
283 290
284 target_offset = to_intel_bo(target_obj)->gtt_offset; 291 target_i915_obj = to_intel_bo(target_obj);
292 target_offset = target_i915_obj->gtt_offset;
285 293
286 /* The target buffer should have appeared before us in the 294 /* The target buffer should have appeared before us in the
287 * exec_object list, so it should have a GTT space bound by now. 295 * exec_object list, so it should have a GTT space bound by now.
@@ -352,11 +360,19 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
352 return ret; 360 return ret;
353 } 361 }
354 362
363 /* We can't wait for rendering with pagefaults disabled */
364 if (obj->active && in_atomic())
365 return -EFAULT;
366
355 reloc->delta += target_offset; 367 reloc->delta += target_offset;
356 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { 368 if (use_cpu_reloc(obj)) {
357 uint32_t page_offset = reloc->offset & ~PAGE_MASK; 369 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
358 char *vaddr; 370 char *vaddr;
359 371
372 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
373 if (ret)
374 return ret;
375
360 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); 376 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
361 *(uint32_t *)(vaddr + page_offset) = reloc->delta; 377 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
362 kunmap_atomic(vaddr); 378 kunmap_atomic(vaddr);
@@ -365,10 +381,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
365 uint32_t __iomem *reloc_entry; 381 uint32_t __iomem *reloc_entry;
366 void __iomem *reloc_page; 382 void __iomem *reloc_page;
367 383
368 /* We can't wait for rendering with pagefaults disabled */
369 if (obj->active && in_atomic())
370 return -EFAULT;
371
372 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 384 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
373 if (ret) 385 if (ret)
374 return ret; 386 return ret;
@@ -383,6 +395,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
383 io_mapping_unmap_atomic(reloc_page); 395 io_mapping_unmap_atomic(reloc_page);
384 } 396 }
385 397
398 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
399 * pipe_control writes because the gpu doesn't properly redirect them
400 * through the ppgtt for non_secure batchbuffers. */
401 if (unlikely(IS_GEN6(dev) &&
402 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
403 !target_i915_obj->has_global_gtt_mapping)) {
404 i915_gem_gtt_bind_object(target_i915_obj,
405 target_i915_obj->cache_level);
406 }
407
386 /* and update the user's relocation entry */ 408 /* and update the user's relocation entry */
387 reloc->presumed_offset = target_offset; 409 reloc->presumed_offset = target_offset;
388 410
@@ -393,30 +415,46 @@ static int
393i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, 415i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
394 struct eb_objects *eb) 416 struct eb_objects *eb)
395{ 417{
418#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
419 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
396 struct drm_i915_gem_relocation_entry __user *user_relocs; 420 struct drm_i915_gem_relocation_entry __user *user_relocs;
397 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 421 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
398 int i, ret; 422 int remain, ret;
399 423
400 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; 424 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
401 for (i = 0; i < entry->relocation_count; i++) {
402 struct drm_i915_gem_relocation_entry reloc;
403 425
404 if (__copy_from_user_inatomic(&reloc, 426 remain = entry->relocation_count;
405 user_relocs+i, 427 while (remain) {
406 sizeof(reloc))) 428 struct drm_i915_gem_relocation_entry *r = stack_reloc;
429 int count = remain;
430 if (count > ARRAY_SIZE(stack_reloc))
431 count = ARRAY_SIZE(stack_reloc);
432 remain -= count;
433
434 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
407 return -EFAULT; 435 return -EFAULT;
408 436
409 ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc); 437 do {
410 if (ret) 438 u64 offset = r->presumed_offset;
411 return ret;
412 439
413 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, 440 ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
414 &reloc.presumed_offset, 441 if (ret)
415 sizeof(reloc.presumed_offset))) 442 return ret;
416 return -EFAULT; 443
444 if (r->presumed_offset != offset &&
445 __copy_to_user_inatomic(&user_relocs->presumed_offset,
446 &r->presumed_offset,
447 sizeof(r->presumed_offset))) {
448 return -EFAULT;
449 }
450
451 user_relocs++;
452 r++;
453 } while (--count);
417 } 454 }
418 455
419 return 0; 456 return 0;
457#undef N_RELOC
420} 458}
421 459
422static int 460static int
@@ -465,6 +503,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
465#define __EXEC_OBJECT_HAS_FENCE (1<<31) 503#define __EXEC_OBJECT_HAS_FENCE (1<<31)
466 504
467static int 505static int
506need_reloc_mappable(struct drm_i915_gem_object *obj)
507{
508 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
509 return entry->relocation_count && !use_cpu_reloc(obj);
510}
511
512static int
468pin_and_fence_object(struct drm_i915_gem_object *obj, 513pin_and_fence_object(struct drm_i915_gem_object *obj,
469 struct intel_ring_buffer *ring) 514 struct intel_ring_buffer *ring)
470{ 515{
@@ -477,8 +522,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
477 has_fenced_gpu_access && 522 has_fenced_gpu_access &&
478 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 523 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
479 obj->tiling_mode != I915_TILING_NONE; 524 obj->tiling_mode != I915_TILING_NONE;
480 need_mappable = 525 need_mappable = need_fence || need_reloc_mappable(obj);
481 entry->relocation_count ? true : need_fence;
482 526
483 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); 527 ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
484 if (ret) 528 if (ret)
@@ -535,8 +579,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
535 has_fenced_gpu_access && 579 has_fenced_gpu_access &&
536 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 580 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
537 obj->tiling_mode != I915_TILING_NONE; 581 obj->tiling_mode != I915_TILING_NONE;
538 need_mappable = 582 need_mappable = need_fence || need_reloc_mappable(obj);
539 entry->relocation_count ? true : need_fence;
540 583
541 if (need_mappable) 584 if (need_mappable)
542 list_move(&obj->exec_list, &ordered_objects); 585 list_move(&obj->exec_list, &ordered_objects);
@@ -576,8 +619,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
576 has_fenced_gpu_access && 619 has_fenced_gpu_access &&
577 entry->flags & EXEC_OBJECT_NEEDS_FENCE && 620 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
578 obj->tiling_mode != I915_TILING_NONE; 621 obj->tiling_mode != I915_TILING_NONE;
579 need_mappable = 622 need_mappable = need_fence || need_reloc_mappable(obj);
580 entry->relocation_count ? true : need_fence;
581 623
582 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || 624 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
583 (need_mappable && !obj->map_and_fenceable)) 625 (need_mappable && !obj->map_and_fenceable))
@@ -955,7 +997,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
955 if (!access_ok(VERIFY_WRITE, ptr, length)) 997 if (!access_ok(VERIFY_WRITE, ptr, length))
956 return -EFAULT; 998 return -EFAULT;
957 999
958 if (fault_in_pages_readable(ptr, length)) 1000 if (fault_in_multipages_readable(ptr, length))
959 return -EFAULT; 1001 return -EFAULT;
960 } 1002 }
961 1003
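The relocation loop now pulls user entries into an on-stack buffer in chunks (N_RELOC entries per __copy_from_user_inatomic()) instead of copying them one at a time, and writes presumed_offset back only when the relocation actually moved. A reduced sketch of that batching; the entry layout and apply_reloc() are illustrative, not the uapi structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative entry layout, not drm_i915_gem_relocation_entry. */
struct reloc {
    uint64_t presumed_offset;
    uint32_t target_handle;
    uint32_t delta;
};

#define STACK_BYTES 512
#define N_RELOC (STACK_BYTES / sizeof(struct reloc))

/* Stand-in for resolving one relocation; returns the offset it lands at. */
static uint64_t apply_reloc(const struct reloc *r)
{
    return r->presumed_offset + r->delta;
}

static void relocate(struct reloc *user_relocs, size_t count)
{
    struct reloc stack[N_RELOC];
    size_t remain = count;

    while (remain) {
        size_t n = remain < N_RELOC ? remain : N_RELOC;
        size_t i;

        /* One bulk copy per chunk instead of one copy per entry. */
        memcpy(stack, user_relocs, n * sizeof(stack[0]));

        for (i = 0; i < n; i++) {
            uint64_t old_off = stack[i].presumed_offset;
            uint64_t new_off = apply_reloc(&stack[i]);

            /* Write back only when the offset actually moved. */
            if (new_off != old_off)
                user_relocs[i].presumed_offset = new_off;
        }

        user_relocs += n;
        remain -= n;
    }
}

int main(void)
{
    struct reloc r[3] = {
        { .presumed_offset = 0,      .delta = 0x1000 },
        { .presumed_offset = 0x2000, .delta = 0      },
        { .presumed_offset = 0,      .delta = 0x40   },
    };

    relocate(r, 3);
    printf("%#llx %#llx %#llx\n",
           (unsigned long long)r[0].presumed_offset,
           (unsigned long long)r[1].presumed_offset,
           (unsigned long long)r[2].presumed_offset);
    return 0;
}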
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61f4119..4fb875de32e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -346,42 +346,28 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
346 346
347 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { 347 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
348 i915_gem_clflush_object(obj); 348 i915_gem_clflush_object(obj);
349 i915_gem_gtt_rebind_object(obj, obj->cache_level); 349 i915_gem_gtt_bind_object(obj, obj->cache_level);
350 } 350 }
351 351
352 intel_gtt_chipset_flush(); 352 intel_gtt_chipset_flush();
353} 353}
354 354
355int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) 355int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
356{ 356{
357 struct drm_device *dev = obj->base.dev; 357 struct drm_device *dev = obj->base.dev;
358 struct drm_i915_private *dev_priv = dev->dev_private; 358 struct drm_i915_private *dev_priv = dev->dev_private;
359 unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
360 int ret;
361 359
362 if (dev_priv->mm.gtt->needs_dmar) { 360 if (dev_priv->mm.gtt->needs_dmar)
363 ret = intel_gtt_map_memory(obj->pages, 361 return intel_gtt_map_memory(obj->pages,
364 obj->base.size >> PAGE_SHIFT, 362 obj->base.size >> PAGE_SHIFT,
365 &obj->sg_list, 363 &obj->sg_list,
366 &obj->num_sg); 364 &obj->num_sg);
367 if (ret != 0) 365 else
368 return ret; 366 return 0;
369
370 intel_gtt_insert_sg_entries(obj->sg_list,
371 obj->num_sg,
372 obj->gtt_space->start >> PAGE_SHIFT,
373 agp_type);
374 } else
375 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
376 obj->base.size >> PAGE_SHIFT,
377 obj->pages,
378 agp_type);
379
380 return 0;
381} 367}
382 368
383void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, 369void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
384 enum i915_cache_level cache_level) 370 enum i915_cache_level cache_level)
385{ 371{
386 struct drm_device *dev = obj->base.dev; 372 struct drm_device *dev = obj->base.dev;
387 struct drm_i915_private *dev_priv = dev->dev_private; 373 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -399,19 +385,26 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
399 obj->base.size >> PAGE_SHIFT, 385 obj->base.size >> PAGE_SHIFT,
400 obj->pages, 386 obj->pages,
401 agp_type); 387 agp_type);
388
389 obj->has_global_gtt_mapping = 1;
402} 390}
403 391
404void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 392void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
405{ 393{
394 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
395 obj->base.size >> PAGE_SHIFT);
396
397 obj->has_global_gtt_mapping = 0;
398}
399
400void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
401{
406 struct drm_device *dev = obj->base.dev; 402 struct drm_device *dev = obj->base.dev;
407 struct drm_i915_private *dev_priv = dev->dev_private; 403 struct drm_i915_private *dev_priv = dev->dev_private;
408 bool interruptible; 404 bool interruptible;
409 405
410 interruptible = do_idling(dev_priv); 406 interruptible = do_idling(dev_priv);
411 407
412 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
413 obj->base.size >> PAGE_SHIFT);
414
415 if (obj->sg_list) { 408 if (obj->sg_list) {
416 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); 409 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
417 obj->sg_list = NULL; 410 obj->sg_list = NULL;
@@ -419,3 +412,23 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
419 412
420 undo_idling(dev_priv, interruptible); 413 undo_idling(dev_priv, interruptible);
421} 414}
415
416void i915_gem_init_global_gtt(struct drm_device *dev,
417 unsigned long start,
418 unsigned long mappable_end,
419 unsigned long end)
420{
421 drm_i915_private_t *dev_priv = dev->dev_private;
422
 423 /* Subtract the guard page ... */
424 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
425
426 dev_priv->mm.gtt_start = start;
427 dev_priv->mm.gtt_mappable_end = mappable_end;
428 dev_priv->mm.gtt_end = end;
429 dev_priv->mm.gtt_total = end - start;
430 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
431
432 /* ... but ensure that we clear the entire range. */
433 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
434}
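The new i915_gem_init_global_gtt() hands the drm_mm allocator one page less than the real aperture, so nothing can be bound flush against the end (the guard page), while still scrubbing PTEs for the whole range. The arithmetic as a standalone sketch, with mm_init() and clear_range() as hypothetical stand-ins for drm_mm_init() and intel_gtt_clear_range():

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void mm_init(unsigned long start, unsigned long size)
{
    printf("allocator manages [%#lx, %#lx)\n", start, start + size);
}

static void clear_range(unsigned long first_page, unsigned long num_pages)
{
    printf("clear PTEs for pages [%lu, %lu)\n",
           first_page, first_page + num_pages);
}

static void init_global_gtt(unsigned long start, unsigned long end)
{
    /* Subtract the guard page from what the allocator may hand out... */
    mm_init(start, end - start - PAGE_SIZE);

    /* ...but scrub the PTEs of the entire range, guard page included. */
    clear_range(start / PAGE_SIZE, (end - start) / PAGE_SIZE);
}

int main(void)
{
    init_global_gtt(0, 256UL << 20);    /* e.g. a 256 MiB GTT */
    return 0;
}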
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afd4e03e337e..febddc2952fb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -26,6 +26,8 @@
26 * 26 *
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/sysrq.h> 31#include <linux/sysrq.h>
30#include <linux/slab.h> 32#include <linux/slab.h>
31#include "drmP.h" 33#include "drmP.h"
@@ -118,6 +120,10 @@ void intel_enable_asle(struct drm_device *dev)
118 drm_i915_private_t *dev_priv = dev->dev_private; 120 drm_i915_private_t *dev_priv = dev->dev_private;
119 unsigned long irqflags; 121 unsigned long irqflags;
120 122
123 /* FIXME: opregion/asle for VLV */
124 if (IS_VALLEYVIEW(dev))
125 return;
126
121 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 127 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
122 128
123 if (HAS_PCH_SPLIT(dev)) 129 if (HAS_PCH_SPLIT(dev))
@@ -424,6 +430,128 @@ static void gen6_pm_rps_work(struct work_struct *work)
424 mutex_unlock(&dev_priv->dev->struct_mutex); 430 mutex_unlock(&dev_priv->dev->struct_mutex);
425} 431}
426 432
433static void snb_gt_irq_handler(struct drm_device *dev,
434 struct drm_i915_private *dev_priv,
435 u32 gt_iir)
436{
437
438 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
439 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
440 notify_ring(dev, &dev_priv->ring[RCS]);
441 if (gt_iir & GEN6_BSD_USER_INTERRUPT)
442 notify_ring(dev, &dev_priv->ring[VCS]);
443 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
444 notify_ring(dev, &dev_priv->ring[BCS]);
445
446 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
447 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
448 GT_RENDER_CS_ERROR_INTERRUPT)) {
449 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
450 i915_handle_error(dev, false);
451 }
452}
453
454static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
455{
456 struct drm_device *dev = (struct drm_device *) arg;
457 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
458 u32 iir, gt_iir, pm_iir;
459 irqreturn_t ret = IRQ_NONE;
460 unsigned long irqflags;
461 int pipe;
462 u32 pipe_stats[I915_MAX_PIPES];
463 u32 vblank_status;
464 int vblank = 0;
465 bool blc_event;
466
467 atomic_inc(&dev_priv->irq_received);
468
469 vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
470 PIPE_VBLANK_INTERRUPT_STATUS;
471
472 while (true) {
473 iir = I915_READ(VLV_IIR);
474 gt_iir = I915_READ(GTIIR);
475 pm_iir = I915_READ(GEN6_PMIIR);
476
477 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
478 goto out;
479
480 ret = IRQ_HANDLED;
481
482 snb_gt_irq_handler(dev, dev_priv, gt_iir);
483
484 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
485 for_each_pipe(pipe) {
486 int reg = PIPESTAT(pipe);
487 pipe_stats[pipe] = I915_READ(reg);
488
489 /*
490 * Clear the PIPE*STAT regs before the IIR
491 */
492 if (pipe_stats[pipe] & 0x8000ffff) {
493 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
494 DRM_DEBUG_DRIVER("pipe %c underrun\n",
495 pipe_name(pipe));
496 I915_WRITE(reg, pipe_stats[pipe]);
497 }
498 }
499 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
500
501 /* Consume port. Then clear IIR or we'll miss events */
502 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
503 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
504
505 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
506 hotplug_status);
507 if (hotplug_status & dev_priv->hotplug_supported_mask)
508 queue_work(dev_priv->wq,
509 &dev_priv->hotplug_work);
510
511 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
512 I915_READ(PORT_HOTPLUG_STAT);
513 }
514
515
516 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
517 drm_handle_vblank(dev, 0);
518 vblank++;
519 if (!dev_priv->flip_pending_is_done) {
520 intel_finish_page_flip(dev, 0);
521 }
522 }
523
524 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
525 drm_handle_vblank(dev, 1);
526 vblank++;
527 if (!dev_priv->flip_pending_is_done) {
528 intel_finish_page_flip(dev, 0);
529 }
530 }
531
532 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
533 blc_event = true;
534
535 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
536 unsigned long flags;
537 spin_lock_irqsave(&dev_priv->rps_lock, flags);
538 WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
539 dev_priv->pm_iir |= pm_iir;
540 I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
541 POSTING_READ(GEN6_PMIMR);
542 spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
543 queue_work(dev_priv->wq, &dev_priv->rps_work);
544 }
545
546 I915_WRITE(GTIIR, gt_iir);
547 I915_WRITE(GEN6_PMIIR, pm_iir);
548 I915_WRITE(VLV_IIR, iir);
549 }
550
551out:
552 return ret;
553}
554
427static void pch_irq_handler(struct drm_device *dev) 555static void pch_irq_handler(struct drm_device *dev)
428{ 556{
429 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 557 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
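Factoring the GT interrupt decode into snb_gt_irq_handler() lets the Sandy Bridge/Ivy Bridge paths and the new Valleyview handler above wake rings the same way: test each bit of the cached IIR value, notify the matching ring, and report command stream errors separately. A trimmed sketch of that bitmask dispatch, using placeholder bit values rather than the real GTIIR layout:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit assignments, not the hardware GTIIR layout. */
#define IRQ_RENDER    (1u << 0)
#define IRQ_VIDEO     (1u << 1)
#define IRQ_BLIT      (1u << 2)
#define IRQ_CS_ERROR  (1u << 3)

static void notify_ring(const char *name)
{
    printf("wake waiters on %s ring\n", name);
}

/* One shared dispatcher, called from every top-level IRQ handler. */
static void gt_irq_handler(uint32_t gt_iir)
{
    if (gt_iir & IRQ_RENDER)
        notify_ring("render");
    if (gt_iir & IRQ_VIDEO)
        notify_ring("video");
    if (gt_iir & IRQ_BLIT)
        notify_ring("blit");

    if (gt_iir & IRQ_CS_ERROR)
        printf("GT error interrupt %#x\n", (unsigned int)gt_iir);
}

int main(void)
{
    gt_irq_handler(IRQ_RENDER | IRQ_BLIT);
    return 0;
}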
@@ -499,12 +627,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
499 READ_BREADCRUMB(dev_priv); 627 READ_BREADCRUMB(dev_priv);
500 } 628 }
501 629
502 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 630 snb_gt_irq_handler(dev, dev_priv, gt_iir);
503 notify_ring(dev, &dev_priv->ring[RCS]);
504 if (gt_iir & GT_GEN6_BSD_USER_INTERRUPT)
505 notify_ring(dev, &dev_priv->ring[VCS]);
506 if (gt_iir & GT_BLT_USER_INTERRUPT)
507 notify_ring(dev, &dev_priv->ring[BCS]);
508 631
509 if (de_iir & DE_GSE_IVB) 632 if (de_iir & DE_GSE_IVB)
510 intel_opregion_gse_intr(dev); 633 intel_opregion_gse_intr(dev);
@@ -556,6 +679,16 @@ done:
556 return ret; 679 return ret;
557} 680}
558 681
682static void ilk_gt_irq_handler(struct drm_device *dev,
683 struct drm_i915_private *dev_priv,
684 u32 gt_iir)
685{
686 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
687 notify_ring(dev, &dev_priv->ring[RCS]);
688 if (gt_iir & GT_BSD_USER_INTERRUPT)
689 notify_ring(dev, &dev_priv->ring[VCS]);
690}
691
559static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) 692static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
560{ 693{
561 struct drm_device *dev = (struct drm_device *) arg; 694 struct drm_device *dev = (struct drm_device *) arg;
@@ -564,13 +697,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
564 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 697 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
565 u32 hotplug_mask; 698 u32 hotplug_mask;
566 struct drm_i915_master_private *master_priv; 699 struct drm_i915_master_private *master_priv;
567 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
568 700
569 atomic_inc(&dev_priv->irq_received); 701 atomic_inc(&dev_priv->irq_received);
570 702
571 if (IS_GEN6(dev))
572 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
573
574 /* disable master interrupt before clearing iir */ 703 /* disable master interrupt before clearing iir */
575 de_ier = I915_READ(DEIER); 704 de_ier = I915_READ(DEIER);
576 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 705 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -599,12 +728,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
599 READ_BREADCRUMB(dev_priv); 728 READ_BREADCRUMB(dev_priv);
600 } 729 }
601 730
602 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 731 if (IS_GEN5(dev))
603 notify_ring(dev, &dev_priv->ring[RCS]); 732 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
604 if (gt_iir & bsd_usr_interrupt) 733 else
605 notify_ring(dev, &dev_priv->ring[VCS]); 734 snb_gt_irq_handler(dev, dev_priv, gt_iir);
606 if (gt_iir & GT_BLT_USER_INTERRUPT)
607 notify_ring(dev, &dev_priv->ring[BCS]);
608 735
609 if (de_iir & DE_GSE) 736 if (de_iir & DE_GSE)
610 intel_opregion_gse_intr(dev); 737 intel_opregion_gse_intr(dev);
@@ -727,7 +854,8 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
727 goto unwind; 854 goto unwind;
728 855
729 local_irq_save(flags); 856 local_irq_save(flags);
730 if (reloc_offset < dev_priv->mm.gtt_mappable_end) { 857 if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
858 src->has_global_gtt_mapping) {
731 void __iomem *s; 859 void __iomem *s;
732 860
733 /* Simply ignore tiling or any overlapping fence. 861 /* Simply ignore tiling or any overlapping fence.
@@ -901,7 +1029,6 @@ static void i915_record_ring_state(struct drm_device *dev,
901 struct drm_i915_private *dev_priv = dev->dev_private; 1029 struct drm_i915_private *dev_priv = dev->dev_private;
902 1030
903 if (INTEL_INFO(dev)->gen >= 6) { 1031 if (INTEL_INFO(dev)->gen >= 6) {
904 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
905 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1032 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
906 error->semaphore_mboxes[ring->id][0] 1033 error->semaphore_mboxes[ring->id][0]
907 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1034 = I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -910,6 +1037,7 @@ static void i915_record_ring_state(struct drm_device *dev,
910 } 1037 }
911 1038
912 if (INTEL_INFO(dev)->gen >= 4) { 1039 if (INTEL_INFO(dev)->gen >= 4) {
1040 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
913 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1041 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
914 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1042 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
915 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1043 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
@@ -919,6 +1047,7 @@ static void i915_record_ring_state(struct drm_device *dev,
919 error->bbaddr = I915_READ64(BB_ADDR); 1047 error->bbaddr = I915_READ64(BB_ADDR);
920 } 1048 }
921 } else { 1049 } else {
1050 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
922 error->ipeir[ring->id] = I915_READ(IPEIR); 1051 error->ipeir[ring->id] = I915_READ(IPEIR);
923 error->ipehr[ring->id] = I915_READ(IPEHR); 1052 error->ipehr[ring->id] = I915_READ(IPEHR);
924 error->instdone[ring->id] = I915_READ(INSTDONE); 1053 error->instdone[ring->id] = I915_READ(INSTDONE);
@@ -1103,33 +1232,26 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1103 if (!eir) 1232 if (!eir)
1104 return; 1233 return;
1105 1234
1106 printk(KERN_ERR "render error detected, EIR: 0x%08x\n", 1235 pr_err("render error detected, EIR: 0x%08x\n", eir);
1107 eir);
1108 1236
1109 if (IS_G4X(dev)) { 1237 if (IS_G4X(dev)) {
1110 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1238 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1111 u32 ipeir = I915_READ(IPEIR_I965); 1239 u32 ipeir = I915_READ(IPEIR_I965);
1112 1240
1113 printk(KERN_ERR " IPEIR: 0x%08x\n", 1241 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1114 I915_READ(IPEIR_I965)); 1242 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1115 printk(KERN_ERR " IPEHR: 0x%08x\n", 1243 pr_err(" INSTDONE: 0x%08x\n",
1116 I915_READ(IPEHR_I965));
1117 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1118 I915_READ(INSTDONE_I965)); 1244 I915_READ(INSTDONE_I965));
1119 printk(KERN_ERR " INSTPS: 0x%08x\n", 1245 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1120 I915_READ(INSTPS)); 1246 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1121 printk(KERN_ERR " INSTDONE1: 0x%08x\n", 1247 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1122 I915_READ(INSTDONE1));
1123 printk(KERN_ERR " ACTHD: 0x%08x\n",
1124 I915_READ(ACTHD_I965));
1125 I915_WRITE(IPEIR_I965, ipeir); 1248 I915_WRITE(IPEIR_I965, ipeir);
1126 POSTING_READ(IPEIR_I965); 1249 POSTING_READ(IPEIR_I965);
1127 } 1250 }
1128 if (eir & GM45_ERROR_PAGE_TABLE) { 1251 if (eir & GM45_ERROR_PAGE_TABLE) {
1129 u32 pgtbl_err = I915_READ(PGTBL_ER); 1252 u32 pgtbl_err = I915_READ(PGTBL_ER);
1130 printk(KERN_ERR "page table error\n"); 1253 pr_err("page table error\n");
1131 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 1254 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1132 pgtbl_err);
1133 I915_WRITE(PGTBL_ER, pgtbl_err); 1255 I915_WRITE(PGTBL_ER, pgtbl_err);
1134 POSTING_READ(PGTBL_ER); 1256 POSTING_READ(PGTBL_ER);
1135 } 1257 }
@@ -1138,53 +1260,42 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
1138 if (!IS_GEN2(dev)) { 1260 if (!IS_GEN2(dev)) {
1139 if (eir & I915_ERROR_PAGE_TABLE) { 1261 if (eir & I915_ERROR_PAGE_TABLE) {
1140 u32 pgtbl_err = I915_READ(PGTBL_ER); 1262 u32 pgtbl_err = I915_READ(PGTBL_ER);
1141 printk(KERN_ERR "page table error\n"); 1263 pr_err("page table error\n");
1142 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 1264 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
1143 pgtbl_err);
1144 I915_WRITE(PGTBL_ER, pgtbl_err); 1265 I915_WRITE(PGTBL_ER, pgtbl_err);
1145 POSTING_READ(PGTBL_ER); 1266 POSTING_READ(PGTBL_ER);
1146 } 1267 }
1147 } 1268 }
1148 1269
1149 if (eir & I915_ERROR_MEMORY_REFRESH) { 1270 if (eir & I915_ERROR_MEMORY_REFRESH) {
1150 printk(KERN_ERR "memory refresh error:\n"); 1271 pr_err("memory refresh error:\n");
1151 for_each_pipe(pipe) 1272 for_each_pipe(pipe)
1152 printk(KERN_ERR "pipe %c stat: 0x%08x\n", 1273 pr_err("pipe %c stat: 0x%08x\n",
1153 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 1274 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1154 /* pipestat has already been acked */ 1275 /* pipestat has already been acked */
1155 } 1276 }
1156 if (eir & I915_ERROR_INSTRUCTION) { 1277 if (eir & I915_ERROR_INSTRUCTION) {
1157 printk(KERN_ERR "instruction error\n"); 1278 pr_err("instruction error\n");
1158 printk(KERN_ERR " INSTPM: 0x%08x\n", 1279 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
1159 I915_READ(INSTPM));
1160 if (INTEL_INFO(dev)->gen < 4) { 1280 if (INTEL_INFO(dev)->gen < 4) {
1161 u32 ipeir = I915_READ(IPEIR); 1281 u32 ipeir = I915_READ(IPEIR);
1162 1282
1163 printk(KERN_ERR " IPEIR: 0x%08x\n", 1283 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
1164 I915_READ(IPEIR)); 1284 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
1165 printk(KERN_ERR " IPEHR: 0x%08x\n", 1285 pr_err(" INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
1166 I915_READ(IPEHR)); 1286 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
1167 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1168 I915_READ(INSTDONE));
1169 printk(KERN_ERR " ACTHD: 0x%08x\n",
1170 I915_READ(ACTHD));
1171 I915_WRITE(IPEIR, ipeir); 1287 I915_WRITE(IPEIR, ipeir);
1172 POSTING_READ(IPEIR); 1288 POSTING_READ(IPEIR);
1173 } else { 1289 } else {
1174 u32 ipeir = I915_READ(IPEIR_I965); 1290 u32 ipeir = I915_READ(IPEIR_I965);
1175 1291
1176 printk(KERN_ERR " IPEIR: 0x%08x\n", 1292 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1177 I915_READ(IPEIR_I965)); 1293 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1178 printk(KERN_ERR " IPEHR: 0x%08x\n", 1294 pr_err(" INSTDONE: 0x%08x\n",
1179 I915_READ(IPEHR_I965));
1180 printk(KERN_ERR " INSTDONE: 0x%08x\n",
1181 I915_READ(INSTDONE_I965)); 1295 I915_READ(INSTDONE_I965));
1182 printk(KERN_ERR " INSTPS: 0x%08x\n", 1296 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
1183 I915_READ(INSTPS)); 1297 pr_err(" INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
1184 printk(KERN_ERR " INSTDONE1: 0x%08x\n", 1298 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1185 I915_READ(INSTDONE1));
1186 printk(KERN_ERR " ACTHD: 0x%08x\n",
1187 I915_READ(ACTHD_I965));
1188 I915_WRITE(IPEIR_I965, ipeir); 1299 I915_WRITE(IPEIR_I965, ipeir);
1189 POSTING_READ(IPEIR_I965); 1300 POSTING_READ(IPEIR_I965);
1190 } 1301 }
@@ -1582,6 +1693,32 @@ static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1582 return 0; 1693 return 0;
1583} 1694}
1584 1695
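/* Unmask the vblank interrupt for the requested pipe in both the VLV
 * display flip/status enable register and VLV_IMR;
 * valleyview_disable_vblank() below performs the exact inverse. */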
1696static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1697{
1698 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1699 unsigned long irqflags;
1700 u32 dpfl, imr;
1701
1702 if (!i915_pipe_enabled(dev, pipe))
1703 return -EINVAL;
1704
1705 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1706 dpfl = I915_READ(VLV_DPFLIPSTAT);
1707 imr = I915_READ(VLV_IMR);
1708 if (pipe == 0) {
1709 dpfl |= PIPEA_VBLANK_INT_EN;
1710 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1711 } else {
 1712 dpfl |= PIPEB_VBLANK_INT_EN;
1713 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1714 }
1715 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1716 I915_WRITE(VLV_IMR, imr);
1717 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1718
1719 return 0;
1720}
1721
1585/* Called from drm generic code, passed 'crtc' which 1722/* Called from drm generic code, passed 'crtc' which
1586 * we use as a pipe index 1723 * we use as a pipe index
1587 */ 1724 */
@@ -1623,6 +1760,28 @@ static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1623 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1760 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1624} 1761}
1625 1762
1763static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1764{
1765 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1766 unsigned long irqflags;
1767 u32 dpfl, imr;
1768
1769 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1770 dpfl = I915_READ(VLV_DPFLIPSTAT);
1771 imr = I915_READ(VLV_IMR);
1772 if (pipe == 0) {
1773 dpfl &= ~PIPEA_VBLANK_INT_EN;
1774 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1775 } else {
1776 dpfl &= ~PIPEB_VBLANK_INT_EN;
1777 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1778 }
1779 I915_WRITE(VLV_IMR, imr);
1780 I915_WRITE(VLV_DPFLIPSTAT, dpfl);
1781 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1782}
1783
1784
1626/* Set the vblank monitor pipe 1785/* Set the vblank monitor pipe
1627 */ 1786 */
1628int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1787int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -1832,6 +1991,41 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1832 POSTING_READ(SDEIER); 1991 POSTING_READ(SDEIER);
1833} 1992}
1834 1993
1994static void valleyview_irq_preinstall(struct drm_device *dev)
1995{
1996 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1997 int pipe;
1998
1999 atomic_set(&dev_priv->irq_received, 0);
2000
2001 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2002 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2003
2004 /* VLV magic */
2005 I915_WRITE(VLV_IMR, 0);
2006 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2007 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2008 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2009
2010 /* and GT */
2011 I915_WRITE(GTIIR, I915_READ(GTIIR));
2012 I915_WRITE(GTIIR, I915_READ(GTIIR));
2013 I915_WRITE(GTIMR, 0xffffffff);
2014 I915_WRITE(GTIER, 0x0);
2015 POSTING_READ(GTIER);
2016
2017 I915_WRITE(DPINVGTT, 0xff);
2018
2019 I915_WRITE(PORT_HOTPLUG_EN, 0);
2020 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2021 for_each_pipe(pipe)
2022 I915_WRITE(PIPESTAT(pipe), 0xffff);
2023 I915_WRITE(VLV_IIR, 0xffffffff);
2024 I915_WRITE(VLV_IMR, 0xffffffff);
2025 I915_WRITE(VLV_IER, 0x0);
2026 POSTING_READ(VLV_IER);
2027}
2028
1835/* 2029/*
1836 * Enable digital hotplug on the PCH, and configure the DP short pulse 2030 * Enable digital hotplug on the PCH, and configure the DP short pulse
1837 * duration to 2ms (which is the minimum in the Display Port spec) 2031 * duration to 2ms (which is the minimum in the Display Port spec)
@@ -1884,8 +2078,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1884 if (IS_GEN6(dev)) 2078 if (IS_GEN6(dev))
1885 render_irqs = 2079 render_irqs =
1886 GT_USER_INTERRUPT | 2080 GT_USER_INTERRUPT |
1887 GT_GEN6_BSD_USER_INTERRUPT | 2081 GEN6_BSD_USER_INTERRUPT |
1888 GT_BLT_USER_INTERRUPT; 2082 GEN6_BLITTER_USER_INTERRUPT;
1889 else 2083 else
1890 render_irqs = 2084 render_irqs =
1891 GT_USER_INTERRUPT | 2085 GT_USER_INTERRUPT |
@@ -1957,8 +2151,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1957 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2151 I915_WRITE(GTIIR, I915_READ(GTIIR));
1958 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2152 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1959 2153
1960 render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | 2154 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1961 GT_BLT_USER_INTERRUPT; 2155 GEN6_BLITTER_USER_INTERRUPT;
1962 I915_WRITE(GTIER, render_irqs); 2156 I915_WRITE(GTIER, render_irqs);
1963 POSTING_READ(GTIER); 2157 POSTING_READ(GTIER);
1964 2158
@@ -1978,6 +2172,96 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1978 return 0; 2172 return 0;
1979} 2173}
1980 2174
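/* Install-time IRQ setup for VLV: unmask the display port and pipe
 * vblank interrupts in VLV_IMR/VLV_IER, route the ring interrupts
 * through GTIER, and apply the "broken MSIs" config-space workaround;
 * the hotplug and invalid-PTE enables are still stubbed out below. */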
2175static int valleyview_irq_postinstall(struct drm_device *dev)
2176{
2177 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2178 u32 render_irqs;
2179 u32 enable_mask;
2180 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2181 u16 msid;
2182
2183 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2184 enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2185 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2186
2187 dev_priv->irq_mask = ~enable_mask;
2188
2189
2190 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
2191 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
2192 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
2193
2194 dev_priv->pipestat[0] = 0;
2195 dev_priv->pipestat[1] = 0;
2196
2197 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
2198
2199 /* Hack for broken MSIs on VLV */
2200 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2201 pci_read_config_word(dev->pdev, 0x98, &msid);
2202 msid &= 0xff; /* mask out delivery bits */
2203 msid |= (1<<14);
2204 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2205
2206 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2207 I915_WRITE(VLV_IER, enable_mask);
2208 I915_WRITE(VLV_IIR, 0xffffffff);
2209 I915_WRITE(PIPESTAT(0), 0xffff);
2210 I915_WRITE(PIPESTAT(1), 0xffff);
2211 POSTING_READ(VLV_IER);
2212
2213 I915_WRITE(VLV_IIR, 0xffffffff);
2214 I915_WRITE(VLV_IIR, 0xffffffff);
2215
2216 render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
2217 GT_GEN6_BLT_CS_ERROR_INTERRUPT |
2218 GT_GEN6_BLT_USER_INTERRUPT |
2219 GT_GEN6_BSD_USER_INTERRUPT |
2220 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
2221 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
2222 GT_PIPE_NOTIFY |
2223 GT_RENDER_CS_ERROR_INTERRUPT |
2224 GT_SYNC_STATUS |
2225 GT_USER_INTERRUPT;
2226
2227 dev_priv->gt_irq_mask = ~render_irqs;
2228
2229 I915_WRITE(GTIIR, I915_READ(GTIIR));
2230 I915_WRITE(GTIIR, I915_READ(GTIIR));
2231 I915_WRITE(GTIMR, 0);
2232 I915_WRITE(GTIER, render_irqs);
2233 POSTING_READ(GTIER);
2234
2235 /* ack & enable invalid PTE error interrupts */
2236#if 0 /* FIXME: add support to irq handler for checking these bits */
2237 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2238 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2239#endif
2240
2241 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2242#if 0 /* FIXME: check register definitions; some have moved */
2243 /* Note HDMI and DP share bits */
2244 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2245 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2246 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2247 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2248 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2249 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2250 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
2251 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2252 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
2253 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2254 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2255 hotplug_en |= CRT_HOTPLUG_INT_EN;
2256 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2257 }
2258#endif
2259
2260 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2261
2262 return 0;
2263}
2264
1981static void i915_driver_irq_preinstall(struct drm_device * dev) 2265static void i915_driver_irq_preinstall(struct drm_device * dev)
1982{ 2266{
1983 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2267 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2081,6 +2365,30 @@ static int i915_driver_irq_postinstall(struct drm_device *dev)
2081 return 0; 2365 return 0;
2082} 2366}
2083 2367
2368static void valleyview_irq_uninstall(struct drm_device *dev)
2369{
2370 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2371 int pipe;
2372
2373 if (!dev_priv)
2374 return;
2375
2376 dev_priv->vblank_pipe = 0;
2377
2378 for_each_pipe(pipe)
2379 I915_WRITE(PIPESTAT(pipe), 0xffff);
2380
2381 I915_WRITE(HWSTAM, 0xffffffff);
2382 I915_WRITE(PORT_HOTPLUG_EN, 0);
2383 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2384 for_each_pipe(pipe)
2385 I915_WRITE(PIPESTAT(pipe), 0xffff);
2386 I915_WRITE(VLV_IIR, 0xffffffff);
2387 I915_WRITE(VLV_IMR, 0xffffffff);
2388 I915_WRITE(VLV_IER, 0x0);
2389 POSTING_READ(VLV_IER);
2390}
2391
2084static void ironlake_irq_uninstall(struct drm_device *dev) 2392static void ironlake_irq_uninstall(struct drm_device *dev)
2085{ 2393{
2086 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2394 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2136,7 +2444,8 @@ void intel_irq_init(struct drm_device *dev)
2136{ 2444{
2137 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2445 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2138 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2446 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2139 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 2447 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev) ||
2448 IS_VALLEYVIEW(dev)) {
2140 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2449 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2141 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2450 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2142 } 2451 }
@@ -2147,7 +2456,14 @@ void intel_irq_init(struct drm_device *dev)
2147 dev->driver->get_vblank_timestamp = NULL; 2456 dev->driver->get_vblank_timestamp = NULL;
2148 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2457 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2149 2458
2150 if (IS_IVYBRIDGE(dev)) { 2459 if (IS_VALLEYVIEW(dev)) {
2460 dev->driver->irq_handler = valleyview_irq_handler;
2461 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2462 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2463 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2464 dev->driver->enable_vblank = valleyview_enable_vblank;
2465 dev->driver->disable_vblank = valleyview_disable_vblank;
2466 } else if (IS_IVYBRIDGE(dev)) {
2151 /* Share pre & uninstall handlers with ILK/SNB */ 2467 /* Share pre & uninstall handlers with ILK/SNB */
2152 dev->driver->irq_handler = ivybridge_irq_handler; 2468 dev->driver->irq_handler = ivybridge_irq_handler;
2153 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2469 dev->driver->irq_preinstall = ironlake_irq_preinstall;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2abf4eb94039..6924f44a88df 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -27,6 +27,8 @@
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29 29
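/* Like _PIPE() above, but indexed by port: port 0 selects (a) and port 1
 * selects (b). Illustrative expansion (values from the HSW DP_TP_CTL
 * definitions later in this patch): _PORT(1, 0x64040, 0x64140) =
 * 0x64040 + 1*(0x64140 - 0x64040) = 0x64140. */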
30#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
31
30/* 32/*
31 * The Bridge device's PCI config space has information about the 33 * The Bridge device's PCI config space has information about the
32 * fb aperture size and the amount of pre-reserved memory. 34 * fb aperture size and the amount of pre-reserved memory.
@@ -301,6 +303,61 @@
301#define DEBUG_RESET_RENDER (1<<8) 303#define DEBUG_RESET_RENDER (1<<8)
302#define DEBUG_RESET_DISPLAY (1<<9) 304#define DEBUG_RESET_DISPLAY (1<<9)
303 305
306/*
307 * DPIO - a special bus for various display related registers to hide behind:
308 * 0x800c: m1, m2, n, p1, p2, k dividers
309 * 0x8014: REF and SFR select
310 * 0x8014: N divider, VCO select
311 * 0x801c/3c: core clock bits
312 * 0x8048/68: low pass filter coefficients
313 * 0x8100: fast clock controls
314 */
315#define DPIO_PKT 0x2100
316#define DPIO_RID (0<<24)
317#define DPIO_OP_WRITE (1<<16)
318#define DPIO_OP_READ (0<<16)
319#define DPIO_PORTID (0x12<<8)
320#define DPIO_BYTE (0xf<<4)
321#define DPIO_BUSY (1<<0) /* status only */
322#define DPIO_DATA 0x2104
323#define DPIO_REG 0x2108
324#define DPIO_CTL 0x2110
325#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
326#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
327#define DPIO_SFR_BYPASS (1<<1)
328#define DPIO_RESET (1<<0)
329
330#define _DPIO_DIV_A 0x800c
331#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
332#define DPIO_K_SHIFT (24) /* 4 bits */
333#define DPIO_P1_SHIFT (21) /* 3 bits */
334#define DPIO_P2_SHIFT (16) /* 5 bits */
335#define DPIO_N_SHIFT (12) /* 4 bits */
336#define DPIO_ENABLE_CALIBRATION (1<<11)
337#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
338#define DPIO_M2DIV_MASK 0xff
339#define _DPIO_DIV_B 0x802c
340#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
341
342#define _DPIO_REFSFR_A 0x8014
343#define DPIO_REFSEL_OVERRIDE 27
344#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
345#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
346#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
347#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
348#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
349#define _DPIO_REFSFR_B 0x8034
350#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
351
352#define _DPIO_CORE_CLK_A 0x801c
353#define _DPIO_CORE_CLK_B 0x803c
354#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
355
356#define _DPIO_LFP_COEFF_A 0x8048
357#define _DPIO_LFP_COEFF_B 0x8068
358#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
359
360#define DPIO_FASTCLK_DISABLE 0x8100
304 361
305/* 362/*
306 * Fence registers 363 * Fence registers
@@ -417,6 +474,7 @@
417#define INSTDONE 0x02090 474#define INSTDONE 0x02090
418#define NOPID 0x02094 475#define NOPID 0x02094
419#define HWSTAM 0x02098 476#define HWSTAM 0x02098
477#define DMA_FADD_I8XX 0x020d0
420 478
421#define ERROR_GEN6 0x040a0 479#define ERROR_GEN6 0x040a0
422 480
@@ -455,6 +513,11 @@
455#define IIR 0x020a4 513#define IIR 0x020a4
456#define IMR 0x020a8 514#define IMR 0x020a8
457#define ISR 0x020ac 515#define ISR 0x020ac
516#define VLV_IIR_RW 0x182084
517#define VLV_IER 0x1820a0
518#define VLV_IIR 0x1820a4
519#define VLV_IMR 0x1820a8
520#define VLV_ISR 0x1820ac
458#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 521#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
459#define I915_DISPLAY_PORT_INTERRUPT (1<<17) 522#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
460#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 523#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@@ -578,7 +641,12 @@
578#define ECO_GATING_CX_ONLY (1<<3) 641#define ECO_GATING_CX_ONLY (1<<3)
579#define ECO_FLIP_DONE (1<<0) 642#define ECO_FLIP_DONE (1<<0)
580 643
581/* GEN6 interrupt control */ 644#define CACHE_MODE_1 0x7004 /* IVB+ */
645#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
646
647/* GEN6 interrupt control
648 * Note that the per-ring interrupt bits do alias with the global interrupt bits
649 * in GTIMR. */
582#define GEN6_RENDER_HWSTAM 0x2098 650#define GEN6_RENDER_HWSTAM 0x2098
583#define GEN6_RENDER_IMR 0x20a8 651#define GEN6_RENDER_IMR 0x20a8
584#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) 652#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
@@ -742,9 +810,9 @@
742#define GMBUS_PORT_PANEL 3 810#define GMBUS_PORT_PANEL 3
743#define GMBUS_PORT_DPC 4 /* HDMIC */ 811#define GMBUS_PORT_DPC 4 /* HDMIC */
744#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */ 812#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
745 /* 6 reserved */ 813#define GMBUS_PORT_DPD 6 /* HDMID */
746#define GMBUS_PORT_DPD 7 /* HDMID */ 814#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
747#define GMBUS_NUM_PORTS 8 815#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
748#define GMBUS1 0x5104 /* command/status */ 816#define GMBUS1 0x5104 /* command/status */
749#define GMBUS_SW_CLR_INT (1<<31) 817#define GMBUS_SW_CLR_INT (1<<31)
750#define GMBUS_SW_RDY (1<<30) 818#define GMBUS_SW_RDY (1<<30)
@@ -796,7 +864,9 @@
796#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) 864#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
797#define DPLL_VCO_ENABLE (1 << 31) 865#define DPLL_VCO_ENABLE (1 << 31)
798#define DPLL_DVO_HIGH_SPEED (1 << 30) 866#define DPLL_DVO_HIGH_SPEED (1 << 30)
867#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
799#define DPLL_SYNCLOCK_ENABLE (1 << 29) 868#define DPLL_SYNCLOCK_ENABLE (1 << 29)
869#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
800#define DPLL_VGA_MODE_DIS (1 << 28) 870#define DPLL_VGA_MODE_DIS (1 << 28)
801#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ 871#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
802#define DPLLB_MODE_LVDS (2 << 26) /* i915 */ 872#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
@@ -808,6 +878,7 @@
808#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 878#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
809#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 879#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
810#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ 880#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
881#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
811 882
812#define SRX_INDEX 0x3c4 883#define SRX_INDEX 0x3c4
813#define SRX_DATA 0x3c5 884#define SRX_DATA 0x3c5
@@ -903,6 +974,7 @@
903#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 974#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
904#define _DPLL_B_MD 0x06020 /* 965+ only */ 975#define _DPLL_B_MD 0x06020 /* 965+ only */
905#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) 976#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
977
906#define _FPA0 0x06040 978#define _FPA0 0x06040
907#define _FPA1 0x06044 979#define _FPA1 0x06044
908#define _FPB0 0x06048 980#define _FPB0 0x06048
@@ -1043,6 +1115,9 @@
1043#define RAMCLK_GATE_D 0x6210 /* CRL only */ 1115#define RAMCLK_GATE_D 0x6210 /* CRL only */
1044#define DEUC 0x6214 /* CRL only */ 1116#define DEUC 0x6214 /* CRL only */
1045 1117
1118#define FW_BLC_SELF_VLV 0x6500
1119#define FW_CSPWRDWNEN (1<<15)
1120
1046/* 1121/*
1047 * Palette regs 1122 * Palette regs
1048 */ 1123 */
@@ -2421,23 +2496,30 @@
2421#define PIPECONF_DITHER_TYPE_TEMP (3<<2) 2496#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
2422#define _PIPEASTAT 0x70024 2497#define _PIPEASTAT 0x70024
2423#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 2498#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
2499#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
2424#define PIPE_CRC_ERROR_ENABLE (1UL<<29) 2500#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
2425#define PIPE_CRC_DONE_ENABLE (1UL<<28) 2501#define PIPE_CRC_DONE_ENABLE (1UL<<28)
2426#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) 2502#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
2503#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
2427#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) 2504#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
2428#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) 2505#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
2429#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) 2506#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
2430#define PIPE_DPST_EVENT_ENABLE (1UL<<23) 2507#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
2508#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
2431#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 2509#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
2432#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 2510#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
2433#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 2511#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
2434#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ 2512#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
2435#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 2513#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
2436#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 2514#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
2515#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
2437#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 2516#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
2517#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
2518#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
2438#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 2519#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
2439#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 2520#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
2440#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 2521#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
2522#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
2441#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 2523#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
2442#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 2524#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
2443#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 2525#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
@@ -2462,6 +2544,40 @@
2462#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 2544#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
2463#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) 2545#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
2464 2546
2547#define VLV_DPFLIPSTAT 0x70028
2548#define PIPEB_LINE_COMPARE_STATUS (1<<29)
2549#define PIPEB_HLINE_INT_EN (1<<28)
2550#define PIPEB_VBLANK_INT_EN (1<<27)
2551#define SPRITED_FLIPDONE_INT_EN (1<<26)
2552#define SPRITEC_FLIPDONE_INT_EN (1<<25)
2553#define PLANEB_FLIPDONE_INT_EN (1<<24)
2554#define PIPEA_LINE_COMPARE_STATUS (1<<21)
2555#define PIPEA_HLINE_INT_EN (1<<20)
2556#define PIPEA_VBLANK_INT_EN (1<<19)
2557#define SPRITEB_FLIPDONE_INT_EN (1<<18)
2558#define SPRITEA_FLIPDONE_INT_EN (1<<17)
2559#define PLANEA_FLIPDONE_INT_EN (1<<16)
2560
2561#define DPINVGTT 0x7002c /* VLV only */
2562#define CURSORB_INVALID_GTT_INT_EN (1<<23)
2563#define CURSORA_INVALID_GTT_INT_EN (1<<22)
2564#define SPRITED_INVALID_GTT_INT_EN (1<<21)
2565#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
2566#define PLANEB_INVALID_GTT_INT_EN (1<<19)
2567#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
2568#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
2569#define PLANEA_INVALID_GTT_INT_EN (1<<16)
2570#define DPINVGTT_EN_MASK 0xff0000
2571#define CURSORB_INVALID_GTT_STATUS (1<<7)
2572#define CURSORA_INVALID_GTT_STATUS (1<<6)
2573#define SPRITED_INVALID_GTT_STATUS (1<<5)
2574#define SPRITEC_INVALID_GTT_STATUS (1<<4)
2575#define PLANEB_INVALID_GTT_STATUS (1<<3)
2576#define SPRITEB_INVALID_GTT_STATUS (1<<2)
2577#define SPRITEA_INVALID_GTT_STATUS (1<<1)
2578#define PLANEA_INVALID_GTT_STATUS (1<<0)
2579#define DPINVGTT_STATUS_MASK 0xff
2580
2465#define DSPARB 0x70030 2581#define DSPARB 0x70030
2466#define DSPARB_CSTART_MASK (0x7f << 7) 2582#define DSPARB_CSTART_MASK (0x7f << 7)
2467#define DSPARB_CSTART_SHIFT 7 2583#define DSPARB_CSTART_SHIFT 7
@@ -2491,11 +2607,28 @@
2491#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 2607#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
2492#define DSPFW_HPLL_SR_MASK (0x1ff) 2608#define DSPFW_HPLL_SR_MASK (0x1ff)
2493 2609
2610/* drain latency register values*/
2611#define DRAIN_LATENCY_PRECISION_32 32
2612#define DRAIN_LATENCY_PRECISION_16 16
2613#define VLV_DDL1 0x70050
2614#define DDL_CURSORA_PRECISION_32 (1<<31)
2615#define DDL_CURSORA_PRECISION_16 (0<<31)
2616#define DDL_CURSORA_SHIFT 24
2617#define DDL_PLANEA_PRECISION_32 (1<<7)
2618#define DDL_PLANEA_PRECISION_16 (0<<7)
2619#define VLV_DDL2 0x70054
2620#define DDL_CURSORB_PRECISION_32 (1<<31)
2621#define DDL_CURSORB_PRECISION_16 (0<<31)
2622#define DDL_CURSORB_SHIFT 24
2623#define DDL_PLANEB_PRECISION_32 (1<<7)
2624#define DDL_PLANEB_PRECISION_16 (0<<7)
2625
2494/* FIFO watermark sizes etc */ 2626/* FIFO watermark sizes etc */
2495#define G4X_FIFO_LINE_SIZE 64 2627#define G4X_FIFO_LINE_SIZE 64
2496#define I915_FIFO_LINE_SIZE 64 2628#define I915_FIFO_LINE_SIZE 64
2497#define I830_FIFO_LINE_SIZE 32 2629#define I830_FIFO_LINE_SIZE 32
2498 2630
2631#define VALLEYVIEW_FIFO_SIZE 255
2499#define G4X_FIFO_SIZE 127 2632#define G4X_FIFO_SIZE 127
2500#define I965_FIFO_SIZE 512 2633#define I965_FIFO_SIZE 512
2501#define I945_FIFO_SIZE 127 2634#define I945_FIFO_SIZE 127
@@ -2503,6 +2636,7 @@
2503#define I855GM_FIFO_SIZE 127 /* In cachelines */ 2636#define I855GM_FIFO_SIZE 127 /* In cachelines */
2504#define I830_FIFO_SIZE 95 2637#define I830_FIFO_SIZE 95
2505 2638
2639#define VALLEYVIEW_MAX_WM 0xff
2506#define G4X_MAX_WM 0x3f 2640#define G4X_MAX_WM 0x3f
2507#define I915_MAX_WM 0x3f 2641#define I915_MAX_WM 0x3f
2508 2642
@@ -2517,6 +2651,7 @@
2517#define PINEVIEW_CURSOR_DFT_WM 0 2651#define PINEVIEW_CURSOR_DFT_WM 0
2518#define PINEVIEW_CURSOR_GUARD_WM 5 2652#define PINEVIEW_CURSOR_GUARD_WM 5
2519 2653
2654#define VALLEYVIEW_CURSOR_MAX_WM 64
2520#define I965_CURSOR_FIFO 64 2655#define I965_CURSOR_FIFO 64
2521#define I965_CURSOR_MAX_WM 32 2656#define I965_CURSOR_MAX_WM 32
2522#define I965_CURSOR_DFT_WM 8 2657#define I965_CURSOR_DFT_WM 8
@@ -3064,18 +3199,28 @@
3064#define DE_PIPEB_VBLANK_IVB (1<<5) 3199#define DE_PIPEB_VBLANK_IVB (1<<5)
3065#define DE_PIPEA_VBLANK_IVB (1<<0) 3200#define DE_PIPEA_VBLANK_IVB (1<<0)
3066 3201
3202#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
3203#define MASTER_INTERRUPT_ENABLE (1<<31)
3204
3067#define DEISR 0x44000 3205#define DEISR 0x44000
3068#define DEIMR 0x44004 3206#define DEIMR 0x44004
3069#define DEIIR 0x44008 3207#define DEIIR 0x44008
3070#define DEIER 0x4400c 3208#define DEIER 0x4400c
3071 3209
3072/* GT interrupt */ 3210/* GT interrupt.
3073#define GT_PIPE_NOTIFY (1 << 4) 3211 * Note that for gen6+ the ring-specific interrupt bits do alias with the
3074#define GT_SYNC_STATUS (1 << 2) 3212 * corresponding bits in the per-ring interrupt control registers. */
3075#define GT_USER_INTERRUPT (1 << 0) 3213#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
3076#define GT_BSD_USER_INTERRUPT (1 << 5) 3214#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
3077#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) 3215#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
3078#define GT_BLT_USER_INTERRUPT (1 << 22) 3216#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
3217#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
3218#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
3219#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
3220#define GT_PIPE_NOTIFY (1 << 4)
3221#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
3222#define GT_SYNC_STATUS (1 << 2)
3223#define GT_USER_INTERRUPT (1 << 0)
3079 3224
3080#define GTISR 0x44010 3225#define GTISR 0x44010
3081#define GTIMR 0x44014 3226#define GTIMR 0x44014
@@ -3328,6 +3473,21 @@
3328#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) 3473#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
3329#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) 3474#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
3330 3475
3476#define VLV_VIDEO_DIP_CTL_A 0x60220
3477#define VLV_VIDEO_DIP_DATA_A 0x60208
3478#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
3479
3480#define VLV_VIDEO_DIP_CTL_B 0x61170
3481#define VLV_VIDEO_DIP_DATA_B 0x61174
3482#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
3483
3484#define VLV_TVIDEO_DIP_CTL(pipe) \
3485 _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
3486#define VLV_TVIDEO_DIP_DATA(pipe) \
3487 _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
3488#define VLV_TVIDEO_DIP_GCP(pipe) \
3489 _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
3490
3331#define _TRANS_HTOTAL_B 0xe1000 3491#define _TRANS_HTOTAL_B 0xe1000
3332#define _TRANS_HBLANK_B 0xe1004 3492#define _TRANS_HBLANK_B 0xe1004
3333#define _TRANS_HSYNC_B 0xe1008 3493#define _TRANS_HSYNC_B 0xe1008
@@ -3548,6 +3708,7 @@
3548#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) 3708#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
3549 3709
3550/* or SDVOB */ 3710/* or SDVOB */
3711#define VLV_HDMIB 0x61140
3551#define HDMIB 0xe1140 3712#define HDMIB 0xe1140
3552#define PORT_ENABLE (1 << 31) 3713#define PORT_ENABLE (1 << 31)
3553#define TRANSCODER(pipe) ((pipe) << 30) 3714#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3713,6 +3874,8 @@
3713#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22) 3874#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
3714 3875
3715#define FORCEWAKE 0xA18C 3876#define FORCEWAKE 0xA18C
3877#define FORCEWAKE_VLV 0x1300b0
3878#define FORCEWAKE_ACK_VLV 0x1300b4
3716#define FORCEWAKE_ACK 0x130090 3879#define FORCEWAKE_ACK 0x130090
3717#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 3880#define FORCEWAKE_MT 0xa188 /* multi-threaded */
3718#define FORCEWAKE_MT_ACK 0x130040 3881#define FORCEWAKE_MT_ACK 0x130040
@@ -3866,4 +4029,193 @@
3866#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) 4029#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
3867#define AUD_CONFIG_DISABLE_NCTS (1 << 3) 4030#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
3868 4031
4032/* HSW Power Wells */
4033#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
4034#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
4035#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
4036#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
4037#define HSW_PWR_WELL_ENABLE (1<<31)
4038#define HSW_PWR_WELL_STATE (1<<30)
4039#define HSW_PWR_WELL_CTL5 0x45410
4040#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
4041#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
4042#define HSW_PWR_WELL_FORCE_ON (1<<19)
4043#define HSW_PWR_WELL_CTL6 0x45414
4044
4045/* Per-pipe DDI Function Control */
4046#define PIPE_DDI_FUNC_CTL_A 0x60400
4047#define PIPE_DDI_FUNC_CTL_B 0x61400
4048#define PIPE_DDI_FUNC_CTL_C 0x62400
4049#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
4050#define DDI_FUNC_CTL(pipe) _PIPE(pipe, \
4051 PIPE_DDI_FUNC_CTL_A, \
4052 PIPE_DDI_FUNC_CTL_B)
4053#define PIPE_DDI_FUNC_ENABLE (1<<31)
4054/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
4055#define PIPE_DDI_PORT_MASK (0xf<<28)
4056#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
4057#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
4058#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
4059#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
4060#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
4061#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
4062#define PIPE_DDI_BPC_8 (0<<20)
4063#define PIPE_DDI_BPC_10 (1<<20)
4064#define PIPE_DDI_BPC_6 (2<<20)
4065#define PIPE_DDI_BPC_12 (3<<20)
4066#define PIPE_DDI_BFI_ENABLE (1<<4)
4067#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
4068#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
4069#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
4070
4071/* DisplayPort Transport Control */
4072#define DP_TP_CTL_A 0x64040
4073#define DP_TP_CTL_B 0x64140
4074#define DP_TP_CTL(port) _PORT(port, \
4075 DP_TP_CTL_A, \
4076 DP_TP_CTL_B)
4077#define DP_TP_CTL_ENABLE (1<<31)
4078#define DP_TP_CTL_MODE_SST (0<<27)
4079#define DP_TP_CTL_MODE_MST (1<<27)
4080#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
4081#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
4082#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
4083#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
4084#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
4085#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
4086
4087/* DisplayPort Transport Status */
4088#define DP_TP_STATUS_A 0x64044
4089#define DP_TP_STATUS_B 0x64144
4090#define DP_TP_STATUS(port) _PORT(port, \
4091 DP_TP_STATUS_A, \
4092 DP_TP_STATUS_B)
4093#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
4094
4095/* DDI Buffer Control */
4096#define DDI_BUF_CTL_A 0x64000
4097#define DDI_BUF_CTL_B 0x64100
4098#define DDI_BUF_CTL(port) _PORT(port, \
4099 DDI_BUF_CTL_A, \
4100 DDI_BUF_CTL_B)
4101#define DDI_BUF_CTL_ENABLE (1<<31)
4102#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
4103#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
4104#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
4105#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
4106#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
4107#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
4108#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
4109#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
4110#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
4111#define DDI_BUF_EMP_MASK (0xf<<24)
4112#define DDI_BUF_IS_IDLE (1<<7)
4113#define DDI_PORT_WIDTH_X1 (0<<1)
4114#define DDI_PORT_WIDTH_X2 (1<<1)
4115#define DDI_PORT_WIDTH_X4 (3<<1)
4116#define DDI_INIT_DISPLAY_DETECTED (1<<0)
4117
4118/* DDI Buffer Translations */
4119#define DDI_BUF_TRANS_A 0x64E00
4120#define DDI_BUF_TRANS_B 0x64E60
4121#define DDI_BUF_TRANS(port) _PORT(port, \
4122 DDI_BUF_TRANS_A, \
4123 DDI_BUF_TRANS_B)
4124
4125/* Sideband Interface (SBI) is programmed indirectly, via
4126 * SBI_ADDR, which contains the register offset; and SBI_DATA,
4127 * which contains the payload */
4128#define SBI_ADDR 0xC6000
4129#define SBI_DATA 0xC6004
4130#define SBI_CTL_STAT 0xC6008
4131#define SBI_CTL_OP_CRRD (0x6<<8)
4132#define SBI_CTL_OP_CRWR (0x7<<8)
4133#define SBI_RESPONSE_FAIL (0x1<<1)
4134#define SBI_RESPONSE_SUCCESS (0x0<<1)
4135#define SBI_BUSY (0x1<<0)
4136#define SBI_READY (0x0<<0)
4137
4138/* SBI offsets */
4139#define SBI_SSCDIVINTPHASE6 0x0600
4140#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
4141#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
4142#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
4143#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
4144#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
4145#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
4146#define SBI_SSCCTL 0x020c
4147#define SBI_SSCCTL6 0x060C
4148#define SBI_SSCCTL_DISABLE (1<<0)
4149#define SBI_SSCAUXDIV6 0x0610
4150#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4151#define SBI_DBUFF0 0x2a00
4152
4153/* LPT PIXCLK_GATE */
4154#define PIXCLK_GATE 0xC6020
 4155#define PIXCLK_GATE_UNGATE (1<<0)
 4156#define PIXCLK_GATE_GATE (0<<0)
4157
4158/* SPLL */
4159#define SPLL_CTL 0x46020
4160#define SPLL_PLL_ENABLE (1<<31)
4161#define SPLL_PLL_SCC (1<<28)
4162#define SPLL_PLL_NON_SCC (2<<28)
4163#define SPLL_PLL_FREQ_810MHz (0<<26)
4164#define SPLL_PLL_FREQ_1350MHz (1<<26)
4165
4166/* WRPLL */
4167#define WRPLL_CTL1 0x46040
4168#define WRPLL_CTL2 0x46060
4169#define WRPLL_PLL_ENABLE (1<<31)
4170#define WRPLL_PLL_SELECT_SSC (0x01<<28)
4171#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
4172#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
4173
4174/* Port clock selection */
4175#define PORT_CLK_SEL_A 0x46100
4176#define PORT_CLK_SEL_B 0x46104
4177#define PORT_CLK_SEL(port) _PORT(port, \
4178 PORT_CLK_SEL_A, \
4179 PORT_CLK_SEL_B)
4180#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
4181#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
4182#define PORT_CLK_SEL_LCPLL_810 (2<<29)
4183#define PORT_CLK_SEL_SPLL (3<<29)
4184#define PORT_CLK_SEL_WRPLL1 (4<<29)
4185#define PORT_CLK_SEL_WRPLL2 (5<<29)
4186
4187/* Pipe clock selection */
4188#define PIPE_CLK_SEL_A 0x46140
4189#define PIPE_CLK_SEL_B 0x46144
4190#define PIPE_CLK_SEL(pipe) _PIPE(pipe, \
4191 PIPE_CLK_SEL_A, \
4192 PIPE_CLK_SEL_B)
4193/* For each pipe, we need to select the corresponding port clock */
4194#define PIPE_CLK_SEL_DISABLED (0x0<<29)
4195#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
4196
4197/* LCPLL Control */
4198#define LCPLL_CTL 0x130040
4199#define LCPLL_PLL_DISABLE (1<<31)
4200#define LCPLL_PLL_LOCK (1<<30)
4201#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4202#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
4203
4204/* Pipe WM_LINETIME - watermark line time */
4205#define PIPE_WM_LINETIME_A 0x45270
4206#define PIPE_WM_LINETIME_B 0x45274
4207#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
4208 PIPE_WM_LINETIME_A, \
 4209 PIPE_WM_LINETIME_B)
4210#define PIPE_WM_LINETIME_MASK (0x1ff)
4211#define PIPE_WM_LINETIME_TIME(x) ((x))
4212#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
4213#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
4214
4215/* SFUSE_STRAP */
4216#define SFUSE_STRAP 0xc2014
4217#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
4218#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
4219#define SFUSE_STRAP_DDID_DETECTED (1<<0)
4220
3869#endif /* _I915_REG_H_ */ 4221#endif /* _I915_REG_H_ */
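The SBI definitions added above describe an indirect access protocol: the target offset is written to SBI_ADDR, a cross-request read is started through SBI_CTL_STAT (SBI_CTL_OP_CRRD), SBI_BUSY is polled, and the payload is then read back from SBI_DATA. A rough sketch of such a read helper, for illustration only — intel_sbi_read_sketch, the wait_for() polling helper and the <<16 placement of the offset in SBI_ADDR are assumptions, not part of this patch:

static u32 intel_sbi_read_sketch(struct drm_i915_private *dev_priv, u16 reg)
{
	/* wait for any in-flight transaction to finish */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		return 0;
	}

	/* target offset goes into SBI_ADDR, the opcode kicks off the cycle */
	I915_WRITE(SBI_ADDR, (u32)reg << 16);
	I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY);

	/* wait for completion, then check the response status */
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100) ||
	    (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL)) {
		DRM_ERROR("SBI read of 0x%04x failed\n", reg);
		return 0;
	}

	return I915_READ(SBI_DATA);
}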
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bae3edf956a4..f152b2a7fc54 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -182,8 +182,6 @@ static void intel_dsm_platform_mux_info(void)
182 DRM_DEBUG_DRIVER(" hpd mux info: %s\n", 182 DRM_DEBUG_DRIVER(" hpd mux info: %s\n",
183 intel_dsm_mux_type(info->buffer.pointer[3])); 183 intel_dsm_mux_type(info->buffer.pointer[3]));
184 } 184 }
185 } else {
186 DRM_ERROR("MUX INFO call failed\n");
187 } 185 }
188 186
189out: 187out:
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b48fc2a8410c..353459362f6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data,
174 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); 174 return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
175} 175}
176 176
177/* get lvds_fp_timing entry
178 * this function may return NULL if the corresponding entry is invalid
179 */
180static const struct lvds_fp_timing *
181get_lvds_fp_timing(const struct bdb_header *bdb,
182 const struct bdb_lvds_lfp_data *data,
183 const struct bdb_lvds_lfp_data_ptrs *ptrs,
184 int index)
185{
186 size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
187 u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
188 size_t ofs;
189
190 if (index >= ARRAY_SIZE(ptrs->ptr))
191 return NULL;
192 ofs = ptrs->ptr[index].fp_timing_offset;
193 if (ofs < data_ofs ||
194 ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
195 return NULL;
196 return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
197}
198
177/* Try to find integrated panel data */ 199/* Try to find integrated panel data */
178static void 200static void
179parse_lfp_panel_data(struct drm_i915_private *dev_priv, 201parse_lfp_panel_data(struct drm_i915_private *dev_priv,
@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
183 const struct bdb_lvds_lfp_data *lvds_lfp_data; 205 const struct bdb_lvds_lfp_data *lvds_lfp_data;
184 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; 206 const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
185 const struct lvds_dvo_timing *panel_dvo_timing; 207 const struct lvds_dvo_timing *panel_dvo_timing;
208 const struct lvds_fp_timing *fp_timing;
186 struct drm_display_mode *panel_fixed_mode; 209 struct drm_display_mode *panel_fixed_mode;
187 int i, downclock; 210 int i, downclock;
188 211
@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
244 "Normal Clock %dKHz, downclock %dKHz\n", 267 "Normal Clock %dKHz, downclock %dKHz\n",
245 panel_fixed_mode->clock, 10*downclock); 268 panel_fixed_mode->clock, 10*downclock);
246 } 269 }
270
271 fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
272 lvds_lfp_data_ptrs,
273 lvds_options->panel_type);
274 if (fp_timing) {
275 /* check the resolution, just to be sure */
276 if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
277 fp_timing->y_res == panel_fixed_mode->vdisplay) {
278 dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
279 DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
280 dev_priv->bios_lvds_val);
281 }
282 }
247} 283}
248 284
249/* Try to find sdvo panel data */ 285/* Try to find sdvo panel data */
@@ -256,6 +292,11 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
256 int index; 292 int index;
257 293
258 index = i915_vbt_sdvo_panel_type; 294 index = i915_vbt_sdvo_panel_type;
295 if (index == -2) {
296 DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
297 return;
298 }
299
259 if (index == -1) { 300 if (index == -1) {
260 struct bdb_sdvo_lvds_options *sdvo_lvds_options; 301 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
261 302
@@ -332,11 +373,11 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
332 if (block_size >= sizeof(*general)) { 373 if (block_size >= sizeof(*general)) {
333 int bus_pin = general->crt_ddc_gmbus_pin; 374 int bus_pin = general->crt_ddc_gmbus_pin;
334 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin); 375 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
335 if (bus_pin >= 1 && bus_pin <= 6) 376 if (intel_gmbus_is_port_valid(bus_pin))
336 dev_priv->crt_ddc_pin = bus_pin; 377 dev_priv->crt_ddc_pin = bus_pin;
337 } else { 378 } else {
338 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n", 379 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
339 block_size); 380 block_size);
340 } 381 }
341 } 382 }
342} 383}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4d3d736a4f56..70b0f1abf149 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -278,9 +278,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
278 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { 278 if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
279 struct edid *edid; 279 struct edid *edid;
280 bool is_digital = false; 280 bool is_digital = false;
281 struct i2c_adapter *i2c;
281 282
282 edid = drm_get_edid(connector, 283 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
283 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); 284 edid = drm_get_edid(connector, i2c);
284 /* 285 /*
285 * This may be a DVI-I connector with a shared DDC 286 * This may be a DVI-I connector with a shared DDC
286 * link between analog and digital outputs, so we 287 * link between analog and digital outputs, so we
@@ -483,15 +484,16 @@ static int intel_crt_get_modes(struct drm_connector *connector)
483 struct drm_device *dev = connector->dev; 484 struct drm_device *dev = connector->dev;
484 struct drm_i915_private *dev_priv = dev->dev_private; 485 struct drm_i915_private *dev_priv = dev->dev_private;
485 int ret; 486 int ret;
487 struct i2c_adapter *i2c;
486 488
487 ret = intel_ddc_get_modes(connector, 489 i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
488 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); 490 ret = intel_ddc_get_modes(connector, i2c);
489 if (ret || !IS_G4X(dev)) 491 if (ret || !IS_G4X(dev))
490 return ret; 492 return ret;
491 493
492 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 494 /* Try to probe digital port for output in DVI-I -> VGA mode. */
493 return intel_ddc_get_modes(connector, 495 i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
494 &dev_priv->gmbus[GMBUS_PORT_DPB].adapter); 496 return intel_ddc_get_modes(connector, i2c);
495} 497}
496 498
497static int intel_crt_set_property(struct drm_connector *connector, 499static int intel_crt_set_property(struct drm_connector *connector,
@@ -543,7 +545,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
543 545
544static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) 546static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
545{ 547{
546 DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident); 548 DRM_INFO("Skipping CRT initialization for %s\n", id->ident);
547 return 1; 549 return 1;
548} 550}
549 551
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 91b35fd1db8c..37514a52b05c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
24 * Eric Anholt <eric@anholt.net> 24 * Eric Anholt <eric@anholt.net>
25 */ 25 */
26 26
27#include <linux/dmi.h>
27#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
28#include <linux/module.h> 29#include <linux/module.h>
29#include <linux/input.h> 30#include <linux/input.h>
@@ -360,6 +361,110 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
360 .find_pll = intel_find_pll_ironlake_dp, 361 .find_pll = intel_find_pll_ironlake_dp,
361}; 362};
362 363
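/* Read a register on the VLV DPIO sideband bus through the indirect
 * DPIO_REG/DPIO_PKT/DPIO_DATA interface; the transaction is serialized
 * under dpio_lock and gives up after a 100us busy-wait. */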
364u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
365{
366 unsigned long flags;
367 u32 val = 0;
368
369 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
370 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
371 DRM_ERROR("DPIO idle wait timed out\n");
372 goto out_unlock;
373 }
374
375 I915_WRITE(DPIO_REG, reg);
376 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
377 DPIO_BYTE);
378 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
379 DRM_ERROR("DPIO read wait timed out\n");
380 goto out_unlock;
381 }
382 val = I915_READ(DPIO_DATA);
383
384out_unlock:
385 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
386 return val;
387}
388
389static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
390 u32 val)
391{
392 unsigned long flags;
393
394 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
395 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
396 DRM_ERROR("DPIO idle wait timed out\n");
397 goto out_unlock;
398 }
399
400 I915_WRITE(DPIO_DATA, val);
401 I915_WRITE(DPIO_REG, reg);
402 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
403 DPIO_BYTE);
404 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
405 DRM_ERROR("DPIO write wait timed out\n");
406
407out_unlock:
408 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
409}
410
411static void vlv_init_dpio(struct drm_device *dev)
412{
413 struct drm_i915_private *dev_priv = dev->dev_private;
414
415 /* Reset the DPIO config */
416 I915_WRITE(DPIO_CTL, 0);
417 POSTING_READ(DPIO_CTL);
418 I915_WRITE(DPIO_CTL, 1);
419 POSTING_READ(DPIO_CTL);
420}
421
422static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
423{
424 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
425 return 1;
426}
427
428static const struct dmi_system_id intel_dual_link_lvds[] = {
429 {
430 .callback = intel_dual_link_lvds_callback,
431 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
432 .matches = {
433 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
434 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
435 },
436 },
437 { } /* terminating entry */
438};
439
440static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
441 unsigned int reg)
442{
443 unsigned int val;
444
445 /* use the module option value if specified */
446 if (i915_lvds_channel_mode > 0)
447 return i915_lvds_channel_mode == 2;
448
449 if (dmi_check_system(intel_dual_link_lvds))
450 return true;
451
452 if (dev_priv->lvds_val)
453 val = dev_priv->lvds_val;
454 else {
455 /* BIOS should set the proper LVDS register value at boot, but
456 * in reality, it doesn't set the value when the lid is closed;
457 * we need to check "the value to be set" in VBT when LVDS
458 * register is uninitialized.
459 */
460 val = I915_READ(reg);
461 if (!(val & ~LVDS_DETECTED))
462 val = dev_priv->bios_lvds_val;
463 dev_priv->lvds_val = val;
464 }
465 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
466}
467
363static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 468static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
364 int refclk) 469 int refclk)
365{ 470{
@@ -368,8 +473,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
368 const intel_limit_t *limit; 473 const intel_limit_t *limit;
369 474
370 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 475 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
371 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == 476 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
372 LVDS_CLKB_POWER_UP) {
373 /* LVDS dual channel */ 477 /* LVDS dual channel */
374 if (refclk == 100000) 478 if (refclk == 100000)
375 limit = &intel_limits_ironlake_dual_lvds_100m; 479 limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -397,8 +501,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
397 const intel_limit_t *limit; 501 const intel_limit_t *limit;
398 502
399 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 503 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
400 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 504 if (is_dual_link_lvds(dev_priv, LVDS))
401 LVDS_CLKB_POWER_UP)
402 /* LVDS with dual channel */ 505 /* LVDS with dual channel */
403 limit = &intel_limits_g4x_dual_channel_lvds; 506 limit = &intel_limits_g4x_dual_channel_lvds;
404 else 507 else
@@ -536,8 +639,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
536 * reliably set up different single/dual channel state, if we 639 * reliably set up different single/dual channel state, if we
537 * even can. 640 * even can.
538 */ 641 */
539 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 642 if (is_dual_link_lvds(dev_priv, LVDS))
540 LVDS_CLKB_POWER_UP)
541 clock.p2 = limit->p2.p2_fast; 643 clock.p2 = limit->p2.p2_fast;
542 else 644 else
543 clock.p2 = limit->p2.p2_slow; 645 clock.p2 = limit->p2.p2_slow;
@@ -2537,7 +2639,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2537 struct drm_i915_private *dev_priv = dev->dev_private; 2639 struct drm_i915_private *dev_priv = dev->dev_private;
2538 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2640 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2539 int pipe = intel_crtc->pipe; 2641 int pipe = intel_crtc->pipe;
2540 u32 reg, temp, i; 2642 u32 reg, temp, i, retry;
2541 2643
2542 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 2644 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2543 for train result */ 2645 for train result */
@@ -2589,15 +2691,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2589 POSTING_READ(reg); 2691 POSTING_READ(reg);
2590 udelay(500); 2692 udelay(500);
2591 2693
2592 reg = FDI_RX_IIR(pipe); 2694 for (retry = 0; retry < 5; retry++) {
2593 temp = I915_READ(reg); 2695 reg = FDI_RX_IIR(pipe);
2594 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2696 temp = I915_READ(reg);
2595 2697 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2596 if (temp & FDI_RX_BIT_LOCK) { 2698 if (temp & FDI_RX_BIT_LOCK) {
2597 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2699 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2598 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2700 DRM_DEBUG_KMS("FDI train 1 done.\n");
2599 break; 2701 break;
2702 }
2703 udelay(50);
2600 } 2704 }
2705 if (retry < 5)
2706 break;
2601 } 2707 }
2602 if (i == 4) 2708 if (i == 4)
2603 DRM_ERROR("FDI train 1 fail!\n"); 2709 DRM_ERROR("FDI train 1 fail!\n");
@@ -2638,15 +2744,19 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2638 POSTING_READ(reg); 2744 POSTING_READ(reg);
2639 udelay(500); 2745 udelay(500);
2640 2746
2641 reg = FDI_RX_IIR(pipe); 2747 for (retry = 0; retry < 5; retry++) {
2642 temp = I915_READ(reg); 2748 reg = FDI_RX_IIR(pipe);
2643 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 2749 temp = I915_READ(reg);
2644 2750 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2645 if (temp & FDI_RX_SYMBOL_LOCK) { 2751 if (temp & FDI_RX_SYMBOL_LOCK) {
2646 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2752 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2647 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2753 DRM_DEBUG_KMS("FDI train 2 done.\n");
2648 break; 2754 break;
2755 }
2756 udelay(50);
2649 } 2757 }
2758 if (retry < 5)
2759 break;
2650 } 2760 }
2651 if (i == 4) 2761 if (i == 4)
2652 DRM_ERROR("FDI train 2 fail!\n"); 2762 DRM_ERROR("FDI train 2 fail!\n");
@@ -3457,6 +3567,11 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3457 return true; 3567 return true;
3458} 3568}
3459 3569
3570static int valleyview_get_display_clock_speed(struct drm_device *dev)
3571{
3572 return 400000; /* FIXME */
3573}
3574
3460static int i945_get_display_clock_speed(struct drm_device *dev) 3575static int i945_get_display_clock_speed(struct drm_device *dev)
3461{ 3576{
3462 return 400000; 3577 return 400000;
@@ -3606,6 +3721,20 @@ static const struct intel_watermark_params g4x_cursor_wm_info = {
3606 2, 3721 2,
3607 G4X_FIFO_LINE_SIZE, 3722 G4X_FIFO_LINE_SIZE,
3608}; 3723};
3724static const struct intel_watermark_params valleyview_wm_info = {
3725 VALLEYVIEW_FIFO_SIZE,
3726 VALLEYVIEW_MAX_WM,
3727 VALLEYVIEW_MAX_WM,
3728 2,
3729 G4X_FIFO_LINE_SIZE,
3730};
3731static const struct intel_watermark_params valleyview_cursor_wm_info = {
3732 I965_CURSOR_FIFO,
3733 VALLEYVIEW_CURSOR_MAX_WM,
3734 I965_CURSOR_DFT_WM,
3735 2,
3736 G4X_FIFO_LINE_SIZE,
3737};
3609static const struct intel_watermark_params i965_cursor_wm_info = { 3738static const struct intel_watermark_params i965_cursor_wm_info = {
3610 I965_CURSOR_FIFO, 3739 I965_CURSOR_FIFO,
3611 I965_CURSOR_MAX_WM, 3740 I965_CURSOR_MAX_WM,
@@ -4128,8 +4257,134 @@ static bool g4x_compute_srwm(struct drm_device *dev,
4128 display, cursor); 4257 display, cursor);
4129} 4258}
4130 4259
4260static bool vlv_compute_drain_latency(struct drm_device *dev,
4261 int plane,
4262 int *plane_prec_mult,
4263 int *plane_dl,
4264 int *cursor_prec_mult,
4265 int *cursor_dl)
4266{
4267 struct drm_crtc *crtc;
4268 int clock, pixel_size;
4269 int entries;
4270
4271 crtc = intel_get_crtc_for_plane(dev, plane);
4272 if (crtc->fb == NULL || !crtc->enabled)
4273 return false;
4274
4275 clock = crtc->mode.clock; /* VESA DOT Clock */
4276 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
4277
4278 entries = (clock / 1000) * pixel_size;
4279 *plane_prec_mult = (entries > 256) ?
4280 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
4281 *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
4282 pixel_size);
4283
4284 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
4285 *cursor_prec_mult = (entries > 256) ?
4286 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
4287 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
4288
4289 return true;
4290}
4291
4292/*
4293 * Update drain latency registers of memory arbiter
4294 *
4295 * Valleyview SoC has a new memory arbiter and needs drain latency registers
4296 * to be programmed. Each plane has a drain latency multiplier and a drain
4297 * latency value.
4298 */
4299
4300static void vlv_update_drain_latency(struct drm_device *dev)
4301{
4302 struct drm_i915_private *dev_priv = dev->dev_private;
4303 int planea_prec, planea_dl, planeb_prec, planeb_dl;
4304 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
4305 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
4306 either 16 or 32 */
4307
4308 /* For plane A, Cursor A */
4309 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
4310 &cursor_prec_mult, &cursora_dl)) {
4311 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4312 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
4313 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4314 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
4315
4316 I915_WRITE(VLV_DDL1, cursora_prec |
4317 (cursora_dl << DDL_CURSORA_SHIFT) |
4318 planea_prec | planea_dl);
4319 }
4320
4321 /* For plane B, Cursor B */
4322 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
4323 &cursor_prec_mult, &cursorb_dl)) {
4324 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4325 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
4326 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4327 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
4328
4329 I915_WRITE(VLV_DDL2, cursorb_prec |
4330 (cursorb_dl << DDL_CURSORB_SHIFT) |
4331 planeb_prec | planeb_dl);
4332 }
4333}
4334
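vlv_compute_drain_latency() above derives the new memory-arbiter fields from the pixel clock and bytes per pixel: 'entries' approximates FIFO entries drained per microsecond (MHz * bpp), which selects a precision multiplier of 16 or 32, and the drain-latency value is then 64 * precision * 4 / (MHz * bpp). A small userspace restatement of that arithmetic with assumed example numbers (148.5 MHz dot clock, 4 bytes per pixel):

#include <stdio.h>

/* Precision selector mirrored from the patch: more than 256 entries per
 * microsecond picks the 32x multiplier, otherwise 16x. */
static void drain_latency(int clock_khz, int bytes_pp, int *prec, int *dl)
{
        int entries = (clock_khz / 1000) * bytes_pp;    /* entries per microsecond */

        *prec = (entries > 256) ? 32 : 16;
        *dl = (64 * (*prec) * 4) / ((clock_khz / 1000) * bytes_pp);
}

int main(void)
{
        int prec, dl;

        drain_latency(148500, 4, &prec, &dl);   /* made-up mode, not from the patch */
        printf("precision=%dx drain latency=%d\n", prec, dl);
        return 0;
}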
4131#define single_plane_enabled(mask) is_power_of_2(mask) 4335#define single_plane_enabled(mask) is_power_of_2(mask)
4132 4336
4337static void valleyview_update_wm(struct drm_device *dev)
4338{
4339 static const int sr_latency_ns = 12000;
4340 struct drm_i915_private *dev_priv = dev->dev_private;
4341 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4342 int plane_sr, cursor_sr;
4343 unsigned int enabled = 0;
4344
4345 vlv_update_drain_latency(dev);
4346
4347 if (g4x_compute_wm0(dev, 0,
4348 &valleyview_wm_info, latency_ns,
4349 &valleyview_cursor_wm_info, latency_ns,
4350 &planea_wm, &cursora_wm))
4351 enabled |= 1;
4352
4353 if (g4x_compute_wm0(dev, 1,
4354 &valleyview_wm_info, latency_ns,
4355 &valleyview_cursor_wm_info, latency_ns,
4356 &planeb_wm, &cursorb_wm))
4357 enabled |= 2;
4358
4359 plane_sr = cursor_sr = 0;
4360 if (single_plane_enabled(enabled) &&
4361 g4x_compute_srwm(dev, ffs(enabled) - 1,
4362 sr_latency_ns,
4363 &valleyview_wm_info,
4364 &valleyview_cursor_wm_info,
4365 &plane_sr, &cursor_sr))
4366 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
4367 else
4368 I915_WRITE(FW_BLC_SELF_VLV,
4369 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
4370
4371 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4372 planea_wm, cursora_wm,
4373 planeb_wm, cursorb_wm,
4374 plane_sr, cursor_sr);
4375
4376 I915_WRITE(DSPFW1,
4377 (plane_sr << DSPFW_SR_SHIFT) |
4378 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4379 (planeb_wm << DSPFW_PLANEB_SHIFT) |
4380 planea_wm);
4381 I915_WRITE(DSPFW2,
4382 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4383 (cursora_wm << DSPFW_CURSORA_SHIFT));
4384 I915_WRITE(DSPFW3,
4385 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
4386}
4387
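valleyview_update_wm() reuses the existing g4x watermark helpers: each plane that yields a valid watermark sets a bit in 'enabled', and the self-refresh watermark (and FW_CSPWRDWNEN) is only programmed when exactly one plane is active, which single_plane_enabled() tests with is_power_of_2 on that mask. An isolated illustration of the mask test with stand-in values:

#include <stdbool.h>
#include <stdio.h>

/* Same test the driver uses: exactly one bit set means one active plane. */
static bool single_plane_enabled(unsigned int mask)
{
        return mask && !(mask & (mask - 1));
}

int main(void)
{
        unsigned int enabled = 0;

        enabled |= 1;           /* plane A produced a valid watermark */
        /* enabled |= 2; */     /* plane B as well would disqualify self-refresh */

        printf("self-refresh allowed: %s\n",
               single_plane_enabled(enabled) ? "yes" : "no");
        return 0;
}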
4133static void g4x_update_wm(struct drm_device *dev) 4388static void g4x_update_wm(struct drm_device *dev)
4134{ 4389{
4135 static const int sr_latency_ns = 12000; 4390 static const int sr_latency_ns = 12000;
@@ -5113,6 +5368,233 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5113 } 5368 }
5114} 5369}
5115 5370
5371static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
5372 struct drm_display_mode *adjusted_mode)
5373{
5374 struct drm_device *dev = crtc->dev;
5375 struct drm_i915_private *dev_priv = dev->dev_private;
5376 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5377 int pipe = intel_crtc->pipe;
5378 u32 temp, lvds_sync = 0;
5379
5380 temp = I915_READ(LVDS);
5381 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5382 if (pipe == 1) {
5383 temp |= LVDS_PIPEB_SELECT;
5384 } else {
5385 temp &= ~LVDS_PIPEB_SELECT;
5386 }
 5387 /* set the corresponding LVDS_BORDER bit */
5388 temp |= dev_priv->lvds_border_bits;
5389 /* Set the B0-B3 data pairs corresponding to whether we're going to
5390 * set the DPLLs for dual-channel mode or not.
5391 */
5392 if (clock->p2 == 7)
5393 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5394 else
5395 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5396
5397 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5398 * appropriately here, but we need to look more thoroughly into how
5399 * panels behave in the two modes.
5400 */
5401 /* set the dithering flag on LVDS as needed */
5402 if (INTEL_INFO(dev)->gen >= 4) {
5403 if (dev_priv->lvds_dither)
5404 temp |= LVDS_ENABLE_DITHER;
5405 else
5406 temp &= ~LVDS_ENABLE_DITHER;
5407 }
5408 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5409 lvds_sync |= LVDS_HSYNC_POLARITY;
5410 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5411 lvds_sync |= LVDS_VSYNC_POLARITY;
5412 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5413 != lvds_sync) {
5414 char flags[2] = "-+";
5415 DRM_INFO("Changing LVDS panel from "
5416 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5417 flags[!(temp & LVDS_HSYNC_POLARITY)],
5418 flags[!(temp & LVDS_VSYNC_POLARITY)],
5419 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5420 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5421 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5422 temp |= lvds_sync;
5423 }
5424 I915_WRITE(LVDS, temp);
5425}
5426
5427static void i9xx_update_pll(struct drm_crtc *crtc,
5428 struct drm_display_mode *mode,
5429 struct drm_display_mode *adjusted_mode,
5430 intel_clock_t *clock, intel_clock_t *reduced_clock,
5431 int num_connectors)
5432{
5433 struct drm_device *dev = crtc->dev;
5434 struct drm_i915_private *dev_priv = dev->dev_private;
5435 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5436 int pipe = intel_crtc->pipe;
5437 u32 dpll;
5438 bool is_sdvo;
5439
5440 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
5441 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
5442
5443 dpll = DPLL_VGA_MODE_DIS;
5444
5445 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
5446 dpll |= DPLLB_MODE_LVDS;
5447 else
5448 dpll |= DPLLB_MODE_DAC_SERIAL;
5449 if (is_sdvo) {
5450 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5451 if (pixel_multiplier > 1) {
5452 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5453 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5454 }
5455 dpll |= DPLL_DVO_HIGH_SPEED;
5456 }
5457 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
5458 dpll |= DPLL_DVO_HIGH_SPEED;
5459
5460 /* compute bitmask from p1 value */
5461 if (IS_PINEVIEW(dev))
5462 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5463 else {
5464 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5465 if (IS_G4X(dev) && reduced_clock)
5466 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5467 }
5468 switch (clock->p2) {
5469 case 5:
5470 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5471 break;
5472 case 7:
5473 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5474 break;
5475 case 10:
5476 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5477 break;
5478 case 14:
5479 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5480 break;
5481 }
5482 if (INTEL_INFO(dev)->gen >= 4)
5483 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5484
5485 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
5486 dpll |= PLL_REF_INPUT_TVCLKINBC;
5487 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
5488 /* XXX: just matching BIOS for now */
5489 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5490 dpll |= 3;
5491 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5492 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5493 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5494 else
5495 dpll |= PLL_REF_INPUT_DREFCLK;
5496
5497 dpll |= DPLL_VCO_ENABLE;
5498 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5499 POSTING_READ(DPLL(pipe));
5500 udelay(150);
5501
5502 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5503 * This is an exception to the general rule that mode_set doesn't turn
5504 * things on.
5505 */
5506 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
5507 intel_update_lvds(crtc, clock, adjusted_mode);
5508
5509 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
5510 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5511
5512 I915_WRITE(DPLL(pipe), dpll);
5513
5514 /* Wait for the clocks to stabilize. */
5515 POSTING_READ(DPLL(pipe));
5516 udelay(150);
5517
5518 if (INTEL_INFO(dev)->gen >= 4) {
5519 u32 temp = 0;
5520 if (is_sdvo) {
5521 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5522 if (temp > 1)
5523 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5524 else
5525 temp = 0;
5526 }
5527 I915_WRITE(DPLL_MD(pipe), temp);
5528 } else {
5529 /* The pixel multiplier can only be updated once the
5530 * DPLL is enabled and the clocks are stable.
5531 *
5532 * So write it again.
5533 */
5534 I915_WRITE(DPLL(pipe), dpll);
5535 }
5536}
5537
5538static void i8xx_update_pll(struct drm_crtc *crtc,
5539 struct drm_display_mode *adjusted_mode,
5540 intel_clock_t *clock,
5541 int num_connectors)
5542{
5543 struct drm_device *dev = crtc->dev;
5544 struct drm_i915_private *dev_priv = dev->dev_private;
5545 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5546 int pipe = intel_crtc->pipe;
5547 u32 dpll;
5548
5549 dpll = DPLL_VGA_MODE_DIS;
5550
5551 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
5552 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5553 } else {
5554 if (clock->p1 == 2)
5555 dpll |= PLL_P1_DIVIDE_BY_TWO;
5556 else
5557 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5558 if (clock->p2 == 4)
5559 dpll |= PLL_P2_DIVIDE_BY_4;
5560 }
5561
5562 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
5563 /* XXX: just matching BIOS for now */
5564 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5565 dpll |= 3;
5566 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5567 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5568 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5569 else
5570 dpll |= PLL_REF_INPUT_DREFCLK;
5571
5572 dpll |= DPLL_VCO_ENABLE;
5573 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5574 POSTING_READ(DPLL(pipe));
5575 udelay(150);
5576
5577 I915_WRITE(DPLL(pipe), dpll);
5578
5579 /* Wait for the clocks to stabilize. */
5580 POSTING_READ(DPLL(pipe));
5581 udelay(150);
5582
5583 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5584 * This is an exception to the general rule that mode_set doesn't turn
5585 * things on.
5586 */
5587 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
5588 intel_update_lvds(crtc, clock, adjusted_mode);
5589
5590 /* The pixel multiplier can only be updated once the
5591 * DPLL is enabled and the clocks are stable.
5592 *
5593 * So write it again.
5594 */
5595 I915_WRITE(DPLL(pipe), dpll);
5596}
5597
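Both extracted helpers follow the same enable sequence: program the DPLL with the VCO still off, wait about 150 us after a posting read, power up the LVDS pin pair if needed, write the final value and wait for the clocks to stabilize, then write once more for parts that only latch the pixel multiplier after the PLL is stable. A condensed sketch of that ordering; the register, bit value and divider word below are placeholders, not real i915 definitions:

#include <stdint.h>
#include <stdio.h>

static uint32_t dpll_reg;                       /* fake register backing store */
#define DPLL_VCO_ENABLE (1u << 31)              /* placeholder bit */

static void write_reg(uint32_t val)  { dpll_reg = val; printf("DPLL <- 0x%08x\n", val); }
static uint32_t read_reg(void)       { return dpll_reg; }       /* posting read */
static void delay_us(unsigned int u) { (void)u; }               /* udelay() stand-in */
static void enable_lvds_pins(void)   { printf("LVDS pin pair powered up\n"); }

static void program_dpll(uint32_t dpll, int is_lvds)
{
        /* 1. Program dividers with the VCO disabled. */
        write_reg(dpll & ~DPLL_VCO_ENABLE);
        (void)read_reg();
        delay_us(150);

        /* 2. LVDS pins must be on before the DPLL is enabled. */
        if (is_lvds)
                enable_lvds_pins();

        /* 3. Enable the VCO and let the clocks settle. */
        write_reg(dpll | DPLL_VCO_ENABLE);
        (void)read_reg();
        delay_us(150);

        /* 4. Rewrite so late-latching pixel multipliers take effect. */
        write_reg(dpll | DPLL_VCO_ENABLE);
}

int main(void)
{
        program_dpll(0x00021403, 1);    /* made-up divider value, LVDS pipe */
        return 0;
}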
5116static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 5598static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5117 struct drm_display_mode *mode, 5599 struct drm_display_mode *mode,
5118 struct drm_display_mode *adjusted_mode, 5600 struct drm_display_mode *adjusted_mode,
@@ -5126,15 +5608,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5126 int plane = intel_crtc->plane; 5608 int plane = intel_crtc->plane;
5127 int refclk, num_connectors = 0; 5609 int refclk, num_connectors = 0;
5128 intel_clock_t clock, reduced_clock; 5610 intel_clock_t clock, reduced_clock;
5129 u32 dpll, dspcntr, pipeconf, vsyncshift; 5611 u32 dspcntr, pipeconf, vsyncshift;
5130 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 5612 bool ok, has_reduced_clock = false, is_sdvo = false;
5131 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 5613 bool is_lvds = false, is_tv = false, is_dp = false;
5132 struct drm_mode_config *mode_config = &dev->mode_config; 5614 struct drm_mode_config *mode_config = &dev->mode_config;
5133 struct intel_encoder *encoder; 5615 struct intel_encoder *encoder;
5134 const intel_limit_t *limit; 5616 const intel_limit_t *limit;
5135 int ret; 5617 int ret;
5136 u32 temp;
5137 u32 lvds_sync = 0;
5138 5618
5139 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 5619 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5140 if (encoder->base.crtc != crtc) 5620 if (encoder->base.crtc != crtc)
@@ -5150,15 +5630,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5150 if (encoder->needs_tv_clock) 5630 if (encoder->needs_tv_clock)
5151 is_tv = true; 5631 is_tv = true;
5152 break; 5632 break;
5153 case INTEL_OUTPUT_DVO:
5154 is_dvo = true;
5155 break;
5156 case INTEL_OUTPUT_TVOUT: 5633 case INTEL_OUTPUT_TVOUT:
5157 is_tv = true; 5634 is_tv = true;
5158 break; 5635 break;
5159 case INTEL_OUTPUT_ANALOG:
5160 is_crt = true;
5161 break;
5162 case INTEL_OUTPUT_DISPLAYPORT: 5636 case INTEL_OUTPUT_DISPLAYPORT:
5163 is_dp = true; 5637 is_dp = true;
5164 break; 5638 break;
@@ -5205,71 +5679,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5205 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ? 5679 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5206 &reduced_clock : NULL); 5680 &reduced_clock : NULL);
5207 5681
5208 dpll = DPLL_VGA_MODE_DIS; 5682 if (IS_GEN2(dev))
5209 5683 i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
5210 if (!IS_GEN2(dev)) {
5211 if (is_lvds)
5212 dpll |= DPLLB_MODE_LVDS;
5213 else
5214 dpll |= DPLLB_MODE_DAC_SERIAL;
5215 if (is_sdvo) {
5216 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5217 if (pixel_multiplier > 1) {
5218 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5219 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5220 }
5221 dpll |= DPLL_DVO_HIGH_SPEED;
5222 }
5223 if (is_dp)
5224 dpll |= DPLL_DVO_HIGH_SPEED;
5225
5226 /* compute bitmask from p1 value */
5227 if (IS_PINEVIEW(dev))
5228 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5229 else {
5230 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5231 if (IS_G4X(dev) && has_reduced_clock)
5232 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5233 }
5234 switch (clock.p2) {
5235 case 5:
5236 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5237 break;
5238 case 7:
5239 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5240 break;
5241 case 10:
5242 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5243 break;
5244 case 14:
5245 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5246 break;
5247 }
5248 if (INTEL_INFO(dev)->gen >= 4)
5249 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5250 } else {
5251 if (is_lvds) {
5252 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5253 } else {
5254 if (clock.p1 == 2)
5255 dpll |= PLL_P1_DIVIDE_BY_TWO;
5256 else
5257 dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5258 if (clock.p2 == 4)
5259 dpll |= PLL_P2_DIVIDE_BY_4;
5260 }
5261 }
5262
5263 if (is_sdvo && is_tv)
5264 dpll |= PLL_REF_INPUT_TVCLKINBC;
5265 else if (is_tv)
5266 /* XXX: just matching BIOS for now */
5267 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
5268 dpll |= 3;
5269 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5270 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5271 else 5684 else
5272 dpll |= PLL_REF_INPUT_DREFCLK; 5685 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
5686 has_reduced_clock ? &reduced_clock : NULL,
5687 num_connectors);
5273 5688
5274 /* setup pipeconf */ 5689 /* setup pipeconf */
5275 pipeconf = I915_READ(PIPECONF(pipe)); 5690 pipeconf = I915_READ(PIPECONF(pipe));
@@ -5306,97 +5721,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5306 } 5721 }
5307 } 5722 }
5308 5723
5309 dpll |= DPLL_VCO_ENABLE;
5310
5311 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 5724 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5312 drm_mode_debug_printmodeline(mode); 5725 drm_mode_debug_printmodeline(mode);
5313 5726
5314 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5315
5316 POSTING_READ(DPLL(pipe));
5317 udelay(150);
5318
5319 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5320 * This is an exception to the general rule that mode_set doesn't turn
5321 * things on.
5322 */
5323 if (is_lvds) {
5324 temp = I915_READ(LVDS);
5325 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5326 if (pipe == 1) {
5327 temp |= LVDS_PIPEB_SELECT;
5328 } else {
5329 temp &= ~LVDS_PIPEB_SELECT;
5330 }
 5331 /* set the corresponding LVDS_BORDER bit */
5332 temp |= dev_priv->lvds_border_bits;
5333 /* Set the B0-B3 data pairs corresponding to whether we're going to
5334 * set the DPLLs for dual-channel mode or not.
5335 */
5336 if (clock.p2 == 7)
5337 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5338 else
5339 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5340
5341 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5342 * appropriately here, but we need to look more thoroughly into how
5343 * panels behave in the two modes.
5344 */
5345 /* set the dithering flag on LVDS as needed */
5346 if (INTEL_INFO(dev)->gen >= 4) {
5347 if (dev_priv->lvds_dither)
5348 temp |= LVDS_ENABLE_DITHER;
5349 else
5350 temp &= ~LVDS_ENABLE_DITHER;
5351 }
5352 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5353 lvds_sync |= LVDS_HSYNC_POLARITY;
5354 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5355 lvds_sync |= LVDS_VSYNC_POLARITY;
5356 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5357 != lvds_sync) {
5358 char flags[2] = "-+";
5359 DRM_INFO("Changing LVDS panel from "
5360 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5361 flags[!(temp & LVDS_HSYNC_POLARITY)],
5362 flags[!(temp & LVDS_VSYNC_POLARITY)],
5363 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5364 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5365 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5366 temp |= lvds_sync;
5367 }
5368 I915_WRITE(LVDS, temp);
5369 }
5370
5371 if (is_dp) {
5372 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5373 }
5374
5375 I915_WRITE(DPLL(pipe), dpll);
5376
5377 /* Wait for the clocks to stabilize. */
5378 POSTING_READ(DPLL(pipe));
5379 udelay(150);
5380
5381 if (INTEL_INFO(dev)->gen >= 4) {
5382 temp = 0;
5383 if (is_sdvo) {
5384 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5385 if (temp > 1)
5386 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5387 else
5388 temp = 0;
5389 }
5390 I915_WRITE(DPLL_MD(pipe), temp);
5391 } else {
5392 /* The pixel multiplier can only be updated once the
5393 * DPLL is enabled and the clocks are stable.
5394 *
5395 * So write it again.
5396 */
5397 I915_WRITE(DPLL(pipe), dpll);
5398 }
5399
5400 if (HAS_PIPE_CXSR(dev)) { 5727 if (HAS_PIPE_CXSR(dev)) {
5401 if (intel_crtc->lowfreq_avail) { 5728 if (intel_crtc->lowfreq_avail) {
5402 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 5729 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
@@ -7796,7 +8123,7 @@ static void intel_setup_outputs(struct drm_device *dev)
7796 8123
7797 if (I915_READ(HDMIB) & PORT_DETECTED) { 8124 if (I915_READ(HDMIB) & PORT_DETECTED) {
7798 /* PCH SDVOB multiplex with HDMIB */ 8125 /* PCH SDVOB multiplex with HDMIB */
7799 found = intel_sdvo_init(dev, PCH_SDVOB); 8126 found = intel_sdvo_init(dev, PCH_SDVOB, true);
7800 if (!found) 8127 if (!found)
7801 intel_hdmi_init(dev, HDMIB); 8128 intel_hdmi_init(dev, HDMIB);
7802 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 8129 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
@@ -7820,7 +8147,7 @@ static void intel_setup_outputs(struct drm_device *dev)
7820 8147
7821 if (I915_READ(SDVOB) & SDVO_DETECTED) { 8148 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7822 DRM_DEBUG_KMS("probing SDVOB\n"); 8149 DRM_DEBUG_KMS("probing SDVOB\n");
7823 found = intel_sdvo_init(dev, SDVOB); 8150 found = intel_sdvo_init(dev, SDVOB, true);
7824 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 8151 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
7825 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 8152 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
7826 intel_hdmi_init(dev, SDVOB); 8153 intel_hdmi_init(dev, SDVOB);
@@ -7836,7 +8163,7 @@ static void intel_setup_outputs(struct drm_device *dev)
7836 8163
7837 if (I915_READ(SDVOB) & SDVO_DETECTED) { 8164 if (I915_READ(SDVOB) & SDVO_DETECTED) {
7838 DRM_DEBUG_KMS("probing SDVOC\n"); 8165 DRM_DEBUG_KMS("probing SDVOC\n");
7839 found = intel_sdvo_init(dev, SDVOC); 8166 found = intel_sdvo_init(dev, SDVOC, false);
7840 } 8167 }
7841 8168
7842 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 8169 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
@@ -8617,6 +8944,54 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
8617 } 8944 }
8618} 8945}
8619 8946
8947static void valleyview_init_clock_gating(struct drm_device *dev)
8948{
8949 struct drm_i915_private *dev_priv = dev->dev_private;
8950 int pipe;
8951 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8952
8953 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8954
8955 I915_WRITE(WM3_LP_ILK, 0);
8956 I915_WRITE(WM2_LP_ILK, 0);
8957 I915_WRITE(WM1_LP_ILK, 0);
8958
8959 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8960 * This implements the WaDisableRCZUnitClockGating workaround.
8961 */
8962 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8963
8964 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8965
8966 I915_WRITE(IVB_CHICKEN3,
8967 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8968 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8969
8970 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8971 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8972 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8973
8974 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8975 I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
8976 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
8977
8978 /* This is required by WaCatErrorRejectionIssue */
8979 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8980 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8981 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8982
8983 for_each_pipe(pipe) {
8984 I915_WRITE(DSPCNTR(pipe),
8985 I915_READ(DSPCNTR(pipe)) |
8986 DISPPLANE_TRICKLE_FEED_DISABLE);
8987 intel_flush_display_plane(dev_priv, pipe);
8988 }
8989
8990 I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) |
8991 (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) |
8992 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
8993}
8994
8620static void g4x_init_clock_gating(struct drm_device *dev) 8995static void g4x_init_clock_gating(struct drm_device *dev)
8621{ 8996{
8622 struct drm_i915_private *dev_priv = dev->dev_private; 8997 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -8871,7 +9246,10 @@ static void intel_init_display(struct drm_device *dev)
8871 } 9246 }
8872 9247
8873 /* Returns the core display clock speed */ 9248 /* Returns the core display clock speed */
8874 if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 9249 if (IS_VALLEYVIEW(dev))
9250 dev_priv->display.get_display_clock_speed =
9251 valleyview_get_display_clock_speed;
9252 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8875 dev_priv->display.get_display_clock_speed = 9253 dev_priv->display.get_display_clock_speed =
8876 i945_get_display_clock_speed; 9254 i945_get_display_clock_speed;
8877 else if (IS_I915G(dev)) 9255 else if (IS_I915G(dev))
@@ -8966,6 +9344,12 @@ static void intel_init_display(struct drm_device *dev)
8966 dev_priv->display.write_eld = ironlake_write_eld; 9344 dev_priv->display.write_eld = ironlake_write_eld;
8967 } else 9345 } else
8968 dev_priv->display.update_wm = NULL; 9346 dev_priv->display.update_wm = NULL;
9347 } else if (IS_VALLEYVIEW(dev)) {
9348 dev_priv->display.update_wm = valleyview_update_wm;
9349 dev_priv->display.init_clock_gating =
9350 valleyview_init_clock_gating;
9351 dev_priv->display.force_wake_get = vlv_force_wake_get;
9352 dev_priv->display.force_wake_put = vlv_force_wake_put;
8969 } else if (IS_PINEVIEW(dev)) { 9353 } else if (IS_PINEVIEW(dev)) {
8970 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 9354 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
8971 dev_priv->is_ddr3, 9355 dev_priv->is_ddr3,
@@ -9049,7 +9433,7 @@ static void quirk_pipea_force(struct drm_device *dev)
9049 struct drm_i915_private *dev_priv = dev->dev_private; 9433 struct drm_i915_private *dev_priv = dev->dev_private;
9050 9434
9051 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 9435 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9052 DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); 9436 DRM_INFO("applying pipe a force quirk\n");
9053} 9437}
9054 9438
9055/* 9439/*
@@ -9059,6 +9443,18 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
9059{ 9443{
9060 struct drm_i915_private *dev_priv = dev->dev_private; 9444 struct drm_i915_private *dev_priv = dev->dev_private;
9061 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 9445 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9446 DRM_INFO("applying lvds SSC disable quirk\n");
9447}
9448
9449/*
9450 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
9451 * brightness value
9452 */
9453static void quirk_invert_brightness(struct drm_device *dev)
9454{
9455 struct drm_i915_private *dev_priv = dev->dev_private;
9456 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
9457 DRM_INFO("applying inverted panel brightness quirk\n");
9062} 9458}
9063 9459
9064struct intel_quirk { 9460struct intel_quirk {
@@ -9093,6 +9489,9 @@ struct intel_quirk intel_quirks[] = {
9093 9489
9094 /* Sony Vaio Y cannot use SSC on LVDS */ 9490 /* Sony Vaio Y cannot use SSC on LVDS */
9095 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 9491 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
9492
9493 /* Acer Aspire 5734Z must invert backlight brightness */
9494 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
9096}; 9495};
9097 9496
9098static void intel_init_quirks(struct drm_device *dev) 9497static void intel_init_quirks(struct drm_device *dev)
@@ -9236,6 +9635,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
9236 if (IS_IRONLAKE_M(dev)) 9635 if (IS_IRONLAKE_M(dev))
9237 ironlake_disable_rc6(dev); 9636 ironlake_disable_rc6(dev);
9238 9637
9638 if (IS_VALLEYVIEW(dev))
9639 vlv_init_dpio(dev);
9640
9239 mutex_unlock(&dev->struct_mutex); 9641 mutex_unlock(&dev->struct_mutex);
9240 9642
9241 /* Disable the irq before mode object teardown, for the irq might 9643 /* Disable the irq before mode object teardown, for the irq might
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5a14149b3794..79cabf58d877 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,6 +45,18 @@
45 ret__; \ 45 ret__; \
46}) 46})
47 47
48#define wait_for_atomic_us(COND, US) ({ \
49 int i, ret__ = -ETIMEDOUT; \
50 for (i = 0; i < (US); i++) { \
51 if ((COND)) { \
52 ret__ = 0; \
53 break; \
54 } \
55 udelay(1); \
56 } \
57 ret__; \
58})
59
48#define wait_for(COND, MS) _wait_for(COND, MS, 1) 60#define wait_for(COND, MS) _wait_for(COND, MS, 1)
49#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) 61#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
50 62
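The new wait_for_atomic_us() macro busy-waits on a condition in 1 us steps for at most US iterations and evaluates to 0 on success or -ETIMEDOUT, giving atomic-context callers a bounded poll without sleeping. A userspace analogue showing the shape of the macro and a trivial use; the hw_ready flag and usleep()-based delay are stand-ins for real hardware state and udelay():

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static volatile int hw_ready;                           /* pretend hardware flag */

static void delay_us(unsigned int us) { usleep(us); }   /* udelay() analogue */

/* Same shape as the new macro: poll COND once per microsecond, at most US
 * times, evaluating to 0 on success or -ETIMEDOUT on failure. */
#define wait_for_atomic_us(COND, US) ({                         \
        int i__, ret__ = -ETIMEDOUT;                            \
        for (i__ = 0; i__ < (US); i__++) {                      \
                if (COND) { ret__ = 0; break; }                 \
                delay_us(1);                                    \
        }                                                       \
        ret__;                                                  \
})

int main(void)
{
        hw_ready = 1;   /* pretend the hardware came up immediately */
        printf("wait returned %d\n", wait_for_atomic_us(hw_ready, 50));
        return 0;
}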
@@ -293,7 +305,8 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
293extern void intel_crt_init(struct drm_device *dev); 305extern void intel_crt_init(struct drm_device *dev);
294extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 306extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
295void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); 307void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
296extern bool intel_sdvo_init(struct drm_device *dev, int output_device); 308extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
309 bool is_sdvob);
297extern void intel_dvo_init(struct drm_device *dev); 310extern void intel_dvo_init(struct drm_device *dev);
298extern void intel_tv_init(struct drm_device *dev); 311extern void intel_tv_init(struct drm_device *dev);
299extern void intel_mark_busy(struct drm_device *dev, 312extern void intel_mark_busy(struct drm_device *dev,
@@ -419,4 +432,6 @@ extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
419extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 432extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
420 struct drm_file *file_priv); 433 struct drm_file *file_priv);
421 434
435extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
436
422#endif /* __INTEL_DRV_H__ */ 437#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7f744d..60ba50b956f2 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -243,7 +243,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
243 * that's not the case. 243 * that's not the case.
244 */ 244 */
245 intel_ddc_get_modes(connector, 245 intel_ddc_get_modes(connector,
246 &dev_priv->gmbus[GMBUS_PORT_DPC].adapter); 246 intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
247 if (!list_empty(&connector->probed_modes)) 247 if (!list_empty(&connector->probed_modes))
248 return 1; 248 return 1;
249 249
@@ -375,7 +375,7 @@ void intel_dvo_init(struct drm_device *dev)
375 * special cases, but otherwise default to what's defined 375 * special cases, but otherwise default to what's defined
376 * in the spec. 376 * in the spec.
377 */ 377 */
378 if (dvo->gpio != 0) 378 if (intel_gmbus_is_port_valid(dvo->gpio))
379 gpio = dvo->gpio; 379 gpio = dvo->gpio;
380 else if (dvo->type == INTEL_DVO_CHIP_LVDS) 380 else if (dvo->type == INTEL_DVO_CHIP_LVDS)
381 gpio = GMBUS_PORT_SSC; 381 gpio = GMBUS_PORT_SSC;
@@ -386,7 +386,7 @@ void intel_dvo_init(struct drm_device *dev)
386 * It appears that everything is on GPIOE except for panels 386 * It appears that everything is on GPIOE except for panels
387 * on i830 laptops, which are on GPIOB (DVOA). 387 * on i830 laptops, which are on GPIOB (DVOA).
388 */ 388 */
389 i2c = &dev_priv->gmbus[gpio].adapter; 389 i2c = intel_gmbus_get_adapter(dev_priv, gpio);
390 390
391 intel_dvo->dev = *dvo; 391 intel_dvo->dev = *dvo;
392 if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) 392 if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index cae3e5f17a49..7de2d3b85b32 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -177,6 +177,37 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
177 177
178 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); 178 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
179} 179}
180
181static void vlv_write_infoframe(struct drm_encoder *encoder,
182 struct dip_infoframe *frame)
183{
184 uint32_t *data = (uint32_t *)frame;
185 struct drm_device *dev = encoder->dev;
186 struct drm_i915_private *dev_priv = dev->dev_private;
187 struct drm_crtc *crtc = encoder->crtc;
188 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
189 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
190 unsigned i, len = DIP_HEADER_SIZE + frame->len;
191 u32 flags, val = I915_READ(reg);
192
193 intel_wait_for_vblank(dev, intel_crtc->pipe);
194
195 flags = intel_infoframe_index(frame);
196
197 val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
198
199 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
200
201 for (i = 0; i < len; i += 4) {
202 I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
203 data++;
204 }
205
206 flags |= intel_infoframe_flags(frame);
207
208 I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
209}
210
180static void intel_set_infoframe(struct drm_encoder *encoder, 211static void intel_set_infoframe(struct drm_encoder *encoder,
181 struct dip_infoframe *frame) 212 struct dip_infoframe *frame)
182{ 213{
@@ -334,7 +365,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
334 intel_hdmi->has_hdmi_sink = false; 365 intel_hdmi->has_hdmi_sink = false;
335 intel_hdmi->has_audio = false; 366 intel_hdmi->has_audio = false;
336 edid = drm_get_edid(connector, 367 edid = drm_get_edid(connector,
337 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 368 intel_gmbus_get_adapter(dev_priv,
369 intel_hdmi->ddc_bus));
338 370
339 if (edid) { 371 if (edid) {
340 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 372 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -367,7 +399,8 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
367 */ 399 */
368 400
369 return intel_ddc_get_modes(connector, 401 return intel_ddc_get_modes(connector,
370 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 402 intel_gmbus_get_adapter(dev_priv,
403 intel_hdmi->ddc_bus));
371} 404}
372 405
373static bool 406static bool
@@ -379,7 +412,8 @@ intel_hdmi_detect_audio(struct drm_connector *connector)
379 bool has_audio = false; 412 bool has_audio = false;
380 413
381 edid = drm_get_edid(connector, 414 edid = drm_get_edid(connector,
382 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 415 intel_gmbus_get_adapter(dev_priv,
416 intel_hdmi->ddc_bus));
383 if (edid) { 417 if (edid) {
384 if (edid->input & DRM_EDID_INPUT_DIGITAL) 418 if (edid->input & DRM_EDID_INPUT_DIGITAL)
385 has_audio = drm_detect_monitor_audio(edid); 419 has_audio = drm_detect_monitor_audio(edid);
@@ -549,7 +583,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
549 if (!HAS_PCH_SPLIT(dev)) { 583 if (!HAS_PCH_SPLIT(dev)) {
550 intel_hdmi->write_infoframe = i9xx_write_infoframe; 584 intel_hdmi->write_infoframe = i9xx_write_infoframe;
551 I915_WRITE(VIDEO_DIP_CTL, 0); 585 I915_WRITE(VIDEO_DIP_CTL, 0);
552 } else { 586 } else if (IS_VALLEYVIEW(dev)) {
587 intel_hdmi->write_infoframe = vlv_write_infoframe;
588 for_each_pipe(i)
589 I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
590 } else {
553 intel_hdmi->write_infoframe = ironlake_write_infoframe; 591 intel_hdmi->write_infoframe = ironlake_write_infoframe;
554 for_each_pipe(i) 592 for_each_pipe(i)
555 I915_WRITE(TVIDEO_DIP_CTL(i), 0); 593 I915_WRITE(TVIDEO_DIP_CTL(i), 0);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 601c86e664af..c12db7265893 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -35,6 +35,20 @@
35#include "i915_drm.h" 35#include "i915_drm.h"
36#include "i915_drv.h" 36#include "i915_drv.h"
37 37
38struct gmbus_port {
39 const char *name;
40 int reg;
41};
42
43static const struct gmbus_port gmbus_ports[] = {
44 { "ssc", GPIOB },
45 { "vga", GPIOA },
46 { "panel", GPIOC },
47 { "dpc", GPIOD },
48 { "dpb", GPIOE },
49 { "dpd", GPIOF },
50};
51
38/* Intel GPIO access functions */ 52/* Intel GPIO access functions */
39 53
40#define I2C_RISEFALL_TIME 10 54#define I2C_RISEFALL_TIME 10
@@ -49,10 +63,7 @@ void
49intel_i2c_reset(struct drm_device *dev) 63intel_i2c_reset(struct drm_device *dev)
50{ 64{
51 struct drm_i915_private *dev_priv = dev->dev_private; 65 struct drm_i915_private *dev_priv = dev->dev_private;
52 if (HAS_PCH_SPLIT(dev)) 66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
53 I915_WRITE(PCH_GMBUS0, 0);
54 else
55 I915_WRITE(GMBUS0, 0);
56} 67}
57 68
58static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) 69static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -140,65 +151,132 @@ static void set_data(void *data, int state_high)
140 POSTING_READ(bus->gpio_reg); 151 POSTING_READ(bus->gpio_reg);
141} 152}
142 153
143static bool 154static int
155intel_gpio_pre_xfer(struct i2c_adapter *adapter)
156{
157 struct intel_gmbus *bus = container_of(adapter,
158 struct intel_gmbus,
159 adapter);
160 struct drm_i915_private *dev_priv = bus->dev_priv;
161
162 intel_i2c_reset(dev_priv->dev);
163 intel_i2c_quirk_set(dev_priv, true);
164 set_data(bus, 1);
165 set_clock(bus, 1);
166 udelay(I2C_RISEFALL_TIME);
167 return 0;
168}
169
170static void
171intel_gpio_post_xfer(struct i2c_adapter *adapter)
172{
173 struct intel_gmbus *bus = container_of(adapter,
174 struct intel_gmbus,
175 adapter);
176 struct drm_i915_private *dev_priv = bus->dev_priv;
177
178 set_data(bus, 1);
179 set_clock(bus, 1);
180 intel_i2c_quirk_set(dev_priv, false);
181}
182
183static void
144intel_gpio_setup(struct intel_gmbus *bus, u32 pin) 184intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
145{ 185{
146 struct drm_i915_private *dev_priv = bus->dev_priv; 186 struct drm_i915_private *dev_priv = bus->dev_priv;
147 static const int map_pin_to_reg[] = {
148 0,
149 GPIOB,
150 GPIOA,
151 GPIOC,
152 GPIOD,
153 GPIOE,
154 0,
155 GPIOF,
156 };
157 struct i2c_algo_bit_data *algo; 187 struct i2c_algo_bit_data *algo;
158 188
159 if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
160 return false;
161
162 algo = &bus->bit_algo; 189 algo = &bus->bit_algo;
163 190
164 bus->gpio_reg = map_pin_to_reg[pin]; 191 /* -1 to map pin pair to gmbus index */
165 if (HAS_PCH_SPLIT(dev_priv->dev)) 192 bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
166 bus->gpio_reg += PCH_GPIOA - GPIOA;
167 193
168 bus->adapter.algo_data = algo; 194 bus->adapter.algo_data = algo;
169 algo->setsda = set_data; 195 algo->setsda = set_data;
170 algo->setscl = set_clock; 196 algo->setscl = set_clock;
171 algo->getsda = get_data; 197 algo->getsda = get_data;
172 algo->getscl = get_clock; 198 algo->getscl = get_clock;
199 algo->pre_xfer = intel_gpio_pre_xfer;
200 algo->post_xfer = intel_gpio_post_xfer;
173 algo->udelay = I2C_RISEFALL_TIME; 201 algo->udelay = I2C_RISEFALL_TIME;
174 algo->timeout = usecs_to_jiffies(2200); 202 algo->timeout = usecs_to_jiffies(2200);
175 algo->data = bus; 203 algo->data = bus;
176
177 return true;
178} 204}
179 205
180static int 206static int
181intel_i2c_quirk_xfer(struct intel_gmbus *bus, 207gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
182 struct i2c_msg *msgs, 208 bool last)
183 int num)
184{ 209{
185 struct drm_i915_private *dev_priv = bus->dev_priv; 210 int reg_offset = dev_priv->gpio_mmio_base;
186 int ret; 211 u16 len = msg->len;
212 u8 *buf = msg->buf;
213
214 I915_WRITE(GMBUS1 + reg_offset,
215 GMBUS_CYCLE_WAIT |
216 (last ? GMBUS_CYCLE_STOP : 0) |
217 (len << GMBUS_BYTE_COUNT_SHIFT) |
218 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
219 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
220 POSTING_READ(GMBUS2 + reg_offset);
221 do {
222 u32 val, loop = 0;
223
224 if (wait_for(I915_READ(GMBUS2 + reg_offset) &
225 (GMBUS_SATOER | GMBUS_HW_RDY),
226 50))
227 return -ETIMEDOUT;
228 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
229 return -ENXIO;
187 230
188 intel_i2c_reset(dev_priv->dev); 231 val = I915_READ(GMBUS3 + reg_offset);
232 do {
233 *buf++ = val & 0xff;
234 val >>= 8;
235 } while (--len && ++loop < 4);
236 } while (len);
189 237
190 intel_i2c_quirk_set(dev_priv, true); 238 return 0;
191 set_data(bus, 1); 239}
192 set_clock(bus, 1);
193 udelay(I2C_RISEFALL_TIME);
194 240
195 ret = i2c_bit_algo.master_xfer(&bus->adapter, msgs, num); 241static int
242gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
243 bool last)
244{
245 int reg_offset = dev_priv->gpio_mmio_base;
246 u16 len = msg->len;
247 u8 *buf = msg->buf;
248 u32 val, loop;
249
250 val = loop = 0;
251 do {
252 val |= *buf++ << (8 * loop);
253 } while (--len && ++loop < 4);
254
255 I915_WRITE(GMBUS3 + reg_offset, val);
256 I915_WRITE(GMBUS1 + reg_offset,
257 GMBUS_CYCLE_WAIT |
258 (last ? GMBUS_CYCLE_STOP : 0) |
259 (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
260 (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
261 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
262 POSTING_READ(GMBUS2 + reg_offset);
263 while (len) {
264 if (wait_for(I915_READ(GMBUS2 + reg_offset) &
265 (GMBUS_SATOER | GMBUS_HW_RDY),
266 50))
267 return -ETIMEDOUT;
268 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
269 return -ENXIO;
196 270
197 set_data(bus, 1); 271 val = loop = 0;
198 set_clock(bus, 1); 272 do {
199 intel_i2c_quirk_set(dev_priv, false); 273 val |= *buf++ << (8 * loop);
274 } while (--len && ++loop < 4);
200 275
201 return ret; 276 I915_WRITE(GMBUS3 + reg_offset, val);
277 POSTING_READ(GMBUS2 + reg_offset);
278 }
279 return 0;
202} 280}
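The extracted gmbus_xfer_read()/gmbus_xfer_write() helpers stream message bytes through the 32-bit GMBUS3 data register four at a time, low byte first, which is what their inner do/while loops implement. A standalone sketch of just that pack/unpack step, with an assumed three-byte payload:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack up to four message bytes into one 32-bit word, low byte first, the
 * way gmbus_xfer_write() fills 'val' before each GMBUS3 write (len >= 1). */
static uint32_t pack_word(const uint8_t *buf, unsigned int len)
{
        uint32_t val = 0;
        unsigned int loop = 0;

        do {
                val |= (uint32_t)*buf++ << (8 * loop);
        } while (--len && ++loop < 4);

        return val;
}

/* Unpack a word read from GMBUS3 back into message bytes, mirroring the
 * inner loop of gmbus_xfer_read(). */
static void unpack_word(uint32_t val, uint8_t *buf, unsigned int len)
{
        unsigned int loop = 0;

        do {
                *buf++ = val & 0xff;
                val >>= 8;
        } while (--len && ++loop < 4);
}

int main(void)
{
        const uint8_t msg[3] = { 0xaa, 0xbb, 0xcc };    /* example payload */
        uint8_t out[3] = { 0 };
        uint32_t word = pack_word(msg, sizeof(msg));

        unpack_word(word, out, sizeof(out));
        printf("word=0x%08x roundtrip=%s\n", word,
               memcmp(msg, out, sizeof(msg)) ? "bad" : "ok");
        return 0;
}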
203 281
204static int 282static int
@@ -215,74 +293,31 @@ gmbus_xfer(struct i2c_adapter *adapter,
215 mutex_lock(&dev_priv->gmbus_mutex); 293 mutex_lock(&dev_priv->gmbus_mutex);
216 294
217 if (bus->force_bit) { 295 if (bus->force_bit) {
218 ret = intel_i2c_quirk_xfer(bus, msgs, num); 296 ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
219 goto out; 297 goto out;
220 } 298 }
221 299
222 reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; 300 reg_offset = dev_priv->gpio_mmio_base;
223 301
224 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 302 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
225 303
226 for (i = 0; i < num; i++) { 304 for (i = 0; i < num; i++) {
227 u16 len = msgs[i].len; 305 bool last = i + 1 == num;
228 u8 *buf = msgs[i].buf; 306
229 307 if (msgs[i].flags & I2C_M_RD)
230 if (msgs[i].flags & I2C_M_RD) { 308 ret = gmbus_xfer_read(dev_priv, &msgs[i], last);
231 I915_WRITE(GMBUS1 + reg_offset, 309 else
232 GMBUS_CYCLE_WAIT | 310 ret = gmbus_xfer_write(dev_priv, &msgs[i], last);
233 (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | 311
234 (len << GMBUS_BYTE_COUNT_SHIFT) | 312 if (ret == -ETIMEDOUT)
235 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | 313 goto timeout;
236 GMBUS_SLAVE_READ | GMBUS_SW_RDY); 314 if (ret == -ENXIO)
237 POSTING_READ(GMBUS2+reg_offset); 315 goto clear_err;
238 do { 316
239 u32 val, loop = 0; 317 if (!last &&
240 318 wait_for(I915_READ(GMBUS2 + reg_offset) &
241 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) 319 (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
242 goto timeout; 320 50))
243 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
244 goto clear_err;
245
246 val = I915_READ(GMBUS3 + reg_offset);
247 do {
248 *buf++ = val & 0xff;
249 val >>= 8;
250 } while (--len && ++loop < 4);
251 } while (len);
252 } else {
253 u32 val, loop;
254
255 val = loop = 0;
256 do {
257 val |= *buf++ << (8 * loop);
258 } while (--len && ++loop < 4);
259
260 I915_WRITE(GMBUS3 + reg_offset, val);
261 I915_WRITE(GMBUS1 + reg_offset,
262 GMBUS_CYCLE_WAIT |
263 (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
264 (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
265 (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
266 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
267 POSTING_READ(GMBUS2+reg_offset);
268
269 while (len) {
270 if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
271 goto timeout;
272 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
273 goto clear_err;
274
275 val = loop = 0;
276 do {
277 val |= *buf++ << (8 * loop);
278 } while (--len && ++loop < 4);
279
280 I915_WRITE(GMBUS3 + reg_offset, val);
281 POSTING_READ(GMBUS2+reg_offset);
282 }
283 }
284
285 if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
286 goto timeout; 321 goto timeout;
287 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) 322 if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
288 goto clear_err; 323 goto clear_err;
@@ -304,23 +339,21 @@ done:
304 * till then let it sleep. 339 * till then let it sleep.
305 */ 340 */
306 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10)) 341 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
307 DRM_INFO("GMBUS timed out waiting for idle\n"); 342 DRM_INFO("GMBUS [%s] timed out waiting for idle\n",
343 bus->adapter.name);
308 I915_WRITE(GMBUS0 + reg_offset, 0); 344 I915_WRITE(GMBUS0 + reg_offset, 0);
309 ret = i; 345 ret = i;
310 goto out; 346 goto out;
311 347
312timeout: 348timeout:
313 DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", 349 DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
314 bus->reg0 & 0xff, bus->adapter.name); 350 bus->adapter.name, bus->reg0 & 0xff);
315 I915_WRITE(GMBUS0 + reg_offset, 0); 351 I915_WRITE(GMBUS0 + reg_offset, 0);
316 352
317 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 353 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
318 if (!bus->has_gpio) { 354 bus->force_bit = true;
319 ret = -EIO; 355 ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
320 } else { 356
321 bus->force_bit = true;
322 ret = intel_i2c_quirk_xfer(bus, msgs, num);
323 }
324out: 357out:
325 mutex_unlock(&dev_priv->gmbus_mutex); 358 mutex_unlock(&dev_priv->gmbus_mutex);
326 return ret; 359 return ret;
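The rewritten gmbus_xfer() loop dispatches each i2c_msg to gmbus_xfer_read() or gmbus_xfer_write() based on I2C_M_RD, passes a 'last' flag so GMBUS_CYCLE_STOP is only issued for the final message of the transaction, bails to the error paths on -ENXIO/-ETIMEDOUT, and on timeout now unconditionally falls back to GPIO bit-banging. A stripped-down model of that dispatch with the hardware access replaced by stub callbacks:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified message descriptor; the kernel's struct i2c_msg also carries
 * the address, length and buffer. */
struct msg { bool read; };

/* Stubs standing in for gmbus_xfer_read()/gmbus_xfer_write(); 'last' tells
 * the hardware to append a STOP after this message. */
static int do_read(const struct msg *m, bool last)  { (void)m; printf("read%s\n",  last ? " +stop" : ""); return 0; }
static int do_write(const struct msg *m, bool last) { (void)m; printf("write%s\n", last ? " +stop" : ""); return 0; }

static int xfer(const struct msg *msgs, int num)
{
        int i, ret;

        for (i = 0; i < num; i++) {
                bool last = (i + 1 == num);

                ret = msgs[i].read ? do_read(&msgs[i], last)
                                   : do_write(&msgs[i], last);
                if (ret == -ETIMEDOUT)
                        return ret;     /* real code falls back to bit-banging */
                if (ret == -ENXIO)
                        return ret;     /* slave NAKed: clear the error and bail */
        }
        return num;     /* i2c convention: number of messages transferred */
}

int main(void)
{
        struct msg msgs[] = { { false }, { true } };    /* write then read */
        printf("xfer returned %d\n", xfer(msgs, 2));
        return 0;
}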
@@ -346,35 +379,26 @@ static const struct i2c_algorithm gmbus_algorithm = {
346 */ 379 */
347int intel_setup_gmbus(struct drm_device *dev) 380int intel_setup_gmbus(struct drm_device *dev)
348{ 381{
349 static const char *names[GMBUS_NUM_PORTS] = {
350 "disabled",
351 "ssc",
352 "vga",
353 "panel",
354 "dpc",
355 "dpb",
356 "reserved",
357 "dpd",
358 };
359 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = dev->dev_private;
360 int ret, i; 383 int ret, i;
361 384
362 dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), 385 if (HAS_PCH_SPLIT(dev))
363 GFP_KERNEL); 386 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
364 if (dev_priv->gmbus == NULL) 387 else
365 return -ENOMEM; 388 dev_priv->gpio_mmio_base = 0;
366 389
367 mutex_init(&dev_priv->gmbus_mutex); 390 mutex_init(&dev_priv->gmbus_mutex);
368 391
369 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 392 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
370 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 393 struct intel_gmbus *bus = &dev_priv->gmbus[i];
394 u32 port = i + 1; /* +1 to map gmbus index to pin pair */
371 395
372 bus->adapter.owner = THIS_MODULE; 396 bus->adapter.owner = THIS_MODULE;
373 bus->adapter.class = I2C_CLASS_DDC; 397 bus->adapter.class = I2C_CLASS_DDC;
374 snprintf(bus->adapter.name, 398 snprintf(bus->adapter.name,
375 sizeof(bus->adapter.name), 399 sizeof(bus->adapter.name),
376 "i915 gmbus %s", 400 "i915 gmbus %s",
377 names[i]); 401 gmbus_ports[i].name);
378 402
379 bus->adapter.dev.parent = &dev->pdev->dev; 403 bus->adapter.dev.parent = &dev->pdev->dev;
380 bus->dev_priv = dev_priv; 404 bus->dev_priv = dev_priv;
@@ -385,13 +409,9 @@ int intel_setup_gmbus(struct drm_device *dev)
385 goto err; 409 goto err;
386 410
387 /* By default use a conservative clock rate */ 411 /* By default use a conservative clock rate */
388 bus->reg0 = i | GMBUS_RATE_100KHZ; 412 bus->reg0 = port | GMBUS_RATE_100KHZ;
389
390 bus->has_gpio = intel_gpio_setup(bus, i);
391 413
392 /* XXX force bit banging until GMBUS is fully debugged */ 414 intel_gpio_setup(bus, port);
393 if (bus->has_gpio && IS_GEN2(dev))
394 bus->force_bit = true;
395 } 415 }
396 416
397 intel_i2c_reset(dev_priv->dev); 417 intel_i2c_reset(dev_priv->dev);
@@ -403,11 +423,18 @@ err:
403 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 423 struct intel_gmbus *bus = &dev_priv->gmbus[i];
404 i2c_del_adapter(&bus->adapter); 424 i2c_del_adapter(&bus->adapter);
405 } 425 }
406 kfree(dev_priv->gmbus);
407 dev_priv->gmbus = NULL;
408 return ret; 426 return ret;
409} 427}
410 428
429struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
430 unsigned port)
431{
432 WARN_ON(!intel_gmbus_is_port_valid(port));
433 /* -1 to map pin pair to gmbus index */
434 return (intel_gmbus_is_port_valid(port)) ?
435 &dev_priv->gmbus[port - 1].adapter : NULL;
436}
437
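intel_gmbus_get_adapter() replaces the old direct dev_priv->gmbus[pin] lookups: callers pass a pin-pair number, the helper checks it with intel_gmbus_is_port_valid(), and subtracts one to index the gmbus array, since the new gmbus_ports[] table drops the "disabled" and "reserved" placeholder slots and starts at port 1. A tiny model of that port-to-index mapping, using the six names from the new table:

#include <stdio.h>

static const char *const port_names[] = {
        "ssc", "vga", "panel", "dpc", "dpb", "dpd",
};
#define NUM_PORTS 6

/* Ports are numbered 1..6; table index 0 corresponds to port 1. */
static const char *adapter_name(unsigned int port)
{
        if (port < 1 || port > NUM_PORTS)
                return NULL;            /* invalid pin, like the WARN_ON path */
        return port_names[port - 1];
}

int main(void)
{
        printf("port 3 -> %s\n", adapter_name(3));                      /* "panel" */
        printf("port 0 -> %s\n", adapter_name(0) ? adapter_name(0) : "(invalid)");
        return 0;
}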
411void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) 438void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
412{ 439{
413 struct intel_gmbus *bus = to_intel_gmbus(adapter); 440 struct intel_gmbus *bus = to_intel_gmbus(adapter);
@@ -419,8 +446,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
419{ 446{
420 struct intel_gmbus *bus = to_intel_gmbus(adapter); 447 struct intel_gmbus *bus = to_intel_gmbus(adapter);
421 448
422 if (bus->has_gpio) 449 bus->force_bit = force_bit;
423 bus->force_bit = force_bit;
424} 450}
425 451
426void intel_teardown_gmbus(struct drm_device *dev) 452void intel_teardown_gmbus(struct drm_device *dev)
@@ -435,7 +461,4 @@ void intel_teardown_gmbus(struct drm_device *dev)
435 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 461 struct intel_gmbus *bus = &dev_priv->gmbus[i];
436 i2c_del_adapter(&bus->adapter); 462 i2c_del_adapter(&bus->adapter);
437 } 463 }
438
439 kfree(dev_priv->gmbus);
440 dev_priv->gmbus = NULL;
441} 464}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 95db2e988227..17a4630cec8a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -474,7 +474,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
474 474
475static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id) 475static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
476{ 476{
477 DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident); 477 DRM_INFO("Skipping forced modeset for %s\n", id->ident);
478 return 1; 478 return 1;
479} 479}
480 480
@@ -622,7 +622,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
622 622
623static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 623static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
624{ 624{
625 DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident); 625 DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
626 return 1; 626 return 1;
627} 627}
628 628
@@ -845,8 +845,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
845 child->device_type != DEVICE_TYPE_LFP) 845 child->device_type != DEVICE_TYPE_LFP)
846 continue; 846 continue;
847 847
848 if (child->i2c_pin) 848 if (intel_gmbus_is_port_valid(child->i2c_pin))
849 *i2c_pin = child->i2c_pin; 849 *i2c_pin = child->i2c_pin;
850 850
851 /* However, we cannot trust the BIOS writers to populate 851 /* However, we cannot trust the BIOS writers to populate
852 * the VBT correctly. Since LVDS requires additional 852 * the VBT correctly. Since LVDS requires additional
@@ -987,7 +987,8 @@ bool intel_lvds_init(struct drm_device *dev)
987 * preferred mode is the right one. 987 * preferred mode is the right one.
988 */ 988 */
989 intel_lvds->edid = drm_get_edid(connector, 989 intel_lvds->edid = drm_get_edid(connector,
990 &dev_priv->gmbus[pin].adapter); 990 intel_gmbus_get_adapter(dev_priv,
991 pin));
991 if (intel_lvds->edid) { 992 if (intel_lvds->edid) {
992 if (drm_add_edid_modes(connector, 993 if (drm_add_edid_modes(connector,
993 intel_lvds->edid)) { 994 intel_lvds->edid)) {
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d1928e79d9b6..d67ec3a51e42 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -56,7 +56,8 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
56 } 56 }
57 }; 57 };
58 58
59 return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2; 59 return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
60 msgs, 2) == 2;
60} 61}
61 62
62/** 63/**
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 289140bc83cb..34929aeca66b 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
28#include <linux/acpi.h> 30#include <linux/acpi.h>
29#include <linux/acpi_io.h> 31#include <linux/acpi_io.h>
30#include <acpi/video.h> 32#include <acpi/video.h>
@@ -355,7 +357,7 @@ static void intel_didl_outputs(struct drm_device *dev)
355 } 357 }
356 358
357 if (!acpi_video_bus) { 359 if (!acpi_video_bus) {
358 printk(KERN_WARNING "No ACPI video bus found\n"); 360 pr_warn("No ACPI video bus found\n");
359 return; 361 return;
360 } 362 }
361 363
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 230a141dbea3..cad45ff8251b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -28,6 +28,9 @@
28 * Chris Wilson <chris@chris-wilson.co.uk> 28 * Chris Wilson <chris@chris-wilson.co.uk>
29 */ 29 */
30 30
31#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32
33#include <linux/moduleparam.h>
31#include "intel_drv.h" 34#include "intel_drv.h"
32 35
33#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 36#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
@@ -171,7 +174,7 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
171 /* XXX add code here to query mode clock or hardware clock 174 /* XXX add code here to query mode clock or hardware clock
172 * and program max PWM appropriately. 175 * and program max PWM appropriately.
173 */ 176 */
174 printk_once(KERN_WARNING "fixme: max PWM is zero.\n"); 177 pr_warn_once("fixme: max PWM is zero\n");
175 return 1; 178 return 1;
176 } 179 }
177 180
@@ -191,6 +194,27 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
191 return max; 194 return max;
192} 195}
193 196
197static int i915_panel_invert_brightness;
198MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
199 "(-1 force normal, 0 machine defaults, 1 force inversion), please "
200 "report PCI device ID, subsystem vendor and subsystem device ID "
201 "to dri-devel@lists.freedesktop.org, if your machine needs it. "
202 "It will then be included in an upcoming module version.");
203module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
204static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
205{
206 struct drm_i915_private *dev_priv = dev->dev_private;
207
208 if (i915_panel_invert_brightness < 0)
209 return val;
210
211 if (i915_panel_invert_brightness > 0 ||
212 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
213 return intel_panel_get_max_backlight(dev) - val;
214
215 return val;
216}
217
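intel_panel_compute_brightness() puts the inversion policy in one place: the new invert_brightness module parameter forces normal (-1) or inverted (1) behaviour, and at the default (0) the per-machine QUIRK_INVERT_BRIGHTNESS quirk decides. Inversion maps a level to max - level, and because the helper is applied on both the get and set paths the mapping round-trips cleanly. A quick standalone restatement with an assumed 8-bit PWM range:

#include <stdio.h>

#define QUIRK_INVERT_BRIGHTNESS (1u << 0)       /* placeholder bit value */

/* invert: -1 force normal, 0 follow the quirk, 1 force inversion. */
static unsigned int compute_brightness(int invert, unsigned int quirks,
                                       unsigned int max, unsigned int val)
{
        if (invert < 0)
                return val;
        if (invert > 0 || (quirks & QUIRK_INVERT_BRIGHTNESS))
                return max - val;
        return val;
}

int main(void)
{
        unsigned int max = 255, level = 40;     /* assumed PWM max and request */
        unsigned int hw = compute_brightness(0, QUIRK_INVERT_BRIGHTNESS, max, level);

        /* Applying the same mapping on readback recovers the original level. */
        printf("requested %u -> hw %u -> read back %u\n",
               level, hw, compute_brightness(0, QUIRK_INVERT_BRIGHTNESS, max, hw));
        return 0;
}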
194u32 intel_panel_get_backlight(struct drm_device *dev) 218u32 intel_panel_get_backlight(struct drm_device *dev)
195{ 219{
196 struct drm_i915_private *dev_priv = dev->dev_private; 220 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -211,6 +235,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
211 } 235 }
212 } 236 }
213 237
238 val = intel_panel_compute_brightness(dev, val);
214 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 239 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
215 return val; 240 return val;
216} 241}
@@ -228,6 +253,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
228 u32 tmp; 253 u32 tmp;
229 254
230 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 255 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
256 level = intel_panel_compute_brightness(dev, level);
231 257
232 if (HAS_PCH_SPLIT(dev)) 258 if (HAS_PCH_SPLIT(dev))
233 return intel_pch_panel_set_backlight(dev, level); 259 return intel_pch_panel_set_backlight(dev, level);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e25581a9f60f..dfdb613752c5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -290,9 +290,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
-	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-	    I915_READ_START(ring) != obj->gtt_offset ||
-	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
+	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
+		     I915_READ_START(ring) == obj->gtt_offset &&
+		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x head %08x tail %08x start %08x\n",
 			  ring->name,
@@ -687,7 +687,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		if (HAS_PCH_SPLIT(dev))
+		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_enable_irq(dev_priv,
 					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
@@ -706,7 +706,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		if (HAS_PCH_SPLIT(dev))
+		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_disable_irq(dev_priv,
 					     GT_USER_INTERRUPT |
 					     GT_PIPE_NOTIFY);
@@ -788,10 +788,11 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 mask = ring->irq_enable;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -803,9 +804,9 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~rflag;
+		ring->irq_mask &= ~mask;
 		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, gflag);
+		ironlake_enable_irq(dev_priv, mask);
 	}
 	spin_unlock(&ring->irq_lock);
 
@@ -813,16 +814,17 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 }
 
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 mask = ring->irq_enable;
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= rflag;
+		ring->irq_mask |= mask;
 		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, gflag);
+		ironlake_disable_irq(dev_priv, mask);
 	}
 	spin_unlock(&ring->irq_lock);
 
@@ -1361,38 +1363,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 	return 0;
 }
 
-static bool
-gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static void
-gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_USER_INTERRUPT,
-				 GEN6_RENDER_USER_INTERRUPT);
-}
-
-static bool
-gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
-static void
-gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_put_irq(ring,
-				 GT_GEN6_BSD_USER_INTERRUPT,
-				 GEN6_BSD_USER_INTERRUPT);
-}
-
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
 	.name			= "gen6 bsd ring",
@@ -1404,8 +1374,9 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.flush			= gen6_ring_flush,
 	.add_request		= gen6_add_request,
 	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= gen6_bsd_ring_get_irq,
-	.irq_put		= gen6_bsd_ring_put_irq,
+	.irq_enable		= GEN6_BSD_USER_INTERRUPT,
+	.irq_get		= gen6_ring_get_irq,
+	.irq_put		= gen6_ring_put_irq,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 	.sync_to		= gen6_bsd_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_VR,
@@ -1416,22 +1387,6 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 
 /* Blitter support (SandyBridge+) */
 
-static bool
-blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return gen6_ring_get_irq(ring,
-				 GT_BLT_USER_INTERRUPT,
-				 GEN6_BLITTER_USER_INTERRUPT);
-}
-
-static void
-blt_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	gen6_ring_put_irq(ring,
-			  GT_BLT_USER_INTERRUPT,
-			  GEN6_BLITTER_USER_INTERRUPT);
-}
-
 static int blt_ring_flush(struct intel_ring_buffer *ring,
 			  u32 invalidate, u32 flush)
 {
@@ -1463,8 +1418,9 @@ static const struct intel_ring_buffer gen6_blt_ring = {
 	.flush			= blt_ring_flush,
 	.add_request		= gen6_add_request,
 	.get_seqno		= gen6_ring_get_seqno,
-	.irq_get		= blt_ring_get_irq,
-	.irq_put		= blt_ring_put_irq,
+	.irq_get		= gen6_ring_get_irq,
+	.irq_put		= gen6_ring_put_irq,
+	.irq_enable		= GEN6_BLITTER_USER_INTERRUPT,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 	.sync_to		= gen6_blt_ring_sync_to,
 	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
@@ -1482,8 +1438,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
 		ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = GT_USER_INTERRUPT;
 		ring->get_seqno = gen6_ring_get_seqno;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
@@ -1506,8 +1463,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
-		ring->irq_get = gen6_render_ring_get_irq;
-		ring->irq_put = gen6_render_ring_put_irq;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = GT_USER_INTERRUPT;
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->get_seqno = pc_render_get_seqno;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bc0365b8fa4d..3488a5a127db 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -59,6 +59,7 @@ struct intel_ring_buffer {
 	spinlock_t	irq_lock;
 	u32		irq_refcount;
 	u32		irq_mask;
+	u32		irq_enable;	/* IRQs enabled for this ring */
 	u32		irq_seqno;	/* last seq seem at irq time */
 	u32		trace_irq_seqno;
 	u32		waiting_seqno;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e36b171c1e7d..6898145b44ce 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -41,7 +41,7 @@
 #define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
 #define SDVO_RGB_MASK  (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
 #define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
-#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+#define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
 
 #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
 			SDVO_TV_MASK)
@@ -74,7 +74,7 @@ struct intel_sdvo {
 	struct i2c_adapter ddc;
 
 	/* Register for the SDVO device: SDVOB or SDVOC */
-	int sdvo_reg;
+	uint32_t sdvo_reg;
 
 	/* Active outputs controlled by this SDVO output */
 	uint16_t controlled_output;
@@ -114,6 +114,9 @@ struct intel_sdvo {
 	 */
 	bool is_tv;
 
+	/* On different gens SDVOB is at different places. */
+	bool is_sdvob;
+
 	/* This is for current tv format name */
 	int tv_format_index;
 
@@ -403,8 +406,7 @@ static const struct _sdvo_cmd_name {
 	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 };
 
-#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
-#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
 
 static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
 				   const void *args, int args_len)
@@ -1252,7 +1254,8 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
 	return drm_get_edid(connector,
-			    &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+			    intel_gmbus_get_adapter(dev_priv,
+						    dev_priv->crt_ddc_pin));
 }
 
 enum drm_connector_status
@@ -1341,8 +1344,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 		return connector_status_unknown;
 
 	/* add 30ms delay when the output type might be TV */
-	if (intel_sdvo->caps.output_flags &
-	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+	if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
 		mdelay(30);
 
 	if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
@@ -1893,7 +1895,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
 {
 	struct sdvo_device_mapping *mapping;
 
-	if (IS_SDVOB(reg))
+	if (sdvo->is_sdvob)
 		mapping = &(dev_priv->sdvo_mappings[0]);
 	else
 		mapping = &(dev_priv->sdvo_mappings[1]);
@@ -1911,7 +1913,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 	struct sdvo_device_mapping *mapping;
 	u8 pin;
 
-	if (IS_SDVOB(reg))
+	if (sdvo->is_sdvob)
 		mapping = &dev_priv->sdvo_mappings[0];
 	else
 		mapping = &dev_priv->sdvo_mappings[1];
@@ -1920,12 +1922,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 	if (mapping->initialized)
 		pin = mapping->i2c_pin;
 
-	if (pin < GMBUS_NUM_PORTS) {
-		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+	if (intel_gmbus_is_port_valid(pin)) {
+		sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
 		intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
 		intel_gmbus_force_bit(sdvo->i2c, true);
 	} else {
-		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+		sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
 	}
 }
 
@@ -1936,12 +1938,12 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 }
 
 static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-	if (IS_SDVOB(sdvo_reg)) {
+	if (sdvo->is_sdvob) {
 		my_mapping = &dev_priv->sdvo_mappings[0];
 		other_mapping = &dev_priv->sdvo_mappings[1];
 	} else {
@@ -1966,7 +1968,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 	/* No SDVO device info is found for another DVO port,
 	 * so use mapping assumption we had before BIOS parsing.
 	 */
-	if (IS_SDVOB(sdvo_reg))
+	if (sdvo->is_sdvob)
 		return 0x70;
 	else
 		return 0x72;
@@ -2191,6 +2193,10 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
 		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
 			return false;
 
+	if (flags & SDVO_OUTPUT_YPRPB0)
+		if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
+			return false;
+
 	if (flags & SDVO_OUTPUT_RGB0)
 		if (!intel_sdvo_analog_init(intel_sdvo, 0))
 			return false;
@@ -2482,7 +2488,7 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
 	return i2c_add_adapter(&sdvo->ddc) == 0;
 }
 
-bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
@@ -2494,7 +2500,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 		return false;
 
 	intel_sdvo->sdvo_reg = sdvo_reg;
-	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+	intel_sdvo->is_sdvob = is_sdvob;
+	intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
 	intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
 	if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
 		kfree(intel_sdvo);
@@ -2511,13 +2518,13 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 		u8 byte;
 
 		if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
-			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
-				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+			DRM_DEBUG_KMS("No SDVO device found on %s\n",
+				      SDVO_NAME(intel_sdvo));
 			goto err;
 		}
 	}
 
-	if (IS_SDVOB(sdvo_reg))
+	if (intel_sdvo->is_sdvob)
 		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
 	else
 		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
@@ -2538,8 +2545,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
 
 	if (intel_sdvo_output_setup(intel_sdvo,
 				    intel_sdvo->caps.output_flags) != true) {
-		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
-			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+		DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
+			      SDVO_NAME(intel_sdvo));
 		goto err;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 05f765ef5464..ca12c709f3eb 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1153,6 +1153,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
 		       DAC_B_0_7_V |
 		       DAC_C_0_7_V);
 
+
+	/*
+	 * The TV sense state should be cleared to zero on cantiga platform. Otherwise
+	 * the TV is misdetected. This is hardware requirement.
+	 */
+	if (IS_GM45(dev))
+		tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
+			    TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
+
 	I915_WRITE(TV_CTL, tv_ctl);
 	I915_WRITE(TV_DAC, tv_dac);
 	POSTING_READ(TV_DAC);