diff options
author | Dave Airlie <airlied@redhat.com> | 2013-04-22 04:48:45 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2013-04-22 04:48:45 -0400 |
commit | f0aa848fe5f2ed2599b7d54a6d3719d9df0a0965 (patch) | |
tree | e5a762cab2108dd6c2dad6ffeabc5d4203e85859 | |
parent | e1adc78caf440d3f6be81a947c2b913e73514a68 (diff) | |
parent | bd080ee57c2173cefdcadc39c7863a76c249d049 (diff) |
Merge branch 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
As promised a stash of (mostly) fixes. Two pieces of non-fixes included:
- A notch more gtt refactoring from Ben, beaten to death with igt in our
nightly testing.
- Support for display-less server chips (again from Ben). New hw
support which is only likely to break itself ;-)
Otherwise just tons of fixes:
- hpd irq storm mitigation from Egbert Eich. Your -next tree already has
the infrastructure, this here just supplies the logic.
- sdvo hw state check fix from Egbert Eich
- fb cb tune settings for the pch pll clocks on cpt/ppt
- "Bring a bigger gun" coherence workaround for multi-threaded, multi-core
& thrashing tiled gtt cpu access from Chris.
- Update haswell mPHY code.
- l3$ caching for context objects on ivb/hsw (Chris).
- dp aux refclock fix for haswell (Jani)
- moar overclocking fixes for snb/ivb (Ben)
- ecobits ppgtt pte caching control fixes from Ville
- fence stride check fixes and limit improvements (Ville)
- fix up crtc force restoring, potentially resulting in tons of hw state
check WARNs
- OOPS fix for NULL dereferencing of fb pointers when force-restoring a crtc
when other crtcs are disabled and the force-restored crtc is _not_ the
first one.
- Fix pfit disabling on gen2/3.
- Haswell ring freq scaling fixes (Chris).
- backlight init/teardown fix (failed eDP init killed the lvds backlight)
from Jani
- cpt/ppt fdi polarity fixes from Paulo (should help a lot of the FDI link
train failures).
- And a bunch of smaller things all over.
* 'drm-intel-fixes' of git://people.freedesktop.org/~danvet/drm-intel: (56 commits)
drm/i915: fix bpc vs. bpp confusion in intel_crtc_compute_config
drm/i915: move cpu_transcoder to the pipe configuration
drm/i915: preserve the PBC bits of TRANS_CHICKEN2
drm/i915: set CPT FDI RX polarity bits based on VBT
drm/i915: Add Reenable Timer to turn Hotplug Detection back on (v4)
drm/i915: Disable HPD interrupt on pin when irq storm is detected (v3)
drm/i915: Mask out the HPD irq bits before setting them individually.
drm/i915: (re)init HPD interrupt storm statistics
drm/i915: Add HPD IRQ storm detection (v5)
drm/i915: WARN when LPT-LP is not paired with ULT CPU
drm/i915: don't intel_crt_init on any ULT machines
drm/i915: remove comment about IVB link training from intel_pm.c
drm/i915: VLV doesn't have LLC
drm/i915: Scale ring, rather than ia, frequency on Haswell
drm/i915: shorten debugfs output simple attributes
drm/i915: Fixup pfit disabling for gen2/3
drm/i915: Fixup Oops in the pipe config computation
drm/i915: ensure single initialization and cleanup of backlight device
drm/i915: don't touch the PF regs if the power well is down
drm/i915: add intel_using_power_well
...
26 files changed, 706 insertions, 291 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index be88532b35cf..e913d325d5b8 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -901,7 +901,7 @@ i915_next_seqno_set(void *data, u64 val) | |||
901 | 901 | ||
902 | DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, | 902 | DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, |
903 | i915_next_seqno_get, i915_next_seqno_set, | 903 | i915_next_seqno_get, i915_next_seqno_set, |
904 | "next_seqno : 0x%llx\n"); | 904 | "0x%llx\n"); |
905 | 905 | ||
906 | static int i915_rstdby_delays(struct seq_file *m, void *unused) | 906 | static int i915_rstdby_delays(struct seq_file *m, void *unused) |
907 | { | 907 | { |
@@ -1006,6 +1006,9 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
1006 | max_freq = rp_state_cap & 0xff; | 1006 | max_freq = rp_state_cap & 0xff; |
1007 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 1007 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
1008 | max_freq * GT_FREQUENCY_MULTIPLIER); | 1008 | max_freq * GT_FREQUENCY_MULTIPLIER); |
1009 | |||
1010 | seq_printf(m, "Max overclocked frequency: %dMHz\n", | ||
1011 | dev_priv->rps.hw_max * GT_FREQUENCY_MULTIPLIER); | ||
1009 | } else { | 1012 | } else { |
1010 | seq_printf(m, "no P-state info available\n"); | 1013 | seq_printf(m, "no P-state info available\n"); |
1011 | } | 1014 | } |
@@ -1354,7 +1357,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1354 | if (ret) | 1357 | if (ret) |
1355 | return ret; | 1358 | return ret; |
1356 | 1359 | ||
1357 | seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n"); | 1360 | seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); |
1358 | 1361 | ||
1359 | for (gpu_freq = dev_priv->rps.min_delay; | 1362 | for (gpu_freq = dev_priv->rps.min_delay; |
1360 | gpu_freq <= dev_priv->rps.max_delay; | 1363 | gpu_freq <= dev_priv->rps.max_delay; |
@@ -1363,7 +1366,10 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1363 | sandybridge_pcode_read(dev_priv, | 1366 | sandybridge_pcode_read(dev_priv, |
1364 | GEN6_PCODE_READ_MIN_FREQ_TABLE, | 1367 | GEN6_PCODE_READ_MIN_FREQ_TABLE, |
1365 | &ia_freq); | 1368 | &ia_freq); |
1366 | seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); | 1369 | seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", |
1370 | gpu_freq * GT_FREQUENCY_MULTIPLIER, | ||
1371 | ((ia_freq >> 0) & 0xff) * 100, | ||
1372 | ((ia_freq >> 8) & 0xff) * 100); | ||
1367 | } | 1373 | } |
1368 | 1374 | ||
1369 | mutex_unlock(&dev_priv->rps.hw_lock); | 1375 | mutex_unlock(&dev_priv->rps.hw_lock); |
@@ -1687,7 +1693,7 @@ i915_wedged_set(void *data, u64 val) | |||
1687 | 1693 | ||
1688 | DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, | 1694 | DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, |
1689 | i915_wedged_get, i915_wedged_set, | 1695 | i915_wedged_get, i915_wedged_set, |
1690 | "wedged : %llu\n"); | 1696 | "%llu\n"); |
1691 | 1697 | ||
1692 | static int | 1698 | static int |
1693 | i915_ring_stop_get(void *data, u64 *val) | 1699 | i915_ring_stop_get(void *data, u64 *val) |
@@ -1841,7 +1847,7 @@ i915_max_freq_set(void *data, u64 val) | |||
1841 | 1847 | ||
1842 | DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, | 1848 | DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, |
1843 | i915_max_freq_get, i915_max_freq_set, | 1849 | i915_max_freq_get, i915_max_freq_set, |
1844 | "max freq: %llu\n"); | 1850 | "%llu\n"); |
1845 | 1851 | ||
1846 | static int | 1852 | static int |
1847 | i915_min_freq_get(void *data, u64 *val) | 1853 | i915_min_freq_get(void *data, u64 *val) |
@@ -1892,7 +1898,7 @@ i915_min_freq_set(void *data, u64 val) | |||
1892 | 1898 | ||
1893 | DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, | 1899 | DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, |
1894 | i915_min_freq_get, i915_min_freq_set, | 1900 | i915_min_freq_get, i915_min_freq_set, |
1895 | "min freq: %llu\n"); | 1901 | "%llu\n"); |
1896 | 1902 | ||
1897 | static int | 1903 | static int |
1898 | i915_cache_sharing_get(void *data, u64 *val) | 1904 | i915_cache_sharing_get(void *data, u64 *val) |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 4be58e3b8e4f..3b315ba85a3e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1322,6 +1322,10 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
1322 | /* Always safe in the mode setting case. */ | 1322 | /* Always safe in the mode setting case. */ |
1323 | /* FIXME: do pre/post-mode set stuff in core KMS code */ | 1323 | /* FIXME: do pre/post-mode set stuff in core KMS code */ |
1324 | dev->vblank_disable_allowed = 1; | 1324 | dev->vblank_disable_allowed = 1; |
1325 | if (INTEL_INFO(dev)->num_pipes == 0) { | ||
1326 | dev_priv->mm.suspended = 0; | ||
1327 | return 0; | ||
1328 | } | ||
1325 | 1329 | ||
1326 | ret = intel_fbdev_init(dev); | 1330 | ret = intel_fbdev_init(dev); |
1327 | if (ret) | 1331 | if (ret) |
@@ -1514,6 +1518,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1514 | goto free_priv; | 1518 | goto free_priv; |
1515 | } | 1519 | } |
1516 | 1520 | ||
1521 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | ||
1522 | /* Before gen4, the registers and the GTT are behind different BARs. | ||
1523 | * However, from gen4 onwards, the registers and the GTT are shared | ||
1524 | * in the same BAR, so we want to restrict this ioremap from | ||
1525 | * clobbering the GTT which we want ioremap_wc instead. Fortunately, | ||
1526 | * the register BAR remains the same size for all the earlier | ||
1527 | * generations up to Ironlake. | ||
1528 | */ | ||
1529 | if (info->gen < 5) | ||
1530 | mmio_size = 512*1024; | ||
1531 | else | ||
1532 | mmio_size = 2*1024*1024; | ||
1533 | |||
1534 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); | ||
1535 | if (!dev_priv->regs) { | ||
1536 | DRM_ERROR("failed to map registers\n"); | ||
1537 | ret = -EIO; | ||
1538 | goto put_bridge; | ||
1539 | } | ||
1540 | |||
1541 | intel_early_sanitize_regs(dev); | ||
1542 | |||
1517 | ret = i915_gem_gtt_init(dev); | 1543 | ret = i915_gem_gtt_init(dev); |
1518 | if (ret) | 1544 | if (ret) |
1519 | goto put_bridge; | 1545 | goto put_bridge; |
@@ -1538,28 +1564,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1538 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | 1564 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1539 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | 1565 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); |
1540 | 1566 | ||
1541 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | ||
1542 | /* Before gen4, the registers and the GTT are behind different BARs. | ||
1543 | * However, from gen4 onwards, the registers and the GTT are shared | ||
1544 | * in the same BAR, so we want to restrict this ioremap from | ||
1545 | * clobbering the GTT which we want ioremap_wc instead. Fortunately, | ||
1546 | * the register BAR remains the same size for all the earlier | ||
1547 | * generations up to Ironlake. | ||
1548 | */ | ||
1549 | if (info->gen < 5) | ||
1550 | mmio_size = 512*1024; | ||
1551 | else | ||
1552 | mmio_size = 2*1024*1024; | ||
1553 | |||
1554 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); | ||
1555 | if (!dev_priv->regs) { | ||
1556 | DRM_ERROR("failed to map registers\n"); | ||
1557 | ret = -EIO; | ||
1558 | goto put_gmch; | ||
1559 | } | ||
1560 | |||
1561 | intel_early_sanitize_regs(dev); | ||
1562 | |||
1563 | aperture_size = dev_priv->gtt.mappable_end; | 1567 | aperture_size = dev_priv->gtt.mappable_end; |
1564 | 1568 | ||
1565 | dev_priv->gtt.mappable = | 1569 | dev_priv->gtt.mappable = |
@@ -1634,9 +1638,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1634 | if (IS_VALLEYVIEW(dev)) | 1638 | if (IS_VALLEYVIEW(dev)) |
1635 | dev_priv->num_plane = 2; | 1639 | dev_priv->num_plane = 2; |
1636 | 1640 | ||
1637 | ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); | 1641 | if (INTEL_INFO(dev)->num_pipes) { |
1638 | if (ret) | 1642 | ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); |
1639 | goto out_gem_unload; | 1643 | if (ret) |
1644 | goto out_gem_unload; | ||
1645 | } | ||
1640 | 1646 | ||
1641 | /* Start out suspended */ | 1647 | /* Start out suspended */ |
1642 | dev_priv->mm.suspended = 1; | 1648 | dev_priv->mm.suspended = 1; |
@@ -1651,9 +1657,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1651 | 1657 | ||
1652 | i915_setup_sysfs(dev); | 1658 | i915_setup_sysfs(dev); |
1653 | 1659 | ||
1654 | /* Must be done after probing outputs */ | 1660 | if (INTEL_INFO(dev)->num_pipes) { |
1655 | intel_opregion_init(dev); | 1661 | /* Must be done after probing outputs */ |
1656 | acpi_video_register(); | 1662 | intel_opregion_init(dev); |
1663 | acpi_video_register(); | ||
1664 | } | ||
1657 | 1665 | ||
1658 | if (IS_GEN5(dev)) | 1666 | if (IS_GEN5(dev)) |
1659 | intel_gpu_ips_init(dev_priv); | 1667 | intel_gpu_ips_init(dev_priv); |
@@ -1678,10 +1686,9 @@ out_mtrrfree: | |||
1678 | dev_priv->mm.gtt_mtrr = -1; | 1686 | dev_priv->mm.gtt_mtrr = -1; |
1679 | } | 1687 | } |
1680 | io_mapping_free(dev_priv->gtt.mappable); | 1688 | io_mapping_free(dev_priv->gtt.mappable); |
1689 | dev_priv->gtt.gtt_remove(dev); | ||
1681 | out_rmmap: | 1690 | out_rmmap: |
1682 | pci_iounmap(dev->pdev, dev_priv->regs); | 1691 | pci_iounmap(dev->pdev, dev_priv->regs); |
1683 | put_gmch: | ||
1684 | dev_priv->gtt.gtt_remove(dev); | ||
1685 | put_bridge: | 1692 | put_bridge: |
1686 | pci_dev_put(dev_priv->bridge_dev); | 1693 | pci_dev_put(dev_priv->bridge_dev); |
1687 | free_priv: | 1694 | free_priv: |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3b4b9c09a20b..9ebe895c17d6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -140,6 +140,16 @@ extern int intel_agp_enabled; | |||
140 | .subdevice = PCI_ANY_ID, \ | 140 | .subdevice = PCI_ANY_ID, \ |
141 | .driver_data = (unsigned long) info } | 141 | .driver_data = (unsigned long) info } |
142 | 142 | ||
143 | #define INTEL_QUANTA_VGA_DEVICE(info) { \ | ||
144 | .class = PCI_BASE_CLASS_DISPLAY << 16, \ | ||
145 | .class_mask = 0xff0000, \ | ||
146 | .vendor = 0x8086, \ | ||
147 | .device = 0x16a, \ | ||
148 | .subvendor = 0x152d, \ | ||
149 | .subdevice = 0x8990, \ | ||
150 | .driver_data = (unsigned long) info } | ||
151 | |||
152 | |||
143 | static const struct intel_device_info intel_i830_info = { | 153 | static const struct intel_device_info intel_i830_info = { |
144 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, | 154 | .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, |
145 | .has_overlay = 1, .overlay_needs_physical = 1, | 155 | .has_overlay = 1, .overlay_needs_physical = 1, |
@@ -272,12 +282,19 @@ static const struct intel_device_info intel_ivybridge_m_info = { | |||
272 | .is_mobile = 1, | 282 | .is_mobile = 1, |
273 | }; | 283 | }; |
274 | 284 | ||
285 | static const struct intel_device_info intel_ivybridge_q_info = { | ||
286 | GEN7_FEATURES, | ||
287 | .is_ivybridge = 1, | ||
288 | .num_pipes = 0, /* legal, last one wins */ | ||
289 | }; | ||
290 | |||
275 | static const struct intel_device_info intel_valleyview_m_info = { | 291 | static const struct intel_device_info intel_valleyview_m_info = { |
276 | GEN7_FEATURES, | 292 | GEN7_FEATURES, |
277 | .is_mobile = 1, | 293 | .is_mobile = 1, |
278 | .num_pipes = 2, | 294 | .num_pipes = 2, |
279 | .is_valleyview = 1, | 295 | .is_valleyview = 1, |
280 | .display_mmio_offset = VLV_DISPLAY_BASE, | 296 | .display_mmio_offset = VLV_DISPLAY_BASE, |
297 | .has_llc = 0, /* legal, last one wins */ | ||
281 | }; | 298 | }; |
282 | 299 | ||
283 | static const struct intel_device_info intel_valleyview_d_info = { | 300 | static const struct intel_device_info intel_valleyview_d_info = { |
@@ -285,6 +302,7 @@ static const struct intel_device_info intel_valleyview_d_info = { | |||
285 | .num_pipes = 2, | 302 | .num_pipes = 2, |
286 | .is_valleyview = 1, | 303 | .is_valleyview = 1, |
287 | .display_mmio_offset = VLV_DISPLAY_BASE, | 304 | .display_mmio_offset = VLV_DISPLAY_BASE, |
305 | .has_llc = 0, /* legal, last one wins */ | ||
288 | }; | 306 | }; |
289 | 307 | ||
290 | static const struct intel_device_info intel_haswell_d_info = { | 308 | static const struct intel_device_info intel_haswell_d_info = { |
@@ -342,6 +360,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */ | |||
342 | INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ | 360 | INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ |
343 | INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ | 361 | INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ |
344 | INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ | 362 | INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ |
363 | INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */ | ||
345 | INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ | 364 | INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ |
346 | INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ | 365 | INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ |
347 | INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ | 366 | INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ |
@@ -397,6 +416,15 @@ void intel_detect_pch(struct drm_device *dev) | |||
397 | struct drm_i915_private *dev_priv = dev->dev_private; | 416 | struct drm_i915_private *dev_priv = dev->dev_private; |
398 | struct pci_dev *pch; | 417 | struct pci_dev *pch; |
399 | 418 | ||
419 | /* In all current cases, num_pipes is equivalent to the PCH_NOP setting | ||
420 | * (which really amounts to a PCH but no South Display). | ||
421 | */ | ||
422 | if (INTEL_INFO(dev)->num_pipes == 0) { | ||
423 | dev_priv->pch_type = PCH_NOP; | ||
424 | dev_priv->num_pch_pll = 0; | ||
425 | return; | ||
426 | } | ||
427 | |||
400 | /* | 428 | /* |
401 | * The reason to probe ISA bridge instead of Dev31:Fun0 is to | 429 | * The reason to probe ISA bridge instead of Dev31:Fun0 is to |
402 | * make graphics device passthrough work easy for VMM, that only | 430 | * make graphics device passthrough work easy for VMM, that only |
@@ -431,11 +459,13 @@ void intel_detect_pch(struct drm_device *dev) | |||
431 | dev_priv->num_pch_pll = 0; | 459 | dev_priv->num_pch_pll = 0; |
432 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); | 460 | DRM_DEBUG_KMS("Found LynxPoint PCH\n"); |
433 | WARN_ON(!IS_HASWELL(dev)); | 461 | WARN_ON(!IS_HASWELL(dev)); |
462 | WARN_ON(IS_ULT(dev)); | ||
434 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { | 463 | } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { |
435 | dev_priv->pch_type = PCH_LPT; | 464 | dev_priv->pch_type = PCH_LPT; |
436 | dev_priv->num_pch_pll = 0; | 465 | dev_priv->num_pch_pll = 0; |
437 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); | 466 | DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); |
438 | WARN_ON(!IS_HASWELL(dev)); | 467 | WARN_ON(!IS_HASWELL(dev)); |
468 | WARN_ON(!IS_ULT(dev)); | ||
439 | } | 469 | } |
440 | BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); | 470 | BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); |
441 | } | 471 | } |
@@ -901,7 +931,11 @@ int i915_reset(struct drm_device *dev) | |||
901 | ring->init(ring); | 931 | ring->init(ring); |
902 | 932 | ||
903 | i915_gem_context_init(dev); | 933 | i915_gem_context_init(dev); |
904 | i915_gem_init_ppgtt(dev); | 934 | if (dev_priv->mm.aliasing_ppgtt) { |
935 | ret = dev_priv->mm.aliasing_ppgtt->enable(dev); | ||
936 | if (ret) | ||
937 | i915_gem_cleanup_aliasing_ppgtt(dev); | ||
938 | } | ||
905 | 939 | ||
906 | /* | 940 | /* |
907 | * It would make sense to re-init all the other hw state, at | 941 | * It would make sense to re-init all the other hw state, at |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 44fca0b69473..d5dcf7fe1ee9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -195,9 +195,9 @@ struct drm_i915_master_private { | |||
195 | struct _drm_i915_sarea *sarea_priv; | 195 | struct _drm_i915_sarea *sarea_priv; |
196 | }; | 196 | }; |
197 | #define I915_FENCE_REG_NONE -1 | 197 | #define I915_FENCE_REG_NONE -1 |
198 | #define I915_MAX_NUM_FENCES 16 | 198 | #define I915_MAX_NUM_FENCES 32 |
199 | /* 16 fences + sign bit for FENCE_REG_NONE */ | 199 | /* 32 fences + sign bit for FENCE_REG_NONE */ |
200 | #define I915_MAX_NUM_FENCE_BITS 5 | 200 | #define I915_MAX_NUM_FENCE_BITS 6 |
201 | 201 | ||
202 | struct drm_i915_fence_reg { | 202 | struct drm_i915_fence_reg { |
203 | struct list_head lru_list; | 203 | struct list_head lru_list; |
@@ -449,6 +449,7 @@ struct i915_hw_ppgtt { | |||
449 | struct sg_table *st, | 449 | struct sg_table *st, |
450 | unsigned int pg_start, | 450 | unsigned int pg_start, |
451 | enum i915_cache_level cache_level); | 451 | enum i915_cache_level cache_level); |
452 | int (*enable)(struct drm_device *dev); | ||
452 | void (*cleanup)(struct i915_hw_ppgtt *ppgtt); | 453 | void (*cleanup)(struct i915_hw_ppgtt *ppgtt); |
453 | }; | 454 | }; |
454 | 455 | ||
@@ -479,6 +480,7 @@ enum intel_pch { | |||
479 | PCH_IBX, /* Ibexpeak PCH */ | 480 | PCH_IBX, /* Ibexpeak PCH */ |
480 | PCH_CPT, /* Cougarpoint PCH */ | 481 | PCH_CPT, /* Cougarpoint PCH */ |
481 | PCH_LPT, /* Lynxpoint PCH */ | 482 | PCH_LPT, /* Lynxpoint PCH */ |
483 | PCH_NOP, | ||
482 | }; | 484 | }; |
483 | 485 | ||
484 | enum intel_sbi_destination { | 486 | enum intel_sbi_destination { |
@@ -666,6 +668,7 @@ struct intel_gen6_power_mgmt { | |||
666 | u8 cur_delay; | 668 | u8 cur_delay; |
667 | u8 min_delay; | 669 | u8 min_delay; |
668 | u8 max_delay; | 670 | u8 max_delay; |
671 | u8 hw_max; | ||
669 | 672 | ||
670 | struct delayed_work delayed_resume_work; | 673 | struct delayed_work delayed_resume_work; |
671 | 674 | ||
@@ -929,6 +932,16 @@ typedef struct drm_i915_private { | |||
929 | 932 | ||
930 | struct work_struct hotplug_work; | 933 | struct work_struct hotplug_work; |
931 | bool enable_hotplug_processing; | 934 | bool enable_hotplug_processing; |
935 | struct { | ||
936 | unsigned long hpd_last_jiffies; | ||
937 | int hpd_cnt; | ||
938 | enum { | ||
939 | HPD_ENABLED = 0, | ||
940 | HPD_DISABLED = 1, | ||
941 | HPD_MARK_DISABLED = 2 | ||
942 | } hpd_mark; | ||
943 | } hpd_stats[HPD_NUM_PINS]; | ||
944 | struct timer_list hotplug_reenable_timer; | ||
932 | 945 | ||
933 | int num_pch_pll; | 946 | int num_pch_pll; |
934 | int num_plane; | 947 | int num_plane; |
@@ -963,6 +976,7 @@ typedef struct drm_i915_private { | |||
963 | unsigned int int_crt_support:1; | 976 | unsigned int int_crt_support:1; |
964 | unsigned int lvds_use_ssc:1; | 977 | unsigned int lvds_use_ssc:1; |
965 | unsigned int display_clock_mode:1; | 978 | unsigned int display_clock_mode:1; |
979 | unsigned int fdi_rx_polarity_inverted:1; | ||
966 | int lvds_ssc_freq; | 980 | int lvds_ssc_freq; |
967 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ | 981 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
968 | struct { | 982 | struct { |
@@ -1373,6 +1387,7 @@ struct drm_i915_file_private { | |||
1373 | #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) | 1387 | #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) |
1374 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) | 1388 | #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
1375 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) | 1389 | #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
1390 | #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) | ||
1376 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) | 1391 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) |
1377 | 1392 | ||
1378 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) | 1393 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) |
@@ -1640,7 +1655,6 @@ int __must_check i915_gem_init(struct drm_device *dev); | |||
1640 | int __must_check i915_gem_init_hw(struct drm_device *dev); | 1655 | int __must_check i915_gem_init_hw(struct drm_device *dev); |
1641 | void i915_gem_l3_remap(struct drm_device *dev); | 1656 | void i915_gem_l3_remap(struct drm_device *dev); |
1642 | void i915_gem_init_swizzling(struct drm_device *dev); | 1657 | void i915_gem_init_swizzling(struct drm_device *dev); |
1643 | void i915_gem_init_ppgtt(struct drm_device *dev); | ||
1644 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 1658 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1645 | int __must_check i915_gpu_idle(struct drm_device *dev); | 1659 | int __must_check i915_gpu_idle(struct drm_device *dev); |
1646 | int __must_check i915_gem_idle(struct drm_device *dev); | 1660 | int __must_check i915_gem_idle(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 911bd40ef513..6be940effefd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2683,17 +2683,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv, | |||
2683 | return fence - dev_priv->fence_regs; | 2683 | return fence - dev_priv->fence_regs; |
2684 | } | 2684 | } |
2685 | 2685 | ||
2686 | static void i915_gem_write_fence__ipi(void *data) | ||
2687 | { | ||
2688 | wbinvd(); | ||
2689 | } | ||
2690 | |||
2686 | static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | 2691 | static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, |
2687 | struct drm_i915_fence_reg *fence, | 2692 | struct drm_i915_fence_reg *fence, |
2688 | bool enable) | 2693 | bool enable) |
2689 | { | 2694 | { |
2690 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 2695 | struct drm_device *dev = obj->base.dev; |
2691 | int reg = fence_number(dev_priv, fence); | 2696 | struct drm_i915_private *dev_priv = dev->dev_private; |
2692 | 2697 | int fence_reg = fence_number(dev_priv, fence); | |
2693 | i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); | 2698 | |
2699 | /* In order to fully serialize access to the fenced region and | ||
2700 | * the update to the fence register we need to take extreme | ||
2701 | * measures on SNB+. In theory, the write to the fence register | ||
2702 | * flushes all memory transactions before, and coupled with the | ||
2703 | * mb() placed around the register write we serialise all memory | ||
2704 | * operations with respect to the changes in the tiler. Yet, on | ||
2705 | * SNB+ we need to take a step further and emit an explicit wbinvd() | ||
2706 | * on each processor in order to manually flush all memory | ||
2707 | * transactions before updating the fence register. | ||
2708 | */ | ||
2709 | if (HAS_LLC(obj->base.dev)) | ||
2710 | on_each_cpu(i915_gem_write_fence__ipi, NULL, 1); | ||
2711 | i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL); | ||
2694 | 2712 | ||
2695 | if (enable) { | 2713 | if (enable) { |
2696 | obj->fence_reg = reg; | 2714 | obj->fence_reg = fence_reg; |
2697 | fence->obj = obj; | 2715 | fence->obj = obj; |
2698 | list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); | 2716 | list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); |
2699 | } else { | 2717 | } else { |
@@ -3992,6 +4010,12 @@ i915_gem_init_hw(struct drm_device *dev) | |||
3992 | if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) | 4010 | if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) |
3993 | I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); | 4011 | I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); |
3994 | 4012 | ||
4013 | if (HAS_PCH_NOP(dev)) { | ||
4014 | u32 temp = I915_READ(GEN7_MSG_CTL); | ||
4015 | temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); | ||
4016 | I915_WRITE(GEN7_MSG_CTL, temp); | ||
4017 | } | ||
4018 | |||
3995 | i915_gem_l3_remap(dev); | 4019 | i915_gem_l3_remap(dev); |
3996 | 4020 | ||
3997 | i915_gem_init_swizzling(dev); | 4021 | i915_gem_init_swizzling(dev); |
@@ -4005,7 +4029,13 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4005 | * contexts before PPGTT. | 4029 | * contexts before PPGTT. |
4006 | */ | 4030 | */ |
4007 | i915_gem_context_init(dev); | 4031 | i915_gem_context_init(dev); |
4008 | i915_gem_init_ppgtt(dev); | 4032 | if (dev_priv->mm.aliasing_ppgtt) { |
4033 | ret = dev_priv->mm.aliasing_ppgtt->enable(dev); | ||
4034 | if (ret) { | ||
4035 | i915_gem_cleanup_aliasing_ppgtt(dev); | ||
4036 | DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n"); | ||
4037 | } | ||
4038 | } | ||
4009 | 4039 | ||
4010 | return 0; | 4040 | return 0; |
4011 | } | 4041 | } |
@@ -4160,7 +4190,9 @@ i915_gem_load(struct drm_device *dev) | |||
4160 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4190 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4161 | dev_priv->fence_reg_start = 3; | 4191 | dev_priv->fence_reg_start = 3; |
4162 | 4192 | ||
4163 | if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | 4193 | if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) |
4194 | dev_priv->num_fence_regs = 32; | ||
4195 | else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
4164 | dev_priv->num_fence_regs = 16; | 4196 | dev_priv->num_fence_regs = 16; |
4165 | else | 4197 | else |
4166 | dev_priv->num_fence_regs = 8; | 4198 | dev_priv->num_fence_regs = 8; |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 94d873a6cffb..a1e8ecb6adf6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -152,6 +152,13 @@ create_hw_context(struct drm_device *dev, | |||
152 | return ERR_PTR(-ENOMEM); | 152 | return ERR_PTR(-ENOMEM); |
153 | } | 153 | } |
154 | 154 | ||
155 | if (INTEL_INFO(dev)->gen >= 7) { | ||
156 | ret = i915_gem_object_set_cache_level(ctx->obj, | ||
157 | I915_CACHE_LLC_MLC); | ||
158 | if (ret) | ||
159 | goto err_out; | ||
160 | } | ||
161 | |||
155 | /* The ring associated with the context object is handled by the normal | 162 | /* The ring associated with the context object is handled by the normal |
156 | * object tracking code. We give an initial ring value simple to pass an | 163 | * object tracking code. We give an initial ring value simple to pass an |
157 | * assertion in the context switch code. | 164 | * assertion in the context switch code. |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 24a23b31b55f..50df194914a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include "i915_trace.h" | 28 | #include "i915_trace.h" |
29 | #include "intel_drv.h" | 29 | #include "intel_drv.h" |
30 | 30 | ||
31 | typedef uint32_t gtt_pte_t; | 31 | typedef uint32_t gen6_gtt_pte_t; |
32 | 32 | ||
33 | /* PPGTT stuff */ | 33 | /* PPGTT stuff */ |
34 | #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) | 34 | #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) |
@@ -44,11 +44,11 @@ typedef uint32_t gtt_pte_t; | |||
44 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) | 44 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) |
45 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | 45 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
46 | 46 | ||
47 | static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, | 47 | static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, |
48 | dma_addr_t addr, | 48 | dma_addr_t addr, |
49 | enum i915_cache_level level) | 49 | enum i915_cache_level level) |
50 | { | 50 | { |
51 | gtt_pte_t pte = GEN6_PTE_VALID; | 51 | gen6_gtt_pte_t pte = GEN6_PTE_VALID; |
52 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 52 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
53 | 53 | ||
54 | switch (level) { | 54 | switch (level) { |
@@ -72,17 +72,84 @@ static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, | |||
72 | BUG(); | 72 | BUG(); |
73 | } | 73 | } |
74 | 74 | ||
75 | |||
76 | return pte; | 75 | return pte; |
77 | } | 76 | } |
78 | 77 | ||
78 | static int gen6_ppgtt_enable(struct drm_device *dev) | ||
79 | { | ||
80 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
81 | uint32_t pd_offset; | ||
82 | struct intel_ring_buffer *ring; | ||
83 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
84 | gen6_gtt_pte_t __iomem *pd_addr; | ||
85 | uint32_t pd_entry; | ||
86 | int i; | ||
87 | |||
88 | pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + | ||
89 | ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); | ||
90 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
91 | dma_addr_t pt_addr; | ||
92 | |||
93 | pt_addr = ppgtt->pt_dma_addr[i]; | ||
94 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | ||
95 | pd_entry |= GEN6_PDE_VALID; | ||
96 | |||
97 | writel(pd_entry, pd_addr + i); | ||
98 | } | ||
99 | readl(pd_addr); | ||
100 | |||
101 | pd_offset = ppgtt->pd_offset; | ||
102 | pd_offset /= 64; /* in cachelines, */ | ||
103 | pd_offset <<= 16; | ||
104 | |||
105 | if (INTEL_INFO(dev)->gen == 6) { | ||
106 | uint32_t ecochk, gab_ctl, ecobits; | ||
107 | |||
108 | ecobits = I915_READ(GAC_ECO_BITS); | ||
109 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | | ||
110 | ECOBITS_PPGTT_CACHE64B); | ||
111 | |||
112 | gab_ctl = I915_READ(GAB_CTL); | ||
113 | I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); | ||
114 | |||
115 | ecochk = I915_READ(GAM_ECOCHK); | ||
116 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | | ||
117 | ECOCHK_PPGTT_CACHE64B); | ||
118 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
119 | } else if (INTEL_INFO(dev)->gen >= 7) { | ||
120 | uint32_t ecochk, ecobits; | ||
121 | |||
122 | ecobits = I915_READ(GAC_ECO_BITS); | ||
123 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); | ||
124 | |||
125 | ecochk = I915_READ(GAM_ECOCHK); | ||
126 | if (IS_HASWELL(dev)) { | ||
127 | ecochk |= ECOCHK_PPGTT_WB_HSW; | ||
128 | } else { | ||
129 | ecochk |= ECOCHK_PPGTT_LLC_IVB; | ||
130 | ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; | ||
131 | } | ||
132 | I915_WRITE(GAM_ECOCHK, ecochk); | ||
133 | /* GFX_MODE is per-ring on gen7+ */ | ||
134 | } | ||
135 | |||
136 | for_each_ring(ring, dev_priv, i) { | ||
137 | if (INTEL_INFO(dev)->gen >= 7) | ||
138 | I915_WRITE(RING_MODE_GEN7(ring), | ||
139 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
140 | |||
141 | I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); | ||
142 | I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); | ||
143 | } | ||
144 | return 0; | ||
145 | } | ||
146 | |||
79 | /* PPGTT support for Sandybdrige/Gen6 and later */ | 147 | /* PPGTT support for Sandybdrige/Gen6 and later */ |
80 | static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | 148 | static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, |
81 | unsigned first_entry, | 149 | unsigned first_entry, |
82 | unsigned num_entries) | 150 | unsigned num_entries) |
83 | { | 151 | { |
84 | gtt_pte_t *pt_vaddr; | 152 | gen6_gtt_pte_t *pt_vaddr, scratch_pte; |
85 | gtt_pte_t scratch_pte; | ||
86 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; | 153 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
87 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 154 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
88 | unsigned last_pte, i; | 155 | unsigned last_pte, i; |
@@ -114,7 +181,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, | |||
114 | unsigned first_entry, | 181 | unsigned first_entry, |
115 | enum i915_cache_level cache_level) | 182 | enum i915_cache_level cache_level) |
116 | { | 183 | { |
117 | gtt_pte_t *pt_vaddr; | 184 | gen6_gtt_pte_t *pt_vaddr; |
118 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; | 185 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
119 | unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 186 | unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
120 | struct sg_page_iter sg_iter; | 187 | struct sg_page_iter sg_iter; |
@@ -170,6 +237,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
170 | gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES; | 237 | gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES; |
171 | 238 | ||
172 | ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; | 239 | ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; |
240 | ppgtt->enable = gen6_ppgtt_enable; | ||
173 | ppgtt->clear_range = gen6_ppgtt_clear_range; | 241 | ppgtt->clear_range = gen6_ppgtt_clear_range; |
174 | ppgtt->insert_entries = gen6_ppgtt_insert_entries; | 242 | ppgtt->insert_entries = gen6_ppgtt_insert_entries; |
175 | ppgtt->cleanup = gen6_ppgtt_cleanup; | 243 | ppgtt->cleanup = gen6_ppgtt_cleanup; |
@@ -203,12 +271,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
203 | ppgtt->pt_dma_addr[i] = pt_addr; | 271 | ppgtt->pt_dma_addr[i] = pt_addr; |
204 | } | 272 | } |
205 | 273 | ||
206 | ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; | ||
207 | |||
208 | ppgtt->clear_range(ppgtt, 0, | 274 | ppgtt->clear_range(ppgtt, 0, |
209 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); | 275 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); |
210 | 276 | ||
211 | ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); | 277 | ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); |
212 | 278 | ||
213 | return 0; | 279 | return 0; |
214 | 280 | ||
@@ -240,8 +306,13 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
240 | return -ENOMEM; | 306 | return -ENOMEM; |
241 | 307 | ||
242 | ppgtt->dev = dev; | 308 | ppgtt->dev = dev; |
309 | ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; | ||
310 | |||
311 | if (INTEL_INFO(dev)->gen < 8) | ||
312 | ret = gen6_ppgtt_init(ppgtt); | ||
313 | else | ||
314 | BUG(); | ||
243 | 315 | ||
244 | ret = gen6_ppgtt_init(ppgtt); | ||
245 | if (ret) | 316 | if (ret) |
246 | kfree(ppgtt); | 317 | kfree(ppgtt); |
247 | else | 318 | else |
@@ -259,6 +330,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) | |||
259 | return; | 330 | return; |
260 | 331 | ||
261 | ppgtt->cleanup(ppgtt); | 332 | ppgtt->cleanup(ppgtt); |
333 | dev_priv->mm.aliasing_ppgtt = NULL; | ||
262 | } | 334 | } |
263 | 335 | ||
264 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | 336 | void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
@@ -278,64 +350,6 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | |||
278 | obj->base.size >> PAGE_SHIFT); | 350 | obj->base.size >> PAGE_SHIFT); |
279 | } | 351 | } |
280 | 352 | ||
281 | void i915_gem_init_ppgtt(struct drm_device *dev) | ||
282 | { | ||
283 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
284 | uint32_t pd_offset; | ||
285 | struct intel_ring_buffer *ring; | ||
286 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | ||
287 | gtt_pte_t __iomem *pd_addr; | ||
288 | uint32_t pd_entry; | ||
289 | int i; | ||
290 | |||
291 | if (!dev_priv->mm.aliasing_ppgtt) | ||
292 | return; | ||
293 | |||
294 | |||
295 | pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t); | ||
296 | for (i = 0; i < ppgtt->num_pd_entries; i++) { | ||
297 | dma_addr_t pt_addr; | ||
298 | |||
299 | pt_addr = ppgtt->pt_dma_addr[i]; | ||
300 | pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); | ||
301 | pd_entry |= GEN6_PDE_VALID; | ||
302 | |||
303 | writel(pd_entry, pd_addr + i); | ||
304 | } | ||
305 | readl(pd_addr); | ||
306 | |||
307 | pd_offset = ppgtt->pd_offset; | ||
308 | pd_offset /= 64; /* in cachelines, */ | ||
309 | pd_offset <<= 16; | ||
310 | |||
311 | if (INTEL_INFO(dev)->gen == 6) { | ||
312 | uint32_t ecochk, gab_ctl, ecobits; | ||
313 | |||
314 | ecobits = I915_READ(GAC_ECO_BITS); | ||
315 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); | ||
316 | |||
317 | gab_ctl = I915_READ(GAB_CTL); | ||
318 | I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); | ||
319 | |||
320 | ecochk = I915_READ(GAM_ECOCHK); | ||
321 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | | ||
322 | ECOCHK_PPGTT_CACHE64B); | ||
323 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
324 | } else if (INTEL_INFO(dev)->gen >= 7) { | ||
325 | I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B); | ||
326 | /* GFX_MODE is per-ring on gen7+ */ | ||
327 | } | ||
328 | |||
329 | for_each_ring(ring, dev_priv, i) { | ||
330 | if (INTEL_INFO(dev)->gen >= 7) | ||
331 | I915_WRITE(RING_MODE_GEN7(ring), | ||
332 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
333 | |||
334 | I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); | ||
335 | I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); | ||
336 | } | ||
337 | } | ||
338 | |||
339 | extern int intel_iommu_gfx_mapped; | 353 | extern int intel_iommu_gfx_mapped; |
340 | /* Certain Gen5 chipsets require require idling the GPU before | 354 | /* Certain Gen5 chipsets require require idling the GPU before |
341 | * unmapping anything from the GTT when VT-d is enabled. | 355 | * unmapping anything from the GTT when VT-d is enabled. |
@@ -416,8 +430,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, | |||
416 | enum i915_cache_level level) | 430 | enum i915_cache_level level) |
417 | { | 431 | { |
418 | struct drm_i915_private *dev_priv = dev->dev_private; | 432 | struct drm_i915_private *dev_priv = dev->dev_private; |
419 | gtt_pte_t __iomem *gtt_entries = | 433 | gen6_gtt_pte_t __iomem *gtt_entries = |
420 | (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; | 434 | (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; |
421 | int i = 0; | 435 | int i = 0; |
422 | struct sg_page_iter sg_iter; | 436 | struct sg_page_iter sg_iter; |
423 | dma_addr_t addr; | 437 | dma_addr_t addr; |
@@ -451,8 +465,8 @@ static void gen6_ggtt_clear_range(struct drm_device *dev, | |||
451 | unsigned int num_entries) | 465 | unsigned int num_entries) |
452 | { | 466 | { |
453 | struct drm_i915_private *dev_priv = dev->dev_private; | 467 | struct drm_i915_private *dev_priv = dev->dev_private; |
454 | gtt_pte_t scratch_pte; | 468 | gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = |
455 | gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; | 469 | (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
456 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; | 470 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
457 | int i; | 471 | int i; |
458 | 472 | ||
@@ -626,9 +640,12 @@ void i915_gem_init_global_gtt(struct drm_device *dev) | |||
626 | 640 | ||
627 | if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { | 641 | if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { |
628 | int ret; | 642 | int ret; |
629 | /* PPGTT pdes are stolen from global gtt ptes, so shrink the | 643 | |
630 | * aperture accordingly when using aliasing ppgtt. */ | 644 | if (INTEL_INFO(dev)->gen <= 7) { |
631 | gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; | 645 | /* PPGTT pdes are stolen from global gtt ptes, so shrink the |
646 | * aperture accordingly when using aliasing ppgtt. */ | ||
647 | gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; | ||
648 | } | ||
632 | 649 | ||
633 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); | 650 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); |
634 | 651 | ||
@@ -736,10 +753,12 @@ static int gen6_gmch_probe(struct drm_device *dev, | |||
736 | else | 753 | else |
737 | *stolen = gen6_get_stolen_size(snb_gmch_ctl); | 754 | *stolen = gen6_get_stolen_size(snb_gmch_ctl); |
738 | 755 | ||
739 | *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT; | 756 | *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; |
757 | |||
758 | /* For Modern GENs the PTEs and register space are split in the BAR */ | ||
759 | gtt_bus_addr = pci_resource_start(dev->pdev, 0) + | ||
760 | (pci_resource_len(dev->pdev, 0) / 2); | ||
740 | 761 | ||
741 | /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ | ||
742 | gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); | ||
743 | dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); | 762 | dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); |
744 | if (!dev_priv->gtt.gsm) { | 763 | if (!dev_priv->gtt.gsm) { |
745 | DRM_ERROR("Failed to map the gtt page table\n"); | 764 | DRM_ERROR("Failed to map the gtt page table\n"); |
@@ -796,7 +815,6 @@ int i915_gem_gtt_init(struct drm_device *dev) | |||
796 | { | 815 | { |
797 | struct drm_i915_private *dev_priv = dev->dev_private; | 816 | struct drm_i915_private *dev_priv = dev->dev_private; |
798 | struct i915_gtt *gtt = &dev_priv->gtt; | 817 | struct i915_gtt *gtt = &dev_priv->gtt; |
799 | unsigned long gtt_size; | ||
800 | int ret; | 818 | int ret; |
801 | 819 | ||
802 | if (INTEL_INFO(dev)->gen <= 5) { | 820 | if (INTEL_INFO(dev)->gen <= 5) { |
@@ -814,8 +832,6 @@ int i915_gem_gtt_init(struct drm_device *dev) | |||
814 | if (ret) | 832 | if (ret) |
815 | return ret; | 833 | return ret; |
816 | 834 | ||
817 | gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t); | ||
818 | |||
819 | /* GMADR is the PCI mmio aperture into the global GTT. */ | 835 | /* GMADR is the PCI mmio aperture into the global GTT. */ |
820 | DRM_INFO("Memory usable by graphics device = %zdM\n", | 836 | DRM_INFO("Memory usable by graphics device = %zdM\n", |
821 | dev_priv->gtt.total >> 20); | 837 | dev_priv->gtt.total >> 20); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index c807eb93755b..537545be69db 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -217,9 +217,12 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
217 | tile_width = 512; | 217 | tile_width = 512; |
218 | 218 | ||
219 | /* check maximum stride & object size */ | 219 | /* check maximum stride & object size */ |
220 | if (INTEL_INFO(dev)->gen >= 4) { | 220 | /* i965+ stores the end address of the gtt mapping in the fence |
221 | /* i965 stores the end address of the gtt mapping in the fence | 221 | * reg, so dont bother to check the size */ |
222 | * reg, so dont bother to check the size */ | 222 | if (INTEL_INFO(dev)->gen >= 7) { |
223 | if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL) | ||
224 | return false; | ||
225 | } else if (INTEL_INFO(dev)->gen >= 4) { | ||
223 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) | 226 | if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) |
224 | return false; | 227 | return false; |
225 | } else { | 228 | } else { |
@@ -235,6 +238,9 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
235 | } | 238 | } |
236 | } | 239 | } |
237 | 240 | ||
241 | if (stride < tile_width) | ||
242 | return false; | ||
243 | |||
238 | /* 965+ just needs multiples of tile width */ | 244 | /* 965+ just needs multiples of tile width */ |
239 | if (INTEL_INFO(dev)->gen >= 4) { | 245 | if (INTEL_INFO(dev)->gen >= 4) { |
240 | if (stride & (tile_width - 1)) | 246 | if (stride & (tile_width - 1)) |
@@ -243,9 +249,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
243 | } | 249 | } |
244 | 250 | ||
245 | /* Pre-965 needs power of two tile widths */ | 251 | /* Pre-965 needs power of two tile widths */ |
246 | if (stride < tile_width) | ||
247 | return false; | ||
248 | |||
249 | if (stride & (stride - 1)) | 252 | if (stride & (stride - 1)) |
250 | return false; | 253 | return false; |
251 | 254 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4c5bdd037388..0aa2ef0d2ae0 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -88,7 +88,8 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ | |||
88 | [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS | 88 | [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS |
89 | }; | 89 | }; |
90 | 90 | ||
91 | 91 | static void ibx_hpd_irq_setup(struct drm_device *dev); | |
92 | static void i915_hpd_irq_setup(struct drm_device *dev); | ||
92 | 93 | ||
93 | /* For display hotplug interrupt */ | 94 | /* For display hotplug interrupt */ |
94 | static void | 95 | static void |
@@ -336,13 +337,19 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, | |||
336 | /* | 337 | /* |
337 | * Handle hotplug events outside the interrupt handler proper. | 338 | * Handle hotplug events outside the interrupt handler proper. |
338 | */ | 339 | */ |
340 | #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) | ||
341 | |||
339 | static void i915_hotplug_work_func(struct work_struct *work) | 342 | static void i915_hotplug_work_func(struct work_struct *work) |
340 | { | 343 | { |
341 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | 344 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
342 | hotplug_work); | 345 | hotplug_work); |
343 | struct drm_device *dev = dev_priv->dev; | 346 | struct drm_device *dev = dev_priv->dev; |
344 | struct drm_mode_config *mode_config = &dev->mode_config; | 347 | struct drm_mode_config *mode_config = &dev->mode_config; |
345 | struct intel_encoder *encoder; | 348 | struct intel_connector *intel_connector; |
349 | struct intel_encoder *intel_encoder; | ||
350 | struct drm_connector *connector; | ||
351 | unsigned long irqflags; | ||
352 | bool hpd_disabled = false; | ||
346 | 353 | ||
347 | /* HPD irq before everything is fully set up. */ | 354 | /* HPD irq before everything is fully set up. */ |
348 | if (!dev_priv->enable_hotplug_processing) | 355 | if (!dev_priv->enable_hotplug_processing) |
@@ -351,9 +358,36 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
351 | mutex_lock(&mode_config->mutex); | 358 | mutex_lock(&mode_config->mutex); |
352 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | 359 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
353 | 360 | ||
354 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 361 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
355 | if (encoder->hot_plug) | 362 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
356 | encoder->hot_plug(encoder); | 363 | intel_connector = to_intel_connector(connector); |
364 | intel_encoder = intel_connector->encoder; | ||
365 | if (intel_encoder->hpd_pin > HPD_NONE && | ||
366 | dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && | ||
367 | connector->polled == DRM_CONNECTOR_POLL_HPD) { | ||
368 | DRM_INFO("HPD interrupt storm detected on connector %s: " | ||
369 | "switching from hotplug detection to polling\n", | ||
370 | drm_get_connector_name(connector)); | ||
371 | dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; | ||
372 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | ||
373 | | DRM_CONNECTOR_POLL_DISCONNECT; | ||
374 | hpd_disabled = true; | ||
375 | } | ||
376 | } | ||
377 | /* if there were no outputs to poll, poll was disabled, | ||
378 | * therefore make sure it's enabled when disabling HPD on | ||
379 | * some connectors */ | ||
380 | if (hpd_disabled) { | ||
381 | drm_kms_helper_poll_enable(dev); | ||
382 | mod_timer(&dev_priv->hotplug_reenable_timer, | ||
383 | jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); | ||
384 | } | ||
385 | |||
386 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
387 | |||
388 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) | ||
389 | if (intel_encoder->hot_plug) | ||
390 | intel_encoder->hot_plug(intel_encoder); | ||
357 | 391 | ||
358 | mutex_unlock(&mode_config->mutex); | 392 | mutex_unlock(&mode_config->mutex); |
359 | 393 | ||
@@ -582,6 +616,45 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, | |||
582 | queue_work(dev_priv->wq, &dev_priv->rps.work); | 616 | queue_work(dev_priv->wq, &dev_priv->rps.work); |
583 | } | 617 | } |
584 | 618 | ||
619 | #define HPD_STORM_DETECT_PERIOD 1000 | ||
620 | #define HPD_STORM_THRESHOLD 5 | ||
621 | |||
622 | static inline bool hotplug_irq_storm_detect(struct drm_device *dev, | ||
623 | u32 hotplug_trigger, | ||
624 | const u32 *hpd) | ||
625 | { | ||
626 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
627 | unsigned long irqflags; | ||
628 | int i; | ||
629 | bool ret = false; | ||
630 | |||
631 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
632 | |||
633 | for (i = 1; i < HPD_NUM_PINS; i++) { | ||
634 | |||
635 | if (!(hpd[i] & hotplug_trigger) || | ||
636 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | ||
637 | continue; | ||
638 | |||
639 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, | ||
640 | dev_priv->hpd_stats[i].hpd_last_jiffies | ||
641 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | ||
642 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | ||
643 | dev_priv->hpd_stats[i].hpd_cnt = 0; | ||
644 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { | ||
645 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | ||
646 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); | ||
647 | ret = true; | ||
648 | } else { | ||
649 | dev_priv->hpd_stats[i].hpd_cnt++; | ||
650 | } | ||
651 | } | ||
652 | |||
653 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
654 | |||
655 | return ret; | ||
656 | } | ||
657 | |||
585 | static void gmbus_irq_handler(struct drm_device *dev) | 658 | static void gmbus_irq_handler(struct drm_device *dev) |
586 | { | 659 | { |
587 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; | 660 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -650,13 +723,16 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) | |||
650 | /* Consume port. Then clear IIR or we'll miss events */ | 723 | /* Consume port. Then clear IIR or we'll miss events */ |
651 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | 724 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
652 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 725 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
726 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | ||
653 | 727 | ||
654 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | 728 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
655 | hotplug_status); | 729 | hotplug_status); |
656 | if (hotplug_status & HOTPLUG_INT_STATUS_I915) | 730 | if (hotplug_trigger) { |
731 | if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) | ||
732 | i915_hpd_irq_setup(dev); | ||
657 | queue_work(dev_priv->wq, | 733 | queue_work(dev_priv->wq, |
658 | &dev_priv->hotplug_work); | 734 | &dev_priv->hotplug_work); |
659 | 735 | } | |
660 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 736 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
661 | I915_READ(PORT_HOTPLUG_STAT); | 737 | I915_READ(PORT_HOTPLUG_STAT); |
662 | } | 738 | } |
@@ -680,10 +756,13 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
680 | { | 756 | { |
681 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 757 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
682 | int pipe; | 758 | int pipe; |
759 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; | ||
683 | 760 | ||
684 | if (pch_iir & SDE_HOTPLUG_MASK) | 761 | if (hotplug_trigger) { |
762 | if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx)) | ||
763 | ibx_hpd_irq_setup(dev); | ||
685 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 764 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
686 | 765 | } | |
687 | if (pch_iir & SDE_AUDIO_POWER_MASK) | 766 | if (pch_iir & SDE_AUDIO_POWER_MASK) |
688 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | 767 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
689 | (pch_iir & SDE_AUDIO_POWER_MASK) >> | 768 | (pch_iir & SDE_AUDIO_POWER_MASK) >> |
@@ -726,10 +805,13 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
726 | { | 805 | { |
727 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 806 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
728 | int pipe; | 807 | int pipe; |
808 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; | ||
729 | 809 | ||
730 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) | 810 | if (hotplug_trigger) { |
811 | if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) | ||
812 | ibx_hpd_irq_setup(dev); | ||
731 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 813 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
732 | 814 | } | |
733 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) | 815 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) |
734 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", | 816 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
735 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | 817 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
@@ -758,7 +840,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | |||
758 | { | 840 | { |
759 | struct drm_device *dev = (struct drm_device *) arg; | 841 | struct drm_device *dev = (struct drm_device *) arg; |
760 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 842 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
761 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; | 843 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; |
762 | irqreturn_t ret = IRQ_NONE; | 844 | irqreturn_t ret = IRQ_NONE; |
763 | int i; | 845 | int i; |
764 | 846 | ||
@@ -773,9 +855,11 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | |||
773 | * able to process them after we restore SDEIER (as soon as we restore | 855 | * able to process them after we restore SDEIER (as soon as we restore |
774 | * it, we'll get an interrupt if SDEIIR still has something to process | 856 | * it, we'll get an interrupt if SDEIIR still has something to process |
775 | * due to its back queue). */ | 857 | * due to its back queue). */ |
776 | sde_ier = I915_READ(SDEIER); | 858 | if (!HAS_PCH_NOP(dev)) { |
777 | I915_WRITE(SDEIER, 0); | 859 | sde_ier = I915_READ(SDEIER); |
778 | POSTING_READ(SDEIER); | 860 | I915_WRITE(SDEIER, 0); |
861 | POSTING_READ(SDEIER); | ||
862 | } | ||
779 | 863 | ||
780 | gt_iir = I915_READ(GTIIR); | 864 | gt_iir = I915_READ(GTIIR); |
781 | if (gt_iir) { | 865 | if (gt_iir) { |
@@ -802,7 +886,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | |||
802 | } | 886 | } |
803 | 887 | ||
804 | /* check event from PCH */ | 888 | /* check event from PCH */ |
805 | if (de_iir & DE_PCH_EVENT_IVB) { | 889 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { |
806 | u32 pch_iir = I915_READ(SDEIIR); | 890 | u32 pch_iir = I915_READ(SDEIIR); |
807 | 891 | ||
808 | cpt_irq_handler(dev, pch_iir); | 892 | cpt_irq_handler(dev, pch_iir); |
@@ -825,8 +909,10 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | |||
825 | 909 | ||
826 | I915_WRITE(DEIER, de_ier); | 910 | I915_WRITE(DEIER, de_ier); |
827 | POSTING_READ(DEIER); | 911 | POSTING_READ(DEIER); |
828 | I915_WRITE(SDEIER, sde_ier); | 912 | if (!HAS_PCH_NOP(dev)) { |
829 | POSTING_READ(SDEIER); | 913 | I915_WRITE(SDEIER, sde_ier); |
914 | POSTING_READ(SDEIER); | ||
915 | } | ||
830 | 916 | ||
831 | return ret; | 917 | return ret; |
832 | } | 918 | } |
@@ -1209,7 +1295,7 @@ static void i915_gem_record_fences(struct drm_device *dev, | |||
1209 | switch (INTEL_INFO(dev)->gen) { | 1295 | switch (INTEL_INFO(dev)->gen) { |
1210 | case 7: | 1296 | case 7: |
1211 | case 6: | 1297 | case 6: |
1212 | for (i = 0; i < 16; i++) | 1298 | for (i = 0; i < dev_priv->num_fence_regs; i++) |
1213 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | 1299 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
1214 | break; | 1300 | break; |
1215 | case 5: | 1301 | case 5: |
@@ -2027,6 +2113,9 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
2027 | I915_WRITE(GTIER, 0x0); | 2113 | I915_WRITE(GTIER, 0x0); |
2028 | POSTING_READ(GTIER); | 2114 | POSTING_READ(GTIER); |
2029 | 2115 | ||
2116 | if (HAS_PCH_NOP(dev)) | ||
2117 | return; | ||
2118 | |||
2030 | /* south display irq */ | 2119 | /* south display irq */ |
2031 | I915_WRITE(SDEIMR, 0xffffffff); | 2120 | I915_WRITE(SDEIMR, 0xffffffff); |
2032 | /* | 2121 | /* |
@@ -2080,11 +2169,15 @@ static void ibx_hpd_irq_setup(struct drm_device *dev) | |||
2080 | u32 hotplug; | 2169 | u32 hotplug; |
2081 | 2170 | ||
2082 | if (HAS_PCH_IBX(dev)) { | 2171 | if (HAS_PCH_IBX(dev)) { |
2172 | mask &= ~SDE_HOTPLUG_MASK; | ||
2083 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) | 2173 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
2084 | mask |= hpd_ibx[intel_encoder->hpd_pin]; | 2174 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
2175 | mask |= hpd_ibx[intel_encoder->hpd_pin]; | ||
2085 | } else { | 2176 | } else { |
2177 | mask &= ~SDE_HOTPLUG_MASK_CPT; | ||
2086 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) | 2178 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
2087 | mask |= hpd_cpt[intel_encoder->hpd_pin]; | 2179 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
2180 | mask |= hpd_cpt[intel_encoder->hpd_pin]; | ||
2088 | } | 2181 | } |
2089 | 2182 | ||
2090 | I915_WRITE(SDEIMR, ~mask); | 2183 | I915_WRITE(SDEIMR, ~mask); |
@@ -2112,6 +2205,10 @@ static void ibx_irq_postinstall(struct drm_device *dev) | |||
2112 | mask = SDE_GMBUS | SDE_AUX_MASK; | 2205 | mask = SDE_GMBUS | SDE_AUX_MASK; |
2113 | else | 2206 | else |
2114 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; | 2207 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; |
2208 | |||
2209 | if (HAS_PCH_NOP(dev)) | ||
2210 | return; | ||
2211 | |||
2115 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 2212 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
2116 | I915_WRITE(SDEIMR, ~mask); | 2213 | I915_WRITE(SDEIMR, ~mask); |
2117 | } | 2214 | } |
@@ -2275,6 +2372,8 @@ static void valleyview_irq_uninstall(struct drm_device *dev) | |||
2275 | if (!dev_priv) | 2372 | if (!dev_priv) |
2276 | return; | 2373 | return; |
2277 | 2374 | ||
2375 | del_timer_sync(&dev_priv->hotplug_reenable_timer); | ||
2376 | |||
2278 | for_each_pipe(pipe) | 2377 | for_each_pipe(pipe) |
2279 | I915_WRITE(PIPESTAT(pipe), 0xffff); | 2378 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
2280 | 2379 | ||
@@ -2296,6 +2395,8 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
2296 | if (!dev_priv) | 2395 | if (!dev_priv) |
2297 | return; | 2396 | return; |
2298 | 2397 | ||
2398 | del_timer_sync(&dev_priv->hotplug_reenable_timer); | ||
2399 | |||
2299 | I915_WRITE(HWSTAM, 0xffffffff); | 2400 | I915_WRITE(HWSTAM, 0xffffffff); |
2300 | 2401 | ||
2301 | I915_WRITE(DEIMR, 0xffffffff); | 2402 | I915_WRITE(DEIMR, 0xffffffff); |
@@ -2306,6 +2407,9 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
2306 | I915_WRITE(GTIER, 0x0); | 2407 | I915_WRITE(GTIER, 0x0); |
2307 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2408 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
2308 | 2409 | ||
2410 | if (HAS_PCH_NOP(dev)) | ||
2411 | return; | ||
2412 | |||
2309 | I915_WRITE(SDEIMR, 0xffffffff); | 2413 | I915_WRITE(SDEIMR, 0xffffffff); |
2310 | I915_WRITE(SDEIER, 0x0); | 2414 | I915_WRITE(SDEIER, 0x0); |
2311 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | 2415 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
@@ -2607,13 +2711,16 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) | |||
2607 | if ((I915_HAS_HOTPLUG(dev)) && | 2711 | if ((I915_HAS_HOTPLUG(dev)) && |
2608 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { | 2712 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { |
2609 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 2713 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
2714 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | ||
2610 | 2715 | ||
2611 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | 2716 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
2612 | hotplug_status); | 2717 | hotplug_status); |
2613 | if (hotplug_status & HOTPLUG_INT_STATUS_I915) | 2718 | if (hotplug_trigger) { |
2719 | if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) | ||
2720 | i915_hpd_irq_setup(dev); | ||
2614 | queue_work(dev_priv->wq, | 2721 | queue_work(dev_priv->wq, |
2615 | &dev_priv->hotplug_work); | 2722 | &dev_priv->hotplug_work); |
2616 | 2723 | } | |
2617 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 2724 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
2618 | POSTING_READ(PORT_HOTPLUG_STAT); | 2725 | POSTING_READ(PORT_HOTPLUG_STAT); |
2619 | } | 2726 | } |
@@ -2669,6 +2776,8 @@ static void i915_irq_uninstall(struct drm_device * dev) | |||
2669 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2776 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2670 | int pipe; | 2777 | int pipe; |
2671 | 2778 | ||
2779 | del_timer_sync(&dev_priv->hotplug_reenable_timer); | ||
2780 | |||
2672 | if (I915_HAS_HOTPLUG(dev)) { | 2781 | if (I915_HAS_HOTPLUG(dev)) { |
2673 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 2782 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2674 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 2783 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
@@ -2760,7 +2869,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev) | |||
2760 | { | 2869 | { |
2761 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2870 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2762 | struct drm_mode_config *mode_config = &dev->mode_config; | 2871 | struct drm_mode_config *mode_config = &dev->mode_config; |
2763 | struct intel_encoder *encoder; | 2872 | struct intel_encoder *intel_encoder; |
2764 | u32 hotplug_en; | 2873 | u32 hotplug_en; |
2765 | 2874 | ||
2766 | if (I915_HAS_HOTPLUG(dev)) { | 2875 | if (I915_HAS_HOTPLUG(dev)) { |
@@ -2768,8 +2877,9 @@ static void i915_hpd_irq_setup(struct drm_device *dev) | |||
2768 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; | 2877 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; |
2769 | /* Note HDMI and DP share hotplug bits */ | 2878 | /* Note HDMI and DP share hotplug bits */ |
2770 | /* enable bits are the same for all generations */ | 2879 | /* enable bits are the same for all generations */ |
2771 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 2880 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
2772 | hotplug_en |= hpd_mask_i915[encoder->hpd_pin]; | 2881 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
2882 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; | ||
2773 | /* Programming the CRT detection parameters tends | 2883 | /* Programming the CRT detection parameters tends |
2774 | to generate a spurious hotplug event about three | 2884 | to generate a spurious hotplug event about three |
2775 | seconds later. So just do it once. | 2885 | seconds later. So just do it once. |
@@ -2840,15 +2950,19 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) | |||
2840 | /* Consume port. Then clear IIR or we'll miss events */ | 2950 | /* Consume port. Then clear IIR or we'll miss events */ |
2841 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | 2951 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
2842 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | 2952 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
2953 | u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? | ||
2954 | HOTPLUG_INT_STATUS_G4X : | ||
2955 | HOTPLUG_INT_STATUS_I965); | ||
2843 | 2956 | ||
2844 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | 2957 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
2845 | hotplug_status); | 2958 | hotplug_status); |
2846 | if (hotplug_status & (IS_G4X(dev) ? | 2959 | if (hotplug_trigger) { |
2847 | HOTPLUG_INT_STATUS_G4X : | 2960 | if (hotplug_irq_storm_detect(dev, hotplug_trigger, |
2848 | HOTPLUG_INT_STATUS_I965)) | 2961 | IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965)) |
2962 | i915_hpd_irq_setup(dev); | ||
2849 | queue_work(dev_priv->wq, | 2963 | queue_work(dev_priv->wq, |
2850 | &dev_priv->hotplug_work); | 2964 | &dev_priv->hotplug_work); |
2851 | 2965 | } | |
2852 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | 2966 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
2853 | I915_READ(PORT_HOTPLUG_STAT); | 2967 | I915_READ(PORT_HOTPLUG_STAT); |
2854 | } | 2968 | } |
@@ -2908,6 +3022,8 @@ static void i965_irq_uninstall(struct drm_device * dev) | |||
2908 | if (!dev_priv) | 3022 | if (!dev_priv) |
2909 | return; | 3023 | return; |
2910 | 3024 | ||
3025 | del_timer_sync(&dev_priv->hotplug_reenable_timer); | ||
3026 | |||
2911 | I915_WRITE(PORT_HOTPLUG_EN, 0); | 3027 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2912 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | 3028 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
2913 | 3029 | ||
@@ -2923,6 +3039,41 @@ static void i965_irq_uninstall(struct drm_device * dev) | |||
2923 | I915_WRITE(IIR, I915_READ(IIR)); | 3039 | I915_WRITE(IIR, I915_READ(IIR)); |
2924 | } | 3040 | } |
2925 | 3041 | ||
3042 | static void i915_reenable_hotplug_timer_func(unsigned long data) | ||
3043 | { | ||
3044 | drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; | ||
3045 | struct drm_device *dev = dev_priv->dev; | ||
3046 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
3047 | unsigned long irqflags; | ||
3048 | int i; | ||
3049 | |||
3050 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
3051 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { | ||
3052 | struct drm_connector *connector; | ||
3053 | |||
3054 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) | ||
3055 | continue; | ||
3056 | |||
3057 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | ||
3058 | |||
3059 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
3060 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
3061 | |||
3062 | if (intel_connector->encoder->hpd_pin == i) { | ||
3063 | if (connector->polled != intel_connector->polled) | ||
3064 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | ||
3065 | drm_get_connector_name(connector)); | ||
3066 | connector->polled = intel_connector->polled; | ||
3067 | if (!connector->polled) | ||
3068 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
3069 | } | ||
3070 | } | ||
3071 | } | ||
3072 | if (dev_priv->display.hpd_irq_setup) | ||
3073 | dev_priv->display.hpd_irq_setup(dev); | ||
3074 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
3075 | } | ||
3076 | |||
2926 | void intel_irq_init(struct drm_device *dev) | 3077 | void intel_irq_init(struct drm_device *dev) |
2927 | { | 3078 | { |
2928 | struct drm_i915_private *dev_priv = dev->dev_private; | 3079 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -2935,6 +3086,8 @@ void intel_irq_init(struct drm_device *dev) | |||
2935 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, | 3086 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
2936 | i915_hangcheck_elapsed, | 3087 | i915_hangcheck_elapsed, |
2937 | (unsigned long) dev); | 3088 | (unsigned long) dev); |
3089 | setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, | ||
3090 | (unsigned long) dev_priv); | ||
2938 | 3091 | ||
2939 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); | 3092 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
2940 | 3093 | ||
@@ -3003,7 +3156,20 @@ void intel_irq_init(struct drm_device *dev) | |||
3003 | void intel_hpd_init(struct drm_device *dev) | 3156 | void intel_hpd_init(struct drm_device *dev) |
3004 | { | 3157 | { |
3005 | struct drm_i915_private *dev_priv = dev->dev_private; | 3158 | struct drm_i915_private *dev_priv = dev->dev_private; |
3159 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
3160 | struct drm_connector *connector; | ||
3161 | int i; | ||
3006 | 3162 | ||
3163 | for (i = 1; i < HPD_NUM_PINS; i++) { | ||
3164 | dev_priv->hpd_stats[i].hpd_cnt = 0; | ||
3165 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | ||
3166 | } | ||
3167 | list_for_each_entry(connector, &mode_config->connector_list, head) { | ||
3168 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
3169 | connector->polled = intel_connector->polled; | ||
3170 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) | ||
3171 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
3172 | } | ||
3007 | if (dev_priv->display.hpd_irq_setup) | 3173 | if (dev_priv->display.hpd_irq_setup) |
3008 | dev_priv->display.hpd_irq_setup(dev); | 3174 | dev_priv->display.hpd_irq_setup(dev); |
3009 | } | 3175 | } |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 058686c0dbbf..31de7e4b1f3e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -125,8 +125,14 @@ | |||
125 | #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) | 125 | #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) |
126 | #define ECOCHK_PPGTT_CACHE64B (0x3<<3) | 126 | #define ECOCHK_PPGTT_CACHE64B (0x3<<3) |
127 | #define ECOCHK_PPGTT_CACHE4B (0x0<<3) | 127 | #define ECOCHK_PPGTT_CACHE4B (0x0<<3) |
128 | #define ECOCHK_PPGTT_GFDT_IVB (0x1<<4) | ||
129 | #define ECOCHK_PPGTT_LLC_IVB (0x1<<3) | ||
130 | #define ECOCHK_PPGTT_UC_HSW (0x1<<3) | ||
131 | #define ECOCHK_PPGTT_WT_HSW (0x2<<3) | ||
132 | #define ECOCHK_PPGTT_WB_HSW (0x3<<3) | ||
128 | 133 | ||
129 | #define GAC_ECO_BITS 0x14090 | 134 | #define GAC_ECO_BITS 0x14090 |
135 | #define ECOBITS_SNB_BIT (1<<13) | ||
130 | #define ECOBITS_PPGTT_CACHE64B (3<<8) | 136 | #define ECOBITS_PPGTT_CACHE64B (3<<8) |
131 | #define ECOBITS_PPGTT_CACHE4B (0<<8) | 137 | #define ECOBITS_PPGTT_CACHE4B (0<<8) |
132 | 138 | ||
@@ -424,6 +430,7 @@ | |||
424 | 430 | ||
425 | #define FENCE_REG_SANDYBRIDGE_0 0x100000 | 431 | #define FENCE_REG_SANDYBRIDGE_0 0x100000 |
426 | #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 | 432 | #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 |
433 | #define GEN7_FENCE_MAX_PITCH_VAL 0x0800 | ||
427 | 434 | ||
428 | /* control register for cpu gtt access */ | 435 | /* control register for cpu gtt access */ |
429 | #define TILECTL 0x101000 | 436 | #define TILECTL 0x101000 |
@@ -1203,6 +1210,9 @@ | |||
1203 | 1210 | ||
1204 | #define MCHBAR_MIRROR_BASE_SNB 0x140000 | 1211 | #define MCHBAR_MIRROR_BASE_SNB 0x140000 |
1205 | 1212 | ||
1213 | /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ | ||
1214 | #define DCLK 0x5e04 | ||
1215 | |||
1206 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ | 1216 | /** 915-945 and GM965 MCH register controlling DRAM channel access */ |
1207 | #define DCC 0x10200 | 1217 | #define DCC 0x10200 |
1208 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) | 1218 | #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) |
@@ -3568,6 +3578,9 @@ | |||
3568 | #define DISP_ARB_CTL 0x45000 | 3578 | #define DISP_ARB_CTL 0x45000 |
3569 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) | 3579 | #define DISP_TILE_SURFACE_SWIZZLING (1<<13) |
3570 | #define DISP_FBC_WM_DIS (1<<15) | 3580 | #define DISP_FBC_WM_DIS (1<<15) |
3581 | #define GEN7_MSG_CTL 0x45010 | ||
3582 | #define WAIT_FOR_PCH_RESET_ACK (1<<1) | ||
3583 | #define WAIT_FOR_PCH_FLR_ACK (1<<0) | ||
3571 | 3584 | ||
3572 | /* GEN7 chicken */ | 3585 | /* GEN7 chicken */ |
3573 | #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 | 3586 | #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 |
@@ -3946,8 +3959,11 @@ | |||
3946 | #define _TRANSA_CHICKEN2 0xf0064 | 3959 | #define _TRANSA_CHICKEN2 0xf0064 |
3947 | #define _TRANSB_CHICKEN2 0xf1064 | 3960 | #define _TRANSB_CHICKEN2 0xf1064 |
3948 | #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) | 3961 | #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) |
3949 | #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) | 3962 | #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31) |
3950 | 3963 | #define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29) | |
3964 | #define TRANS_CHICKEN2_FRAME_START_DELAY_MASK (3<<27) | ||
3965 | #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER (1<<26) | ||
3966 | #define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH (1<<25) | ||
3951 | 3967 | ||
3952 | #define SOUTH_CHICKEN1 0xc2000 | 3968 | #define SOUTH_CHICKEN1 0xc2000 |
3953 | #define FDIA_PHASE_SYNC_SHIFT_OVR 19 | 3969 | #define FDIA_PHASE_SYNC_SHIFT_OVR 19 |
@@ -4380,6 +4396,7 @@ | |||
4380 | #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) | 4396 | #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) |
4381 | #define GEN6_PCODE_DATA 0x138128 | 4397 | #define GEN6_PCODE_DATA 0x138128 |
4382 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 | 4398 | #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 |
4399 | #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 | ||
4383 | 4400 | ||
4384 | #define VLV_IOSF_DOORBELL_REQ 0x182100 | 4401 | #define VLV_IOSF_DOORBELL_REQ 0x182100 |
4385 | #define IOSF_DEVFN_SHIFT 24 | 4402 | #define IOSF_DEVFN_SHIFT 24 |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index a3a3e22f1a84..d5e1890678f9 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -239,7 +239,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
239 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); | 239 | struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); |
240 | struct drm_device *dev = minor->dev; | 240 | struct drm_device *dev = minor->dev; |
241 | struct drm_i915_private *dev_priv = dev->dev_private; | 241 | struct drm_i915_private *dev_priv = dev->dev_private; |
242 | u32 val, rp_state_cap, hw_max, hw_min; | 242 | u32 val, rp_state_cap, hw_max, hw_min, non_oc_max; |
243 | ssize_t ret; | 243 | ssize_t ret; |
244 | 244 | ||
245 | ret = kstrtou32(buf, 0, &val); | 245 | ret = kstrtou32(buf, 0, &val); |
@@ -251,7 +251,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
251 | mutex_lock(&dev_priv->rps.hw_lock); | 251 | mutex_lock(&dev_priv->rps.hw_lock); |
252 | 252 | ||
253 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 253 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
254 | hw_max = (rp_state_cap & 0xff); | 254 | hw_max = dev_priv->rps.hw_max; |
255 | non_oc_max = (rp_state_cap & 0xff); | ||
255 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | 256 | hw_min = ((rp_state_cap & 0xff0000) >> 16); |
256 | 257 | ||
257 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { | 258 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { |
@@ -259,6 +260,10 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
259 | return -EINVAL; | 260 | return -EINVAL; |
260 | } | 261 | } |
261 | 262 | ||
263 | if (val > non_oc_max) | ||
264 | DRM_DEBUG("User requested overclocking to %d\n", | ||
265 | val * GT_FREQUENCY_MULTIPLIER); | ||
266 | |||
262 | if (dev_priv->rps.cur_delay > val) | 267 | if (dev_priv->rps.cur_delay > val) |
263 | gen6_set_rps(dev_priv->dev, val); | 268 | gen6_set_rps(dev_priv->dev, val); |
264 | 269 | ||
@@ -302,7 +307,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
302 | mutex_lock(&dev_priv->rps.hw_lock); | 307 | mutex_lock(&dev_priv->rps.hw_lock); |
303 | 308 | ||
304 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 309 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
305 | hw_max = (rp_state_cap & 0xff); | 310 | hw_max = dev_priv->rps.hw_max; |
306 | hw_min = ((rp_state_cap & 0xff0000) >> 16); | 311 | hw_min = ((rp_state_cap & 0xff0000) >> 16); |
307 | 312 | ||
308 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { | 313 | if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 55ffba1f5818..95070b2124c6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
351 | dev_priv->lvds_ssc_freq = | 351 | dev_priv->lvds_ssc_freq = |
352 | intel_bios_ssc_frequency(dev, general->ssc_freq); | 352 | intel_bios_ssc_frequency(dev, general->ssc_freq); |
353 | dev_priv->display_clock_mode = general->display_clock_mode; | 353 | dev_priv->display_clock_mode = general->display_clock_mode; |
354 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", | 354 | dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; |
355 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", | ||
355 | dev_priv->int_tv_support, | 356 | dev_priv->int_tv_support, |
356 | dev_priv->int_crt_support, | 357 | dev_priv->int_crt_support, |
357 | dev_priv->lvds_use_ssc, | 358 | dev_priv->lvds_use_ssc, |
358 | dev_priv->lvds_ssc_freq, | 359 | dev_priv->lvds_ssc_freq, |
359 | dev_priv->display_clock_mode); | 360 | dev_priv->display_clock_mode, |
361 | dev_priv->fdi_rx_polarity_inverted); | ||
360 | } | 362 | } |
361 | } | 363 | } |
362 | 364 | ||
@@ -692,6 +694,9 @@ intel_parse_bios(struct drm_device *dev) | |||
692 | struct bdb_header *bdb = NULL; | 694 | struct bdb_header *bdb = NULL; |
693 | u8 __iomem *bios = NULL; | 695 | u8 __iomem *bios = NULL; |
694 | 696 | ||
697 | if (HAS_PCH_NOP(dev)) | ||
698 | return -ENODEV; | ||
699 | |||
695 | init_vbt_defaults(dev_priv); | 700 | init_vbt_defaults(dev_priv); |
696 | 701 | ||
697 | /* XXX Should this validation be moved to intel_opregion.c? */ | 702 | /* XXX Should this validation be moved to intel_opregion.c? */ |
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 36e57f934373..e088d6f0956a 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -127,7 +127,9 @@ struct bdb_general_features { | |||
127 | /* bits 3 */ | 127 | /* bits 3 */ |
128 | u8 disable_smooth_vision:1; | 128 | u8 disable_smooth_vision:1; |
129 | u8 single_dvi:1; | 129 | u8 single_dvi:1; |
130 | u8 rsvd9:6; /* finish byte */ | 130 | u8 rsvd9:1; |
131 | u8 fdi_rx_polarity_inverted:1; | ||
132 | u8 rsvd10:4; /* finish byte */ | ||
131 | 133 | ||
132 | /* bits 4 */ | 134 | /* bits 4 */ |
133 | u8 legacy_monitor_detect; | 135 | u8 legacy_monitor_detect; |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 1d8d63aff444..58b4a53715cd 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -787,10 +787,8 @@ void intel_crt_init(struct drm_device *dev) | |||
787 | 787 | ||
788 | drm_sysfs_connector_add(connector); | 788 | drm_sysfs_connector_add(connector); |
789 | 789 | ||
790 | if (I915_HAS_HOTPLUG(dev)) | 790 | if (!I915_HAS_HOTPLUG(dev)) |
791 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 791 | intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
792 | else | ||
793 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | ||
794 | 792 | ||
795 | /* | 793 | /* |
796 | * Configure the automatic hotplug detection stuff | 794 | * Configure the automatic hotplug detection stuff |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 22524cb6903b..26a0a570f92e 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -924,7 +924,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) | |||
924 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 924 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
925 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 925 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
926 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); | 926 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
927 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 927 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
928 | int type = intel_encoder->type; | 928 | int type = intel_encoder->type; |
929 | uint32_t temp; | 929 | uint32_t temp; |
930 | 930 | ||
@@ -958,7 +958,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) | |||
958 | struct drm_encoder *encoder = &intel_encoder->base; | 958 | struct drm_encoder *encoder = &intel_encoder->base; |
959 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 959 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
960 | enum pipe pipe = intel_crtc->pipe; | 960 | enum pipe pipe = intel_crtc->pipe; |
961 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 961 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
962 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | 962 | enum port port = intel_ddi_get_encoder_port(intel_encoder); |
963 | int type = intel_encoder->type; | 963 | int type = intel_encoder->type; |
964 | uint32_t temp; | 964 | uint32_t temp; |
@@ -1223,7 +1223,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) | |||
1223 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 1223 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
1224 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); | 1224 | struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); |
1225 | enum port port = intel_ddi_get_encoder_port(intel_encoder); | 1225 | enum port port = intel_ddi_get_encoder_port(intel_encoder); |
1226 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 1226 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
1227 | 1227 | ||
1228 | if (cpu_transcoder != TRANSCODER_EDP) | 1228 | if (cpu_transcoder != TRANSCODER_EDP) |
1229 | I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), | 1229 | I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), |
@@ -1233,7 +1233,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) | |||
1233 | void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) | 1233 | void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) |
1234 | { | 1234 | { |
1235 | struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; | 1235 | struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; |
1236 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 1236 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
1237 | 1237 | ||
1238 | if (cpu_transcoder != TRANSCODER_EDP) | 1238 | if (cpu_transcoder != TRANSCODER_EDP) |
1239 | I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), | 1239 | I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b7005640144c..6e423e04c35e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -892,7 +892,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, | |||
892 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 892 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
893 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 893 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
894 | 894 | ||
895 | return intel_crtc->cpu_transcoder; | 895 | return intel_crtc->config.cpu_transcoder; |
896 | } | 896 | } |
897 | 897 | ||
898 | static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) | 898 | static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) |
@@ -1227,8 +1227,8 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
1227 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) | 1227 | if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) |
1228 | state = true; | 1228 | state = true; |
1229 | 1229 | ||
1230 | if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP && | 1230 | if (!intel_using_power_well(dev_priv->dev) && |
1231 | !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) { | 1231 | cpu_transcoder != TRANSCODER_EDP) { |
1232 | cur_state = false; | 1232 | cur_state = false; |
1233 | } else { | 1233 | } else { |
1234 | reg = PIPECONF(cpu_transcoder); | 1234 | reg = PIPECONF(cpu_transcoder); |
@@ -2002,8 +2002,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
2002 | alignment = 0; | 2002 | alignment = 0; |
2003 | break; | 2003 | break; |
2004 | case I915_TILING_Y: | 2004 | case I915_TILING_Y: |
2005 | /* FIXME: Is this true? */ | 2005 | /* Despite that we check this in framebuffer_init userspace can |
2006 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); | 2006 | * screw us over and change the tiling after the fact. Only |
2007 | * pinned buffers can't change their tiling. */ | ||
2008 | DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n"); | ||
2007 | return -EINVAL; | 2009 | return -EINVAL; |
2008 | default: | 2010 | default: |
2009 | BUG(); | 2011 | BUG(); |
@@ -3201,7 +3203,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc) | |||
3201 | struct drm_device *dev = crtc->dev; | 3203 | struct drm_device *dev = crtc->dev; |
3202 | struct drm_i915_private *dev_priv = dev->dev_private; | 3204 | struct drm_i915_private *dev_priv = dev->dev_private; |
3203 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 3205 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
3204 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 3206 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
3205 | 3207 | ||
3206 | assert_transcoder_disabled(dev_priv, TRANSCODER_A); | 3208 | assert_transcoder_disabled(dev_priv, TRANSCODER_A); |
3207 | 3209 | ||
@@ -3576,7 +3578,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
3576 | struct intel_encoder *encoder; | 3578 | struct intel_encoder *encoder; |
3577 | int pipe = intel_crtc->pipe; | 3579 | int pipe = intel_crtc->pipe; |
3578 | int plane = intel_crtc->plane; | 3580 | int plane = intel_crtc->plane; |
3579 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 3581 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
3580 | 3582 | ||
3581 | if (!intel_crtc->active) | 3583 | if (!intel_crtc->active) |
3582 | return; | 3584 | return; |
@@ -3597,9 +3599,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
3597 | 3599 | ||
3598 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); | 3600 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); |
3599 | 3601 | ||
3600 | /* Disable PF */ | 3602 | /* XXX: Once we have proper panel fitter state tracking implemented with |
3601 | I915_WRITE(PF_CTL(pipe), 0); | 3603 | * hardware state read/check support we should switch to only disable |
3602 | I915_WRITE(PF_WIN_SZ(pipe), 0); | 3604 | * the panel fitter when we know it's used. */ |
3605 | if (intel_using_power_well(dev)) { | ||
3606 | I915_WRITE(PF_CTL(pipe), 0); | ||
3607 | I915_WRITE(PF_WIN_SZ(pipe), 0); | ||
3608 | } | ||
3603 | 3609 | ||
3604 | intel_ddi_disable_pipe_clock(intel_crtc); | 3610 | intel_ddi_disable_pipe_clock(intel_crtc); |
3605 | 3611 | ||
@@ -3632,7 +3638,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc) | |||
3632 | 3638 | ||
3633 | /* Stop saying we're using TRANSCODER_EDP because some other CRTC might | 3639 | /* Stop saying we're using TRANSCODER_EDP because some other CRTC might |
3634 | * start using it. */ | 3640 | * start using it. */ |
3635 | intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe; | 3641 | intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; |
3636 | 3642 | ||
3637 | intel_ddi_put_crtc_pll(crtc); | 3643 | intel_ddi_put_crtc_pll(crtc); |
3638 | } | 3644 | } |
@@ -3718,6 +3724,26 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
3718 | encoder->enable(encoder); | 3724 | encoder->enable(encoder); |
3719 | } | 3725 | } |
3720 | 3726 | ||
3727 | static void i9xx_pfit_disable(struct intel_crtc *crtc) | ||
3728 | { | ||
3729 | struct drm_device *dev = crtc->base.dev; | ||
3730 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3731 | enum pipe pipe; | ||
3732 | uint32_t pctl = I915_READ(PFIT_CONTROL); | ||
3733 | |||
3734 | assert_pipe_disabled(dev_priv, crtc->pipe); | ||
3735 | |||
3736 | if (INTEL_INFO(dev)->gen >= 4) | ||
3737 | pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT; | ||
3738 | else | ||
3739 | pipe = PIPE_B; | ||
3740 | |||
3741 | if (pipe == crtc->pipe) { | ||
3742 | DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl); | ||
3743 | I915_WRITE(PFIT_CONTROL, 0); | ||
3744 | } | ||
3745 | } | ||
3746 | |||
3721 | static void i9xx_crtc_disable(struct drm_crtc *crtc) | 3747 | static void i9xx_crtc_disable(struct drm_crtc *crtc) |
3722 | { | 3748 | { |
3723 | struct drm_device *dev = crtc->dev; | 3749 | struct drm_device *dev = crtc->dev; |
@@ -3726,8 +3752,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
3726 | struct intel_encoder *encoder; | 3752 | struct intel_encoder *encoder; |
3727 | int pipe = intel_crtc->pipe; | 3753 | int pipe = intel_crtc->pipe; |
3728 | int plane = intel_crtc->plane; | 3754 | int plane = intel_crtc->plane; |
3729 | u32 pctl; | ||
3730 | |||
3731 | 3755 | ||
3732 | if (!intel_crtc->active) | 3756 | if (!intel_crtc->active) |
3733 | return; | 3757 | return; |
@@ -3747,11 +3771,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
3747 | intel_disable_plane(dev_priv, plane, pipe); | 3771 | intel_disable_plane(dev_priv, plane, pipe); |
3748 | intel_disable_pipe(dev_priv, pipe); | 3772 | intel_disable_pipe(dev_priv, pipe); |
3749 | 3773 | ||
3750 | /* Disable pannel fitter if it is on this pipe. */ | 3774 | i9xx_pfit_disable(intel_crtc); |
3751 | pctl = I915_READ(PFIT_CONTROL); | ||
3752 | if ((pctl & PFIT_ENABLE) && | ||
3753 | ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) | ||
3754 | I915_WRITE(PFIT_CONTROL, 0); | ||
3755 | 3775 | ||
3756 | intel_disable_pll(dev_priv, pipe); | 3776 | intel_disable_pll(dev_priv, pipe); |
3757 | 3777 | ||
@@ -3983,9 +4003,9 @@ static bool intel_crtc_compute_config(struct drm_crtc *crtc, | |||
3983 | adjusted_mode->hsync_start == adjusted_mode->hdisplay) | 4003 | adjusted_mode->hsync_start == adjusted_mode->hdisplay) |
3984 | return false; | 4004 | return false; |
3985 | 4005 | ||
3986 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10) { | 4006 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) { |
3987 | pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */ | 4007 | pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */ |
3988 | } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8) { | 4008 | } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) { |
3989 | /* only a 8bpc pipe, with 6bpc dither through the panel fitter | 4009 | /* only a 8bpc pipe, with 6bpc dither through the panel fitter |
3990 | * for lvds. */ | 4010 | * for lvds. */ |
3991 | pipe_config->pipe_bpp = 8*3; | 4011 | pipe_config->pipe_bpp = 8*3; |
@@ -4474,7 +4494,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, | |||
4474 | struct drm_device *dev = intel_crtc->base.dev; | 4494 | struct drm_device *dev = intel_crtc->base.dev; |
4475 | struct drm_i915_private *dev_priv = dev->dev_private; | 4495 | struct drm_i915_private *dev_priv = dev->dev_private; |
4476 | enum pipe pipe = intel_crtc->pipe; | 4496 | enum pipe pipe = intel_crtc->pipe; |
4477 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 4497 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
4478 | uint32_t vsyncshift; | 4498 | uint32_t vsyncshift; |
4479 | 4499 | ||
4480 | if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | 4500 | if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
@@ -4956,13 +4976,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
4956 | tmp |= (0x12 << 24); | 4976 | tmp |= (0x12 << 24); |
4957 | intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); | 4977 | intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); |
4958 | 4978 | ||
4959 | if (!is_sdv) { | ||
4960 | tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY); | ||
4961 | tmp &= ~(0x3 << 6); | ||
4962 | tmp |= (1 << 6) | (1 << 0); | ||
4963 | intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY); | ||
4964 | } | ||
4965 | |||
4966 | if (is_sdv) { | 4979 | if (is_sdv) { |
4967 | tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); | 4980 | tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); |
4968 | tmp |= 0x7FFF; | 4981 | tmp |= 0x7FFF; |
@@ -5223,7 +5236,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc, | |||
5223 | { | 5236 | { |
5224 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; | 5237 | struct drm_i915_private *dev_priv = crtc->dev->dev_private; |
5225 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5238 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5226 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 5239 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
5227 | uint32_t val; | 5240 | uint32_t val; |
5228 | 5241 | ||
5229 | val = I915_READ(PIPECONF(cpu_transcoder)); | 5242 | val = I915_READ(PIPECONF(cpu_transcoder)); |
@@ -5417,7 +5430,7 @@ void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, | |||
5417 | struct drm_device *dev = crtc->base.dev; | 5430 | struct drm_device *dev = crtc->base.dev; |
5418 | struct drm_i915_private *dev_priv = dev->dev_private; | 5431 | struct drm_i915_private *dev_priv = dev->dev_private; |
5419 | int pipe = crtc->pipe; | 5432 | int pipe = crtc->pipe; |
5420 | enum transcoder transcoder = crtc->cpu_transcoder; | 5433 | enum transcoder transcoder = crtc->config.cpu_transcoder; |
5421 | 5434 | ||
5422 | if (INTEL_INFO(dev)->gen >= 5) { | 5435 | if (INTEL_INFO(dev)->gen >= 5) { |
5423 | I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); | 5436 | I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); |
@@ -5469,7 +5482,8 @@ static void ironlake_fdi_set_m_n(struct drm_crtc *crtc) | |||
5469 | } | 5482 | } |
5470 | 5483 | ||
5471 | static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, | 5484 | static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, |
5472 | intel_clock_t *clock, u32 fp) | 5485 | intel_clock_t *clock, u32 *fp, |
5486 | intel_clock_t *reduced_clock, u32 *fp2) | ||
5473 | { | 5487 | { |
5474 | struct drm_crtc *crtc = &intel_crtc->base; | 5488 | struct drm_crtc *crtc = &intel_crtc->base; |
5475 | struct drm_device *dev = crtc->dev; | 5489 | struct drm_device *dev = crtc->dev; |
@@ -5503,13 +5517,16 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, | |||
5503 | if (is_lvds) { | 5517 | if (is_lvds) { |
5504 | if ((intel_panel_use_ssc(dev_priv) && | 5518 | if ((intel_panel_use_ssc(dev_priv) && |
5505 | dev_priv->lvds_ssc_freq == 100) || | 5519 | dev_priv->lvds_ssc_freq == 100) || |
5506 | intel_is_dual_link_lvds(dev)) | 5520 | (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) |
5507 | factor = 25; | 5521 | factor = 25; |
5508 | } else if (is_sdvo && is_tv) | 5522 | } else if (is_sdvo && is_tv) |
5509 | factor = 20; | 5523 | factor = 20; |
5510 | 5524 | ||
5511 | if (clock->m < factor * clock->n) | 5525 | if (clock->m < factor * clock->n) |
5512 | fp |= FP_CB_TUNE; | 5526 | *fp |= FP_CB_TUNE; |
5527 | |||
5528 | if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) | ||
5529 | *fp2 |= FP_CB_TUNE; | ||
5513 | 5530 | ||
5514 | dpll = 0; | 5531 | dpll = 0; |
5515 | 5532 | ||
@@ -5596,7 +5613,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5596 | WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), | 5613 | WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), |
5597 | "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); | 5614 | "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); |
5598 | 5615 | ||
5599 | intel_crtc->cpu_transcoder = pipe; | 5616 | intel_crtc->config.cpu_transcoder = pipe; |
5600 | 5617 | ||
5601 | ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, | 5618 | ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, |
5602 | &has_reduced_clock, &reduced_clock); | 5619 | &has_reduced_clock, &reduced_clock); |
@@ -5626,7 +5643,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5626 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | 5643 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
5627 | reduced_clock.m2; | 5644 | reduced_clock.m2; |
5628 | 5645 | ||
5629 | dpll = ironlake_compute_dpll(intel_crtc, &clock, fp); | 5646 | dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock, |
5647 | has_reduced_clock ? &fp2 : NULL); | ||
5630 | 5648 | ||
5631 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); | 5649 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5632 | drm_mode_debug_printmodeline(mode); | 5650 | drm_mode_debug_printmodeline(mode); |
@@ -5779,9 +5797,9 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, | |||
5779 | } | 5797 | } |
5780 | 5798 | ||
5781 | if (is_cpu_edp) | 5799 | if (is_cpu_edp) |
5782 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; | 5800 | intel_crtc->config.cpu_transcoder = TRANSCODER_EDP; |
5783 | else | 5801 | else |
5784 | intel_crtc->cpu_transcoder = pipe; | 5802 | intel_crtc->config.cpu_transcoder = pipe; |
5785 | 5803 | ||
5786 | /* We are not sure yet this won't happen. */ | 5804 | /* We are not sure yet this won't happen. */ |
5787 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", | 5805 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", |
@@ -5790,7 +5808,7 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, | |||
5790 | WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", | 5808 | WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", |
5791 | num_connectors, pipe_name(pipe)); | 5809 | num_connectors, pipe_name(pipe)); |
5792 | 5810 | ||
5793 | WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) & | 5811 | WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) & |
5794 | (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); | 5812 | (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE)); |
5795 | 5813 | ||
5796 | WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); | 5814 | WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE); |
@@ -5841,7 +5859,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
5841 | struct drm_i915_private *dev_priv = dev->dev_private; | 5859 | struct drm_i915_private *dev_priv = dev->dev_private; |
5842 | uint32_t tmp; | 5860 | uint32_t tmp; |
5843 | 5861 | ||
5844 | tmp = I915_READ(PIPECONF(crtc->cpu_transcoder)); | 5862 | tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder)); |
5845 | if (!(tmp & PIPECONF_ENABLE)) | 5863 | if (!(tmp & PIPECONF_ENABLE)) |
5846 | return false; | 5864 | return false; |
5847 | 5865 | ||
@@ -6809,7 +6827,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
6809 | { | 6827 | { |
6810 | struct drm_i915_private *dev_priv = dev->dev_private; | 6828 | struct drm_i915_private *dev_priv = dev->dev_private; |
6811 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 6829 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6812 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 6830 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
6813 | struct drm_display_mode *mode; | 6831 | struct drm_display_mode *mode; |
6814 | int htot = I915_READ(HTOTAL(cpu_transcoder)); | 6832 | int htot = I915_READ(HTOTAL(cpu_transcoder)); |
6815 | int hsync = I915_READ(HSYNC(cpu_transcoder)); | 6833 | int hsync = I915_READ(HSYNC(cpu_transcoder)); |
@@ -7708,22 +7726,25 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, | |||
7708 | if (crtc->enabled) | 7726 | if (crtc->enabled) |
7709 | *prepare_pipes |= 1 << intel_crtc->pipe; | 7727 | *prepare_pipes |= 1 << intel_crtc->pipe; |
7710 | 7728 | ||
7711 | /* We only support modeset on one single crtc, hence we need to do that | 7729 | /* |
7712 | * only for the passed in crtc iff we change anything else than just | 7730 | * For simplicity do a full modeset on any pipe where the output routing |
7713 | * disable crtcs. | 7731 | * changed. We could be more clever, but that would require us to be |
7714 | * | 7732 | * more careful with calling the relevant encoder->mode_set functions. |
7715 | * This is actually not true, to be fully compatible with the old crtc | 7733 | */ |
7716 | * helper we automatically disable _any_ output (i.e. doesn't need to be | ||
7717 | * connected to the crtc we're modesetting on) if it's disconnected. | ||
7718 | * Which is a rather nutty api (since changed the output configuration | ||
7719 | * without userspace's explicit request can lead to confusion), but | ||
7720 | * alas. Hence we currently need to modeset on all pipes we prepare. */ | ||
7721 | if (*prepare_pipes) | 7734 | if (*prepare_pipes) |
7722 | *modeset_pipes = *prepare_pipes; | 7735 | *modeset_pipes = *prepare_pipes; |
7723 | 7736 | ||
7724 | /* ... and mask these out. */ | 7737 | /* ... and mask these out. */ |
7725 | *modeset_pipes &= ~(*disable_pipes); | 7738 | *modeset_pipes &= ~(*disable_pipes); |
7726 | *prepare_pipes &= ~(*disable_pipes); | 7739 | *prepare_pipes &= ~(*disable_pipes); |
7740 | |||
7741 | /* | ||
7742 | * HACK: We don't (yet) fully support global modesets. intel_set_config | ||
7743 | * obies this rule, but the modeset restore mode of | ||
7744 | * intel_modeset_setup_hw_state does not. | ||
7745 | */ | ||
7746 | *modeset_pipes &= 1 << intel_crtc->pipe; | ||
7747 | *prepare_pipes &= 1 << intel_crtc->pipe; | ||
7727 | } | 7748 | } |
7728 | 7749 | ||
7729 | static bool intel_crtc_in_use(struct drm_crtc *crtc) | 7750 | static bool intel_crtc_in_use(struct drm_crtc *crtc) |
@@ -7916,9 +7937,9 @@ intel_modeset_check_state(struct drm_device *dev) | |||
7916 | } | 7937 | } |
7917 | } | 7938 | } |
7918 | 7939 | ||
7919 | int intel_set_mode(struct drm_crtc *crtc, | 7940 | static int __intel_set_mode(struct drm_crtc *crtc, |
7920 | struct drm_display_mode *mode, | 7941 | struct drm_display_mode *mode, |
7921 | int x, int y, struct drm_framebuffer *fb) | 7942 | int x, int y, struct drm_framebuffer *fb) |
7922 | { | 7943 | { |
7923 | struct drm_device *dev = crtc->dev; | 7944 | struct drm_device *dev = crtc->dev; |
7924 | drm_i915_private_t *dev_priv = dev->dev_private; | 7945 | drm_i915_private_t *dev_priv = dev->dev_private; |
@@ -7969,10 +7990,12 @@ int intel_set_mode(struct drm_crtc *crtc, | |||
7969 | * to set it here already despite that we pass it down the callchain. | 7990 | * to set it here already despite that we pass it down the callchain. |
7970 | */ | 7991 | */ |
7971 | if (modeset_pipes) { | 7992 | if (modeset_pipes) { |
7993 | enum transcoder tmp = to_intel_crtc(crtc)->config.cpu_transcoder; | ||
7972 | crtc->mode = *mode; | 7994 | crtc->mode = *mode; |
7973 | /* mode_set/enable/disable functions rely on a correct pipe | 7995 | /* mode_set/enable/disable functions rely on a correct pipe |
7974 | * config. */ | 7996 | * config. */ |
7975 | to_intel_crtc(crtc)->config = *pipe_config; | 7997 | to_intel_crtc(crtc)->config = *pipe_config; |
7998 | to_intel_crtc(crtc)->config.cpu_transcoder = tmp; | ||
7976 | } | 7999 | } |
7977 | 8000 | ||
7978 | /* Only after disabling all output pipelines that will be changed can we | 8001 | /* Only after disabling all output pipelines that will be changed can we |
@@ -8012,8 +8035,6 @@ done: | |||
8012 | if (ret && crtc->enabled) { | 8035 | if (ret && crtc->enabled) { |
8013 | crtc->hwmode = *saved_hwmode; | 8036 | crtc->hwmode = *saved_hwmode; |
8014 | crtc->mode = *saved_mode; | 8037 | crtc->mode = *saved_mode; |
8015 | } else { | ||
8016 | intel_modeset_check_state(dev); | ||
8017 | } | 8038 | } |
8018 | 8039 | ||
8019 | out: | 8040 | out: |
@@ -8022,6 +8043,20 @@ out: | |||
8022 | return ret; | 8043 | return ret; |
8023 | } | 8044 | } |
8024 | 8045 | ||
8046 | int intel_set_mode(struct drm_crtc *crtc, | ||
8047 | struct drm_display_mode *mode, | ||
8048 | int x, int y, struct drm_framebuffer *fb) | ||
8049 | { | ||
8050 | int ret; | ||
8051 | |||
8052 | ret = __intel_set_mode(crtc, mode, x, y, fb); | ||
8053 | |||
8054 | if (ret == 0) | ||
8055 | intel_modeset_check_state(crtc->dev); | ||
8056 | |||
8057 | return ret; | ||
8058 | } | ||
8059 | |||
8025 | void intel_crtc_restore_mode(struct drm_crtc *crtc) | 8060 | void intel_crtc_restore_mode(struct drm_crtc *crtc) |
8026 | { | 8061 | { |
8027 | intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); | 8062 | intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); |
@@ -8371,7 +8406,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
8371 | /* Swap pipes & planes for FBC on pre-965 */ | 8406 | /* Swap pipes & planes for FBC on pre-965 */ |
8372 | intel_crtc->pipe = pipe; | 8407 | intel_crtc->pipe = pipe; |
8373 | intel_crtc->plane = pipe; | 8408 | intel_crtc->plane = pipe; |
8374 | intel_crtc->cpu_transcoder = pipe; | 8409 | intel_crtc->config.cpu_transcoder = pipe; |
8375 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { | 8410 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
8376 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); | 8411 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
8377 | intel_crtc->plane = !pipe; | 8412 | intel_crtc->plane = !pipe; |
@@ -8462,7 +8497,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
8462 | I915_WRITE(PFIT_CONTROL, 0); | 8497 | I915_WRITE(PFIT_CONTROL, 0); |
8463 | } | 8498 | } |
8464 | 8499 | ||
8465 | if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) | 8500 | if (!IS_ULT(dev)) |
8466 | intel_crt_init(dev); | 8501 | intel_crt_init(dev); |
8467 | 8502 | ||
8468 | if (HAS_DDI(dev)) { | 8503 | if (HAS_DDI(dev)) { |
@@ -8991,6 +9026,9 @@ void intel_modeset_init(struct drm_device *dev) | |||
8991 | 9026 | ||
8992 | intel_init_pm(dev); | 9027 | intel_init_pm(dev); |
8993 | 9028 | ||
9029 | if (INTEL_INFO(dev)->num_pipes == 0) | ||
9030 | return; | ||
9031 | |||
8994 | intel_init_display(dev); | 9032 | intel_init_display(dev); |
8995 | 9033 | ||
8996 | if (IS_GEN2(dev)) { | 9034 | if (IS_GEN2(dev)) { |
@@ -9093,7 +9131,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) | |||
9093 | u32 reg; | 9131 | u32 reg; |
9094 | 9132 | ||
9095 | /* Clear any frame start delays used for debugging left by the BIOS */ | 9133 | /* Clear any frame start delays used for debugging left by the BIOS */ |
9096 | reg = PIPECONF(crtc->cpu_transcoder); | 9134 | reg = PIPECONF(crtc->config.cpu_transcoder); |
9097 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); | 9135 | I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
9098 | 9136 | ||
9099 | /* We need to sanitize the plane -> pipe mapping first because this will | 9137 | /* We need to sanitize the plane -> pipe mapping first because this will |
@@ -9259,7 +9297,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
9259 | } | 9297 | } |
9260 | 9298 | ||
9261 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | 9299 | crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
9262 | crtc->cpu_transcoder = TRANSCODER_EDP; | 9300 | crtc->config.cpu_transcoder = TRANSCODER_EDP; |
9263 | 9301 | ||
9264 | DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", | 9302 | DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n", |
9265 | pipe_name(pipe)); | 9303 | pipe_name(pipe)); |
@@ -9269,7 +9307,10 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
9269 | setup_pipes: | 9307 | setup_pipes: |
9270 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, | 9308 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
9271 | base.head) { | 9309 | base.head) { |
9310 | enum transcoder tmp = crtc->config.cpu_transcoder; | ||
9272 | memset(&crtc->config, 0, sizeof(crtc->config)); | 9311 | memset(&crtc->config, 0, sizeof(crtc->config)); |
9312 | crtc->config.cpu_transcoder = tmp; | ||
9313 | |||
9273 | crtc->active = dev_priv->display.get_pipe_config(crtc, | 9314 | crtc->active = dev_priv->display.get_pipe_config(crtc, |
9274 | &crtc->config); | 9315 | &crtc->config); |
9275 | 9316 | ||
@@ -9330,10 +9371,16 @@ setup_pipes: | |||
9330 | } | 9371 | } |
9331 | 9372 | ||
9332 | if (force_restore) { | 9373 | if (force_restore) { |
9374 | /* | ||
9375 | * We need to use raw interfaces for restoring state to avoid | ||
9376 | * checking (bogus) intermediate states. | ||
9377 | */ | ||
9333 | for_each_pipe(pipe) { | 9378 | for_each_pipe(pipe) { |
9334 | struct drm_crtc *crtc = | 9379 | struct drm_crtc *crtc = |
9335 | dev_priv->pipe_to_crtc_mapping[pipe]; | 9380 | dev_priv->pipe_to_crtc_mapping[pipe]; |
9336 | intel_crtc_restore_mode(crtc); | 9381 | |
9382 | __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, | ||
9383 | crtc->fb); | ||
9337 | } | 9384 | } |
9338 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) | 9385 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) |
9339 | intel_plane_restore(plane); | 9386 | intel_plane_restore(plane); |
@@ -9398,6 +9445,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
9398 | /* flush any delayed tasks or pending work */ | 9445 | /* flush any delayed tasks or pending work */ |
9399 | flush_scheduled_work(); | 9446 | flush_scheduled_work(); |
9400 | 9447 | ||
9448 | /* destroy backlight, if any, before the connectors */ | ||
9449 | intel_panel_destroy_backlight(dev); | ||
9450 | |||
9401 | drm_mode_config_cleanup(dev); | 9451 | drm_mode_config_cleanup(dev); |
9402 | 9452 | ||
9403 | intel_cleanup_overlay(dev); | 9453 | intel_cleanup_overlay(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index b30e82b98439..a3288376ac71 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -353,10 +353,14 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
353 | aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ | 353 | aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ |
354 | else | 354 | else |
355 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 355 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ |
356 | } else if (HAS_PCH_SPLIT(dev)) | 356 | } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
357 | /* Workaround for non-ULT HSW */ | ||
358 | aux_clock_divider = 74; | ||
359 | } else if (HAS_PCH_SPLIT(dev)) { | ||
357 | aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); | 360 | aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
358 | else | 361 | } else { |
359 | aux_clock_divider = intel_hrawclk(dev) / 2; | 362 | aux_clock_divider = intel_hrawclk(dev) / 2; |
363 | } | ||
360 | 364 | ||
361 | if (IS_GEN6(dev)) | 365 | if (IS_GEN6(dev)) |
362 | precharge = 3; | 366 | precharge = 3; |
@@ -2470,17 +2474,14 @@ done: | |||
2470 | static void | 2474 | static void |
2471 | intel_dp_destroy(struct drm_connector *connector) | 2475 | intel_dp_destroy(struct drm_connector *connector) |
2472 | { | 2476 | { |
2473 | struct drm_device *dev = connector->dev; | ||
2474 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 2477 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
2475 | struct intel_connector *intel_connector = to_intel_connector(connector); | 2478 | struct intel_connector *intel_connector = to_intel_connector(connector); |
2476 | 2479 | ||
2477 | if (!IS_ERR_OR_NULL(intel_connector->edid)) | 2480 | if (!IS_ERR_OR_NULL(intel_connector->edid)) |
2478 | kfree(intel_connector->edid); | 2481 | kfree(intel_connector->edid); |
2479 | 2482 | ||
2480 | if (is_edp(intel_dp)) { | 2483 | if (is_edp(intel_dp)) |
2481 | intel_panel_destroy_backlight(dev); | ||
2482 | intel_panel_fini(&intel_connector->panel); | 2484 | intel_panel_fini(&intel_connector->panel); |
2483 | } | ||
2484 | 2485 | ||
2485 | drm_sysfs_connector_remove(connector); | 2486 | drm_sysfs_connector_remove(connector); |
2486 | drm_connector_cleanup(connector); | 2487 | drm_connector_cleanup(connector); |
@@ -2789,7 +2790,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
2789 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); | 2790 | drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); |
2790 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); | 2791 | drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); |
2791 | 2792 | ||
2792 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
2793 | connector->interlace_allowed = true; | 2793 | connector->interlace_allowed = true; |
2794 | connector->doublescan_allowed = 0; | 2794 | connector->doublescan_allowed = 0; |
2795 | 2795 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d7bd031dd642..b5b6d19e6dd3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -171,6 +171,10 @@ struct intel_connector { | |||
171 | 171 | ||
172 | /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ | 172 | /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ |
173 | struct edid *edid; | 173 | struct edid *edid; |
174 | |||
175 | /* since POLL and HPD connectors may use the same HPD line keep the native | ||
176 | state of connector->polled in case hotplug storm detection changes it */ | ||
177 | u8 polled; | ||
174 | }; | 178 | }; |
175 | 179 | ||
176 | struct intel_crtc_config { | 180 | struct intel_crtc_config { |
@@ -184,6 +188,10 @@ struct intel_crtc_config { | |||
184 | * between pch encoders and cpu encoders. */ | 188 | * between pch encoders and cpu encoders. */ |
185 | bool has_pch_encoder; | 189 | bool has_pch_encoder; |
186 | 190 | ||
191 | /* CPU Transcoder for the pipe. Currently this can only differ from the | ||
192 | * pipe on Haswell (where we have a special eDP transcoder). */ | ||
193 | enum transcoder cpu_transcoder; | ||
194 | |||
187 | /* | 195 | /* |
188 | * Use reduced/limited/broadcast rbg range, compressing from the full | 196 | * Use reduced/limited/broadcast rbg range, compressing from the full |
189 | * range fed into the crtcs. | 197 | * range fed into the crtcs. |
@@ -222,7 +230,6 @@ struct intel_crtc { | |||
222 | struct drm_crtc base; | 230 | struct drm_crtc base; |
223 | enum pipe pipe; | 231 | enum pipe pipe; |
224 | enum plane plane; | 232 | enum plane plane; |
225 | enum transcoder cpu_transcoder; | ||
226 | u8 lut_r[256], lut_g[256], lut_b[256]; | 233 | u8 lut_r[256], lut_g[256], lut_b[256]; |
227 | /* | 234 | /* |
228 | * Whether the crtc and the connected output pipeline is active. Implies | 235 | * Whether the crtc and the connected output pipeline is active. Implies |
@@ -693,6 +700,7 @@ extern void intel_update_fbc(struct drm_device *dev); | |||
693 | extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 700 | extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
694 | extern void intel_gpu_ips_teardown(void); | 701 | extern void intel_gpu_ips_teardown(void); |
695 | 702 | ||
703 | extern bool intel_using_power_well(struct drm_device *dev); | ||
696 | extern void intel_init_power_well(struct drm_device *dev); | 704 | extern void intel_init_power_well(struct drm_device *dev); |
697 | extern void intel_set_power_well(struct drm_device *dev, bool enable); | 705 | extern void intel_set_power_well(struct drm_device *dev, bool enable); |
698 | extern void intel_enable_gt_powersave(struct drm_device *dev); | 706 | extern void intel_enable_gt_powersave(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 8d81c929b7b5..0e19e575a1b4 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -283,6 +283,9 @@ void intel_fb_restore_mode(struct drm_device *dev) | |||
283 | struct drm_mode_config *config = &dev->mode_config; | 283 | struct drm_mode_config *config = &dev->mode_config; |
284 | struct drm_plane *plane; | 284 | struct drm_plane *plane; |
285 | 285 | ||
286 | if (INTEL_INFO(dev)->num_pipes == 0) | ||
287 | return; | ||
288 | |||
286 | drm_modeset_lock_all(dev); | 289 | drm_modeset_lock_all(dev); |
287 | 290 | ||
288 | ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); | 291 | ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ee4a8da8311e..3e6a3ef10d5c 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -294,8 +294,8 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, | |||
294 | struct drm_device *dev = encoder->dev; | 294 | struct drm_device *dev = encoder->dev; |
295 | struct drm_i915_private *dev_priv = dev->dev_private; | 295 | struct drm_i915_private *dev_priv = dev->dev_private; |
296 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 296 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
297 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder); | 297 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); |
298 | u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->cpu_transcoder); | 298 | u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder); |
299 | unsigned int i, len = DIP_HEADER_SIZE + frame->len; | 299 | unsigned int i, len = DIP_HEADER_SIZE + frame->len; |
300 | u32 val = I915_READ(ctl_reg); | 300 | u32 val = I915_READ(ctl_reg); |
301 | 301 | ||
@@ -570,7 +570,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, | |||
570 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; | 570 | struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
571 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 571 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
572 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 572 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
573 | u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->cpu_transcoder); | 573 | u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); |
574 | u32 val = I915_READ(reg); | 574 | u32 val = I915_READ(reg); |
575 | 575 | ||
576 | assert_hdmi_port_disabled(intel_hdmi); | 576 | assert_hdmi_port_disabled(intel_hdmi); |
@@ -998,7 +998,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
998 | DRM_MODE_CONNECTOR_HDMIA); | 998 | DRM_MODE_CONNECTOR_HDMIA); |
999 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); | 999 | drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); |
1000 | 1000 | ||
1001 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
1002 | connector->interlace_allowed = 1; | 1001 | connector->interlace_allowed = 1; |
1003 | connector->doublescan_allowed = 0; | 1002 | connector->doublescan_allowed = 0; |
1004 | 1003 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index ef4744e1bf0b..5d245031e391 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -522,7 +522,9 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
522 | struct drm_i915_private *dev_priv = dev->dev_private; | 522 | struct drm_i915_private *dev_priv = dev->dev_private; |
523 | int ret, i; | 523 | int ret, i; |
524 | 524 | ||
525 | if (HAS_PCH_SPLIT(dev)) | 525 | if (HAS_PCH_NOP(dev)) |
526 | return 0; | ||
527 | else if (HAS_PCH_SPLIT(dev)) | ||
526 | dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; | 528 | dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; |
527 | else if (IS_VALLEYVIEW(dev)) | 529 | else if (IS_VALLEYVIEW(dev)) |
528 | dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; | 530 | dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index ca2d903c19bb..f36f1baabd5a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -631,7 +631,6 @@ static void intel_lvds_destroy(struct drm_connector *connector) | |||
631 | if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) | 631 | if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) |
632 | kfree(lvds_connector->base.edid); | 632 | kfree(lvds_connector->base.edid); |
633 | 633 | ||
634 | intel_panel_destroy_backlight(connector->dev); | ||
635 | intel_panel_fini(&lvds_connector->base.panel); | 634 | intel_panel_fini(&lvds_connector->base.panel); |
636 | 635 | ||
637 | drm_sysfs_connector_remove(connector); | 636 | drm_sysfs_connector_remove(connector); |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 7874cecc2863..eb5e6e95f3c7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -428,6 +428,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector) | |||
428 | 428 | ||
429 | intel_panel_init_backlight(dev); | 429 | intel_panel_init_backlight(dev); |
430 | 430 | ||
431 | if (WARN_ON(dev_priv->backlight.device)) | ||
432 | return -ENODEV; | ||
433 | |||
431 | memset(&props, 0, sizeof(props)); | 434 | memset(&props, 0, sizeof(props)); |
432 | props.type = BACKLIGHT_RAW; | 435 | props.type = BACKLIGHT_RAW; |
433 | props.brightness = dev_priv->backlight.level; | 436 | props.brightness = dev_priv->backlight.level; |
@@ -453,8 +456,10 @@ int intel_panel_setup_backlight(struct drm_connector *connector) | |||
453 | void intel_panel_destroy_backlight(struct drm_device *dev) | 456 | void intel_panel_destroy_backlight(struct drm_device *dev) |
454 | { | 457 | { |
455 | struct drm_i915_private *dev_priv = dev->dev_private; | 458 | struct drm_i915_private *dev_priv = dev->dev_private; |
456 | if (dev_priv->backlight.device) | 459 | if (dev_priv->backlight.device) { |
457 | backlight_device_unregister(dev_priv->backlight.device); | 460 | backlight_device_unregister(dev_priv->backlight.device); |
461 | dev_priv->backlight.device = NULL; | ||
462 | } | ||
458 | } | 463 | } |
459 | #else | 464 | #else |
460 | int intel_panel_setup_backlight(struct drm_connector *connector) | 465 | int intel_panel_setup_backlight(struct drm_connector *connector) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 13a0666a53b4..e34ad9642519 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2558,8 +2558,8 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2558 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); | 2558 | rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
2559 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | 2559 | gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); |
2560 | 2560 | ||
2561 | /* In units of 100MHz */ | 2561 | /* In units of 50MHz */ |
2562 | dev_priv->rps.max_delay = rp_state_cap & 0xff; | 2562 | dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; |
2563 | dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; | 2563 | dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; |
2564 | dev_priv->rps.cur_delay = 0; | 2564 | dev_priv->rps.cur_delay = 0; |
2565 | 2565 | ||
@@ -2643,10 +2643,10 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
2643 | pcu_mbox = 0; | 2643 | pcu_mbox = 0; |
2644 | ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); | 2644 | ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); |
2645 | if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ | 2645 | if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ |
2646 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max from %dMHz to %dMHz\n", | 2646 | DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", |
2647 | (dev_priv->rps.max_delay & 0xff) * 50, | 2647 | (dev_priv->rps.max_delay & 0xff) * 50, |
2648 | (pcu_mbox & 0xff) * 50); | 2648 | (pcu_mbox & 0xff) * 50); |
2649 | dev_priv->rps.max_delay = pcu_mbox & 0xff; | 2649 | dev_priv->rps.hw_max = pcu_mbox & 0xff; |
2650 | } | 2650 | } |
2651 | } else { | 2651 | } else { |
2652 | DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); | 2652 | DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); |
@@ -2684,8 +2684,8 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2684 | { | 2684 | { |
2685 | struct drm_i915_private *dev_priv = dev->dev_private; | 2685 | struct drm_i915_private *dev_priv = dev->dev_private; |
2686 | int min_freq = 15; | 2686 | int min_freq = 15; |
2687 | int gpu_freq; | 2687 | unsigned int gpu_freq; |
2688 | unsigned int ia_freq, max_ia_freq; | 2688 | unsigned int max_ia_freq, min_ring_freq; |
2689 | int scaling_factor = 180; | 2689 | int scaling_factor = 180; |
2690 | 2690 | ||
2691 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 2691 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
@@ -2701,6 +2701,10 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2701 | /* Convert from kHz to MHz */ | 2701 | /* Convert from kHz to MHz */ |
2702 | max_ia_freq /= 1000; | 2702 | max_ia_freq /= 1000; |
2703 | 2703 | ||
2704 | min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK); | ||
2705 | /* convert DDR frequency from units of 133.3MHz to bandwidth */ | ||
2706 | min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3; | ||
2707 | |||
2704 | /* | 2708 | /* |
2705 | * For each potential GPU frequency, load a ring frequency we'd like | 2709 | * For each potential GPU frequency, load a ring frequency we'd like |
2706 | * to use for memory access. We do this by specifying the IA frequency | 2710 | * to use for memory access. We do this by specifying the IA frequency |
@@ -2709,21 +2713,32 @@ static void gen6_update_ring_freq(struct drm_device *dev) | |||
2709 | for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; | 2713 | for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay; |
2710 | gpu_freq--) { | 2714 | gpu_freq--) { |
2711 | int diff = dev_priv->rps.max_delay - gpu_freq; | 2715 | int diff = dev_priv->rps.max_delay - gpu_freq; |
2712 | 2716 | unsigned int ia_freq = 0, ring_freq = 0; | |
2713 | /* | 2717 | |
2714 | * For GPU frequencies less than 750MHz, just use the lowest | 2718 | if (IS_HASWELL(dev)) { |
2715 | * ring freq. | 2719 | ring_freq = (gpu_freq * 5 + 3) / 4; |
2716 | */ | 2720 | ring_freq = max(min_ring_freq, ring_freq); |
2717 | if (gpu_freq < min_freq) | 2721 | /* leave ia_freq as the default, chosen by cpufreq */ |
2718 | ia_freq = 800; | 2722 | } else { |
2719 | else | 2723 | /* On older processors, there is no separate ring |
2720 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); | 2724 | * clock domain, so in order to boost the bandwidth |
2721 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); | 2725 | * of the ring, we need to upclock the CPU (ia_freq). |
2722 | ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT; | 2726 | * |
2727 | * For GPU frequencies less than 750MHz, | ||
2728 | * just use the lowest ring freq. | ||
2729 | */ | ||
2730 | if (gpu_freq < min_freq) | ||
2731 | ia_freq = 800; | ||
2732 | else | ||
2733 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); | ||
2734 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); | ||
2735 | } | ||
2723 | 2736 | ||
2724 | sandybridge_pcode_write(dev_priv, | 2737 | sandybridge_pcode_write(dev_priv, |
2725 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE, | 2738 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE, |
2726 | ia_freq | gpu_freq); | 2739 | ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | |
2740 | ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | | ||
2741 | gpu_freq); | ||
2727 | } | 2742 | } |
2728 | } | 2743 | } |
2729 | 2744 | ||
@@ -3575,6 +3590,7 @@ static void cpt_init_clock_gating(struct drm_device *dev) | |||
3575 | { | 3590 | { |
3576 | struct drm_i915_private *dev_priv = dev->dev_private; | 3591 | struct drm_i915_private *dev_priv = dev->dev_private; |
3577 | int pipe; | 3592 | int pipe; |
3593 | uint32_t val; | ||
3578 | 3594 | ||
3579 | /* | 3595 | /* |
3580 | * On Ibex Peak and Cougar Point, we need to disable clock | 3596 | * On Ibex Peak and Cougar Point, we need to disable clock |
@@ -3587,8 +3603,17 @@ static void cpt_init_clock_gating(struct drm_device *dev) | |||
3587 | /* The below fixes the weird display corruption, a few pixels shifted | 3603 | /* The below fixes the weird display corruption, a few pixels shifted |
3588 | * downward, on (only) LVDS of some HP laptops with IVY. | 3604 | * downward, on (only) LVDS of some HP laptops with IVY. |
3589 | */ | 3605 | */ |
3590 | for_each_pipe(pipe) | 3606 | for_each_pipe(pipe) { |
3591 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE); | 3607 | val = I915_READ(TRANS_CHICKEN2(pipe)); |
3608 | val |= TRANS_CHICKEN2_TIMING_OVERRIDE; | ||
3609 | val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | ||
3610 | if (dev_priv->fdi_rx_polarity_inverted) | ||
3611 | val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; | ||
3612 | val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; | ||
3613 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; | ||
3614 | val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; | ||
3615 | I915_WRITE(TRANS_CHICKEN2(pipe), val); | ||
3616 | } | ||
3592 | /* WADP0ClockGatingDisable */ | 3617 | /* WADP0ClockGatingDisable */ |
3593 | for_each_pipe(pipe) { | 3618 | for_each_pipe(pipe) { |
3594 | I915_WRITE(TRANS_CHICKEN1(pipe), | 3619 | I915_WRITE(TRANS_CHICKEN1(pipe), |
@@ -3890,7 +3915,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) | |||
3890 | snpcr |= GEN6_MBC_SNPCR_MED; | 3915 | snpcr |= GEN6_MBC_SNPCR_MED; |
3891 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); | 3916 | I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); |
3892 | 3917 | ||
3893 | cpt_init_clock_gating(dev); | 3918 | if (!HAS_PCH_NOP(dev)) |
3919 | cpt_init_clock_gating(dev); | ||
3894 | 3920 | ||
3895 | gen6_check_mch_setup(dev); | 3921 | gen6_check_mch_setup(dev); |
3896 | } | 3922 | } |
@@ -4084,6 +4110,22 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
4084 | dev_priv->display.init_clock_gating(dev); | 4110 | dev_priv->display.init_clock_gating(dev); |
4085 | } | 4111 | } |
4086 | 4112 | ||
4113 | /** | ||
4114 | * We should only use the power well if we explicitly asked the hardware to | ||
4115 | * enable it, so check if it's enabled and also check if we've requested it to | ||
4116 | * be enabled. | ||
4117 | */ | ||
4118 | bool intel_using_power_well(struct drm_device *dev) | ||
4119 | { | ||
4120 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4121 | |||
4122 | if (IS_HASWELL(dev)) | ||
4123 | return I915_READ(HSW_PWR_WELL_DRIVER) == | ||
4124 | (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE); | ||
4125 | else | ||
4126 | return true; | ||
4127 | } | ||
4128 | |||
4087 | void intel_set_power_well(struct drm_device *dev, bool enable) | 4129 | void intel_set_power_well(struct drm_device *dev, bool enable) |
4088 | { | 4130 | { |
4089 | struct drm_i915_private *dev_priv = dev->dev_private; | 4131 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -4190,7 +4232,6 @@ void intel_init_pm(struct drm_device *dev) | |||
4190 | } | 4232 | } |
4191 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; | 4233 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
4192 | } else if (IS_IVYBRIDGE(dev)) { | 4234 | } else if (IS_IVYBRIDGE(dev)) { |
4193 | /* FIXME: detect B0+ stepping and use auto training */ | ||
4194 | if (SNB_READ_WM0_LATENCY()) { | 4235 | if (SNB_READ_WM0_LATENCY()) { |
4195 | dev_priv->display.update_wm = ivybridge_update_wm; | 4236 | dev_priv->display.update_wm = ivybridge_update_wm; |
4196 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; | 4237 | dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 298dc85ec32c..a618a6a45a77 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1231,12 +1231,8 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) | |||
1231 | struct intel_sdvo_connector *intel_sdvo_connector = | 1231 | struct intel_sdvo_connector *intel_sdvo_connector = |
1232 | to_intel_sdvo_connector(&connector->base); | 1232 | to_intel_sdvo_connector(&connector->base); |
1233 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); | 1233 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); |
1234 | struct drm_i915_private *dev_priv = intel_sdvo->base.base.dev->dev_private; | ||
1235 | u16 active_outputs; | 1234 | u16 active_outputs; |
1236 | 1235 | ||
1237 | if (!(I915_READ(intel_sdvo->sdvo_reg) & SDVO_ENABLE)) | ||
1238 | return false; | ||
1239 | |||
1240 | intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); | 1236 | intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); |
1241 | 1237 | ||
1242 | if (active_outputs & intel_sdvo_connector->output_flag) | 1238 | if (active_outputs & intel_sdvo_connector->output_flag) |
@@ -1251,11 +1247,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, | |||
1251 | struct drm_device *dev = encoder->base.dev; | 1247 | struct drm_device *dev = encoder->base.dev; |
1252 | struct drm_i915_private *dev_priv = dev->dev_private; | 1248 | struct drm_i915_private *dev_priv = dev->dev_private; |
1253 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); | 1249 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); |
1250 | u16 active_outputs; | ||
1254 | u32 tmp; | 1251 | u32 tmp; |
1255 | 1252 | ||
1256 | tmp = I915_READ(intel_sdvo->sdvo_reg); | 1253 | tmp = I915_READ(intel_sdvo->sdvo_reg); |
1254 | intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); | ||
1257 | 1255 | ||
1258 | if (!(tmp & SDVO_ENABLE)) | 1256 | if (!(tmp & SDVO_ENABLE) && (active_outputs == 0)) |
1259 | return false; | 1257 | return false; |
1260 | 1258 | ||
1261 | if (HAS_PCH_CPT(dev)) | 1259 | if (HAS_PCH_CPT(dev)) |
@@ -2276,7 +2274,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2276 | connector = &intel_connector->base; | 2274 | connector = &intel_connector->base; |
2277 | if (intel_sdvo_get_hotplug_support(intel_sdvo) & | 2275 | if (intel_sdvo_get_hotplug_support(intel_sdvo) & |
2278 | intel_sdvo_connector->output_flag) { | 2276 | intel_sdvo_connector->output_flag) { |
2279 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
2280 | intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; | 2277 | intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; |
2281 | /* Some SDVO devices have one-shot hotplug interrupts. | 2278 | /* Some SDVO devices have one-shot hotplug interrupts. |
2282 | * Ensure that they get re-enabled when an interrupt happens. | 2279 | * Ensure that they get re-enabled when an interrupt happens. |
@@ -2284,7 +2281,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2284 | intel_encoder->hot_plug = intel_sdvo_enable_hotplug; | 2281 | intel_encoder->hot_plug = intel_sdvo_enable_hotplug; |
2285 | intel_sdvo_enable_hotplug(intel_encoder); | 2282 | intel_sdvo_enable_hotplug(intel_encoder); |
2286 | } else { | 2283 | } else { |
2287 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | 2284 | intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; |
2288 | } | 2285 | } |
2289 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2286 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2290 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2287 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
@@ -2353,7 +2350,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) | |||
2353 | 2350 | ||
2354 | intel_connector = &intel_sdvo_connector->base; | 2351 | intel_connector = &intel_sdvo_connector->base; |
2355 | connector = &intel_connector->base; | 2352 | connector = &intel_connector->base; |
2356 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 2353 | intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
2357 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; | 2354 | encoder->encoder_type = DRM_MODE_ENCODER_DAC; |
2358 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; | 2355 | connector->connector_type = DRM_MODE_CONNECTOR_VGA; |
2359 | 2356 | ||
@@ -2746,7 +2743,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) | |||
2746 | struct intel_sdvo *intel_sdvo; | 2743 | struct intel_sdvo *intel_sdvo; |
2747 | u32 hotplug_mask; | 2744 | u32 hotplug_mask; |
2748 | int i; | 2745 | int i; |
2749 | |||
2750 | intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); | 2746 | intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); |
2751 | if (!intel_sdvo) | 2747 | if (!intel_sdvo) |
2752 | return false; | 2748 | return false; |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 66737265200f..b945bc54207a 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1613,7 +1613,7 @@ intel_tv_init(struct drm_device *dev) | |||
1613 | * | 1613 | * |
1614 | * More recent chipsets favour HDMI rather than integrated S-Video. | 1614 | * More recent chipsets favour HDMI rather than integrated S-Video. |
1615 | */ | 1615 | */ |
1616 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 1616 | intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1617 | 1617 | ||
1618 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, | 1618 | drm_connector_init(dev, connector, &intel_tv_connector_funcs, |
1619 | DRM_MODE_CONNECTOR_SVIDEO); | 1619 | DRM_MODE_CONNECTOR_SVIDEO); |