author     Dave Airlie <airlied@gmail.com>            2013-08-07 04:09:03 -0400
committer  Dave Airlie <airlied@gmail.com>            2013-08-07 04:11:35 -0400
commit     32c913e4369ce7bd1d16a9b6983f7b8975c13f5a (patch)
tree       da5868a2b7e7c068d4b733420330a15001786365 /drivers/gpu/drm
parent     abf190351b49937335130970a99a0b4275402b5e (diff)
parent     cd234b0bfd5ab012e42274b24aae420fa1823d58 (diff)
Merge tag 'drm-intel-next-2013-07-26-fixed' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Neat that QA (and Ben) keeps on humming along while I'm on vacation, so
you already get the next feature pull request:
- proper eLLC support for HSW from Ben
- more interrupt refactoring
- add workaround (w/a) tags where we already implement them (Damien)
- hangcheck fixes (Chris) + hangcheck stats (Mika)
- flesh out the new vm structs for ppgtt and ggtt (Ben)
- PSR for Haswell, still disabled by default (Rodrigo et al.)
- pc8+ refclock sequence code from Paulo
- more interrupt refactoring from Paulo, unifying ilk/snb with the ivb/hsw
interrupt code
- full solution for the Haswell concurrent reg access issues (Chris)
- fix racy object accounting, used by some new leak tests
- fix sync polarity settings on ch7xxx dvo encoder
- random bits & pieces, little fixes, and better debug output all over
[airlied: fix conflict with drm_mm cleanups]
* tag 'drm-intel-next-2013-07-26-fixed' of git://people.freedesktop.org/~danvet/drm-intel: (289 commits)
drm/i915: Do not dereference NULL crtc or fb until after checking
drm/i915: fix pnv display core clock readout out
drm/i915: Replace open-coded offset_in_page()
drm/i915: Retry DP aux_ch communications with a different clock after failure
drm/i915: Add messages useful for HPD storm detection debugging (v2)
drm/i915: dvo_ch7xxx: fix vsync polarity setting
drm/i915: fix the racy object accounting
drm/i915: Convert the register access tracepoint to be conditional
drm/i915: Squash gen lookup through multiple indirections inside GT access
drm/i915: Use the common register access functions for NOTRACE variants
drm/i915: Use a private interface for register access within GT
drm/i915: Colocate all GT access routines in the same file
drm/i915: fix reference counting in i915_gem_create
drm/i915: Use Graphics Base of Stolen Memory on all gen3+
drm/i915: disable stolen mem for OVERLAY_NEEDS_PHYSICAL
drm/i915: add functions to disable and restore LCPLL
drm/i915: disable CLKOUT_DP when it's not needed
drm/i915: extend lpt_enable_clkout_dp
drm/i915: fix up error cleanup in i915_gem_object_bind_to_gtt
drm/i915: Add some debug breadcrumbs to connector detection
...
Diffstat (limited to 'drivers/gpu/drm')
46 files changed, 2547 insertions, 1528 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d820..6a647493ca7f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
677 | /* don't break so fail path works correct */ | 677 | /* don't break so fail path works correct */ |
678 | fail = 1; | 678 | fail = 1; |
679 | break; | 679 | break; |
680 | |||
681 | if (connector->dpms != DRM_MODE_DPMS_ON) { | ||
682 | DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); | ||
683 | mode_changed = true; | ||
684 | } | ||
680 | } | 685 | } |
681 | } | 686 | } |
682 | 687 | ||
@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
754 | ret = -EINVAL; | 759 | ret = -EINVAL; |
755 | goto fail; | 760 | goto fail; |
756 | } | 761 | } |
762 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
763 | for (i = 0; i < set->num_connectors; i++) { | ||
764 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
765 | drm_get_connector_name(set->connectors[i])); | ||
766 | set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); | ||
767 | } | ||
757 | } | 768 | } |
758 | drm_helper_disable_unused_functions(dev); | 769 | drm_helper_disable_unused_functions(dev); |
759 | } else if (fb_changed) { | 770 | } else if (fb_changed) { |
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
771 | } | 782 | } |
772 | } | 783 | } |
773 | 784 | ||
774 | /* | ||
775 | * crtc set_config helpers implicit set the crtc and all connected | ||
776 | * encoders to DPMS on for a full mode set. But for just an fb update it | ||
777 | * doesn't do that. To not confuse userspace, do an explicit DPMS_ON | ||
778 | * unconditionally. This will also ensure driver internal dpms state is | ||
779 | * consistent again. | ||
780 | */ | ||
781 | if (set->crtc->enabled) { | ||
782 | DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); | ||
783 | for (i = 0; i < set->num_connectors; i++) { | ||
784 | DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, | ||
785 | drm_get_connector_name(set->connectors[i])); | ||
786 | set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); | ||
787 | } | ||
788 | } | ||
789 | |||
790 | kfree(save_connectors); | 785 | kfree(save_connectors); |
791 | kfree(save_encoders); | 786 | kfree(save_encoders); |
792 | kfree(save_crtcs); | 787 | kfree(save_crtcs); |
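Taken together, the drm_crtc_helper.c hunks above move the unconditional "set every connector to DPMS on" loop from the tail of drm_crtc_helper_set_config() into the full-modeset path, and they upgrade a framebuffer-only update to a full mode set whenever a requested connector is not currently on. A minimal sketch of the resulting flow, simplified from the hunks above (not the helper's verbatim code):

    /* Connector scan: a dozing connector forces a full mode set. */
    if (connector->dpms != DRM_MODE_DPMS_ON) {
            DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
            mode_changed = true;
    }

    if (mode_changed) {
            /* ... drm_crtc_helper_set_mode(), error handling elided ... */
            for (i = 0; i < set->num_connectors; i++)
                    set->connectors[i]->funcs->dpms(set->connectors[i],
                                                    DRM_MODE_DPMS_ON);
            drm_helper_disable_unused_functions(dev);
    } else if (fb_changed) {
            /* framebuffer-only flip: connector DPMS state is left untouched */
    }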
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9d1da7cceb21..b8449a84a0dc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -38,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ | |||
38 | intel_sprite.o \ | 38 | intel_sprite.o \ |
39 | intel_opregion.o \ | 39 | intel_opregion.o \ |
40 | intel_sideband.o \ | 40 | intel_sideband.o \ |
41 | intel_uncore.o \ | ||
41 | dvo_ch7xxx.o \ | 42 | dvo_ch7xxx.o \ |
42 | dvo_ch7017.o \ | 43 | dvo_ch7017.o \ |
43 | dvo_ivch.o \ | 44 | dvo_ivch.o \ |
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 757e0fa11043..af42e94f6846 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo, | |||
307 | idf |= CH7xxx_IDF_HSP; | 307 | idf |= CH7xxx_IDF_HSP; |
308 | 308 | ||
309 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) | 309 | if (mode->flags & DRM_MODE_FLAG_PVSYNC) |
310 | idf |= CH7xxx_IDF_HSP; | 310 | idf |= CH7xxx_IDF_VSP; |
311 | 311 | ||
312 | ch7xxx_writeb(dvo, CH7xxx_IDF, idf); | 312 | ch7xxx_writeb(dvo, CH7xxx_IDF, idf); |
313 | } | 313 | } |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 86379799dab8..ed72fe08217c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -135,7 +135,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
135 | uintptr_t list = (uintptr_t) node->info_ent->data; | 135 | uintptr_t list = (uintptr_t) node->info_ent->data; |
136 | struct list_head *head; | 136 | struct list_head *head; |
137 | struct drm_device *dev = node->minor->dev; | 137 | struct drm_device *dev = node->minor->dev; |
138 | drm_i915_private_t *dev_priv = dev->dev_private; | 138 | struct drm_i915_private *dev_priv = dev->dev_private; |
139 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
139 | struct drm_i915_gem_object *obj; | 140 | struct drm_i915_gem_object *obj; |
140 | size_t total_obj_size, total_gtt_size; | 141 | size_t total_obj_size, total_gtt_size; |
141 | int count, ret; | 142 | int count, ret; |
@@ -147,11 +148,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
147 | switch (list) { | 148 | switch (list) { |
148 | case ACTIVE_LIST: | 149 | case ACTIVE_LIST: |
149 | seq_puts(m, "Active:\n"); | 150 | seq_puts(m, "Active:\n"); |
150 | head = &dev_priv->mm.active_list; | 151 | head = &vm->active_list; |
151 | break; | 152 | break; |
152 | case INACTIVE_LIST: | 153 | case INACTIVE_LIST: |
153 | seq_puts(m, "Inactive:\n"); | 154 | seq_puts(m, "Inactive:\n"); |
154 | head = &dev_priv->mm.inactive_list; | 155 | head = &vm->inactive_list; |
155 | break; | 156 | break; |
156 | default: | 157 | default: |
157 | mutex_unlock(&dev->struct_mutex); | 158 | mutex_unlock(&dev->struct_mutex); |
@@ -219,6 +220,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
219 | u32 count, mappable_count, purgeable_count; | 220 | u32 count, mappable_count, purgeable_count; |
220 | size_t size, mappable_size, purgeable_size; | 221 | size_t size, mappable_size, purgeable_size; |
221 | struct drm_i915_gem_object *obj; | 222 | struct drm_i915_gem_object *obj; |
223 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
222 | struct drm_file *file; | 224 | struct drm_file *file; |
223 | int ret; | 225 | int ret; |
224 | 226 | ||
@@ -236,12 +238,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
236 | count, mappable_count, size, mappable_size); | 238 | count, mappable_count, size, mappable_size); |
237 | 239 | ||
238 | size = count = mappable_size = mappable_count = 0; | 240 | size = count = mappable_size = mappable_count = 0; |
239 | count_objects(&dev_priv->mm.active_list, mm_list); | 241 | count_objects(&vm->active_list, mm_list); |
240 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", | 242 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", |
241 | count, mappable_count, size, mappable_size); | 243 | count, mappable_count, size, mappable_size); |
242 | 244 | ||
243 | size = count = mappable_size = mappable_count = 0; | 245 | size = count = mappable_size = mappable_count = 0; |
244 | count_objects(&dev_priv->mm.inactive_list, mm_list); | 246 | count_objects(&vm->inactive_list, mm_list); |
245 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", | 247 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", |
246 | count, mappable_count, size, mappable_size); | 248 | count, mappable_count, size, mappable_size); |
247 | 249 | ||
@@ -276,8 +278,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
276 | count, size); | 278 | count, size); |
277 | 279 | ||
278 | seq_printf(m, "%zu [%lu] gtt total\n", | 280 | seq_printf(m, "%zu [%lu] gtt total\n", |
279 | dev_priv->gtt.total, | 281 | dev_priv->gtt.base.total, |
280 | dev_priv->gtt.mappable_end - dev_priv->gtt.start); | 282 | dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); |
281 | 283 | ||
282 | seq_putc(m, '\n'); | 284 | seq_putc(m, '\n'); |
283 | list_for_each_entry_reverse(file, &dev->filelist, lhead) { | 285 | list_for_each_entry_reverse(file, &dev->filelist, lhead) { |
@@ -987,9 +989,9 @@ static int gen6_drpc_info(struct seq_file *m) | |||
987 | if (ret) | 989 | if (ret) |
988 | return ret; | 990 | return ret; |
989 | 991 | ||
990 | spin_lock_irq(&dev_priv->gt_lock); | 992 | spin_lock_irq(&dev_priv->uncore.lock); |
991 | forcewake_count = dev_priv->forcewake_count; | 993 | forcewake_count = dev_priv->uncore.forcewake_count; |
992 | spin_unlock_irq(&dev_priv->gt_lock); | 994 | spin_unlock_irq(&dev_priv->uncore.lock); |
993 | 995 | ||
994 | if (forcewake_count) { | 996 | if (forcewake_count) { |
995 | seq_puts(m, "RC information inaccurate because somebody " | 997 | seq_puts(m, "RC information inaccurate because somebody " |
@@ -1002,7 +1004,7 @@ static int gen6_drpc_info(struct seq_file *m) | |||
1002 | } | 1004 | } |
1003 | 1005 | ||
1004 | gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); | 1006 | gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); |
1005 | trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); | 1007 | trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); |
1006 | 1008 | ||
1007 | rpmodectl1 = I915_READ(GEN6_RP_CONTROL); | 1009 | rpmodectl1 = I915_READ(GEN6_RP_CONTROL); |
1008 | rcctl1 = I915_READ(GEN6_RC_CONTROL); | 1010 | rcctl1 = I915_READ(GEN6_RC_CONTROL); |
@@ -1373,9 +1375,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) | |||
1373 | struct drm_i915_private *dev_priv = dev->dev_private; | 1375 | struct drm_i915_private *dev_priv = dev->dev_private; |
1374 | unsigned forcewake_count; | 1376 | unsigned forcewake_count; |
1375 | 1377 | ||
1376 | spin_lock_irq(&dev_priv->gt_lock); | 1378 | spin_lock_irq(&dev_priv->uncore.lock); |
1377 | forcewake_count = dev_priv->forcewake_count; | 1379 | forcewake_count = dev_priv->uncore.forcewake_count; |
1378 | spin_unlock_irq(&dev_priv->gt_lock); | 1380 | spin_unlock_irq(&dev_priv->uncore.lock); |
1379 | 1381 | ||
1380 | seq_printf(m, "forcewake count = %u\n", forcewake_count); | 1382 | seq_printf(m, "forcewake count = %u\n", forcewake_count); |
1381 | 1383 | ||
@@ -1530,6 +1532,148 @@ static int i915_dpio_info(struct seq_file *m, void *data) | |||
1530 | return 0; | 1532 | return 0; |
1531 | } | 1533 | } |
1532 | 1534 | ||
1535 | static int i915_llc(struct seq_file *m, void *data) | ||
1536 | { | ||
1537 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
1538 | struct drm_device *dev = node->minor->dev; | ||
1539 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1540 | |||
1541 | /* Size calculation for LLC is a bit of a pain. Ignore for now. */ | ||
1542 | seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); | ||
1543 | seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); | ||
1544 | |||
1545 | return 0; | ||
1546 | } | ||
1547 | |||
1548 | static int i915_edp_psr_status(struct seq_file *m, void *data) | ||
1549 | { | ||
1550 | struct drm_info_node *node = m->private; | ||
1551 | struct drm_device *dev = node->minor->dev; | ||
1552 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1553 | u32 psrstat, psrperf; | ||
1554 | |||
1555 | if (!IS_HASWELL(dev)) { | ||
1556 | seq_puts(m, "PSR not supported on this platform\n"); | ||
1557 | } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) { | ||
1558 | seq_puts(m, "PSR enabled\n"); | ||
1559 | } else { | ||
1560 | seq_puts(m, "PSR disabled: "); | ||
1561 | switch (dev_priv->no_psr_reason) { | ||
1562 | case PSR_NO_SOURCE: | ||
1563 | seq_puts(m, "not supported on this platform"); | ||
1564 | break; | ||
1565 | case PSR_NO_SINK: | ||
1566 | seq_puts(m, "not supported by panel"); | ||
1567 | break; | ||
1568 | case PSR_MODULE_PARAM: | ||
1569 | seq_puts(m, "disabled by flag"); | ||
1570 | break; | ||
1571 | case PSR_CRTC_NOT_ACTIVE: | ||
1572 | seq_puts(m, "crtc not active"); | ||
1573 | break; | ||
1574 | case PSR_PWR_WELL_ENABLED: | ||
1575 | seq_puts(m, "power well enabled"); | ||
1576 | break; | ||
1577 | case PSR_NOT_TILED: | ||
1578 | seq_puts(m, "not tiled"); | ||
1579 | break; | ||
1580 | case PSR_SPRITE_ENABLED: | ||
1581 | seq_puts(m, "sprite enabled"); | ||
1582 | break; | ||
1583 | case PSR_S3D_ENABLED: | ||
1584 | seq_puts(m, "stereo 3d enabled"); | ||
1585 | break; | ||
1586 | case PSR_INTERLACED_ENABLED: | ||
1587 | seq_puts(m, "interlaced enabled"); | ||
1588 | break; | ||
1589 | case PSR_HSW_NOT_DDIA: | ||
1590 | seq_puts(m, "HSW ties PSR to DDI A (eDP)"); | ||
1591 | break; | ||
1592 | default: | ||
1593 | seq_puts(m, "unknown reason"); | ||
1594 | } | ||
1595 | seq_puts(m, "\n"); | ||
1596 | return 0; | ||
1597 | } | ||
1598 | |||
1599 | psrstat = I915_READ(EDP_PSR_STATUS_CTL); | ||
1600 | |||
1601 | seq_puts(m, "PSR Current State: "); | ||
1602 | switch (psrstat & EDP_PSR_STATUS_STATE_MASK) { | ||
1603 | case EDP_PSR_STATUS_STATE_IDLE: | ||
1604 | seq_puts(m, "Reset state\n"); | ||
1605 | break; | ||
1606 | case EDP_PSR_STATUS_STATE_SRDONACK: | ||
1607 | seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n"); | ||
1608 | break; | ||
1609 | case EDP_PSR_STATUS_STATE_SRDENT: | ||
1610 | seq_puts(m, "SRD entry\n"); | ||
1611 | break; | ||
1612 | case EDP_PSR_STATUS_STATE_BUFOFF: | ||
1613 | seq_puts(m, "Wait for buffer turn off\n"); | ||
1614 | break; | ||
1615 | case EDP_PSR_STATUS_STATE_BUFON: | ||
1616 | seq_puts(m, "Wait for buffer turn on\n"); | ||
1617 | break; | ||
1618 | case EDP_PSR_STATUS_STATE_AUXACK: | ||
1619 | seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n"); | ||
1620 | break; | ||
1621 | case EDP_PSR_STATUS_STATE_SRDOFFACK: | ||
1622 | seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n"); | ||
1623 | break; | ||
1624 | default: | ||
1625 | seq_puts(m, "Unknown\n"); | ||
1626 | break; | ||
1627 | } | ||
1628 | |||
1629 | seq_puts(m, "Link Status: "); | ||
1630 | switch (psrstat & EDP_PSR_STATUS_LINK_MASK) { | ||
1631 | case EDP_PSR_STATUS_LINK_FULL_OFF: | ||
1632 | seq_puts(m, "Link is fully off\n"); | ||
1633 | break; | ||
1634 | case EDP_PSR_STATUS_LINK_FULL_ON: | ||
1635 | seq_puts(m, "Link is fully on\n"); | ||
1636 | break; | ||
1637 | case EDP_PSR_STATUS_LINK_STANDBY: | ||
1638 | seq_puts(m, "Link is in standby\n"); | ||
1639 | break; | ||
1640 | default: | ||
1641 | seq_puts(m, "Unknown\n"); | ||
1642 | break; | ||
1643 | } | ||
1644 | |||
1645 | seq_printf(m, "PSR Entry Count: %u\n", | ||
1646 | psrstat >> EDP_PSR_STATUS_COUNT_SHIFT & | ||
1647 | EDP_PSR_STATUS_COUNT_MASK); | ||
1648 | |||
1649 | seq_printf(m, "Max Sleep Timer Counter: %u\n", | ||
1650 | psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT & | ||
1651 | EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK); | ||
1652 | |||
1653 | seq_printf(m, "Had AUX error: %s\n", | ||
1654 | yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR)); | ||
1655 | |||
1656 | seq_printf(m, "Sending AUX: %s\n", | ||
1657 | yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING)); | ||
1658 | |||
1659 | seq_printf(m, "Sending Idle: %s\n", | ||
1660 | yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE)); | ||
1661 | |||
1662 | seq_printf(m, "Sending TP2 TP3: %s\n", | ||
1663 | yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3)); | ||
1664 | |||
1665 | seq_printf(m, "Sending TP1: %s\n", | ||
1666 | yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1)); | ||
1667 | |||
1668 | seq_printf(m, "Idle Count: %u\n", | ||
1669 | psrstat & EDP_PSR_STATUS_IDLE_MASK); | ||
1670 | |||
1671 | psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK; | ||
1672 | seq_printf(m, "Performance Counter: %u\n", psrperf); | ||
1673 | |||
1674 | return 0; | ||
1675 | } | ||
1676 | |||
1533 | static int | 1677 | static int |
1534 | i915_wedged_get(void *data, u64 *val) | 1678 | i915_wedged_get(void *data, u64 *val) |
1535 | { | 1679 | { |
@@ -1612,6 +1756,7 @@ i915_drop_caches_set(void *data, u64 val) | |||
1612 | struct drm_device *dev = data; | 1756 | struct drm_device *dev = data; |
1613 | struct drm_i915_private *dev_priv = dev->dev_private; | 1757 | struct drm_i915_private *dev_priv = dev->dev_private; |
1614 | struct drm_i915_gem_object *obj, *next; | 1758 | struct drm_i915_gem_object *obj, *next; |
1759 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
1615 | int ret; | 1760 | int ret; |
1616 | 1761 | ||
1617 | DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); | 1762 | DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); |
@@ -1632,7 +1777,8 @@ i915_drop_caches_set(void *data, u64 val) | |||
1632 | i915_gem_retire_requests(dev); | 1777 | i915_gem_retire_requests(dev); |
1633 | 1778 | ||
1634 | if (val & DROP_BOUND) { | 1779 | if (val & DROP_BOUND) { |
1635 | list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) | 1780 | list_for_each_entry_safe(obj, next, &vm->inactive_list, |
1781 | mm_list) | ||
1636 | if (obj->pin_count == 0) { | 1782 | if (obj->pin_count == 0) { |
1637 | ret = i915_gem_object_unbind(obj); | 1783 | ret = i915_gem_object_unbind(obj); |
1638 | if (ret) | 1784 | if (ret) |
@@ -1959,6 +2105,8 @@ static struct drm_info_list i915_debugfs_list[] = { | |||
1959 | {"i915_swizzle_info", i915_swizzle_info, 0}, | 2105 | {"i915_swizzle_info", i915_swizzle_info, 0}, |
1960 | {"i915_ppgtt_info", i915_ppgtt_info, 0}, | 2106 | {"i915_ppgtt_info", i915_ppgtt_info, 0}, |
1961 | {"i915_dpio", i915_dpio_info, 0}, | 2107 | {"i915_dpio", i915_dpio_info, 0}, |
2108 | {"i915_llc", i915_llc, 0}, | ||
2109 | {"i915_edp_psr_status", i915_edp_psr_status, 0}, | ||
1962 | }; | 2110 | }; |
1963 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) | 2111 | #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) |
1964 | 2112 | ||
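Two small notes on the debugfs additions above. First, the new entries are registered in i915_debugfs_list, so they surface as the files i915_llc and i915_edp_psr_status in the device's debugfs directory. Second, readouts such as "psrstat >> EDP_PSR_STATUS_COUNT_SHIFT & EDP_PSR_STATUS_COUNT_MASK" rely on ">>" binding tighter than "&" in C, so the field is shifted down first and then masked. A worked example with purely illustrative values (not the real register layout):

    unsigned int psrstat = 0x00ab0000;        /* hypothetical register readout  */
    unsigned int shift   = 16, mask = 0xff;   /* hypothetical field position    */
    unsigned int count   = psrstat >> shift & mask;  /* (0x00ab0000 >> 16) & 0xff
                                                        == 0xab: shift, then mask */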
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 78ad4dcc8e2f..05756f95be7c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1358,7 +1358,7 @@ cleanup_gem: | |||
1358 | i915_gem_context_fini(dev); | 1358 | i915_gem_context_fini(dev); |
1359 | mutex_unlock(&dev->struct_mutex); | 1359 | mutex_unlock(&dev->struct_mutex); |
1360 | i915_gem_cleanup_aliasing_ppgtt(dev); | 1360 | i915_gem_cleanup_aliasing_ppgtt(dev); |
1361 | drm_mm_takedown(&dev_priv->mm.gtt_space); | 1361 | drm_mm_takedown(&dev_priv->gtt.base.mm); |
1362 | cleanup_irq: | 1362 | cleanup_irq: |
1363 | drm_irq_uninstall(dev); | 1363 | drm_irq_uninstall(dev); |
1364 | cleanup_gem_stolen: | 1364 | cleanup_gem_stolen: |
@@ -1436,22 +1436,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) | |||
1436 | } | 1436 | } |
1437 | 1437 | ||
1438 | /** | 1438 | /** |
1439 | * intel_early_sanitize_regs - clean up BIOS state | ||
1440 | * @dev: DRM device | ||
1441 | * | ||
1442 | * This function must be called before we do any I915_READ or I915_WRITE. Its | ||
1443 | * purpose is to clean up any state left by the BIOS that may affect us when | ||
1444 | * reading and/or writing registers. | ||
1445 | */ | ||
1446 | static void intel_early_sanitize_regs(struct drm_device *dev) | ||
1447 | { | ||
1448 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1449 | |||
1450 | if (HAS_FPGA_DBG_UNCLAIMED(dev)) | ||
1451 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
1452 | } | ||
1453 | |||
1454 | /** | ||
1455 | * i915_driver_load - setup chip and create an initial config | 1439 | * i915_driver_load - setup chip and create an initial config |
1456 | * @dev: DRM device | 1440 | * @dev: DRM device |
1457 | * @flags: startup flags | 1441 | * @flags: startup flags |
@@ -1490,8 +1474,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1490 | dev_priv->dev = dev; | 1474 | dev_priv->dev = dev; |
1491 | dev_priv->info = info; | 1475 | dev_priv->info = info; |
1492 | 1476 | ||
1477 | spin_lock_init(&dev_priv->irq_lock); | ||
1478 | spin_lock_init(&dev_priv->gpu_error.lock); | ||
1479 | spin_lock_init(&dev_priv->backlight.lock); | ||
1480 | spin_lock_init(&dev_priv->uncore.lock); | ||
1481 | spin_lock_init(&dev_priv->mm.object_stat_lock); | ||
1482 | mutex_init(&dev_priv->dpio_lock); | ||
1483 | mutex_init(&dev_priv->rps.hw_lock); | ||
1484 | mutex_init(&dev_priv->modeset_restore_lock); | ||
1485 | |||
1493 | i915_dump_device_info(dev_priv); | 1486 | i915_dump_device_info(dev_priv); |
1494 | 1487 | ||
1488 | INIT_LIST_HEAD(&dev_priv->vm_list); | ||
1489 | INIT_LIST_HEAD(&dev_priv->gtt.base.global_link); | ||
1490 | list_add(&dev_priv->gtt.base.global_link, &dev_priv->vm_list); | ||
1491 | |||
1495 | if (i915_get_bridge_dev(dev)) { | 1492 | if (i915_get_bridge_dev(dev)) { |
1496 | ret = -EIO; | 1493 | ret = -EIO; |
1497 | goto free_priv; | 1494 | goto free_priv; |
@@ -1517,7 +1514,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1517 | goto put_bridge; | 1514 | goto put_bridge; |
1518 | } | 1515 | } |
1519 | 1516 | ||
1520 | intel_early_sanitize_regs(dev); | 1517 | intel_uncore_early_sanitize(dev); |
1518 | |||
1519 | if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { | ||
1520 | /* The docs do not explain exactly how the calculation can be | ||
1521 | * made. It is somewhat guessable, but for now, it's always | ||
1522 | * 128MB. | ||
1523 | * NB: We can't write IDICR yet because we do not have gt funcs | ||
1524 | * set up */ | ||
1525 | dev_priv->ellc_size = 128; | ||
1526 | DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); | ||
1527 | } | ||
1521 | 1528 | ||
1522 | ret = i915_gem_gtt_init(dev); | 1529 | ret = i915_gem_gtt_init(dev); |
1523 | if (ret) | 1530 | if (ret) |
@@ -1580,7 +1587,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1580 | intel_detect_pch(dev); | 1587 | intel_detect_pch(dev); |
1581 | 1588 | ||
1582 | intel_irq_init(dev); | 1589 | intel_irq_init(dev); |
1583 | intel_gt_init(dev); | 1590 | intel_pm_init(dev); |
1591 | intel_uncore_sanitize(dev); | ||
1592 | intel_uncore_init(dev); | ||
1584 | 1593 | ||
1585 | /* Try to make sure MCHBAR is enabled before poking at it */ | 1594 | /* Try to make sure MCHBAR is enabled before poking at it */ |
1586 | intel_setup_mchbar(dev); | 1595 | intel_setup_mchbar(dev); |
@@ -1605,14 +1614,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1605 | if (!IS_I945G(dev) && !IS_I945GM(dev)) | 1614 | if (!IS_I945G(dev) && !IS_I945GM(dev)) |
1606 | pci_enable_msi(dev->pdev); | 1615 | pci_enable_msi(dev->pdev); |
1607 | 1616 | ||
1608 | spin_lock_init(&dev_priv->irq_lock); | ||
1609 | spin_lock_init(&dev_priv->gpu_error.lock); | ||
1610 | spin_lock_init(&dev_priv->backlight.lock); | ||
1611 | mutex_init(&dev_priv->dpio_lock); | ||
1612 | |||
1613 | mutex_init(&dev_priv->rps.hw_lock); | ||
1614 | mutex_init(&dev_priv->modeset_restore_lock); | ||
1615 | |||
1616 | dev_priv->num_plane = 1; | 1617 | dev_priv->num_plane = 1; |
1617 | if (IS_VALLEYVIEW(dev)) | 1618 | if (IS_VALLEYVIEW(dev)) |
1618 | dev_priv->num_plane = 2; | 1619 | dev_priv->num_plane = 2; |
@@ -1642,7 +1643,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1642 | if (INTEL_INFO(dev)->num_pipes) { | 1643 | if (INTEL_INFO(dev)->num_pipes) { |
1643 | /* Must be done after probing outputs */ | 1644 | /* Must be done after probing outputs */ |
1644 | intel_opregion_init(dev); | 1645 | intel_opregion_init(dev); |
1645 | acpi_video_register(); | 1646 | acpi_video_register_with_quirks(); |
1646 | } | 1647 | } |
1647 | 1648 | ||
1648 | if (IS_GEN5(dev)) | 1649 | if (IS_GEN5(dev)) |
@@ -1663,7 +1664,7 @@ out_gem_unload: | |||
1663 | out_mtrrfree: | 1664 | out_mtrrfree: |
1664 | arch_phys_wc_del(dev_priv->gtt.mtrr); | 1665 | arch_phys_wc_del(dev_priv->gtt.mtrr); |
1665 | io_mapping_free(dev_priv->gtt.mappable); | 1666 | io_mapping_free(dev_priv->gtt.mappable); |
1666 | dev_priv->gtt.gtt_remove(dev); | 1667 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); |
1667 | out_rmmap: | 1668 | out_rmmap: |
1668 | pci_iounmap(dev->pdev, dev_priv->regs); | 1669 | pci_iounmap(dev->pdev, dev_priv->regs); |
1669 | put_bridge: | 1670 | put_bridge: |
@@ -1748,7 +1749,9 @@ int i915_driver_unload(struct drm_device *dev) | |||
1748 | i915_free_hws(dev); | 1749 | i915_free_hws(dev); |
1749 | } | 1750 | } |
1750 | 1751 | ||
1751 | drm_mm_takedown(&dev_priv->mm.gtt_space); | 1752 | list_del(&dev_priv->gtt.base.global_link); |
1753 | WARN_ON(!list_empty(&dev_priv->vm_list)); | ||
1754 | drm_mm_takedown(&dev_priv->gtt.base.mm); | ||
1752 | if (dev_priv->regs != NULL) | 1755 | if (dev_priv->regs != NULL) |
1753 | pci_iounmap(dev->pdev, dev_priv->regs); | 1756 | pci_iounmap(dev->pdev, dev_priv->regs); |
1754 | 1757 | ||
@@ -1758,7 +1761,7 @@ int i915_driver_unload(struct drm_device *dev) | |||
1758 | destroy_workqueue(dev_priv->wq); | 1761 | destroy_workqueue(dev_priv->wq); |
1759 | pm_qos_remove_request(&dev_priv->pm_qos); | 1762 | pm_qos_remove_request(&dev_priv->pm_qos); |
1760 | 1763 | ||
1761 | dev_priv->gtt.gtt_remove(dev); | 1764 | dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); |
1762 | 1765 | ||
1763 | if (dev_priv->slab) | 1766 | if (dev_priv->slab) |
1764 | kmem_cache_destroy(dev_priv->slab); | 1767 | kmem_cache_destroy(dev_priv->slab); |
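The i915_driver_load() hunks above do two related things: they hoist all spinlock and mutex initialization ahead of the first register access, and they read HSW_EDRAM_PRESENT early to record the (currently hard-coded) 128MB eLLC size. The reordering matters because the reworked register accessors take dev_priv->uncore.lock, so that lock has to exist before any I915_READ/I915_WRITE; that rationale is inferred from the surrounding changes, and the sketch below is a simplified view of the new ordering rather than the function's exact body:

    spin_lock_init(&dev_priv->uncore.lock);   /* must precede the first MMIO access */
    /* ... the other locks and mutexes from the hunk above ... */

    intel_uncore_early_sanitize(dev);         /* first register writes on the new path */

    if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
            dev_priv->ellc_size = 128;        /* hard-coded until the docs explain the
                                                 real size calculation */
            DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
    }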
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cca12db6dbb7..13457e3e9cad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,6 +118,10 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); | |||
118 | MODULE_PARM_DESC(i915_enable_ppgtt, | 118 | MODULE_PARM_DESC(i915_enable_ppgtt, |
119 | "Enable PPGTT (default: true)"); | 119 | "Enable PPGTT (default: true)"); |
120 | 120 | ||
121 | int i915_enable_psr __read_mostly = 0; | ||
122 | module_param_named(enable_psr, i915_enable_psr, int, 0600); | ||
123 | MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); | ||
124 | |||
121 | unsigned int i915_preliminary_hw_support __read_mostly = 0; | 125 | unsigned int i915_preliminary_hw_support __read_mostly = 0; |
122 | module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); | 126 | module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); |
123 | MODULE_PARM_DESC(preliminary_hw_support, | 127 | MODULE_PARM_DESC(preliminary_hw_support, |
@@ -137,6 +141,11 @@ module_param_named(fastboot, i915_fastboot, bool, 0600); | |||
137 | MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " | 141 | MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " |
138 | "(default: false)"); | 142 | "(default: false)"); |
139 | 143 | ||
144 | bool i915_prefault_disable __read_mostly; | ||
145 | module_param_named(prefault_disable, i915_prefault_disable, bool, 0600); | ||
146 | MODULE_PARM_DESC(prefault_disable, | ||
147 | "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only."); | ||
148 | |||
140 | static struct drm_driver driver; | 149 | static struct drm_driver driver; |
141 | extern int intel_agp_enabled; | 150 | extern int intel_agp_enabled; |
142 | 151 | ||
@@ -714,7 +723,7 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
714 | { | 723 | { |
715 | int error = 0; | 724 | int error = 0; |
716 | 725 | ||
717 | intel_gt_reset(dev); | 726 | intel_uncore_sanitize(dev); |
718 | 727 | ||
719 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 728 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
720 | mutex_lock(&dev->struct_mutex); | 729 | mutex_lock(&dev->struct_mutex); |
@@ -740,7 +749,7 @@ int i915_resume(struct drm_device *dev) | |||
740 | 749 | ||
741 | pci_set_master(dev->pdev); | 750 | pci_set_master(dev->pdev); |
742 | 751 | ||
743 | intel_gt_reset(dev); | 752 | intel_uncore_sanitize(dev); |
744 | 753 | ||
745 | /* | 754 | /* |
746 | * Platforms with opregion should have sane BIOS, older ones (gen3 and | 755 | * Platforms with opregion should have sane BIOS, older ones (gen3 and |
@@ -761,140 +770,6 @@ int i915_resume(struct drm_device *dev) | |||
761 | return 0; | 770 | return 0; |
762 | } | 771 | } |
763 | 772 | ||
764 | static int i8xx_do_reset(struct drm_device *dev) | ||
765 | { | ||
766 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
767 | |||
768 | if (IS_I85X(dev)) | ||
769 | return -ENODEV; | ||
770 | |||
771 | I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); | ||
772 | POSTING_READ(D_STATE); | ||
773 | |||
774 | if (IS_I830(dev) || IS_845G(dev)) { | ||
775 | I915_WRITE(DEBUG_RESET_I830, | ||
776 | DEBUG_RESET_DISPLAY | | ||
777 | DEBUG_RESET_RENDER | | ||
778 | DEBUG_RESET_FULL); | ||
779 | POSTING_READ(DEBUG_RESET_I830); | ||
780 | msleep(1); | ||
781 | |||
782 | I915_WRITE(DEBUG_RESET_I830, 0); | ||
783 | POSTING_READ(DEBUG_RESET_I830); | ||
784 | } | ||
785 | |||
786 | msleep(1); | ||
787 | |||
788 | I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); | ||
789 | POSTING_READ(D_STATE); | ||
790 | |||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | static int i965_reset_complete(struct drm_device *dev) | ||
795 | { | ||
796 | u8 gdrst; | ||
797 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); | ||
798 | return (gdrst & GRDOM_RESET_ENABLE) == 0; | ||
799 | } | ||
800 | |||
801 | static int i965_do_reset(struct drm_device *dev) | ||
802 | { | ||
803 | int ret; | ||
804 | |||
805 | /* | ||
806 | * Set the domains we want to reset (GRDOM/bits 2 and 3) as | ||
807 | * well as the reset bit (GR/bit 0). Setting the GR bit | ||
808 | * triggers the reset; when done, the hardware will clear it. | ||
809 | */ | ||
810 | pci_write_config_byte(dev->pdev, I965_GDRST, | ||
811 | GRDOM_RENDER | GRDOM_RESET_ENABLE); | ||
812 | ret = wait_for(i965_reset_complete(dev), 500); | ||
813 | if (ret) | ||
814 | return ret; | ||
815 | |||
816 | /* We can't reset render&media without also resetting display ... */ | ||
817 | pci_write_config_byte(dev->pdev, I965_GDRST, | ||
818 | GRDOM_MEDIA | GRDOM_RESET_ENABLE); | ||
819 | |||
820 | ret = wait_for(i965_reset_complete(dev), 500); | ||
821 | if (ret) | ||
822 | return ret; | ||
823 | |||
824 | pci_write_config_byte(dev->pdev, I965_GDRST, 0); | ||
825 | |||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | static int ironlake_do_reset(struct drm_device *dev) | ||
830 | { | ||
831 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
832 | u32 gdrst; | ||
833 | int ret; | ||
834 | |||
835 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); | ||
836 | gdrst &= ~GRDOM_MASK; | ||
837 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, | ||
838 | gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); | ||
839 | ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); | ||
840 | if (ret) | ||
841 | return ret; | ||
842 | |||
843 | /* We can't reset render&media without also resetting display ... */ | ||
844 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); | ||
845 | gdrst &= ~GRDOM_MASK; | ||
846 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, | ||
847 | gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); | ||
848 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); | ||
849 | } | ||
850 | |||
851 | static int gen6_do_reset(struct drm_device *dev) | ||
852 | { | ||
853 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
854 | int ret; | ||
855 | unsigned long irqflags; | ||
856 | |||
857 | /* Hold gt_lock across reset to prevent any register access | ||
858 | * with forcewake not set correctly | ||
859 | */ | ||
860 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | ||
861 | |||
862 | /* Reset the chip */ | ||
863 | |||
864 | /* GEN6_GDRST is not in the gt power well, no need to check | ||
865 | * for fifo space for the write or forcewake the chip for | ||
866 | * the read | ||
867 | */ | ||
868 | I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); | ||
869 | |||
870 | /* Spin waiting for the device to ack the reset request */ | ||
871 | ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); | ||
872 | |||
873 | /* If reset with a user forcewake, try to restore, otherwise turn it off */ | ||
874 | if (dev_priv->forcewake_count) | ||
875 | dev_priv->gt.force_wake_get(dev_priv); | ||
876 | else | ||
877 | dev_priv->gt.force_wake_put(dev_priv); | ||
878 | |||
879 | /* Restore fifo count */ | ||
880 | dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
881 | |||
882 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | ||
883 | return ret; | ||
884 | } | ||
885 | |||
886 | int intel_gpu_reset(struct drm_device *dev) | ||
887 | { | ||
888 | switch (INTEL_INFO(dev)->gen) { | ||
889 | case 7: | ||
890 | case 6: return gen6_do_reset(dev); | ||
891 | case 5: return ironlake_do_reset(dev); | ||
892 | case 4: return i965_do_reset(dev); | ||
893 | case 2: return i8xx_do_reset(dev); | ||
894 | default: return -ENODEV; | ||
895 | } | ||
896 | } | ||
897 | |||
898 | /** | 773 | /** |
899 | * i915_reset - reset chip after a hang | 774 | * i915_reset - reset chip after a hang |
900 | * @dev: drm device to reset | 775 | * @dev: drm device to reset |
@@ -1224,133 +1099,3 @@ module_exit(i915_exit); | |||
1224 | MODULE_AUTHOR(DRIVER_AUTHOR); | 1099 | MODULE_AUTHOR(DRIVER_AUTHOR); |
1225 | MODULE_DESCRIPTION(DRIVER_DESC); | 1100 | MODULE_DESCRIPTION(DRIVER_DESC); |
1226 | MODULE_LICENSE("GPL and additional rights"); | 1101 | MODULE_LICENSE("GPL and additional rights"); |
1227 | |||
1228 | /* We give fast paths for the really cool registers */ | ||
1229 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | ||
1230 | ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ | ||
1231 | ((reg) < 0x40000) && \ | ||
1232 | ((reg) != FORCEWAKE)) | ||
1233 | static void | ||
1234 | ilk_dummy_write(struct drm_i915_private *dev_priv) | ||
1235 | { | ||
1236 | /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up | ||
1237 | * the chip from rc6 before touching it for real. MI_MODE is masked, | ||
1238 | * hence harmless to write 0 into. */ | ||
1239 | I915_WRITE_NOTRACE(MI_MODE, 0); | ||
1240 | } | ||
1241 | |||
1242 | static void | ||
1243 | hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) | ||
1244 | { | ||
1245 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && | ||
1246 | (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
1247 | DRM_ERROR("Unknown unclaimed register before writing to %x\n", | ||
1248 | reg); | ||
1249 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
1250 | } | ||
1251 | } | ||
1252 | |||
1253 | static void | ||
1254 | hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) | ||
1255 | { | ||
1256 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && | ||
1257 | (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
1258 | DRM_ERROR("Unclaimed write to %x\n", reg); | ||
1259 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
1260 | } | ||
1261 | } | ||
1262 | |||
1263 | #define __i915_read(x, y) \ | ||
1264 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | ||
1265 | u##x val = 0; \ | ||
1266 | if (IS_GEN5(dev_priv->dev)) \ | ||
1267 | ilk_dummy_write(dev_priv); \ | ||
1268 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
1269 | unsigned long irqflags; \ | ||
1270 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ | ||
1271 | if (dev_priv->forcewake_count == 0) \ | ||
1272 | dev_priv->gt.force_wake_get(dev_priv); \ | ||
1273 | val = read##y(dev_priv->regs + reg); \ | ||
1274 | if (dev_priv->forcewake_count == 0) \ | ||
1275 | dev_priv->gt.force_wake_put(dev_priv); \ | ||
1276 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ | ||
1277 | } else { \ | ||
1278 | val = read##y(dev_priv->regs + reg); \ | ||
1279 | } \ | ||
1280 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | ||
1281 | return val; \ | ||
1282 | } | ||
1283 | |||
1284 | __i915_read(8, b) | ||
1285 | __i915_read(16, w) | ||
1286 | __i915_read(32, l) | ||
1287 | __i915_read(64, q) | ||
1288 | #undef __i915_read | ||
1289 | |||
1290 | #define __i915_write(x, y) \ | ||
1291 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | ||
1292 | u32 __fifo_ret = 0; \ | ||
1293 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | ||
1294 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
1295 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | ||
1296 | } \ | ||
1297 | if (IS_GEN5(dev_priv->dev)) \ | ||
1298 | ilk_dummy_write(dev_priv); \ | ||
1299 | hsw_unclaimed_reg_clear(dev_priv, reg); \ | ||
1300 | write##y(val, dev_priv->regs + reg); \ | ||
1301 | if (unlikely(__fifo_ret)) { \ | ||
1302 | gen6_gt_check_fifodbg(dev_priv); \ | ||
1303 | } \ | ||
1304 | hsw_unclaimed_reg_check(dev_priv, reg); \ | ||
1305 | } | ||
1306 | __i915_write(8, b) | ||
1307 | __i915_write(16, w) | ||
1308 | __i915_write(32, l) | ||
1309 | __i915_write(64, q) | ||
1310 | #undef __i915_write | ||
1311 | |||
1312 | static const struct register_whitelist { | ||
1313 | uint64_t offset; | ||
1314 | uint32_t size; | ||
1315 | uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ | ||
1316 | } whitelist[] = { | ||
1317 | { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, | ||
1318 | }; | ||
1319 | |||
1320 | int i915_reg_read_ioctl(struct drm_device *dev, | ||
1321 | void *data, struct drm_file *file) | ||
1322 | { | ||
1323 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1324 | struct drm_i915_reg_read *reg = data; | ||
1325 | struct register_whitelist const *entry = whitelist; | ||
1326 | int i; | ||
1327 | |||
1328 | for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { | ||
1329 | if (entry->offset == reg->offset && | ||
1330 | (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) | ||
1331 | break; | ||
1332 | } | ||
1333 | |||
1334 | if (i == ARRAY_SIZE(whitelist)) | ||
1335 | return -EINVAL; | ||
1336 | |||
1337 | switch (entry->size) { | ||
1338 | case 8: | ||
1339 | reg->val = I915_READ64(reg->offset); | ||
1340 | break; | ||
1341 | case 4: | ||
1342 | reg->val = I915_READ(reg->offset); | ||
1343 | break; | ||
1344 | case 2: | ||
1345 | reg->val = I915_READ16(reg->offset); | ||
1346 | break; | ||
1347 | case 1: | ||
1348 | reg->val = I915_READ8(reg->offset); | ||
1349 | break; | ||
1350 | default: | ||
1351 | WARN_ON(1); | ||
1352 | return -EINVAL; | ||
1353 | } | ||
1354 | |||
1355 | return 0; | ||
1356 | } | ||
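The register-access macros, GPU reset helpers, and the reg_read ioctl removed above are relocated rather than dropped (see the "Colocate all GT access routines in the same file" commit in the shortlog); they move into the intel_uncore.c that the Makefile hunk earlier in this diff adds. For reference, the whitelist's gen_bitmask encodes each supported generation as one bit (gen N sets bit N), and the lookup again leans on "<<" binding tighter than "&". A worked example using the encoding from the removed code, with the gen value chosen purely for illustration:

    /* 0x10 = 1<<4          -> gen 4 only
     * 0x30 = (1<<4)|(1<<5) -> gens 4 and 5
     * 0xF0 = bits 4..7     -> gens 4-7, the RING_TIMESTAMP entry's mask */
    int gen = 7;                              /* e.g. an IVB/HSW device */
    int ok  = 1 << gen & 0xF0;                /* (1 << 7) & 0xF0 = 0x80, nonzero,
                                                 so the register is whitelisted */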
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3dd5731769fa..ab568201b932 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -391,11 +391,20 @@ struct drm_i915_display_funcs { | |||
391 | /* pll clock increase/decrease */ | 391 | /* pll clock increase/decrease */ |
392 | }; | 392 | }; |
393 | 393 | ||
394 | struct drm_i915_gt_funcs { | 394 | struct intel_uncore_funcs { |
395 | void (*force_wake_get)(struct drm_i915_private *dev_priv); | 395 | void (*force_wake_get)(struct drm_i915_private *dev_priv); |
396 | void (*force_wake_put)(struct drm_i915_private *dev_priv); | 396 | void (*force_wake_put)(struct drm_i915_private *dev_priv); |
397 | }; | 397 | }; |
398 | 398 | ||
399 | struct intel_uncore { | ||
400 | spinlock_t lock; /** lock is also taken in irq contexts. */ | ||
401 | |||
402 | struct intel_uncore_funcs funcs; | ||
403 | |||
404 | unsigned fifo_count; | ||
405 | unsigned forcewake_count; | ||
406 | }; | ||
407 | |||
399 | #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ | 408 | #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ |
400 | func(is_mobile) sep \ | 409 | func(is_mobile) sep \ |
401 | func(is_i85x) sep \ | 410 | func(is_i85x) sep \ |
@@ -446,6 +455,54 @@ enum i915_cache_level { | |||
446 | 455 | ||
447 | typedef uint32_t gen6_gtt_pte_t; | 456 | typedef uint32_t gen6_gtt_pte_t; |
448 | 457 | ||
458 | struct i915_address_space { | ||
459 | struct drm_mm mm; | ||
460 | struct drm_device *dev; | ||
461 | struct list_head global_link; | ||
462 | unsigned long start; /* Start offset always 0 for dri2 */ | ||
463 | size_t total; /* size addr space maps (ex. 2GB for ggtt) */ | ||
464 | |||
465 | struct { | ||
466 | dma_addr_t addr; | ||
467 | struct page *page; | ||
468 | } scratch; | ||
469 | |||
470 | /** | ||
471 | * List of objects currently involved in rendering. | ||
472 | * | ||
473 | * Includes buffers having the contents of their GPU caches | ||
474 | * flushed, not necessarily primitives. last_rendering_seqno | ||
475 | * represents when the rendering involved will be completed. | ||
476 | * | ||
477 | * A reference is held on the buffer while on this list. | ||
478 | */ | ||
479 | struct list_head active_list; | ||
480 | |||
481 | /** | ||
482 | * LRU list of objects which are not in the ringbuffer and | ||
483 | * are ready to unbind, but are still in the GTT. | ||
484 | * | ||
485 | * last_rendering_seqno is 0 while an object is in this list. | ||
486 | * | ||
487 | * A reference is not held on the buffer while on this list, | ||
488 | * as merely being GTT-bound shouldn't prevent its being | ||
489 | * freed, and we'll pull it off the list in the free path. | ||
490 | */ | ||
491 | struct list_head inactive_list; | ||
492 | |||
493 | /* FIXME: Need a more generic return type */ | ||
494 | gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, | ||
495 | enum i915_cache_level level); | ||
496 | void (*clear_range)(struct i915_address_space *vm, | ||
497 | unsigned int first_entry, | ||
498 | unsigned int num_entries); | ||
499 | void (*insert_entries)(struct i915_address_space *vm, | ||
500 | struct sg_table *st, | ||
501 | unsigned int first_entry, | ||
502 | enum i915_cache_level cache_level); | ||
503 | void (*cleanup)(struct i915_address_space *vm); | ||
504 | }; | ||
505 | |||
449 | /* The Graphics Translation Table is the way in which GEN hardware translates a | 506 | /* The Graphics Translation Table is the way in which GEN hardware translates a |
450 | * Graphics Virtual Address into a Physical Address. In addition to the normal | 507 | * Graphics Virtual Address into a Physical Address. In addition to the normal |
451 | * collateral associated with any va->pa translations GEN hardware also has a | 508 | * collateral associated with any va->pa translations GEN hardware also has a |
@@ -454,8 +511,7 @@ typedef uint32_t gen6_gtt_pte_t; | |||
454 | * the spec. | 511 | * the spec. |
455 | */ | 512 | */ |
456 | struct i915_gtt { | 513 | struct i915_gtt { |
457 | unsigned long start; /* Start offset of used GTT */ | 514 | struct i915_address_space base; |
458 | size_t total; /* Total size GTT can map */ | ||
459 | size_t stolen_size; /* Total size of stolen memory */ | 515 | size_t stolen_size; /* Total size of stolen memory */ |
460 | 516 | ||
461 | unsigned long mappable_end; /* End offset that we can CPU map */ | 517 | unsigned long mappable_end; /* End offset that we can CPU map */ |
@@ -466,10 +522,6 @@ struct i915_gtt { | |||
466 | void __iomem *gsm; | 522 | void __iomem *gsm; |
467 | 523 | ||
468 | bool do_idle_maps; | 524 | bool do_idle_maps; |
469 | struct { | ||
470 | dma_addr_t addr; | ||
471 | struct page *page; | ||
472 | } scratch; | ||
473 | 525 | ||
474 | int mtrr; | 526 | int mtrr; |
475 | 527 | ||
@@ -477,38 +529,28 @@ struct i915_gtt { | |||
477 | int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, | 529 | int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, |
478 | size_t *stolen, phys_addr_t *mappable_base, | 530 | size_t *stolen, phys_addr_t *mappable_base, |
479 | unsigned long *mappable_end); | 531 | unsigned long *mappable_end); |
480 | void (*gtt_remove)(struct drm_device *dev); | ||
481 | void (*gtt_clear_range)(struct drm_device *dev, | ||
482 | unsigned int first_entry, | ||
483 | unsigned int num_entries); | ||
484 | void (*gtt_insert_entries)(struct drm_device *dev, | ||
485 | struct sg_table *st, | ||
486 | unsigned int pg_start, | ||
487 | enum i915_cache_level cache_level); | ||
488 | gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, | ||
489 | enum i915_cache_level level); | ||
490 | }; | 532 | }; |
491 | #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) | 533 | #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) |
492 | 534 | ||
493 | struct i915_hw_ppgtt { | 535 | struct i915_hw_ppgtt { |
494 | struct drm_device *dev; | 536 | struct i915_address_space base; |
495 | unsigned num_pd_entries; | 537 | unsigned num_pd_entries; |
496 | struct page **pt_pages; | 538 | struct page **pt_pages; |
497 | uint32_t pd_offset; | 539 | uint32_t pd_offset; |
498 | dma_addr_t *pt_dma_addr; | 540 | dma_addr_t *pt_dma_addr; |
499 | 541 | ||
500 | /* pte functions, mirroring the interface of the global gtt. */ | ||
501 | void (*clear_range)(struct i915_hw_ppgtt *ppgtt, | ||
502 | unsigned int first_entry, | ||
503 | unsigned int num_entries); | ||
504 | void (*insert_entries)(struct i915_hw_ppgtt *ppgtt, | ||
505 | struct sg_table *st, | ||
506 | unsigned int pg_start, | ||
507 | enum i915_cache_level cache_level); | ||
508 | gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, | ||
509 | enum i915_cache_level level); | ||
510 | int (*enable)(struct drm_device *dev); | 542 | int (*enable)(struct drm_device *dev); |
511 | void (*cleanup)(struct i915_hw_ppgtt *ppgtt); | 543 | }; |
544 | |||
545 | /* To make things as simple as possible (ie. no refcounting), a VMA's lifetime | ||
546 | * will always be <= an objects lifetime. So object refcounting should cover us. | ||
547 | */ | ||
548 | struct i915_vma { | ||
549 | struct drm_mm_node node; | ||
550 | struct drm_i915_gem_object *obj; | ||
551 | struct i915_address_space *vm; | ||
552 | |||
553 | struct list_head vma_link; /* Link in the object's VMA list */ | ||
512 | }; | 554 | }; |
513 | 555 | ||
514 | struct i915_ctx_hang_stats { | 556 | struct i915_ctx_hang_stats { |
@@ -560,6 +602,18 @@ struct i915_fbc { | |||
560 | } no_fbc_reason; | 602 | } no_fbc_reason; |
561 | }; | 603 | }; |
562 | 604 | ||
605 | enum no_psr_reason { | ||
606 | PSR_NO_SOURCE, /* Not supported on platform */ | ||
607 | PSR_NO_SINK, /* Not supported by panel */ | ||
608 | PSR_MODULE_PARAM, | ||
609 | PSR_CRTC_NOT_ACTIVE, | ||
610 | PSR_PWR_WELL_ENABLED, | ||
611 | PSR_NOT_TILED, | ||
612 | PSR_SPRITE_ENABLED, | ||
613 | PSR_S3D_ENABLED, | ||
614 | PSR_INTERLACED_ENABLED, | ||
615 | PSR_HSW_NOT_DDIA, | ||
616 | }; | ||
563 | 617 | ||
564 | enum intel_pch { | 618 | enum intel_pch { |
565 | PCH_NONE = 0, /* No PCH present */ | 619 | PCH_NONE = 0, /* No PCH present */ |
@@ -577,6 +631,7 @@ enum intel_sbi_destination { | |||
577 | #define QUIRK_PIPEA_FORCE (1<<0) | 631 | #define QUIRK_PIPEA_FORCE (1<<0) |
578 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) | 632 | #define QUIRK_LVDS_SSC_DISABLE (1<<1) |
579 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) | 633 | #define QUIRK_INVERT_BRIGHTNESS (1<<2) |
634 | #define QUIRK_NO_PCH_PWM_ENABLE (1<<3) | ||
580 | 635 | ||
581 | struct intel_fbdev; | 636 | struct intel_fbdev; |
582 | struct intel_fbc_work; | 637 | struct intel_fbc_work; |
@@ -834,8 +889,6 @@ struct intel_l3_parity { | |||
834 | struct i915_gem_mm { | 889 | struct i915_gem_mm { |
835 | /** Memory allocator for GTT stolen memory */ | 890 | /** Memory allocator for GTT stolen memory */ |
836 | struct drm_mm stolen; | 891 | struct drm_mm stolen; |
837 | /** Memory allocator for GTT */ | ||
838 | struct drm_mm gtt_space; | ||
839 | /** List of all objects in gtt_space. Used to restore gtt | 892 | /** List of all objects in gtt_space. Used to restore gtt |
840 | * mappings on resume */ | 893 | * mappings on resume */ |
841 | struct list_head bound_list; | 894 | struct list_head bound_list; |
@@ -855,29 +908,6 @@ struct i915_gem_mm { | |||
855 | struct shrinker inactive_shrinker; | 908 | struct shrinker inactive_shrinker; |
856 | bool shrinker_no_lock_stealing; | 909 | bool shrinker_no_lock_stealing; |
857 | 910 | ||
858 | /** | ||
859 | * List of objects currently involved in rendering. | ||
860 | * | ||
861 | * Includes buffers having the contents of their GPU caches | ||
862 | * flushed, not necessarily primitives. last_rendering_seqno | ||
863 | * represents when the rendering involved will be completed. | ||
864 | * | ||
865 | * A reference is held on the buffer while on this list. | ||
866 | */ | ||
867 | struct list_head active_list; | ||
868 | |||
869 | /** | ||
870 | * LRU list of objects which are not in the ringbuffer and | ||
871 | * are ready to unbind, but are still in the GTT. | ||
872 | * | ||
873 | * last_rendering_seqno is 0 while an object is in this list. | ||
874 | * | ||
875 | * A reference is not held on the buffer while on this list, | ||
876 | * as merely being GTT-bound shouldn't prevent its being | ||
877 | * freed, and we'll pull it off the list in the free path. | ||
878 | */ | ||
879 | struct list_head inactive_list; | ||
880 | |||
881 | /** LRU list of objects with fence regs on them. */ | 911 | /** LRU list of objects with fence regs on them. */ |
882 | struct list_head fence_list; | 912 | struct list_head fence_list; |
883 | 913 | ||
@@ -905,6 +935,7 @@ struct i915_gem_mm { | |||
905 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; | 935 | struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
906 | 936 | ||
907 | /* accounting, useful for userland debugging */ | 937 | /* accounting, useful for userland debugging */ |
938 | spinlock_t object_stat_lock; | ||
908 | size_t object_memory; | 939 | size_t object_memory; |
909 | u32 object_count; | 940 | u32 object_count; |
910 | }; | 941 | }; |
@@ -1024,14 +1055,7 @@ typedef struct drm_i915_private { | |||
1024 | 1055 | ||
1025 | void __iomem *regs; | 1056 | void __iomem *regs; |
1026 | 1057 | ||
1027 | struct drm_i915_gt_funcs gt; | 1058 | struct intel_uncore uncore; |
1028 | /** gt_fifo_count and the subsequent register write are synchronized | ||
1029 | * with dev->struct_mutex. */ | ||
1030 | unsigned gt_fifo_count; | ||
1031 | /** forcewake_count is protected by gt_lock */ | ||
1032 | unsigned forcewake_count; | ||
1033 | /** gt_lock is also taken in irq contexts. */ | ||
1034 | spinlock_t gt_lock; | ||
1035 | 1059 | ||
1036 | struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; | 1060 | struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
1037 | 1061 | ||
@@ -1124,7 +1148,8 @@ typedef struct drm_i915_private { | |||
1124 | enum modeset_restore modeset_restore; | 1148 | enum modeset_restore modeset_restore; |
1125 | struct mutex modeset_restore_lock; | 1149 | struct mutex modeset_restore_lock; |
1126 | 1150 | ||
1127 | struct i915_gtt gtt; | 1151 | struct list_head vm_list; /* Global list of all address spaces */ |
1152 | struct i915_gtt gtt; /* VMA representing the global address space */ | ||
1128 | 1153 | ||
1129 | struct i915_gem_mm mm; | 1154 | struct i915_gem_mm mm; |
1130 | 1155 | ||
@@ -1151,6 +1176,9 @@ typedef struct drm_i915_private { | |||
1151 | 1176 | ||
1152 | struct intel_l3_parity l3_parity; | 1177 | struct intel_l3_parity l3_parity; |
1153 | 1178 | ||
1179 | /* Cannot be determined by PCIID. You must always read a register. */ | ||
1180 | size_t ellc_size; | ||
1181 | |||
1154 | /* gen6+ rps state */ | 1182 | /* gen6+ rps state */ |
1155 | struct intel_gen6_power_mgmt rps; | 1183 | struct intel_gen6_power_mgmt rps; |
1156 | 1184 | ||
@@ -1161,6 +1189,8 @@ typedef struct drm_i915_private { | |||
1161 | /* Haswell power well */ | 1189 | /* Haswell power well */ |
1162 | struct i915_power_well power_well; | 1190 | struct i915_power_well power_well; |
1163 | 1191 | ||
1192 | enum no_psr_reason no_psr_reason; | ||
1193 | |||
1164 | struct i915_gpu_error gpu_error; | 1194 | struct i915_gpu_error gpu_error; |
1165 | 1195 | ||
1166 | struct drm_i915_gem_object *vlv_pctx; | 1196 | struct drm_i915_gem_object *vlv_pctx; |
@@ -1228,8 +1258,9 @@ struct drm_i915_gem_object { | |||
1228 | 1258 | ||
1229 | const struct drm_i915_gem_object_ops *ops; | 1259 | const struct drm_i915_gem_object_ops *ops; |
1230 | 1260 | ||
1231 | /** Current space allocated to this object in the GTT, if any. */ | 1261 | /** List of VMAs backed by this object */ |
1232 | struct drm_mm_node gtt_space; | 1262 | struct list_head vma_list; |
1263 | |||
1233 | /** Stolen memory for this object, instead of being backed by shmem. */ | 1264 | /** Stolen memory for this object, instead of being backed by shmem. */ |
1234 | struct drm_mm_node *stolen; | 1265 | struct drm_mm_node *stolen; |
1235 | struct list_head global_list; | 1266 | struct list_head global_list; |
@@ -1355,18 +1386,32 @@ struct drm_i915_gem_object { | |||
1355 | 1386 | ||
1356 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) | 1387 | #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) |
1357 | 1388 | ||
1358 | /* Offset of the first PTE pointing to this object */ | 1389 | /* This is a temporary define to help transition us to real VMAs. If you see |
1359 | static inline unsigned long | 1390 | * this, you're either reviewing code, or bisecting it. */ |
1360 | i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) | 1391 | static inline struct i915_vma * |
1392 | __i915_gem_obj_to_vma(struct drm_i915_gem_object *obj) | ||
1361 | { | 1393 | { |
1362 | return o->gtt_space.start; | 1394 | if (list_empty(&obj->vma_list)) |
1395 | return NULL; | ||
1396 | return list_first_entry(&obj->vma_list, struct i915_vma, vma_link); | ||
1363 | } | 1397 | } |
1364 | 1398 | ||
1365 | /* Whether or not this object is currently mapped by the translation tables */ | 1399 | /* Whether or not this object is currently mapped by the translation tables */ |
1366 | static inline bool | 1400 | static inline bool |
1367 | i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) | 1401 | i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) |
1368 | { | 1402 | { |
1369 | return drm_mm_node_allocated(&o->gtt_space); | 1403 | struct i915_vma *vma = __i915_gem_obj_to_vma(o); |
1404 | if (vma == NULL) | ||
1405 | return false; | ||
1406 | return drm_mm_node_allocated(&vma->node); | ||
1407 | } | ||
1408 | |||
1409 | /* Offset of the first PTE pointing to this object */ | ||
1410 | static inline unsigned long | ||
1411 | i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) | ||
1412 | { | ||
1413 | BUG_ON(list_empty(&o->vma_list)); | ||
1414 | return __i915_gem_obj_to_vma(o)->node.start; | ||
1370 | } | 1415 | } |
1371 | 1416 | ||
1372 | /* The size used in the translation tables may be larger than the actual size of | 1417 | /* The size used in the translation tables may be larger than the actual size of |
@@ -1376,14 +1421,15 @@ i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o) | |||
1376 | static inline unsigned long | 1421 | static inline unsigned long |
1377 | i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) | 1422 | i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o) |
1378 | { | 1423 | { |
1379 | return o->gtt_space.size; | 1424 | BUG_ON(list_empty(&o->vma_list)); |
1425 | return __i915_gem_obj_to_vma(o)->node.size; | ||
1380 | } | 1426 | } |
1381 | 1427 | ||
1382 | static inline void | 1428 | static inline void |
1383 | i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o, | 1429 | i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o, |
1384 | enum i915_cache_level color) | 1430 | enum i915_cache_level color) |
1385 | { | 1431 | { |
1386 | o->gtt_space.color = color; | 1432 | __i915_gem_obj_to_vma(o)->node.color = color; |
1387 | } | 1433 | } |
1388 | 1434 | ||
1389 | /** | 1435 | /** |
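
The two hunks above are the core of the VMA transition: the drm_mm_node that used to be embedded in the object (obj->gtt_space) moves into a per-object list of VMAs, and the old GGTT helpers become thin shims over the first (and, for now, only) list entry. A minimal sketch of that shim pattern, using stand-in struct names rather than the real driver types, just to make the list bookkeeping explicit:

/* Sketch only: each object keeps a list of VMAs; while only the global GTT
 * exists the list has zero or one entry, so the legacy GGTT queries reduce
 * to inspecting the first element.
 */
#include <linux/list.h>
#include <drm/drm_mm.h>

struct example_vma {
    struct drm_mm_node node;    /* placement within one address space */
    struct list_head vma_link;  /* links into example_obj.vma_list */
};

struct example_obj {
    struct list_head vma_list;  /* zero or one entry during the transition */
};

static inline struct example_vma *first_vma(struct example_obj *obj)
{
    if (list_empty(&obj->vma_list))
        return NULL;
    return list_first_entry(&obj->vma_list, struct example_vma, vma_link);
}

static inline bool example_ggtt_bound(struct example_obj *obj)
{
    struct example_vma *vma = first_vma(obj);

    return vma && drm_mm_node_allocated(&vma->node);
}
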
@@ -1580,10 +1626,12 @@ extern int i915_enable_rc6 __read_mostly; | |||
1580 | extern int i915_enable_fbc __read_mostly; | 1626 | extern int i915_enable_fbc __read_mostly; |
1581 | extern bool i915_enable_hangcheck __read_mostly; | 1627 | extern bool i915_enable_hangcheck __read_mostly; |
1582 | extern int i915_enable_ppgtt __read_mostly; | 1628 | extern int i915_enable_ppgtt __read_mostly; |
1629 | extern int i915_enable_psr __read_mostly; | ||
1583 | extern unsigned int i915_preliminary_hw_support __read_mostly; | 1630 | extern unsigned int i915_preliminary_hw_support __read_mostly; |
1584 | extern int i915_disable_power_well __read_mostly; | 1631 | extern int i915_disable_power_well __read_mostly; |
1585 | extern int i915_enable_ips __read_mostly; | 1632 | extern int i915_enable_ips __read_mostly; |
1586 | extern bool i915_fastboot __read_mostly; | 1633 | extern bool i915_fastboot __read_mostly; |
1634 | extern bool i915_prefault_disable __read_mostly; | ||
1587 | 1635 | ||
1588 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 1636 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
1589 | extern int i915_resume(struct drm_device *dev); | 1637 | extern int i915_resume(struct drm_device *dev); |
@@ -1619,13 +1667,20 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); | |||
1619 | extern void intel_console_resume(struct work_struct *work); | 1667 | extern void intel_console_resume(struct work_struct *work); |
1620 | 1668 | ||
1621 | /* i915_irq.c */ | 1669 | /* i915_irq.c */ |
1670 | void i915_queue_hangcheck(struct drm_device *dev); | ||
1622 | void i915_hangcheck_elapsed(unsigned long data); | 1671 | void i915_hangcheck_elapsed(unsigned long data); |
1623 | void i915_handle_error(struct drm_device *dev, bool wedged); | 1672 | void i915_handle_error(struct drm_device *dev, bool wedged); |
1624 | 1673 | ||
1625 | extern void intel_irq_init(struct drm_device *dev); | 1674 | extern void intel_irq_init(struct drm_device *dev); |
1626 | extern void intel_hpd_init(struct drm_device *dev); | 1675 | extern void intel_hpd_init(struct drm_device *dev); |
1627 | extern void intel_gt_init(struct drm_device *dev); | 1676 | extern void intel_pm_init(struct drm_device *dev); |
1628 | extern void intel_gt_reset(struct drm_device *dev); | 1677 | |
1678 | extern void intel_uncore_sanitize(struct drm_device *dev); | ||
1679 | extern void intel_uncore_early_sanitize(struct drm_device *dev); | ||
1680 | extern void intel_uncore_init(struct drm_device *dev); | ||
1681 | extern void intel_uncore_reset(struct drm_device *dev); | ||
1682 | extern void intel_uncore_clear_errors(struct drm_device *dev); | ||
1683 | extern void intel_uncore_check_errors(struct drm_device *dev); | ||
1629 | 1684 | ||
1630 | void | 1685 | void |
1631 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); | 1686 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
@@ -1689,6 +1744,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, | |||
1689 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | 1744 | struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
1690 | size_t size); | 1745 | size_t size); |
1691 | void i915_gem_free_object(struct drm_gem_object *obj); | 1746 | void i915_gem_free_object(struct drm_gem_object *obj); |
1747 | struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, | ||
1748 | struct i915_address_space *vm); | ||
1749 | void i915_gem_vma_destroy(struct i915_vma *vma); | ||
1692 | 1750 | ||
1693 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, | 1751 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
1694 | uint32_t alignment, | 1752 | uint32_t alignment, |
@@ -1857,7 +1915,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) | |||
1857 | } | 1915 | } |
1858 | 1916 | ||
1859 | struct i915_ctx_hang_stats * __must_check | 1917 | struct i915_ctx_hang_stats * __must_check |
1860 | i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, | 1918 | i915_gem_context_get_hang_stats(struct drm_device *dev, |
1861 | struct drm_file *file, | 1919 | struct drm_file *file, |
1862 | u32 id); | 1920 | u32 id); |
1863 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, | 1921 | int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
@@ -2056,7 +2114,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, | |||
2056 | */ | 2114 | */ |
2057 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); | 2115 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
2058 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); | 2116 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
2059 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); | ||
2060 | 2117 | ||
2061 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); | 2118 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); |
2062 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); | 2119 | int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); |
@@ -2075,39 +2132,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, | |||
2075 | int vlv_gpu_freq(int ddr_freq, int val); | 2132 | int vlv_gpu_freq(int ddr_freq, int val); |
2076 | int vlv_freq_opcode(int ddr_freq, int val); | 2133 | int vlv_freq_opcode(int ddr_freq, int val); |
2077 | 2134 | ||
2078 | #define __i915_read(x, y) \ | 2135 | #define __i915_read(x) \ |
2079 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); | 2136 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace); |
2080 | 2137 | __i915_read(8) | |
2081 | __i915_read(8, b) | 2138 | __i915_read(16) |
2082 | __i915_read(16, w) | 2139 | __i915_read(32) |
2083 | __i915_read(32, l) | 2140 | __i915_read(64) |
2084 | __i915_read(64, q) | ||
2085 | #undef __i915_read | 2141 | #undef __i915_read |
2086 | 2142 | ||
2087 | #define __i915_write(x, y) \ | 2143 | #define __i915_write(x) \ |
2088 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); | 2144 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace); |
2089 | 2145 | __i915_write(8) | |
2090 | __i915_write(8, b) | 2146 | __i915_write(16) |
2091 | __i915_write(16, w) | 2147 | __i915_write(32) |
2092 | __i915_write(32, l) | 2148 | __i915_write(64) |
2093 | __i915_write(64, q) | ||
2094 | #undef __i915_write | 2149 | #undef __i915_write |
2095 | 2150 | ||
2096 | #define I915_READ8(reg) i915_read8(dev_priv, (reg)) | 2151 | #define I915_READ8(reg) i915_read8(dev_priv, (reg), true) |
2097 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) | 2152 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true) |
2098 | 2153 | ||
2099 | #define I915_READ16(reg) i915_read16(dev_priv, (reg)) | 2154 | #define I915_READ16(reg) i915_read16(dev_priv, (reg), true) |
2100 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) | 2155 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true) |
2101 | #define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) | 2156 | #define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false) |
2102 | #define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) | 2157 | #define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false) |
2103 | 2158 | ||
2104 | #define I915_READ(reg) i915_read32(dev_priv, (reg)) | 2159 | #define I915_READ(reg) i915_read32(dev_priv, (reg), true) |
2105 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) | 2160 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true) |
2106 | #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) | 2161 | #define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false) |
2107 | #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) | 2162 | #define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false) |
2108 | 2163 | ||
2109 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) | 2164 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true) |
2110 | #define I915_READ64(reg) i915_read64(dev_priv, (reg)) | 2165 | #define I915_READ64(reg) i915_read64(dev_priv, (reg), true) |
2111 | 2166 | ||
2112 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) | 2167 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
2113 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) | 2168 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
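
With the hunk above, every register accessor takes an explicit trace flag, and the *_NOTRACE macros stop poking dev_priv->regs with raw readl()/writel(): they call the same i915_read32()/i915_write32() entry points with trace=false. Only the prototypes are fixed here; the bodies live in the GT access code added elsewhere in this series, so the following is just a rough sketch (forcewake handling omitted, tracepoint arguments assumed) of how the flag can gate the tracepoint while keeping one code path:

/* Illustrative sketch, not the driver implementation. */
u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace)
{
    u32 val = readl(dev_priv->regs + reg);  /* what NOTRACE used to do inline */

    if (trace)
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); /* assumed signature */

    return val;
}
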
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ea2d83d7324e..26c5f802a9df 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -76,15 +76,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) | |||
76 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, | 76 | static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, |
77 | size_t size) | 77 | size_t size) |
78 | { | 78 | { |
79 | spin_lock(&dev_priv->mm.object_stat_lock); | ||
79 | dev_priv->mm.object_count++; | 80 | dev_priv->mm.object_count++; |
80 | dev_priv->mm.object_memory += size; | 81 | dev_priv->mm.object_memory += size; |
82 | spin_unlock(&dev_priv->mm.object_stat_lock); | ||
81 | } | 83 | } |
82 | 84 | ||
83 | static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, | 85 | static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, |
84 | size_t size) | 86 | size_t size) |
85 | { | 87 | { |
88 | spin_lock(&dev_priv->mm.object_stat_lock); | ||
86 | dev_priv->mm.object_count--; | 89 | dev_priv->mm.object_count--; |
87 | dev_priv->mm.object_memory -= size; | 90 | dev_priv->mm.object_memory -= size; |
91 | spin_unlock(&dev_priv->mm.object_stat_lock); | ||
88 | } | 92 | } |
89 | 93 | ||
90 | static int | 94 | static int |
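
The object count/memory statistics gain their own spinlock because they can now be touched outside struct_mutex (the unlocked unreference introduced in i915_gem_create() just below is one such path). It is the usual pattern for keeping two related counters consistent with each other; a generic sketch:

#include <linux/spinlock.h>

struct obj_stats {
    spinlock_t lock;        /* spin_lock_init() at driver load */
    unsigned long count;
    size_t memory;
};

static void obj_stats_add(struct obj_stats *s, size_t size)
{
    spin_lock(&s->lock);
    s->count++;             /* both fields change together, under one lock */
    s->memory += size;
    spin_unlock(&s->lock);
}
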
@@ -182,7 +186,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, | |||
182 | pinned += i915_gem_obj_ggtt_size(obj); | 186 | pinned += i915_gem_obj_ggtt_size(obj); |
183 | mutex_unlock(&dev->struct_mutex); | 187 | mutex_unlock(&dev->struct_mutex); |
184 | 188 | ||
185 | args->aper_size = dev_priv->gtt.total; | 189 | args->aper_size = dev_priv->gtt.base.total; |
186 | args->aper_available_size = args->aper_size - pinned; | 190 | args->aper_available_size = args->aper_size - pinned; |
187 | 191 | ||
188 | return 0; | 192 | return 0; |
@@ -220,16 +224,10 @@ i915_gem_create(struct drm_file *file, | |||
220 | return -ENOMEM; | 224 | return -ENOMEM; |
221 | 225 | ||
222 | ret = drm_gem_handle_create(file, &obj->base, &handle); | 226 | ret = drm_gem_handle_create(file, &obj->base, &handle); |
223 | if (ret) { | ||
224 | drm_gem_object_release(&obj->base); | ||
225 | i915_gem_info_remove_obj(dev->dev_private, obj->base.size); | ||
226 | i915_gem_object_free(obj); | ||
227 | return ret; | ||
228 | } | ||
229 | |||
230 | /* drop reference from allocate - handle holds it now */ | 227 | /* drop reference from allocate - handle holds it now */ |
231 | drm_gem_object_unreference(&obj->base); | 228 | drm_gem_object_unreference_unlocked(&obj->base); |
232 | trace_i915_gem_object_create(obj); | 229 | if (ret) |
230 | return ret; | ||
233 | 231 | ||
234 | *handle_p = handle; | 232 | *handle_p = handle; |
235 | return 0; | 233 | return 0; |
@@ -459,7 +457,7 @@ i915_gem_shmem_pread(struct drm_device *dev, | |||
459 | 457 | ||
460 | mutex_unlock(&dev->struct_mutex); | 458 | mutex_unlock(&dev->struct_mutex); |
461 | 459 | ||
462 | if (!prefaulted) { | 460 | if (likely(!i915_prefault_disable) && !prefaulted) { |
463 | ret = fault_in_multipages_writeable(user_data, remain); | 461 | ret = fault_in_multipages_writeable(user_data, remain); |
464 | /* Userspace is tricking us, but we've already clobbered | 462 | /* Userspace is tricking us, but we've already clobbered |
465 | * its pages with the prefault and promised to write the | 463 | * its pages with the prefault and promised to write the |
@@ -854,10 +852,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
854 | args->size)) | 852 | args->size)) |
855 | return -EFAULT; | 853 | return -EFAULT; |
856 | 854 | ||
857 | ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), | 855 | if (likely(!i915_prefault_disable)) { |
858 | args->size); | 856 | ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), |
859 | if (ret) | 857 | args->size); |
860 | return -EFAULT; | 858 | if (ret) |
859 | return -EFAULT; | ||
860 | } | ||
861 | 861 | ||
862 | ret = i915_mutex_lock_interruptible(dev); | 862 | ret = i915_mutex_lock_interruptible(dev); |
863 | if (ret) | 863 | if (ret) |
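
Both slow paths now skip the userspace prefaulting when i915_prefault_disable is set (the extern appears in the i915_drv.h hunk above, and the execbuffer relocation validation further down gets the same treatment). The parameter itself is declared in i915_drv.c, which is outside this excerpt; a sketch of what that declaration presumably looks like, following the driver's existing module-parameter style:

/* Assumed to sit in i915_drv.c next to the other i915 module parameters. */
bool i915_prefault_disable __read_mostly;
module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
                 "Disable page prefaulting for pread/pwrite/reloc (default: false). "
                 "For developers only.");
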
@@ -1679,6 +1679,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, | |||
1679 | bool purgeable_only) | 1679 | bool purgeable_only) |
1680 | { | 1680 | { |
1681 | struct drm_i915_gem_object *obj, *next; | 1681 | struct drm_i915_gem_object *obj, *next; |
1682 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
1682 | long count = 0; | 1683 | long count = 0; |
1683 | 1684 | ||
1684 | list_for_each_entry_safe(obj, next, | 1685 | list_for_each_entry_safe(obj, next, |
@@ -1692,9 +1693,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, | |||
1692 | } | 1693 | } |
1693 | } | 1694 | } |
1694 | 1695 | ||
1695 | list_for_each_entry_safe(obj, next, | 1696 | list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) { |
1696 | &dev_priv->mm.inactive_list, | ||
1697 | mm_list) { | ||
1698 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && | 1697 | if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && |
1699 | i915_gem_object_unbind(obj) == 0 && | 1698 | i915_gem_object_unbind(obj) == 0 && |
1700 | i915_gem_object_put_pages(obj) == 0) { | 1699 | i915_gem_object_put_pages(obj) == 0) { |
@@ -1865,6 +1864,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1865 | { | 1864 | { |
1866 | struct drm_device *dev = obj->base.dev; | 1865 | struct drm_device *dev = obj->base.dev; |
1867 | struct drm_i915_private *dev_priv = dev->dev_private; | 1866 | struct drm_i915_private *dev_priv = dev->dev_private; |
1867 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
1868 | u32 seqno = intel_ring_get_seqno(ring); | 1868 | u32 seqno = intel_ring_get_seqno(ring); |
1869 | 1869 | ||
1870 | BUG_ON(ring == NULL); | 1870 | BUG_ON(ring == NULL); |
@@ -1881,7 +1881,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1881 | } | 1881 | } |
1882 | 1882 | ||
1883 | /* Move from whatever list we were on to the tail of execution. */ | 1883 | /* Move from whatever list we were on to the tail of execution. */ |
1884 | list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); | 1884 | list_move_tail(&obj->mm_list, &vm->active_list); |
1885 | list_move_tail(&obj->ring_list, &ring->active_list); | 1885 | list_move_tail(&obj->ring_list, &ring->active_list); |
1886 | 1886 | ||
1887 | obj->last_read_seqno = seqno; | 1887 | obj->last_read_seqno = seqno; |
@@ -1905,11 +1905,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | |||
1905 | { | 1905 | { |
1906 | struct drm_device *dev = obj->base.dev; | 1906 | struct drm_device *dev = obj->base.dev; |
1907 | struct drm_i915_private *dev_priv = dev->dev_private; | 1907 | struct drm_i915_private *dev_priv = dev->dev_private; |
1908 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
1908 | 1909 | ||
1909 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | 1910 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
1910 | BUG_ON(!obj->active); | 1911 | BUG_ON(!obj->active); |
1911 | 1912 | ||
1912 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 1913 | list_move_tail(&obj->mm_list, &vm->inactive_list); |
1913 | 1914 | ||
1914 | list_del_init(&obj->ring_list); | 1915 | list_del_init(&obj->ring_list); |
1915 | obj->ring = NULL; | 1916 | obj->ring = NULL; |
@@ -2074,10 +2075,8 @@ int __i915_add_request(struct intel_ring_buffer *ring, | |||
2074 | ring->outstanding_lazy_request = 0; | 2075 | ring->outstanding_lazy_request = 0; |
2075 | 2076 | ||
2076 | if (!dev_priv->ums.mm_suspended) { | 2077 | if (!dev_priv->ums.mm_suspended) { |
2077 | if (i915_enable_hangcheck) { | 2078 | i915_queue_hangcheck(ring->dev); |
2078 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | 2079 | |
2079 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | ||
2080 | } | ||
2081 | if (was_empty) { | 2080 | if (was_empty) { |
2082 | queue_delayed_work(dev_priv->wq, | 2081 | queue_delayed_work(dev_priv->wq, |
2083 | &dev_priv->mm.retire_work, | 2082 | &dev_priv->mm.retire_work, |
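
__i915_add_request() stops open-coding the hangcheck timer arming. Judging purely from the removed lines, the new i915_queue_hangcheck() helper (declared in the i915_drv.h hunk earlier) folds the module-parameter check and the mod_timer() call into one place; roughly:

/* Reconstructed from the removed lines; the real helper is added to
 * i915_irq.c elsewhere in this series.
 */
void i915_queue_hangcheck(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    if (!i915_enable_hangcheck)
        return;

    mod_timer(&dev_priv->gpu_error.hangcheck_timer,
              round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
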
@@ -2246,13 +2245,24 @@ void i915_gem_restore_fences(struct drm_device *dev) | |||
2246 | 2245 | ||
2247 | for (i = 0; i < dev_priv->num_fence_regs; i++) { | 2246 | for (i = 0; i < dev_priv->num_fence_regs; i++) { |
2248 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; | 2247 | struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
2249 | i915_gem_write_fence(dev, i, reg->obj); | 2248 | |
2249 | /* | ||
2250 | * Commit delayed tiling changes if we have an object still | ||
2251 | * attached to the fence, otherwise just clear the fence. | ||
2252 | */ | ||
2253 | if (reg->obj) { | ||
2254 | i915_gem_object_update_fence(reg->obj, reg, | ||
2255 | reg->obj->tiling_mode); | ||
2256 | } else { | ||
2257 | i915_gem_write_fence(dev, i, NULL); | ||
2258 | } | ||
2250 | } | 2259 | } |
2251 | } | 2260 | } |
2252 | 2261 | ||
2253 | void i915_gem_reset(struct drm_device *dev) | 2262 | void i915_gem_reset(struct drm_device *dev) |
2254 | { | 2263 | { |
2255 | struct drm_i915_private *dev_priv = dev->dev_private; | 2264 | struct drm_i915_private *dev_priv = dev->dev_private; |
2265 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
2256 | struct drm_i915_gem_object *obj; | 2266 | struct drm_i915_gem_object *obj; |
2257 | struct intel_ring_buffer *ring; | 2267 | struct intel_ring_buffer *ring; |
2258 | int i; | 2268 | int i; |
@@ -2263,12 +2273,8 @@ void i915_gem_reset(struct drm_device *dev) | |||
2263 | /* Move everything out of the GPU domains to ensure we do any | 2273 | /* Move everything out of the GPU domains to ensure we do any |
2264 | * necessary invalidation upon reuse. | 2274 | * necessary invalidation upon reuse. |
2265 | */ | 2275 | */ |
2266 | list_for_each_entry(obj, | 2276 | list_for_each_entry(obj, &vm->inactive_list, mm_list) |
2267 | &dev_priv->mm.inactive_list, | ||
2268 | mm_list) | ||
2269 | { | ||
2270 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; | 2277 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
2271 | } | ||
2272 | 2278 | ||
2273 | i915_gem_restore_fences(dev); | 2279 | i915_gem_restore_fences(dev); |
2274 | } | 2280 | } |
@@ -2571,6 +2577,7 @@ int | |||
2571 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) | 2577 | i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
2572 | { | 2578 | { |
2573 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; | 2579 | drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
2580 | struct i915_vma *vma; | ||
2574 | int ret; | 2581 | int ret; |
2575 | 2582 | ||
2576 | if (!i915_gem_obj_ggtt_bound(obj)) | 2583 | if (!i915_gem_obj_ggtt_bound(obj)) |
@@ -2608,11 +2615,20 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) | |||
2608 | i915_gem_object_unpin_pages(obj); | 2615 | i915_gem_object_unpin_pages(obj); |
2609 | 2616 | ||
2610 | list_del(&obj->mm_list); | 2617 | list_del(&obj->mm_list); |
2611 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); | ||
2612 | /* Avoid an unnecessary call to unbind on rebind. */ | 2618 | /* Avoid an unnecessary call to unbind on rebind. */ |
2613 | obj->map_and_fenceable = true; | 2619 | obj->map_and_fenceable = true; |
2614 | 2620 | ||
2615 | drm_mm_remove_node(&obj->gtt_space); | 2621 | vma = __i915_gem_obj_to_vma(obj); |
2622 | list_del(&vma->vma_link); | ||
2623 | drm_mm_remove_node(&vma->node); | ||
2624 | i915_gem_vma_destroy(vma); | ||
2625 | |||
2626 | /* Since the unbound list is global, only move to that list if | ||
2627 | * no more VMAs exist. | ||
2628 | * NB: Until we have real VMAs there will only ever be one */ | ||
2629 | WARN_ON(!list_empty(&obj->vma_list)); | ||
2630 | if (list_empty(&obj->vma_list)) | ||
2631 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); | ||
2616 | 2632 | ||
2617 | return 0; | 2633 | return 0; |
2618 | } | 2634 | } |
@@ -2781,6 +2797,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, | |||
2781 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) | 2797 | if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) |
2782 | mb(); | 2798 | mb(); |
2783 | 2799 | ||
2800 | WARN(obj && (!obj->stride || !obj->tiling_mode), | ||
2801 | "bogus fence setup with stride: 0x%x, tiling mode: %i\n", | ||
2802 | obj->stride, obj->tiling_mode); | ||
2803 | |||
2784 | switch (INTEL_INFO(dev)->gen) { | 2804 | switch (INTEL_INFO(dev)->gen) { |
2785 | case 7: | 2805 | case 7: |
2786 | case 6: | 2806 | case 6: |
@@ -2822,6 +2842,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | |||
2822 | fence->obj = NULL; | 2842 | fence->obj = NULL; |
2823 | list_del_init(&fence->lru_list); | 2843 | list_del_init(&fence->lru_list); |
2824 | } | 2844 | } |
2845 | obj->fence_dirty = false; | ||
2825 | } | 2846 | } |
2826 | 2847 | ||
2827 | static int | 2848 | static int |
@@ -2951,7 +2972,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj) | |||
2951 | return 0; | 2972 | return 0; |
2952 | 2973 | ||
2953 | i915_gem_object_update_fence(obj, reg, enable); | 2974 | i915_gem_object_update_fence(obj, reg, enable); |
2954 | obj->fence_dirty = false; | ||
2955 | 2975 | ||
2956 | return 0; | 2976 | return 0; |
2957 | } | 2977 | } |
@@ -3037,12 +3057,17 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
3037 | { | 3057 | { |
3038 | struct drm_device *dev = obj->base.dev; | 3058 | struct drm_device *dev = obj->base.dev; |
3039 | drm_i915_private_t *dev_priv = dev->dev_private; | 3059 | drm_i915_private_t *dev_priv = dev->dev_private; |
3060 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
3040 | u32 size, fence_size, fence_alignment, unfenced_alignment; | 3061 | u32 size, fence_size, fence_alignment, unfenced_alignment; |
3041 | bool mappable, fenceable; | 3062 | bool mappable, fenceable; |
3042 | size_t gtt_max = map_and_fenceable ? | 3063 | size_t gtt_max = map_and_fenceable ? |
3043 | dev_priv->gtt.mappable_end : dev_priv->gtt.total; | 3064 | dev_priv->gtt.mappable_end : dev_priv->gtt.base.total; |
3065 | struct i915_vma *vma; | ||
3044 | int ret; | 3066 | int ret; |
3045 | 3067 | ||
3068 | if (WARN_ON(!list_empty(&obj->vma_list))) | ||
3069 | return -EBUSY; | ||
3070 | |||
3046 | fence_size = i915_gem_get_gtt_size(dev, | 3071 | fence_size = i915_gem_get_gtt_size(dev, |
3047 | obj->base.size, | 3072 | obj->base.size, |
3048 | obj->tiling_mode); | 3073 | obj->tiling_mode); |
@@ -3081,9 +3106,15 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, | |||
3081 | 3106 | ||
3082 | i915_gem_object_pin_pages(obj); | 3107 | i915_gem_object_pin_pages(obj); |
3083 | 3108 | ||
3109 | vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); | ||
3110 | if (IS_ERR(vma)) { | ||
3111 | ret = PTR_ERR(vma); | ||
3112 | goto err_unpin; | ||
3113 | } | ||
3114 | |||
3084 | search_free: | 3115 | search_free: |
3085 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, | 3116 | ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, |
3086 | &obj->gtt_space, | 3117 | &vma->node, |
3087 | size, alignment, | 3118 | size, alignment, |
3088 | obj->cache_level, 0, gtt_max, | 3119 | obj->cache_level, 0, gtt_max, |
3089 | DRM_MM_SEARCH_DEFAULT); | 3120 | DRM_MM_SEARCH_DEFAULT); |
@@ -3095,25 +3126,21 @@ search_free: | |||
3095 | if (ret == 0) | 3126 | if (ret == 0) |
3096 | goto search_free; | 3127 | goto search_free; |
3097 | 3128 | ||
3098 | i915_gem_object_unpin_pages(obj); | 3129 | goto err_free_vma; |
3099 | return ret; | ||
3100 | } | 3130 | } |
3101 | if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->gtt_space, | 3131 | if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node, |
3102 | obj->cache_level))) { | 3132 | obj->cache_level))) { |
3103 | i915_gem_object_unpin_pages(obj); | 3133 | ret = -EINVAL; |
3104 | drm_mm_remove_node(&obj->gtt_space); | 3134 | goto err_remove_node; |
3105 | return -EINVAL; | ||
3106 | } | 3135 | } |
3107 | 3136 | ||
3108 | ret = i915_gem_gtt_prepare_object(obj); | 3137 | ret = i915_gem_gtt_prepare_object(obj); |
3109 | if (ret) { | 3138 | if (ret) |
3110 | i915_gem_object_unpin_pages(obj); | 3139 | goto err_remove_node; |
3111 | drm_mm_remove_node(&obj->gtt_space); | ||
3112 | return ret; | ||
3113 | } | ||
3114 | 3140 | ||
3115 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); | 3141 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); |
3116 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 3142 | list_add_tail(&obj->mm_list, &vm->inactive_list); |
3143 | list_add(&vma->vma_link, &obj->vma_list); | ||
3117 | 3144 | ||
3118 | fenceable = | 3145 | fenceable = |
3119 | i915_gem_obj_ggtt_size(obj) == fence_size && | 3146 | i915_gem_obj_ggtt_size(obj) == fence_size && |
@@ -3127,6 +3154,14 @@ search_free: | |||
3127 | trace_i915_gem_object_bind(obj, map_and_fenceable); | 3154 | trace_i915_gem_object_bind(obj, map_and_fenceable); |
3128 | i915_gem_verify_gtt(dev); | 3155 | i915_gem_verify_gtt(dev); |
3129 | return 0; | 3156 | return 0; |
3157 | |||
3158 | err_remove_node: | ||
3159 | drm_mm_remove_node(&vma->node); | ||
3160 | err_free_vma: | ||
3161 | i915_gem_vma_destroy(vma); | ||
3162 | err_unpin: | ||
3163 | i915_gem_object_unpin_pages(obj); | ||
3164 | return ret; | ||
3130 | } | 3165 | } |
3131 | 3166 | ||
3132 | void | 3167 | void |
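
i915_gem_object_bind_to_gtt() drops the copy-pasted cleanup at each failure point in favour of the standard kernel goto-unwind ladder: each acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order (node, then VMA, then the page pin). A condensed, self-contained illustration of the idiom with stand-in functions:

/* Stand-ins for the pin/create/insert/prepare steps of the real function. */
static int pin_pages(void)    { return 0; }
static int create_vma(void)   { return 0; }
static int insert_node(void)  { return 0; }
static int prepare_gtt(void)  { return 0; }
static void remove_node(void) { }
static void destroy_vma(void) { }
static void unpin_pages(void) { }

static int bind_example(void)
{
    int ret;

    ret = pin_pages();
    if (ret)
        return ret;
    ret = create_vma();
    if (ret)
        goto err_unpin;
    ret = insert_node();
    if (ret)
        goto err_free_vma;
    ret = prepare_gtt();
    if (ret)
        goto err_remove_node;
    return 0;

err_remove_node:
    remove_node();      /* undo in reverse order of acquisition */
err_free_vma:
    destroy_vma();
err_unpin:
    unpin_pages();
    return ret;
}
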
@@ -3261,7 +3296,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
3261 | 3296 | ||
3262 | /* And bump the LRU for this access */ | 3297 | /* And bump the LRU for this access */ |
3263 | if (i915_gem_object_is_inactive(obj)) | 3298 | if (i915_gem_object_is_inactive(obj)) |
3264 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 3299 | list_move_tail(&obj->mm_list, |
3300 | &dev_priv->gtt.base.inactive_list); | ||
3265 | 3301 | ||
3266 | return 0; | 3302 | return 0; |
3267 | } | 3303 | } |
@@ -3271,6 +3307,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3271 | { | 3307 | { |
3272 | struct drm_device *dev = obj->base.dev; | 3308 | struct drm_device *dev = obj->base.dev; |
3273 | drm_i915_private_t *dev_priv = dev->dev_private; | 3309 | drm_i915_private_t *dev_priv = dev->dev_private; |
3310 | struct i915_vma *vma = __i915_gem_obj_to_vma(obj); | ||
3274 | int ret; | 3311 | int ret; |
3275 | 3312 | ||
3276 | if (obj->cache_level == cache_level) | 3313 | if (obj->cache_level == cache_level) |
@@ -3281,7 +3318,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3281 | return -EBUSY; | 3318 | return -EBUSY; |
3282 | } | 3319 | } |
3283 | 3320 | ||
3284 | if (!i915_gem_valid_gtt_space(dev, &obj->gtt_space, cache_level)) { | 3321 | if (vma && !i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { |
3285 | ret = i915_gem_object_unbind(obj); | 3322 | ret = i915_gem_object_unbind(obj); |
3286 | if (ret) | 3323 | if (ret) |
3287 | return ret; | 3324 | return ret; |
@@ -3826,6 +3863,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, | |||
3826 | INIT_LIST_HEAD(&obj->global_list); | 3863 | INIT_LIST_HEAD(&obj->global_list); |
3827 | INIT_LIST_HEAD(&obj->ring_list); | 3864 | INIT_LIST_HEAD(&obj->ring_list); |
3828 | INIT_LIST_HEAD(&obj->exec_list); | 3865 | INIT_LIST_HEAD(&obj->exec_list); |
3866 | INIT_LIST_HEAD(&obj->vma_list); | ||
3829 | 3867 | ||
3830 | obj->ops = ops; | 3868 | obj->ops = ops; |
3831 | 3869 | ||
@@ -3890,6 +3928,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, | |||
3890 | } else | 3928 | } else |
3891 | obj->cache_level = I915_CACHE_NONE; | 3929 | obj->cache_level = I915_CACHE_NONE; |
3892 | 3930 | ||
3931 | trace_i915_gem_object_create(obj); | ||
3932 | |||
3893 | return obj; | 3933 | return obj; |
3894 | } | 3934 | } |
3895 | 3935 | ||
@@ -3946,6 +3986,26 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
3946 | i915_gem_object_free(obj); | 3986 | i915_gem_object_free(obj); |
3947 | } | 3987 | } |
3948 | 3988 | ||
3989 | struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, | ||
3990 | struct i915_address_space *vm) | ||
3991 | { | ||
3992 | struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); | ||
3993 | if (vma == NULL) | ||
3994 | return ERR_PTR(-ENOMEM); | ||
3995 | |||
3996 | INIT_LIST_HEAD(&vma->vma_link); | ||
3997 | vma->vm = vm; | ||
3998 | vma->obj = obj; | ||
3999 | |||
4000 | return vma; | ||
4001 | } | ||
4002 | |||
4003 | void i915_gem_vma_destroy(struct i915_vma *vma) | ||
4004 | { | ||
4005 | WARN_ON(vma->node.allocated); | ||
4006 | kfree(vma); | ||
4007 | } | ||
4008 | |||
3949 | int | 4009 | int |
3950 | i915_gem_idle(struct drm_device *dev) | 4010 | i915_gem_idle(struct drm_device *dev) |
3951 | { | 4011 | { |
@@ -4105,8 +4165,8 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4105 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | 4165 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
4106 | return -EIO; | 4166 | return -EIO; |
4107 | 4167 | ||
4108 | if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) | 4168 | if (dev_priv->ellc_size) |
4109 | I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); | 4169 | I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
4110 | 4170 | ||
4111 | if (HAS_PCH_NOP(dev)) { | 4171 | if (HAS_PCH_NOP(dev)) { |
4112 | u32 temp = I915_READ(GEN7_MSG_CTL); | 4172 | u32 temp = I915_READ(GEN7_MSG_CTL); |
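
The magic Haswell pokes become named registers (HSW_IDICR / IDIHASHMSK), and the eLLC test no longer re-reads register 0x120010 at init_hw time; it keys off the cached dev_priv->ellc_size field added in the i915_drv.h hunk at the top of this excerpt. That cache is presumably filled during early setup with the moral equivalent of the removed check; a sketch under that assumption (the 128MB figure and the placement are assumptions, not shown in this diff):

/* Assumed setup-time equivalent of the removed inline test; the real
 * detection code lives outside this excerpt.
 */
static void example_detect_ellc(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) {
        dev_priv->ellc_size = 128;  /* assumed 128MB on current parts */
        DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
    }
}
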
@@ -4202,7 +4262,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
4202 | return ret; | 4262 | return ret; |
4203 | } | 4263 | } |
4204 | 4264 | ||
4205 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 4265 | BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); |
4206 | mutex_unlock(&dev->struct_mutex); | 4266 | mutex_unlock(&dev->struct_mutex); |
4207 | 4267 | ||
4208 | ret = drm_irq_install(dev); | 4268 | ret = drm_irq_install(dev); |
@@ -4280,8 +4340,8 @@ i915_gem_load(struct drm_device *dev) | |||
4280 | SLAB_HWCACHE_ALIGN, | 4340 | SLAB_HWCACHE_ALIGN, |
4281 | NULL); | 4341 | NULL); |
4282 | 4342 | ||
4283 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 4343 | INIT_LIST_HEAD(&dev_priv->gtt.base.active_list); |
4284 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 4344 | INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list); |
4285 | INIT_LIST_HEAD(&dev_priv->mm.unbound_list); | 4345 | INIT_LIST_HEAD(&dev_priv->mm.unbound_list); |
4286 | INIT_LIST_HEAD(&dev_priv->mm.bound_list); | 4346 | INIT_LIST_HEAD(&dev_priv->mm.bound_list); |
4287 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 4347 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
@@ -4552,6 +4612,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) | |||
4552 | struct drm_i915_private, | 4612 | struct drm_i915_private, |
4553 | mm.inactive_shrinker); | 4613 | mm.inactive_shrinker); |
4554 | struct drm_device *dev = dev_priv->dev; | 4614 | struct drm_device *dev = dev_priv->dev; |
4615 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
4555 | struct drm_i915_gem_object *obj; | 4616 | struct drm_i915_gem_object *obj; |
4556 | int nr_to_scan = sc->nr_to_scan; | 4617 | int nr_to_scan = sc->nr_to_scan; |
4557 | bool unlock = true; | 4618 | bool unlock = true; |
@@ -4580,7 +4641,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) | |||
4580 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) | 4641 | list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) |
4581 | if (obj->pages_pin_count == 0) | 4642 | if (obj->pages_pin_count == 0) |
4582 | cnt += obj->base.size >> PAGE_SHIFT; | 4643 | cnt += obj->base.size >> PAGE_SHIFT; |
4583 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) | 4644 | list_for_each_entry(obj, &vm->inactive_list, mm_list) |
4584 | if (obj->pin_count == 0 && obj->pages_pin_count == 0) | 4645 | if (obj->pin_count == 0 && obj->pages_pin_count == 0) |
4585 | cnt += obj->base.size >> PAGE_SHIFT; | 4646 | cnt += obj->base.size >> PAGE_SHIFT; |
4586 | 4647 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 2074544682cf..2470206a4d07 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data) | |||
304 | } | 304 | } |
305 | 305 | ||
306 | struct i915_ctx_hang_stats * | 306 | struct i915_ctx_hang_stats * |
307 | i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, | 307 | i915_gem_context_get_hang_stats(struct drm_device *dev, |
308 | struct drm_file *file, | 308 | struct drm_file *file, |
309 | u32 id) | 309 | u32 id) |
310 | { | 310 | { |
311 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 311 | struct drm_i915_private *dev_priv = dev->dev_private; |
312 | struct drm_i915_file_private *file_priv = file->driver_priv; | 312 | struct drm_i915_file_private *file_priv = file->driver_priv; |
313 | struct i915_hw_context *to; | 313 | struct i915_hw_context *ctx; |
314 | |||
315 | if (dev_priv->hw_contexts_disabled) | ||
316 | return ERR_PTR(-ENOENT); | ||
317 | |||
318 | if (ring->id != RCS) | ||
319 | return ERR_PTR(-EINVAL); | ||
320 | |||
321 | if (file == NULL) | ||
322 | return ERR_PTR(-EINVAL); | ||
323 | 314 | ||
324 | if (id == DEFAULT_CONTEXT_ID) | 315 | if (id == DEFAULT_CONTEXT_ID) |
325 | return &file_priv->hang_stats; | 316 | return &file_priv->hang_stats; |
326 | 317 | ||
327 | to = i915_gem_context_get(file->driver_priv, id); | 318 | ctx = NULL; |
328 | if (to == NULL) | 319 | if (!dev_priv->hw_contexts_disabled) |
320 | ctx = i915_gem_context_get(file->driver_priv, id); | ||
321 | if (ctx == NULL) | ||
329 | return ERR_PTR(-ENOENT); | 322 | return ERR_PTR(-ENOENT); |
330 | 323 | ||
331 | return &to->hang_stats; | 324 | return &ctx->hang_stats; |
332 | } | 325 | } |
333 | 326 | ||
334 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) | 327 | void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 582e6a5f3dac..bf945a39fbb1 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev) | |||
97 | } | 97 | } |
98 | } | 98 | } |
99 | 99 | ||
100 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { | 100 | list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) { |
101 | if (obj->base.dev != dev || | 101 | if (obj->base.dev != dev || |
102 | !atomic_read(&obj->base.refcount.refcount)) { | 102 | !atomic_read(&obj->base.refcount.refcount)) { |
103 | DRM_ERROR("freed inactive %p\n", obj); | 103 | DRM_ERROR("freed inactive %p\n", obj); |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 5f8afc48bb7e..df61f338dea1 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -34,11 +34,13 @@ | |||
34 | static bool | 34 | static bool |
35 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) | 35 | mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) |
36 | { | 36 | { |
37 | struct i915_vma *vma = __i915_gem_obj_to_vma(obj); | ||
38 | |||
37 | if (obj->pin_count) | 39 | if (obj->pin_count) |
38 | return false; | 40 | return false; |
39 | 41 | ||
40 | list_add(&obj->exec_list, unwind); | 42 | list_add(&obj->exec_list, unwind); |
41 | return drm_mm_scan_add_block(&obj->gtt_space); | 43 | return drm_mm_scan_add_block(&vma->node); |
42 | } | 44 | } |
43 | 45 | ||
44 | int | 46 | int |
@@ -47,7 +49,9 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
47 | bool mappable, bool nonblocking) | 49 | bool mappable, bool nonblocking) |
48 | { | 50 | { |
49 | drm_i915_private_t *dev_priv = dev->dev_private; | 51 | drm_i915_private_t *dev_priv = dev->dev_private; |
52 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
50 | struct list_head eviction_list, unwind_list; | 53 | struct list_head eviction_list, unwind_list; |
54 | struct i915_vma *vma; | ||
51 | struct drm_i915_gem_object *obj; | 55 | struct drm_i915_gem_object *obj; |
52 | int ret = 0; | 56 | int ret = 0; |
53 | 57 | ||
@@ -78,15 +82,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
78 | 82 | ||
79 | INIT_LIST_HEAD(&unwind_list); | 83 | INIT_LIST_HEAD(&unwind_list); |
80 | if (mappable) | 84 | if (mappable) |
81 | drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, | 85 | drm_mm_init_scan_with_range(&vm->mm, min_size, |
82 | min_size, alignment, cache_level, | 86 | alignment, cache_level, 0, |
83 | 0, dev_priv->gtt.mappable_end); | 87 | dev_priv->gtt.mappable_end); |
84 | else | 88 | else |
85 | drm_mm_init_scan(&dev_priv->mm.gtt_space, | 89 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); |
86 | min_size, alignment, cache_level); | ||
87 | 90 | ||
88 | /* First see if there is a large enough contiguous idle region... */ | 91 | /* First see if there is a large enough contiguous idle region... */ |
89 | list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { | 92 | list_for_each_entry(obj, &vm->inactive_list, mm_list) { |
90 | if (mark_free(obj, &unwind_list)) | 93 | if (mark_free(obj, &unwind_list)) |
91 | goto found; | 94 | goto found; |
92 | } | 95 | } |
@@ -95,7 +98,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, | |||
95 | goto none; | 98 | goto none; |
96 | 99 | ||
97 | /* Now merge in the soon-to-be-expired objects... */ | 100 | /* Now merge in the soon-to-be-expired objects... */ |
98 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | 101 | list_for_each_entry(obj, &vm->active_list, mm_list) { |
99 | if (mark_free(obj, &unwind_list)) | 102 | if (mark_free(obj, &unwind_list)) |
100 | goto found; | 103 | goto found; |
101 | } | 104 | } |
@@ -106,8 +109,8 @@ none: | |||
106 | obj = list_first_entry(&unwind_list, | 109 | obj = list_first_entry(&unwind_list, |
107 | struct drm_i915_gem_object, | 110 | struct drm_i915_gem_object, |
108 | exec_list); | 111 | exec_list); |
109 | 112 | vma = __i915_gem_obj_to_vma(obj); | |
110 | ret = drm_mm_scan_remove_block(&obj->gtt_space); | 113 | ret = drm_mm_scan_remove_block(&vma->node); |
111 | BUG_ON(ret); | 114 | BUG_ON(ret); |
112 | 115 | ||
113 | list_del_init(&obj->exec_list); | 116 | list_del_init(&obj->exec_list); |
@@ -127,7 +130,8 @@ found: | |||
127 | obj = list_first_entry(&unwind_list, | 130 | obj = list_first_entry(&unwind_list, |
128 | struct drm_i915_gem_object, | 131 | struct drm_i915_gem_object, |
129 | exec_list); | 132 | exec_list); |
130 | if (drm_mm_scan_remove_block(&obj->gtt_space)) { | 133 | vma = __i915_gem_obj_to_vma(obj); |
134 | if (drm_mm_scan_remove_block(&vma->node)) { | ||
131 | list_move(&obj->exec_list, &eviction_list); | 135 | list_move(&obj->exec_list, &eviction_list); |
132 | drm_gem_object_reference(&obj->base); | 136 | drm_gem_object_reference(&obj->base); |
133 | continue; | 137 | continue; |
@@ -154,12 +158,13 @@ int | |||
154 | i915_gem_evict_everything(struct drm_device *dev) | 158 | i915_gem_evict_everything(struct drm_device *dev) |
155 | { | 159 | { |
156 | drm_i915_private_t *dev_priv = dev->dev_private; | 160 | drm_i915_private_t *dev_priv = dev->dev_private; |
161 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
157 | struct drm_i915_gem_object *obj, *next; | 162 | struct drm_i915_gem_object *obj, *next; |
158 | bool lists_empty; | 163 | bool lists_empty; |
159 | int ret; | 164 | int ret; |
160 | 165 | ||
161 | lists_empty = (list_empty(&dev_priv->mm.inactive_list) && | 166 | lists_empty = (list_empty(&vm->inactive_list) && |
162 | list_empty(&dev_priv->mm.active_list)); | 167 | list_empty(&vm->active_list)); |
163 | if (lists_empty) | 168 | if (lists_empty) |
164 | return -ENOSPC; | 169 | return -ENOSPC; |
165 | 170 | ||
@@ -176,8 +181,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
176 | i915_gem_retire_requests(dev); | 181 | i915_gem_retire_requests(dev); |
177 | 182 | ||
178 | /* Having flushed everything, unbind() should never raise an error */ | 183 | /* Having flushed everything, unbind() should never raise an error */ |
179 | list_for_each_entry_safe(obj, next, | 184 | list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) |
180 | &dev_priv->mm.inactive_list, mm_list) | ||
181 | if (obj->pin_count == 0) | 185 | if (obj->pin_count == 0) |
182 | WARN_ON(i915_gem_object_unbind(obj)); | 186 | WARN_ON(i915_gem_object_unbind(obj)); |
183 | 187 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 64eda4463b70..5b6d764e9bb2 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -255,7 +255,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
255 | 255 | ||
256 | reloc->delta += target_offset; | 256 | reloc->delta += target_offset; |
257 | if (use_cpu_reloc(obj)) { | 257 | if (use_cpu_reloc(obj)) { |
258 | uint32_t page_offset = reloc->offset & ~PAGE_MASK; | 258 | uint32_t page_offset = offset_in_page(reloc->offset); |
259 | char *vaddr; | 259 | char *vaddr; |
260 | 260 | ||
261 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); | 261 | ret = i915_gem_object_set_to_cpu_domain(obj, 1); |
@@ -284,7 +284,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
284 | reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, | 284 | reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
285 | reloc->offset & PAGE_MASK); | 285 | reloc->offset & PAGE_MASK); |
286 | reloc_entry = (uint32_t __iomem *) | 286 | reloc_entry = (uint32_t __iomem *) |
287 | (reloc_page + (reloc->offset & ~PAGE_MASK)); | 287 | (reloc_page + offset_in_page(reloc->offset)); |
288 | iowrite32(reloc->delta, reloc_entry); | 288 | iowrite32(reloc->delta, reloc_entry); |
289 | io_mapping_unmap_atomic(reloc_page); | 289 | io_mapping_unmap_atomic(reloc_page); |
290 | } | 290 | } |
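
The relocation paths swap the open-coded masking for offset_in_page(), which is the complement of the PAGE_MASK operation and simply states the intent. A small worked split, mirroring the GTT relocation path that maps the page base and then indexes into it:

#include <linux/mm.h>   /* offset_in_page(), PAGE_MASK */

/* offset_in_page(p) == ((unsigned long)(p) & ~PAGE_MASK).  With 4K pages,
 * 0x12345678 splits into 0x12345000 (& PAGE_MASK, the page to map) and
 * 0x678 (offset_in_page, the byte offset inside that page).
 */
static void split_reloc_offset(unsigned long off,
                               unsigned long *page_base,
                               unsigned long *page_off)
{
    *page_base = off & PAGE_MASK;       /* unchanged in the diff */
    *page_off  = offset_in_page(off);   /* replaces "& ~PAGE_MASK" */
}
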
@@ -759,8 +759,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
759 | if (!access_ok(VERIFY_WRITE, ptr, length)) | 759 | if (!access_ok(VERIFY_WRITE, ptr, length)) |
760 | return -EFAULT; | 760 | return -EFAULT; |
761 | 761 | ||
762 | if (fault_in_multipages_readable(ptr, length)) | 762 | if (likely(!i915_prefault_disable)) { |
763 | return -EFAULT; | 763 | if (fault_in_multipages_readable(ptr, length)) |
764 | return -EFAULT; | ||
765 | } | ||
764 | } | 766 | } |
765 | 767 | ||
766 | return 0; | 768 | return 0; |
@@ -873,7 +875,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
873 | break; | 875 | break; |
874 | case I915_EXEC_BSD: | 876 | case I915_EXEC_BSD: |
875 | ring = &dev_priv->ring[VCS]; | 877 | ring = &dev_priv->ring[VCS]; |
876 | if (ctx_id != 0) { | 878 | if (ctx_id != DEFAULT_CONTEXT_ID) { |
877 | DRM_DEBUG("Ring %s doesn't support contexts\n", | 879 | DRM_DEBUG("Ring %s doesn't support contexts\n", |
878 | ring->name); | 880 | ring->name); |
879 | return -EPERM; | 881 | return -EPERM; |
@@ -881,7 +883,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
881 | break; | 883 | break; |
882 | case I915_EXEC_BLT: | 884 | case I915_EXEC_BLT: |
883 | ring = &dev_priv->ring[BCS]; | 885 | ring = &dev_priv->ring[BCS]; |
884 | if (ctx_id != 0) { | 886 | if (ctx_id != DEFAULT_CONTEXT_ID) { |
885 | DRM_DEBUG("Ring %s doesn't support contexts\n", | 887 | DRM_DEBUG("Ring %s doesn't support contexts\n", |
886 | ring->name); | 888 | ring->name); |
887 | return -EPERM; | 889 | return -EPERM; |
@@ -889,7 +891,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
889 | break; | 891 | break; |
890 | case I915_EXEC_VEBOX: | 892 | case I915_EXEC_VEBOX: |
891 | ring = &dev_priv->ring[VECS]; | 893 | ring = &dev_priv->ring[VECS]; |
892 | if (ctx_id != 0) { | 894 | if (ctx_id != DEFAULT_CONTEXT_ID) { |
893 | DRM_DEBUG("Ring %s doesn't support contexts\n", | 895 | DRM_DEBUG("Ring %s doesn't support contexts\n", |
894 | ring->name); | 896 | ring->name); |
895 | return -EPERM; | 897 | return -EPERM; |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 242d0f9bb9e4..3b639a94dddf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -33,6 +33,7 @@ | |||
33 | 33 | ||
34 | /* PPGTT stuff */ | 34 | /* PPGTT stuff */ |
35 | #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) | 35 | #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) |
36 | #define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) | ||
36 | 37 | ||
37 | #define GEN6_PDE_VALID (1 << 0) | 38 | #define GEN6_PDE_VALID (1 << 0) |
38 | /* gen6+ has bit 11-4 for physical addr bit 39-32 */ | 39 | /* gen6+ has bit 11-4 for physical addr bit 39-32 */ |
@@ -44,6 +45,15 @@ | |||
44 | #define GEN6_PTE_CACHE_LLC (2 << 1) | 45 | #define GEN6_PTE_CACHE_LLC (2 << 1) |
45 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) | 46 | #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) |
46 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) | 47 | #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
48 | #define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) | ||
49 | |||
50 | /* Cacheability Control is a 4-bit value. The low three bits are stored in * | ||
51 | * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. | ||
52 | */ | ||
53 | #define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ | ||
54 | (((bits) & 0x8) << (11 - 3))) | ||
55 | #define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) | ||
56 | #define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) | ||
47 | 57 | ||
48 | static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr, | 58 | static gen6_gtt_pte_t gen6_pte_encode(dma_addr_t addr, |
49 | enum i915_cache_level level) | 59 | enum i915_cache_level level) |
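
Haswell PTEs split the 4-bit cacheability-control field across PTE bits 3:1 and bit 11, which is why HSW_CACHEABILITY_CONTROL() shifts its argument in two pieces. Working the macro through for the two values defined above:

/* HSW_CACHEABILITY_CONTROL(bits) = ((bits & 0x7) << 1) | ((bits & 0x8) << (11 - 3))
 *
 *   HSW_WB_LLC_AGE0      = CONTROL(0x3) = (0x3 << 1)              = 0x006
 *   HSW_WB_ELLC_LLC_AGE0 = CONTROL(0xb) = (0x3 << 1) | (0x8 << 8) = 0x806
 *
 * i.e. the low three bits of the field land in PTE bits 3:1 and the top bit
 * in PTE bit 11, alongside GEN6_PTE_VALID in bit 0 and the address bits.
 */
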
@@ -92,17 +102,29 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, | |||
92 | enum i915_cache_level level) | 102 | enum i915_cache_level level) |
93 | { | 103 | { |
94 | gen6_gtt_pte_t pte = GEN6_PTE_VALID; | 104 | gen6_gtt_pte_t pte = GEN6_PTE_VALID; |
95 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 105 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
96 | 106 | ||
97 | if (level != I915_CACHE_NONE) | 107 | if (level != I915_CACHE_NONE) |
98 | pte |= GEN6_PTE_CACHE_LLC; | 108 | pte |= HSW_WB_LLC_AGE0; |
109 | |||
110 | return pte; | ||
111 | } | ||
112 | |||
113 | static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, | ||
114 | enum i915_cache_level level) | ||
115 | { | ||
116 | gen6_gtt_pte_t pte = GEN6_PTE_VALID; | ||
117 | pte |= HSW_PTE_ADDR_ENCODE(addr); | ||
118 | |||
119 | if (level != I915_CACHE_NONE) | ||
120 | pte |= HSW_WB_ELLC_LLC_AGE0; | ||
99 | 121 | ||
100 | return pte; | 122 | return pte; |
101 | } | 123 | } |
102 | 124 | ||
103 | static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) | 125 | static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) |
104 | { | 126 | { |
105 | struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; | 127 | struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; |
106 | gen6_gtt_pte_t __iomem *pd_addr; | 128 | gen6_gtt_pte_t __iomem *pd_addr; |
107 | uint32_t pd_entry; | 129 | uint32_t pd_entry; |
108 | int i; | 130 | int i; |
@@ -181,18 +203,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev) | |||
181 | } | 203 | } |
182 | 204 | ||
183 | /* PPGTT support for Sandybridge/Gen6 and later */ | 205 | /* PPGTT support for Sandybridge/Gen6 and later */ |
184 | static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | 206 | static void gen6_ppgtt_clear_range(struct i915_address_space *vm, |
185 | unsigned first_entry, | 207 | unsigned first_entry, |
186 | unsigned num_entries) | 208 | unsigned num_entries) |
187 | { | 209 | { |
188 | struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; | 210 | struct i915_hw_ppgtt *ppgtt = |
211 | container_of(vm, struct i915_hw_ppgtt, base); | ||
189 | gen6_gtt_pte_t *pt_vaddr, scratch_pte; | 212 | gen6_gtt_pte_t *pt_vaddr, scratch_pte; |
190 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; | 213 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
191 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 214 | unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
192 | unsigned last_pte, i; | 215 | unsigned last_pte, i; |
193 | 216 | ||
194 | scratch_pte = ppgtt->pte_encode(dev_priv->gtt.scratch.addr, | 217 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); |
195 | I915_CACHE_LLC); | ||
196 | 218 | ||
197 | while (num_entries) { | 219 | while (num_entries) { |
198 | last_pte = first_pte + num_entries; | 220 | last_pte = first_pte + num_entries; |
@@ -212,11 +234,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, | |||
212 | } | 234 | } |
213 | } | 235 | } |
214 | 236 | ||
215 | static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, | 237 | static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, |
216 | struct sg_table *pages, | 238 | struct sg_table *pages, |
217 | unsigned first_entry, | 239 | unsigned first_entry, |
218 | enum i915_cache_level cache_level) | 240 | enum i915_cache_level cache_level) |
219 | { | 241 | { |
242 | struct i915_hw_ppgtt *ppgtt = | ||
243 | container_of(vm, struct i915_hw_ppgtt, base); | ||
220 | gen6_gtt_pte_t *pt_vaddr; | 244 | gen6_gtt_pte_t *pt_vaddr; |
221 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; | 245 | unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
222 | unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; | 246 | unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
@@ -227,7 +251,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, | |||
227 | dma_addr_t page_addr; | 251 | dma_addr_t page_addr; |
228 | 252 | ||
229 | page_addr = sg_page_iter_dma_address(&sg_iter); | 253 | page_addr = sg_page_iter_dma_address(&sg_iter); |
230 | pt_vaddr[act_pte] = ppgtt->pte_encode(page_addr, cache_level); | 254 | pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); |
231 | if (++act_pte == I915_PPGTT_PT_ENTRIES) { | 255 | if (++act_pte == I915_PPGTT_PT_ENTRIES) { |
232 | kunmap_atomic(pt_vaddr); | 256 | kunmap_atomic(pt_vaddr); |
233 | act_pt++; | 257 | act_pt++; |
@@ -239,13 +263,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, | |||
239 | kunmap_atomic(pt_vaddr); | 263 | kunmap_atomic(pt_vaddr); |
240 | } | 264 | } |
241 | 265 | ||
242 | static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) | 266 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
243 | { | 267 | { |
268 | struct i915_hw_ppgtt *ppgtt = | ||
269 | container_of(vm, struct i915_hw_ppgtt, base); | ||
244 | int i; | 270 | int i; |
245 | 271 | ||
272 | drm_mm_takedown(&ppgtt->base.mm); | ||
273 | |||
246 | if (ppgtt->pt_dma_addr) { | 274 | if (ppgtt->pt_dma_addr) { |
247 | for (i = 0; i < ppgtt->num_pd_entries; i++) | 275 | for (i = 0; i < ppgtt->num_pd_entries; i++) |
248 | pci_unmap_page(ppgtt->dev->pdev, | 276 | pci_unmap_page(ppgtt->base.dev->pdev, |
249 | ppgtt->pt_dma_addr[i], | 277 | ppgtt->pt_dma_addr[i], |
250 | 4096, PCI_DMA_BIDIRECTIONAL); | 278 | 4096, PCI_DMA_BIDIRECTIONAL); |
251 | } | 279 | } |
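
All of the PPGTT callbacks are being retyped to take the generic struct i915_address_space and then recover their PPGTT with container_of(), the standard kernel idiom for a derived type that embeds its base. A condensed illustration of the pattern with simplified names:

#include <linux/kernel.h>   /* container_of() */

struct example_address_space {
    void (*cleanup)(struct example_address_space *vm);
    /* ...fields shared by GGTT and PPGTT: mm, start, total, pte_encode... */
};

struct example_ppgtt {
    struct example_address_space base;  /* embedded, not a pointer */
    unsigned int num_pd_entries;        /* PPGTT-only state */
};

static void example_ppgtt_cleanup(struct example_address_space *vm)
{
    /* Recover the containing PPGTT from the embedded base member. */
    struct example_ppgtt *ppgtt =
        container_of(vm, struct example_ppgtt, base);

    (void)ppgtt->num_pd_entries;    /* PPGTT-specific teardown goes here */
}
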
@@ -259,7 +287,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) | |||
259 | 287 | ||
260 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | 288 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
261 | { | 289 | { |
262 | struct drm_device *dev = ppgtt->dev; | 290 | struct drm_device *dev = ppgtt->base.dev; |
263 | struct drm_i915_private *dev_priv = dev->dev_private; | 291 | struct drm_i915_private *dev_priv = dev->dev_private; |
264 | unsigned first_pd_entry_in_global_pt; | 292 | unsigned first_pd_entry_in_global_pt; |
265 | int i; | 293 | int i; |
@@ -271,17 +299,18 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
271 | first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); | 299 | first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); |
272 | 300 | ||
273 | if (IS_HASWELL(dev)) { | 301 | if (IS_HASWELL(dev)) { |
274 | ppgtt->pte_encode = hsw_pte_encode; | 302 | ppgtt->base.pte_encode = hsw_pte_encode; |
275 | } else if (IS_VALLEYVIEW(dev)) { | 303 | } else if (IS_VALLEYVIEW(dev)) { |
276 | ppgtt->pte_encode = byt_pte_encode; | 304 | ppgtt->base.pte_encode = byt_pte_encode; |
277 | } else { | 305 | } else { |
278 | ppgtt->pte_encode = gen6_pte_encode; | 306 | ppgtt->base.pte_encode = gen6_pte_encode; |
279 | } | 307 | } |
280 | ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; | 308 | ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; |
281 | ppgtt->enable = gen6_ppgtt_enable; | 309 | ppgtt->enable = gen6_ppgtt_enable; |
282 | ppgtt->clear_range = gen6_ppgtt_clear_range; | 310 | ppgtt->base.clear_range = gen6_ppgtt_clear_range; |
283 | ppgtt->insert_entries = gen6_ppgtt_insert_entries; | 311 | ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; |
284 | ppgtt->cleanup = gen6_ppgtt_cleanup; | 312 | ppgtt->base.cleanup = gen6_ppgtt_cleanup; |
313 | ppgtt->base.scratch = dev_priv->gtt.base.scratch; | ||
285 | ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, | 314 | ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, |
286 | GFP_KERNEL); | 315 | GFP_KERNEL); |
287 | if (!ppgtt->pt_pages) | 316 | if (!ppgtt->pt_pages) |
@@ -312,8 +341,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) | |||
312 | ppgtt->pt_dma_addr[i] = pt_addr; | 341 | ppgtt->pt_dma_addr[i] = pt_addr; |
313 | } | 342 | } |
314 | 343 | ||
315 | ppgtt->clear_range(ppgtt, 0, | 344 | ppgtt->base.clear_range(&ppgtt->base, 0, |
316 | ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); | 345 | ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); |
317 | 346 | ||
318 | ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); | 347 | ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); |
319 | 348 | ||
@@ -346,7 +375,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
346 | if (!ppgtt) | 375 | if (!ppgtt) |
347 | return -ENOMEM; | 376 | return -ENOMEM; |
348 | 377 | ||
349 | ppgtt->dev = dev; | 378 | ppgtt->base.dev = dev; |
350 | 379 | ||
351 | if (INTEL_INFO(dev)->gen < 8) | 380 | if (INTEL_INFO(dev)->gen < 8) |
352 | ret = gen6_ppgtt_init(ppgtt); | 381 | ret = gen6_ppgtt_init(ppgtt); |
@@ -355,8 +384,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) | |||
355 | 384 | ||
356 | if (ret) | 385 | if (ret) |
357 | kfree(ppgtt); | 386 | kfree(ppgtt); |
358 | else | 387 | else { |
359 | dev_priv->mm.aliasing_ppgtt = ppgtt; | 388 | dev_priv->mm.aliasing_ppgtt = ppgtt; |
389 | drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, | ||
390 | ppgtt->base.total); | ||
391 | } | ||
360 | 392 | ||
361 | return ret; | 393 | return ret; |
362 | } | 394 | } |
@@ -369,7 +401,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) | |||
369 | if (!ppgtt) | 401 | if (!ppgtt) |
370 | return; | 402 | return; |
371 | 403 | ||
372 | ppgtt->cleanup(ppgtt); | 404 | ppgtt->base.cleanup(&ppgtt->base); |
373 | dev_priv->mm.aliasing_ppgtt = NULL; | 405 | dev_priv->mm.aliasing_ppgtt = NULL; |
374 | } | 406 | } |
375 | 407 | ||
@@ -377,17 +409,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, | |||
377 | struct drm_i915_gem_object *obj, | 409 | struct drm_i915_gem_object *obj, |
378 | enum i915_cache_level cache_level) | 410 | enum i915_cache_level cache_level) |
379 | { | 411 | { |
380 | ppgtt->insert_entries(ppgtt, obj->pages, | 412 | ppgtt->base.insert_entries(&ppgtt->base, obj->pages, |
381 | i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, | 413 | i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
382 | cache_level); | 414 | cache_level); |
383 | } | 415 | } |
384 | 416 | ||
385 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, | 417 | void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
386 | struct drm_i915_gem_object *obj) | 418 | struct drm_i915_gem_object *obj) |
387 | { | 419 | { |
388 | ppgtt->clear_range(ppgtt, | 420 | ppgtt->base.clear_range(&ppgtt->base, |
389 | i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, | 421 | i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
390 | obj->base.size >> PAGE_SHIFT); | 422 | obj->base.size >> PAGE_SHIFT); |
391 | } | 423 | } |
392 | 424 | ||
393 | extern int intel_iommu_gfx_mapped; | 425 | extern int intel_iommu_gfx_mapped; |
@@ -434,8 +466,9 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
434 | struct drm_i915_gem_object *obj; | 466 | struct drm_i915_gem_object *obj; |
435 | 467 | ||
436 | /* First fill our portion of the GTT with scratch pages */ | 468 | /* First fill our portion of the GTT with scratch pages */ |
437 | dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, | 469 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
438 | dev_priv->gtt.total / PAGE_SIZE); | 470 | dev_priv->gtt.base.start / PAGE_SIZE, |
471 | dev_priv->gtt.base.total / PAGE_SIZE); | ||
439 | 472 | ||
440 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { | 473 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
441 | i915_gem_clflush_object(obj); | 474 | i915_gem_clflush_object(obj); |
@@ -464,12 +497,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) | |||
464 | * within the global GTT as well as accessible by the GPU through the GMADR | 497 | * within the global GTT as well as accessible by the GPU through the GMADR |
465 | * mapped BAR (dev_priv->mm.gtt->gtt). | 498 | * mapped BAR (dev_priv->mm.gtt->gtt). |
466 | */ | 499 | */ |
467 | static void gen6_ggtt_insert_entries(struct drm_device *dev, | 500 | static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
468 | struct sg_table *st, | 501 | struct sg_table *st, |
469 | unsigned int first_entry, | 502 | unsigned int first_entry, |
470 | enum i915_cache_level level) | 503 | enum i915_cache_level level) |
471 | { | 504 | { |
472 | struct drm_i915_private *dev_priv = dev->dev_private; | 505 | struct drm_i915_private *dev_priv = vm->dev->dev_private; |
473 | gen6_gtt_pte_t __iomem *gtt_entries = | 506 | gen6_gtt_pte_t __iomem *gtt_entries = |
474 | (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; | 507 | (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; |
475 | int i = 0; | 508 | int i = 0; |
@@ -478,8 +511,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, | |||
478 | 511 | ||
479 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { | 512 | for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { |
480 | addr = sg_page_iter_dma_address(&sg_iter); | 513 | addr = sg_page_iter_dma_address(&sg_iter); |
481 | iowrite32(dev_priv->gtt.pte_encode(addr, level), | 514 | iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); |
482 | &gtt_entries[i]); | ||
483 | i++; | 515 | i++; |
484 | } | 516 | } |
485 | 517 | ||
@@ -490,8 +522,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, | |||
490 | * hardware should work, we must keep this posting read for paranoia. | 522 | * hardware should work, we must keep this posting read for paranoia. |
491 | */ | 523 | */ |
492 | if (i != 0) | 524 | if (i != 0) |
493 | WARN_ON(readl(&gtt_entries[i-1]) | 525 | WARN_ON(readl(&gtt_entries[i-1]) != |
494 | != dev_priv->gtt.pte_encode(addr, level)); | 526 | vm->pte_encode(addr, level)); |
495 | 527 | ||
496 | /* This next bit makes the above posting read even more important. We | 528 | /* This next bit makes the above posting read even more important. We |
497 | * want to flush the TLBs only after we're certain all the PTE updates | 529 | * want to flush the TLBs only after we're certain all the PTE updates |
@@ -501,11 +533,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, | |||
501 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | 533 | POSTING_READ(GFX_FLSH_CNTL_GEN6); |
502 | } | 534 | } |
503 | 535 | ||
504 | static void gen6_ggtt_clear_range(struct drm_device *dev, | 536 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
505 | unsigned int first_entry, | 537 | unsigned int first_entry, |
506 | unsigned int num_entries) | 538 | unsigned int num_entries) |
507 | { | 539 | { |
508 | struct drm_i915_private *dev_priv = dev->dev_private; | 540 | struct drm_i915_private *dev_priv = vm->dev->dev_private; |
509 | gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = | 541 | gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = |
510 | (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; | 542 | (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
511 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; | 543 | const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
@@ -516,15 +548,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev, | |||
516 | first_entry, num_entries, max_entries)) | 548 | first_entry, num_entries, max_entries)) |
517 | num_entries = max_entries; | 549 | num_entries = max_entries; |
518 | 550 | ||
519 | scratch_pte = dev_priv->gtt.pte_encode(dev_priv->gtt.scratch.addr, | 551 | scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); |
520 | I915_CACHE_LLC); | ||
521 | for (i = 0; i < num_entries; i++) | 552 | for (i = 0; i < num_entries; i++) |
522 | iowrite32(scratch_pte, &gtt_base[i]); | 553 | iowrite32(scratch_pte, &gtt_base[i]); |
523 | readl(gtt_base); | 554 | readl(gtt_base); |
524 | } | 555 | } |
525 | 556 | ||
526 | 557 | ||
527 | static void i915_ggtt_insert_entries(struct drm_device *dev, | 558 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
528 | struct sg_table *st, | 559 | struct sg_table *st, |
529 | unsigned int pg_start, | 560 | unsigned int pg_start, |
530 | enum i915_cache_level cache_level) | 561 | enum i915_cache_level cache_level) |
@@ -536,7 +567,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev, | |||
536 | 567 | ||
537 | } | 568 | } |
538 | 569 | ||
539 | static void i915_ggtt_clear_range(struct drm_device *dev, | 570 | static void i915_ggtt_clear_range(struct i915_address_space *vm, |
540 | unsigned int first_entry, | 571 | unsigned int first_entry, |
541 | unsigned int num_entries) | 572 | unsigned int num_entries) |
542 | { | 573 | { |
@@ -549,10 +580,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, | |||
549 | { | 580 | { |
550 | struct drm_device *dev = obj->base.dev; | 581 | struct drm_device *dev = obj->base.dev; |
551 | struct drm_i915_private *dev_priv = dev->dev_private; | 582 | struct drm_i915_private *dev_priv = dev->dev_private; |
583 | const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; | ||
552 | 584 | ||
553 | dev_priv->gtt.gtt_insert_entries(dev, obj->pages, | 585 | dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, |
554 | i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, | 586 | entry, |
555 | cache_level); | 587 | cache_level); |
556 | 588 | ||
557 | obj->has_global_gtt_mapping = 1; | 589 | obj->has_global_gtt_mapping = 1; |
558 | } | 590 | } |
@@ -561,10 +593,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | |||
561 | { | 593 | { |
562 | struct drm_device *dev = obj->base.dev; | 594 | struct drm_device *dev = obj->base.dev; |
563 | struct drm_i915_private *dev_priv = dev->dev_private; | 595 | struct drm_i915_private *dev_priv = dev->dev_private; |
596 | const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; | ||
564 | 597 | ||
565 | dev_priv->gtt.gtt_clear_range(obj->base.dev, | 598 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
566 | i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, | 599 | entry, |
567 | obj->base.size >> PAGE_SHIFT); | 600 | obj->base.size >> PAGE_SHIFT); |
568 | 601 | ||
569 | obj->has_global_gtt_mapping = 0; | 602 | obj->has_global_gtt_mapping = 0; |
570 | } | 603 | } |
@@ -623,38 +656,42 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, | |||
623 | BUG_ON(mappable_end > end); | 656 | BUG_ON(mappable_end > end); |
624 | 657 | ||
625 | /* Subtract the guard page ... */ | 658 | /* Subtract the guard page ... */ |
626 | drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); | 659 | drm_mm_init(&dev_priv->gtt.base.mm, start, end - start - PAGE_SIZE); |
627 | if (!HAS_LLC(dev)) | 660 | if (!HAS_LLC(dev)) |
628 | dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; | 661 | dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; |
629 | 662 | ||
630 | /* Mark any preallocated objects as occupied */ | 663 | /* Mark any preallocated objects as occupied */ |
631 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { | 664 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
665 | struct i915_vma *vma = __i915_gem_obj_to_vma(obj); | ||
632 | int ret; | 666 | int ret; |
633 | DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", | 667 | DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", |
634 | i915_gem_obj_ggtt_offset(obj), obj->base.size); | 668 | i915_gem_obj_ggtt_offset(obj), obj->base.size); |
635 | 669 | ||
636 | WARN_ON(i915_gem_obj_ggtt_bound(obj)); | 670 | WARN_ON(i915_gem_obj_ggtt_bound(obj)); |
637 | ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, | 671 | ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node); |
638 | &obj->gtt_space); | ||
639 | if (ret) | 672 | if (ret) |
640 | DRM_DEBUG_KMS("Reservation failed\n"); | 673 | DRM_DEBUG_KMS("Reservation failed\n"); |
641 | obj->has_global_gtt_mapping = 1; | 674 | obj->has_global_gtt_mapping = 1; |
675 | list_add(&vma->vma_link, &obj->vma_list); | ||
642 | } | 676 | } |
643 | 677 | ||
644 | dev_priv->gtt.start = start; | 678 | dev_priv->gtt.base.start = start; |
645 | dev_priv->gtt.total = end - start; | 679 | dev_priv->gtt.base.total = end - start; |
646 | 680 | ||
647 | /* Clear any non-preallocated blocks */ | 681 | /* Clear any non-preallocated blocks */ |
648 | drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, | 682 | drm_mm_for_each_hole(entry, &dev_priv->gtt.base.mm, |
649 | hole_start, hole_end) { | 683 | hole_start, hole_end) { |
684 | const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; | ||
650 | DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", | 685 | DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
651 | hole_start, hole_end); | 686 | hole_start, hole_end); |
652 | dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, | 687 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
653 | (hole_end-hole_start) / PAGE_SIZE); | 688 | hole_start / PAGE_SIZE, |
689 | count); | ||
654 | } | 690 | } |
655 | 691 | ||
656 | /* And finally clear the reserved guard page */ | 692 | /* And finally clear the reserved guard page */ |
657 | dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); | 693 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
694 | end / PAGE_SIZE - 1, 1); | ||
658 | } | 695 | } |
659 | 696 | ||
660 | static bool | 697 | static bool |
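The setup_global_gtt hunk above keeps a guard page out of the allocator by sizing the drm_mm range as end - start - PAGE_SIZE, and still scrubs that final entry explicitly. A small sketch of just that arithmetic, with placeholder values:

	#include <stdio.h>

	#define PAGE_SIZE 4096ul

	int main(void)
	{
		unsigned long start = 0, end = 256 * PAGE_SIZE;
		unsigned long managed = end - start - PAGE_SIZE;   /* excludes the guard page */
		unsigned long guard_entry = end / PAGE_SIZE - 1;   /* last PTE index, cleared separately */

		printf("managed bytes = %lu, guard PTE index = %lu\n",
		       managed, guard_entry);
		return 0;
	}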
@@ -677,7 +714,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) | |||
677 | struct drm_i915_private *dev_priv = dev->dev_private; | 714 | struct drm_i915_private *dev_priv = dev->dev_private; |
678 | unsigned long gtt_size, mappable_size; | 715 | unsigned long gtt_size, mappable_size; |
679 | 716 | ||
680 | gtt_size = dev_priv->gtt.total; | 717 | gtt_size = dev_priv->gtt.base.total; |
681 | mappable_size = dev_priv->gtt.mappable_end; | 718 | mappable_size = dev_priv->gtt.mappable_end; |
682 | 719 | ||
683 | if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { | 720 | if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { |
@@ -696,7 +733,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) | |||
696 | return; | 733 | return; |
697 | 734 | ||
698 | DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); | 735 | DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); |
699 | drm_mm_takedown(&dev_priv->mm.gtt_space); | 736 | drm_mm_takedown(&dev_priv->gtt.base.mm); |
700 | gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; | 737 | gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; |
701 | } | 738 | } |
702 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); | 739 | i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); |
@@ -722,8 +759,8 @@ static int setup_scratch_page(struct drm_device *dev) | |||
722 | #else | 759 | #else |
723 | dma_addr = page_to_phys(page); | 760 | dma_addr = page_to_phys(page); |
724 | #endif | 761 | #endif |
725 | dev_priv->gtt.scratch.page = page; | 762 | dev_priv->gtt.base.scratch.page = page; |
726 | dev_priv->gtt.scratch.addr = dma_addr; | 763 | dev_priv->gtt.base.scratch.addr = dma_addr; |
727 | 764 | ||
728 | return 0; | 765 | return 0; |
729 | } | 766 | } |
@@ -731,11 +768,13 @@ static int setup_scratch_page(struct drm_device *dev) | |||
731 | static void teardown_scratch_page(struct drm_device *dev) | 768 | static void teardown_scratch_page(struct drm_device *dev) |
732 | { | 769 | { |
733 | struct drm_i915_private *dev_priv = dev->dev_private; | 770 | struct drm_i915_private *dev_priv = dev->dev_private; |
734 | set_pages_wb(dev_priv->gtt.scratch.page, 1); | 771 | struct page *page = dev_priv->gtt.base.scratch.page; |
735 | pci_unmap_page(dev->pdev, dev_priv->gtt.scratch.addr, | 772 | |
773 | set_pages_wb(page, 1); | ||
774 | pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, | ||
736 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 775 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
737 | put_page(dev_priv->gtt.scratch.page); | 776 | put_page(page); |
738 | __free_page(dev_priv->gtt.scratch.page); | 777 | __free_page(page); |
739 | } | 778 | } |
740 | 779 | ||
741 | static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) | 780 | static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
@@ -798,17 +837,18 @@ static int gen6_gmch_probe(struct drm_device *dev, | |||
798 | if (ret) | 837 | if (ret) |
799 | DRM_ERROR("Scratch setup failed\n"); | 838 | DRM_ERROR("Scratch setup failed\n"); |
800 | 839 | ||
801 | dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; | 840 | dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; |
802 | dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; | 841 | dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; |
803 | 842 | ||
804 | return ret; | 843 | return ret; |
805 | } | 844 | } |
806 | 845 | ||
807 | static void gen6_gmch_remove(struct drm_device *dev) | 846 | static void gen6_gmch_remove(struct i915_address_space *vm) |
808 | { | 847 | { |
809 | struct drm_i915_private *dev_priv = dev->dev_private; | 848 | |
810 | iounmap(dev_priv->gtt.gsm); | 849 | struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); |
811 | teardown_scratch_page(dev_priv->dev); | 850 | iounmap(gtt->gsm); |
851 | teardown_scratch_page(vm->dev); | ||
812 | } | 852 | } |
813 | 853 | ||
814 | static int i915_gmch_probe(struct drm_device *dev, | 854 | static int i915_gmch_probe(struct drm_device *dev, |
@@ -829,13 +869,13 @@ static int i915_gmch_probe(struct drm_device *dev, | |||
829 | intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); | 869 | intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); |
830 | 870 | ||
831 | dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); | 871 | dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); |
832 | dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; | 872 | dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; |
833 | dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; | 873 | dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; |
834 | 874 | ||
835 | return 0; | 875 | return 0; |
836 | } | 876 | } |
837 | 877 | ||
838 | static void i915_gmch_remove(struct drm_device *dev) | 878 | static void i915_gmch_remove(struct i915_address_space *vm) |
839 | { | 879 | { |
840 | intel_gmch_remove(); | 880 | intel_gmch_remove(); |
841 | } | 881 | } |
@@ -848,25 +888,30 @@ int i915_gem_gtt_init(struct drm_device *dev) | |||
848 | 888 | ||
849 | if (INTEL_INFO(dev)->gen <= 5) { | 889 | if (INTEL_INFO(dev)->gen <= 5) { |
850 | gtt->gtt_probe = i915_gmch_probe; | 890 | gtt->gtt_probe = i915_gmch_probe; |
851 | gtt->gtt_remove = i915_gmch_remove; | 891 | gtt->base.cleanup = i915_gmch_remove; |
852 | } else { | 892 | } else { |
853 | gtt->gtt_probe = gen6_gmch_probe; | 893 | gtt->gtt_probe = gen6_gmch_probe; |
854 | gtt->gtt_remove = gen6_gmch_remove; | 894 | gtt->base.cleanup = gen6_gmch_remove; |
855 | if (IS_HASWELL(dev)) | 895 | if (IS_HASWELL(dev) && dev_priv->ellc_size) |
856 | gtt->pte_encode = hsw_pte_encode; | 896 | gtt->base.pte_encode = iris_pte_encode; |
897 | else if (IS_HASWELL(dev)) | ||
898 | gtt->base.pte_encode = hsw_pte_encode; | ||
857 | else if (IS_VALLEYVIEW(dev)) | 899 | else if (IS_VALLEYVIEW(dev)) |
858 | gtt->pte_encode = byt_pte_encode; | 900 | gtt->base.pte_encode = byt_pte_encode; |
859 | else | 901 | else |
860 | gtt->pte_encode = gen6_pte_encode; | 902 | gtt->base.pte_encode = gen6_pte_encode; |
861 | } | 903 | } |
862 | 904 | ||
863 | ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size, | 905 | ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, |
864 | &gtt->mappable_base, &gtt->mappable_end); | 906 | &gtt->mappable_base, &gtt->mappable_end); |
865 | if (ret) | 907 | if (ret) |
866 | return ret; | 908 | return ret; |
867 | 909 | ||
910 | gtt->base.dev = dev; | ||
911 | |||
868 | /* GMADR is the PCI mmio aperture into the global GTT. */ | 912 | /* GMADR is the PCI mmio aperture into the global GTT. */ |
869 | DRM_INFO("Memory usable by graphics device = %zdM\n", gtt->total >> 20); | 913 | DRM_INFO("Memory usable by graphics device = %zdM\n", |
914 | gtt->base.total >> 20); | ||
870 | DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); | 915 | DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); |
871 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); | 916 | DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); |
872 | 917 | ||
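i915_gem_gtt_init() above selects the PTE encoder once at init time, now preferring the eLLC-aware variant on Haswell parts that report an eLLC size. A sketch of that one-time vfunc selection; the encoder bodies and parameter names are placeholders:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t (*pte_encode_fn)(uint64_t addr, int level);

	static uint32_t gen6_encode(uint64_t addr, int level) { (void)level; return (uint32_t)addr | 1; }
	static uint32_t hsw_encode(uint64_t addr, int level)  { (void)level; return (uint32_t)addr | 2; }
	static uint32_t iris_encode(uint64_t addr, int level) { (void)level; return (uint32_t)addr | 3; }

	static pte_encode_fn pick_pte_encode(int is_haswell, unsigned long ellc_size)
	{
		if (is_haswell && ellc_size)
			return iris_encode;       /* eLLC-aware encoding */
		if (is_haswell)
			return hsw_encode;
		return gen6_encode;
	}

	int main(void)
	{
		pte_encode_fn encode = pick_pte_encode(1, 32ul << 20);

		printf("pte = 0x%08x\n", encode(0x1000, 0));
		return 0;
	}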
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index a3d1a125b5e0..38afadf5eaf6 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -45,45 +45,27 @@ | |||
45 | static unsigned long i915_stolen_to_physical(struct drm_device *dev) | 45 | static unsigned long i915_stolen_to_physical(struct drm_device *dev) |
46 | { | 46 | { |
47 | struct drm_i915_private *dev_priv = dev->dev_private; | 47 | struct drm_i915_private *dev_priv = dev->dev_private; |
48 | struct pci_dev *pdev = dev_priv->bridge_dev; | ||
49 | struct resource *r; | 48 | struct resource *r; |
50 | u32 base; | 49 | u32 base; |
51 | 50 | ||
52 | /* On the machines I have tested the Graphics Base of Stolen Memory | 51 | /* Almost universally we can find the Graphics Base of Stolen Memory |
53 | * is unreliable, so on those compute the base by subtracting the | 52 | * at offset 0x5c in the igfx configuration space. On a few (desktop) |
54 | * stolen memory from the Top of Low Usable DRAM which is where the | 53 | * machines this is also mirrored in the bridge device at different |
55 | * BIOS places the graphics stolen memory. | 54 | * locations, or in the MCHBAR. On gen2, the layout is again slightly |
55 | * different with the Graphics Segment immediately following Top of | ||
56 | * Memory (or Top of Usable DRAM). Note it appears that TOUD is only | ||
57 | * reported by 865g, so we just use the top of memory as determined | ||
58 | * by the e820 probe. | ||
56 | * | 59 | * |
57 | * On gen2, the layout is slightly different with the Graphics Segment | 60 | * XXX However gen2 requires an unavailable symbol. |
58 | * immediately following Top of Memory (or Top of Usable DRAM). Note | ||
59 | * it appears that TOUD is only reported by 865g, so we just use the | ||
60 | * top of memory as determined by the e820 probe. | ||
61 | * | ||
62 | * XXX gen2 requires an unavailable symbol and 945gm fails with | ||
63 | * its value of TOLUD. | ||
64 | */ | 61 | */ |
65 | base = 0; | 62 | base = 0; |
66 | if (IS_VALLEYVIEW(dev)) { | 63 | if (INTEL_INFO(dev)->gen >= 3) { |
64 | /* Read Graphics Base of Stolen Memory directly */ | ||
67 | pci_read_config_dword(dev->pdev, 0x5c, &base); | 65 | pci_read_config_dword(dev->pdev, 0x5c, &base); |
68 | base &= ~((1<<20) - 1); | 66 | base &= ~((1<<20) - 1); |
69 | } else if (INTEL_INFO(dev)->gen >= 6) { | 67 | } else { /* GEN2 */ |
70 | /* Read Base Data of Stolen Memory Register (BDSM) directly. | ||
71 | * Note that there is also a MCHBAR miror at 0x1080c0 or | ||
72 | * we could use device 2:0x5c instead. | ||
73 | */ | ||
74 | pci_read_config_dword(pdev, 0xB0, &base); | ||
75 | base &= ~4095; /* lower bits used for locking register */ | ||
76 | } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { | ||
77 | /* Read Graphics Base of Stolen Memory directly */ | ||
78 | pci_read_config_dword(pdev, 0xA4, &base); | ||
79 | #if 0 | 68 | #if 0 |
80 | } else if (IS_GEN3(dev)) { | ||
81 | u8 val; | ||
82 | /* Stolen is immediately below Top of Low Usable DRAM */ | ||
83 | pci_read_config_byte(pdev, 0x9c, &val); | ||
84 | base = val >> 3 << 27; | ||
85 | base -= dev_priv->mm.gtt->stolen_size; | ||
86 | } else { | ||
87 | /* Stolen is immediately above Top of Memory */ | 69 | /* Stolen is immediately above Top of Memory */ |
88 | base = max_low_pfn_mapped << PAGE_SHIFT; | 70 | base = max_low_pfn_mapped << PAGE_SHIFT; |
89 | #endif | 71 | #endif |
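The stolen-memory hunk above reads the Graphics Base of Stolen Memory from config offset 0x5c and masks it down to 1 MiB alignment, discarding the low 20 bits. A tiny sketch of just the masking step, with an invented register value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t raw  = 0x7f300abc;               /* pretend config-space read */
		uint32_t base = raw & ~((1u << 20) - 1);  /* round down to 1 MiB */

		printf("stolen base = 0x%08x\n", base);   /* prints 0x7f300000 */
		return 0;
	}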
@@ -367,8 +349,10 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
367 | u32 size) | 349 | u32 size) |
368 | { | 350 | { |
369 | struct drm_i915_private *dev_priv = dev->dev_private; | 351 | struct drm_i915_private *dev_priv = dev->dev_private; |
352 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
370 | struct drm_i915_gem_object *obj; | 353 | struct drm_i915_gem_object *obj; |
371 | struct drm_mm_node *stolen; | 354 | struct drm_mm_node *stolen; |
355 | struct i915_vma *vma; | ||
372 | int ret; | 356 | int ret; |
373 | 357 | ||
374 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) | 358 | if (!drm_mm_initialized(&dev_priv->mm.stolen)) |
@@ -409,30 +393,38 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
409 | if (gtt_offset == I915_GTT_OFFSET_NONE) | 393 | if (gtt_offset == I915_GTT_OFFSET_NONE) |
410 | return obj; | 394 | return obj; |
411 | 395 | ||
396 | vma = i915_gem_vma_create(obj, &dev_priv->gtt.base); | ||
397 | if (IS_ERR(vma)) { | ||
398 | ret = PTR_ERR(vma); | ||
399 | goto err_out; | ||
400 | } | ||
401 | |||
412 | /* To simplify the initialisation sequence between KMS and GTT, | 402 | /* To simplify the initialisation sequence between KMS and GTT, |
413 | * we allow construction of the stolen object prior to | 403 | * we allow construction of the stolen object prior to |
414 | * setting up the GTT space. The actual reservation will occur | 404 | * setting up the GTT space. The actual reservation will occur |
415 | * later. | 405 | * later. |
416 | */ | 406 | */ |
417 | obj->gtt_space.start = gtt_offset; | 407 | vma->node.start = gtt_offset; |
418 | obj->gtt_space.size = size; | 408 | vma->node.size = size; |
419 | if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { | 409 | if (drm_mm_initialized(&dev_priv->gtt.base.mm)) { |
420 | ret = drm_mm_reserve_node(&dev_priv->mm.gtt_space, | 410 | ret = drm_mm_reserve_node(&dev_priv->gtt.base.mm, &vma->node); |
421 | &obj->gtt_space); | ||
422 | if (ret) { | 411 | if (ret) { |
423 | DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); | 412 | DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); |
424 | goto unref_out; | 413 | i915_gem_vma_destroy(vma); |
414 | goto err_out; | ||
425 | } | 415 | } |
426 | } | 416 | } |
427 | 417 | ||
428 | obj->has_global_gtt_mapping = 1; | 418 | obj->has_global_gtt_mapping = 1; |
429 | 419 | ||
430 | list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); | 420 | list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); |
431 | list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 421 | list_add_tail(&obj->mm_list, &vm->inactive_list); |
432 | 422 | ||
433 | return obj; | 423 | return obj; |
434 | 424 | ||
435 | unref_out: | 425 | err_out: |
426 | drm_mm_remove_node(stolen); | ||
427 | kfree(stolen); | ||
436 | drm_gem_object_unreference(&obj->base); | 428 | drm_gem_object_unreference(&obj->base); |
437 | return NULL; | 429 | return NULL; |
438 | } | 430 | } |
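The error path above is restructured so a failure after the stolen node has been reserved unwinds everything in reverse order through labels. A generic sketch of that goto-based unwinding style, with stand-in resources rather than drm_mm nodes and GEM objects:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int reserved; };

	static int reserve_node(struct node *n) { n->reserved = 1; return 0; }
	static void remove_node(struct node *n) { n->reserved = 0; }

	static int create_object(void)
	{
		struct node *stolen = malloc(sizeof(*stolen));
		int ret;

		if (!stolen)
			return -1;

		ret = reserve_node(stolen);
		if (ret)
			goto err_free;

		ret = -1;                 /* pretend a later step fails */
		if (ret)
			goto err_remove;

		return 0;

	err_remove:
		remove_node(stolen);      /* undo in reverse order of acquisition */
	err_free:
		free(stolen);
		return ret;
	}

	int main(void)
	{
		printf("create_object() = %d\n", create_object());
		return 0;
	}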
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 58386cebb865..d970d84da65f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -622,6 +622,7 @@ static struct drm_i915_error_object * | |||
622 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | 622 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
623 | struct intel_ring_buffer *ring) | 623 | struct intel_ring_buffer *ring) |
624 | { | 624 | { |
625 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
625 | struct drm_i915_gem_object *obj; | 626 | struct drm_i915_gem_object *obj; |
626 | u32 seqno; | 627 | u32 seqno; |
627 | 628 | ||
@@ -641,7 +642,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |||
641 | } | 642 | } |
642 | 643 | ||
643 | seqno = ring->get_seqno(ring, false); | 644 | seqno = ring->get_seqno(ring, false); |
644 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { | 645 | list_for_each_entry(obj, &vm->active_list, mm_list) { |
645 | if (obj->ring != ring) | 646 | if (obj->ring != ring) |
646 | continue; | 647 | continue; |
647 | 648 | ||
@@ -773,11 +774,12 @@ static void i915_gem_record_rings(struct drm_device *dev, | |||
773 | static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, | 774 | static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, |
774 | struct drm_i915_error_state *error) | 775 | struct drm_i915_error_state *error) |
775 | { | 776 | { |
777 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
776 | struct drm_i915_gem_object *obj; | 778 | struct drm_i915_gem_object *obj; |
777 | int i; | 779 | int i; |
778 | 780 | ||
779 | i = 0; | 781 | i = 0; |
780 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) | 782 | list_for_each_entry(obj, &vm->active_list, mm_list) |
781 | i++; | 783 | i++; |
782 | error->active_bo_count = i; | 784 | error->active_bo_count = i; |
783 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) | 785 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) |
@@ -797,7 +799,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, | |||
797 | error->active_bo_count = | 799 | error->active_bo_count = |
798 | capture_active_bo(error->active_bo, | 800 | capture_active_bo(error->active_bo, |
799 | error->active_bo_count, | 801 | error->active_bo_count, |
800 | &dev_priv->mm.active_list); | 802 | &vm->active_list); |
801 | 803 | ||
802 | if (error->pinned_bo) | 804 | if (error->pinned_bo) |
803 | error->pinned_bo_count = | 805 | error->pinned_bo_count = |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 64db680fb7a4..6a1c207a296b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -698,18 +698,13 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) | |||
698 | static void notify_ring(struct drm_device *dev, | 698 | static void notify_ring(struct drm_device *dev, |
699 | struct intel_ring_buffer *ring) | 699 | struct intel_ring_buffer *ring) |
700 | { | 700 | { |
701 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
702 | |||
703 | if (ring->obj == NULL) | 701 | if (ring->obj == NULL) |
704 | return; | 702 | return; |
705 | 703 | ||
706 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); | 704 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); |
707 | 705 | ||
708 | wake_up_all(&ring->irq_queue); | 706 | wake_up_all(&ring->irq_queue); |
709 | if (i915_enable_hangcheck) { | 707 | i915_queue_hangcheck(dev); |
710 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | ||
711 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | ||
712 | } | ||
713 | } | 708 | } |
714 | 709 | ||
715 | static void gen6_pm_rps_work(struct work_struct *work) | 710 | static void gen6_pm_rps_work(struct work_struct *work) |
@@ -817,7 +812,7 @@ static void ivybridge_parity_work(struct work_struct *work) | |||
817 | 812 | ||
818 | mutex_unlock(&dev_priv->dev->struct_mutex); | 813 | mutex_unlock(&dev_priv->dev->struct_mutex); |
819 | 814 | ||
820 | parity_event[0] = "L3_PARITY_ERROR=1"; | 815 | parity_event[0] = I915_L3_PARITY_UEVENT "=1"; |
821 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | 816 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); |
822 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | 817 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); |
823 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | 818 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); |
@@ -849,6 +844,17 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev) | |||
849 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); | 844 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
850 | } | 845 | } |
851 | 846 | ||
847 | static void ilk_gt_irq_handler(struct drm_device *dev, | ||
848 | struct drm_i915_private *dev_priv, | ||
849 | u32 gt_iir) | ||
850 | { | ||
851 | if (gt_iir & | ||
852 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | ||
853 | notify_ring(dev, &dev_priv->ring[RCS]); | ||
854 | if (gt_iir & ILK_BSD_USER_INTERRUPT) | ||
855 | notify_ring(dev, &dev_priv->ring[VCS]); | ||
856 | } | ||
857 | |||
852 | static void snb_gt_irq_handler(struct drm_device *dev, | 858 | static void snb_gt_irq_handler(struct drm_device *dev, |
853 | struct drm_i915_private *dev_priv, | 859 | struct drm_i915_private *dev_priv, |
854 | u32 gt_iir) | 860 | u32 gt_iir) |
@@ -913,6 +919,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, | |||
913 | spin_lock(&dev_priv->irq_lock); | 919 | spin_lock(&dev_priv->irq_lock); |
914 | for (i = 1; i < HPD_NUM_PINS; i++) { | 920 | for (i = 1; i < HPD_NUM_PINS; i++) { |
915 | 921 | ||
922 | WARN(((hpd[i] & hotplug_trigger) && | ||
923 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), | ||
924 | "Received HPD interrupt although disabled\n"); | ||
925 | |||
916 | if (!(hpd[i] & hotplug_trigger) || | 926 | if (!(hpd[i] & hotplug_trigger) || |
917 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | 927 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) |
918 | continue; | 928 | continue; |
@@ -923,6 +933,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, | |||
923 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | 933 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { |
924 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | 934 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; |
925 | dev_priv->hpd_stats[i].hpd_cnt = 0; | 935 | dev_priv->hpd_stats[i].hpd_cnt = 0; |
936 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); | ||
926 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { | 937 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { |
927 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | 938 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; |
928 | dev_priv->hpd_event_bits &= ~(1 << i); | 939 | dev_priv->hpd_event_bits &= ~(1 << i); |
@@ -930,6 +941,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, | |||
930 | storm_detected = true; | 941 | storm_detected = true; |
931 | } else { | 942 | } else { |
932 | dev_priv->hpd_stats[i].hpd_cnt++; | 943 | dev_priv->hpd_stats[i].hpd_cnt++; |
944 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, | ||
945 | dev_priv->hpd_stats[i].hpd_cnt); | ||
933 | } | 946 | } |
934 | } | 947 | } |
935 | 948 | ||
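The new debug messages above instrument the HPD storm detector: interrupts per pin are counted inside a time window, the counter restarts when the window lapses, and exceeding the threshold marks the pin as storming. A self-contained sketch of that logic; the constants and struct fields are stand-ins for the i915 ones:

	#include <stdbool.h>
	#include <stdio.h>

	#define STORM_PERIOD_MS  1000
	#define STORM_THRESHOLD  5

	struct hpd_pin {
		unsigned long last_ms;
		int cnt;
		bool disabled;
	};

	static bool hpd_irq(struct hpd_pin *pin, unsigned long now_ms)
	{
		if (now_ms > pin->last_ms + STORM_PERIOD_MS) {
			pin->last_ms = now_ms;    /* new window, restart the counter */
			pin->cnt = 0;
		} else if (pin->cnt > STORM_THRESHOLD) {
			pin->disabled = true;     /* too many events: treat as a storm */
		} else {
			pin->cnt++;
		}
		return pin->disabled;
	}

	int main(void)
	{
		struct hpd_pin pin = { 0 };
		unsigned long t;

		for (t = 0; t < 10; t++)          /* a burst of interrupts in one window */
			if (hpd_irq(&pin, t))
				printf("storm detected at t=%lu\n", t);
		return 0;
	}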
@@ -1202,27 +1215,111 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) | |||
1202 | cpt_serr_int_handler(dev); | 1215 | cpt_serr_int_handler(dev); |
1203 | } | 1216 | } |
1204 | 1217 | ||
1205 | static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | 1218 | static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) |
1219 | { | ||
1220 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1221 | |||
1222 | if (de_iir & DE_AUX_CHANNEL_A) | ||
1223 | dp_aux_irq_handler(dev); | ||
1224 | |||
1225 | if (de_iir & DE_GSE) | ||
1226 | intel_opregion_asle_intr(dev); | ||
1227 | |||
1228 | if (de_iir & DE_PIPEA_VBLANK) | ||
1229 | drm_handle_vblank(dev, 0); | ||
1230 | |||
1231 | if (de_iir & DE_PIPEB_VBLANK) | ||
1232 | drm_handle_vblank(dev, 1); | ||
1233 | |||
1234 | if (de_iir & DE_POISON) | ||
1235 | DRM_ERROR("Poison interrupt\n"); | ||
1236 | |||
1237 | if (de_iir & DE_PIPEA_FIFO_UNDERRUN) | ||
1238 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) | ||
1239 | DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); | ||
1240 | |||
1241 | if (de_iir & DE_PIPEB_FIFO_UNDERRUN) | ||
1242 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) | ||
1243 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); | ||
1244 | |||
1245 | if (de_iir & DE_PLANEA_FLIP_DONE) { | ||
1246 | intel_prepare_page_flip(dev, 0); | ||
1247 | intel_finish_page_flip_plane(dev, 0); | ||
1248 | } | ||
1249 | |||
1250 | if (de_iir & DE_PLANEB_FLIP_DONE) { | ||
1251 | intel_prepare_page_flip(dev, 1); | ||
1252 | intel_finish_page_flip_plane(dev, 1); | ||
1253 | } | ||
1254 | |||
1255 | /* check event from PCH */ | ||
1256 | if (de_iir & DE_PCH_EVENT) { | ||
1257 | u32 pch_iir = I915_READ(SDEIIR); | ||
1258 | |||
1259 | if (HAS_PCH_CPT(dev)) | ||
1260 | cpt_irq_handler(dev, pch_iir); | ||
1261 | else | ||
1262 | ibx_irq_handler(dev, pch_iir); | ||
1263 | |||
1264 | /* should clear PCH hotplug event before clear CPU irq */ | ||
1265 | I915_WRITE(SDEIIR, pch_iir); | ||
1266 | } | ||
1267 | |||
1268 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) | ||
1269 | ironlake_rps_change_irq_handler(dev); | ||
1270 | } | ||
1271 | |||
1272 | static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) | ||
1273 | { | ||
1274 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1275 | int i; | ||
1276 | |||
1277 | if (de_iir & DE_ERR_INT_IVB) | ||
1278 | ivb_err_int_handler(dev); | ||
1279 | |||
1280 | if (de_iir & DE_AUX_CHANNEL_A_IVB) | ||
1281 | dp_aux_irq_handler(dev); | ||
1282 | |||
1283 | if (de_iir & DE_GSE_IVB) | ||
1284 | intel_opregion_asle_intr(dev); | ||
1285 | |||
1286 | for (i = 0; i < 3; i++) { | ||
1287 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) | ||
1288 | drm_handle_vblank(dev, i); | ||
1289 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { | ||
1290 | intel_prepare_page_flip(dev, i); | ||
1291 | intel_finish_page_flip_plane(dev, i); | ||
1292 | } | ||
1293 | } | ||
1294 | |||
1295 | /* check event from PCH */ | ||
1296 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { | ||
1297 | u32 pch_iir = I915_READ(SDEIIR); | ||
1298 | |||
1299 | cpt_irq_handler(dev, pch_iir); | ||
1300 | |||
1301 | /* clear PCH hotplug event before clear CPU irq */ | ||
1302 | I915_WRITE(SDEIIR, pch_iir); | ||
1303 | } | ||
1304 | } | ||
1305 | |||
1306 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) | ||
1206 | { | 1307 | { |
1207 | struct drm_device *dev = (struct drm_device *) arg; | 1308 | struct drm_device *dev = (struct drm_device *) arg; |
1208 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1309 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1209 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; | 1310 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; |
1210 | irqreturn_t ret = IRQ_NONE; | 1311 | irqreturn_t ret = IRQ_NONE; |
1211 | int i; | ||
1212 | 1312 | ||
1213 | atomic_inc(&dev_priv->irq_received); | 1313 | atomic_inc(&dev_priv->irq_received); |
1214 | 1314 | ||
1215 | /* We get interrupts on unclaimed registers, so check for this before we | 1315 | /* We get interrupts on unclaimed registers, so check for this before we |
1216 | * do any I915_{READ,WRITE}. */ | 1316 | * do any I915_{READ,WRITE}. */ |
1217 | if (IS_HASWELL(dev) && | 1317 | intel_uncore_check_errors(dev); |
1218 | (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
1219 | DRM_ERROR("Unclaimed register before interrupt\n"); | ||
1220 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
1221 | } | ||
1222 | 1318 | ||
1223 | /* disable master interrupt before clearing iir */ | 1319 | /* disable master interrupt before clearing iir */ |
1224 | de_ier = I915_READ(DEIER); | 1320 | de_ier = I915_READ(DEIER); |
1225 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | 1321 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
1322 | POSTING_READ(DEIER); | ||
1226 | 1323 | ||
1227 | /* Disable south interrupts. We'll only write to SDEIIR once, so further | 1324 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
1228 | * interrupts will will be stored on its back queue, and then we'll be | 1325 | * interrupts will will be stored on its back queue, and then we'll be |
@@ -1246,53 +1343,34 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | |||
1246 | 1343 | ||
1247 | gt_iir = I915_READ(GTIIR); | 1344 | gt_iir = I915_READ(GTIIR); |
1248 | if (gt_iir) { | 1345 | if (gt_iir) { |
1249 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | 1346 | if (INTEL_INFO(dev)->gen >= 6) |
1347 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | ||
1348 | else | ||
1349 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | ||
1250 | I915_WRITE(GTIIR, gt_iir); | 1350 | I915_WRITE(GTIIR, gt_iir); |
1251 | ret = IRQ_HANDLED; | 1351 | ret = IRQ_HANDLED; |
1252 | } | 1352 | } |
1253 | 1353 | ||
1254 | de_iir = I915_READ(DEIIR); | 1354 | de_iir = I915_READ(DEIIR); |
1255 | if (de_iir) { | 1355 | if (de_iir) { |
1256 | if (de_iir & DE_ERR_INT_IVB) | 1356 | if (INTEL_INFO(dev)->gen >= 7) |
1257 | ivb_err_int_handler(dev); | 1357 | ivb_display_irq_handler(dev, de_iir); |
1258 | 1358 | else | |
1259 | if (de_iir & DE_AUX_CHANNEL_A_IVB) | 1359 | ilk_display_irq_handler(dev, de_iir); |
1260 | dp_aux_irq_handler(dev); | ||
1261 | |||
1262 | if (de_iir & DE_GSE_IVB) | ||
1263 | intel_opregion_asle_intr(dev); | ||
1264 | |||
1265 | for (i = 0; i < 3; i++) { | ||
1266 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) | ||
1267 | drm_handle_vblank(dev, i); | ||
1268 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { | ||
1269 | intel_prepare_page_flip(dev, i); | ||
1270 | intel_finish_page_flip_plane(dev, i); | ||
1271 | } | ||
1272 | } | ||
1273 | |||
1274 | /* check event from PCH */ | ||
1275 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { | ||
1276 | u32 pch_iir = I915_READ(SDEIIR); | ||
1277 | |||
1278 | cpt_irq_handler(dev, pch_iir); | ||
1279 | |||
1280 | /* clear PCH hotplug event before clear CPU irq */ | ||
1281 | I915_WRITE(SDEIIR, pch_iir); | ||
1282 | } | ||
1283 | |||
1284 | I915_WRITE(DEIIR, de_iir); | 1360 | I915_WRITE(DEIIR, de_iir); |
1285 | ret = IRQ_HANDLED; | 1361 | ret = IRQ_HANDLED; |
1286 | } | 1362 | } |
1287 | 1363 | ||
1288 | pm_iir = I915_READ(GEN6_PMIIR); | 1364 | if (INTEL_INFO(dev)->gen >= 6) { |
1289 | if (pm_iir) { | 1365 | u32 pm_iir = I915_READ(GEN6_PMIIR); |
1290 | if (IS_HASWELL(dev)) | 1366 | if (pm_iir) { |
1291 | hsw_pm_irq_handler(dev_priv, pm_iir); | 1367 | if (IS_HASWELL(dev)) |
1292 | else if (pm_iir & GEN6_PM_RPS_EVENTS) | 1368 | hsw_pm_irq_handler(dev_priv, pm_iir); |
1293 | gen6_rps_irq_handler(dev_priv, pm_iir); | 1369 | else if (pm_iir & GEN6_PM_RPS_EVENTS) |
1294 | I915_WRITE(GEN6_PMIIR, pm_iir); | 1370 | gen6_rps_irq_handler(dev_priv, pm_iir); |
1295 | ret = IRQ_HANDLED; | 1371 | I915_WRITE(GEN6_PMIIR, pm_iir); |
1372 | ret = IRQ_HANDLED; | ||
1373 | } | ||
1296 | } | 1374 | } |
1297 | 1375 | ||
1298 | if (IS_HASWELL(dev)) { | 1376 | if (IS_HASWELL(dev)) { |
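The unified ironlake_irq_handler above reads the shared status registers once and then branches on the display generation to the ILK- or IVB-style helpers. A compact sketch of that dispatch shape; the gen numbers and handler bodies are placeholders for INTEL_INFO(dev)->gen and the real helpers:

	#include <stdint.h>
	#include <stdio.h>

	struct device { int gen; };

	static void ilk_display_irq(struct device *dev, uint32_t de_iir)
	{
		printf("gen%d: ilk path, de_iir=0x%x\n", dev->gen, de_iir);
	}

	static void ivb_display_irq(struct device *dev, uint32_t de_iir)
	{
		printf("gen%d: ivb path, de_iir=0x%x\n", dev->gen, de_iir);
	}

	static void irq_handler(struct device *dev, uint32_t de_iir)
	{
		if (!de_iir)
			return;                   /* nothing pending for the display */

		if (dev->gen >= 7)
			ivb_display_irq(dev, de_iir);
		else
			ilk_display_irq(dev, de_iir);
	}

	int main(void)
	{
		struct device ilk = { .gen = 5 }, ivb = { .gen = 7 };

		irq_handler(&ilk, 0x1);
		irq_handler(&ivb, 0x2);
		return 0;
	}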
@@ -1312,119 +1390,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) | |||
1312 | return ret; | 1390 | return ret; |
1313 | } | 1391 | } |
1314 | 1392 | ||
1315 | static void ilk_gt_irq_handler(struct drm_device *dev, | ||
1316 | struct drm_i915_private *dev_priv, | ||
1317 | u32 gt_iir) | ||
1318 | { | ||
1319 | if (gt_iir & | ||
1320 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | ||
1321 | notify_ring(dev, &dev_priv->ring[RCS]); | ||
1322 | if (gt_iir & ILK_BSD_USER_INTERRUPT) | ||
1323 | notify_ring(dev, &dev_priv->ring[VCS]); | ||
1324 | } | ||
1325 | |||
1326 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) | ||
1327 | { | ||
1328 | struct drm_device *dev = (struct drm_device *) arg; | ||
1329 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1330 | int ret = IRQ_NONE; | ||
1331 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; | ||
1332 | |||
1333 | atomic_inc(&dev_priv->irq_received); | ||
1334 | |||
1335 | /* disable master interrupt before clearing iir */ | ||
1336 | de_ier = I915_READ(DEIER); | ||
1337 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | ||
1338 | POSTING_READ(DEIER); | ||
1339 | |||
1340 | /* Disable south interrupts. We'll only write to SDEIIR once, so further | ||
1341 | * interrupts will will be stored on its back queue, and then we'll be | ||
1342 | * able to process them after we restore SDEIER (as soon as we restore | ||
1343 | * it, we'll get an interrupt if SDEIIR still has something to process | ||
1344 | * due to its back queue). */ | ||
1345 | sde_ier = I915_READ(SDEIER); | ||
1346 | I915_WRITE(SDEIER, 0); | ||
1347 | POSTING_READ(SDEIER); | ||
1348 | |||
1349 | de_iir = I915_READ(DEIIR); | ||
1350 | gt_iir = I915_READ(GTIIR); | ||
1351 | pm_iir = I915_READ(GEN6_PMIIR); | ||
1352 | |||
1353 | if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) | ||
1354 | goto done; | ||
1355 | |||
1356 | ret = IRQ_HANDLED; | ||
1357 | |||
1358 | if (IS_GEN5(dev)) | ||
1359 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | ||
1360 | else | ||
1361 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | ||
1362 | |||
1363 | if (de_iir & DE_AUX_CHANNEL_A) | ||
1364 | dp_aux_irq_handler(dev); | ||
1365 | |||
1366 | if (de_iir & DE_GSE) | ||
1367 | intel_opregion_asle_intr(dev); | ||
1368 | |||
1369 | if (de_iir & DE_PIPEA_VBLANK) | ||
1370 | drm_handle_vblank(dev, 0); | ||
1371 | |||
1372 | if (de_iir & DE_PIPEB_VBLANK) | ||
1373 | drm_handle_vblank(dev, 1); | ||
1374 | |||
1375 | if (de_iir & DE_POISON) | ||
1376 | DRM_ERROR("Poison interrupt\n"); | ||
1377 | |||
1378 | if (de_iir & DE_PIPEA_FIFO_UNDERRUN) | ||
1379 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) | ||
1380 | DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); | ||
1381 | |||
1382 | if (de_iir & DE_PIPEB_FIFO_UNDERRUN) | ||
1383 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) | ||
1384 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); | ||
1385 | |||
1386 | if (de_iir & DE_PLANEA_FLIP_DONE) { | ||
1387 | intel_prepare_page_flip(dev, 0); | ||
1388 | intel_finish_page_flip_plane(dev, 0); | ||
1389 | } | ||
1390 | |||
1391 | if (de_iir & DE_PLANEB_FLIP_DONE) { | ||
1392 | intel_prepare_page_flip(dev, 1); | ||
1393 | intel_finish_page_flip_plane(dev, 1); | ||
1394 | } | ||
1395 | |||
1396 | /* check event from PCH */ | ||
1397 | if (de_iir & DE_PCH_EVENT) { | ||
1398 | u32 pch_iir = I915_READ(SDEIIR); | ||
1399 | |||
1400 | if (HAS_PCH_CPT(dev)) | ||
1401 | cpt_irq_handler(dev, pch_iir); | ||
1402 | else | ||
1403 | ibx_irq_handler(dev, pch_iir); | ||
1404 | |||
1405 | /* should clear PCH hotplug event before clear CPU irq */ | ||
1406 | I915_WRITE(SDEIIR, pch_iir); | ||
1407 | } | ||
1408 | |||
1409 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) | ||
1410 | ironlake_rps_change_irq_handler(dev); | ||
1411 | |||
1412 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) | ||
1413 | gen6_rps_irq_handler(dev_priv, pm_iir); | ||
1414 | |||
1415 | I915_WRITE(GTIIR, gt_iir); | ||
1416 | I915_WRITE(DEIIR, de_iir); | ||
1417 | I915_WRITE(GEN6_PMIIR, pm_iir); | ||
1418 | |||
1419 | done: | ||
1420 | I915_WRITE(DEIER, de_ier); | ||
1421 | POSTING_READ(DEIER); | ||
1422 | I915_WRITE(SDEIER, sde_ier); | ||
1423 | POSTING_READ(SDEIER); | ||
1424 | |||
1425 | return ret; | ||
1426 | } | ||
1427 | |||
1428 | /** | 1393 | /** |
1429 | * i915_error_work_func - do process context error handling work | 1394 | * i915_error_work_func - do process context error handling work |
1430 | * @work: work struct | 1395 | * @work: work struct |
@@ -1440,9 +1405,9 @@ static void i915_error_work_func(struct work_struct *work) | |||
1440 | gpu_error); | 1405 | gpu_error); |
1441 | struct drm_device *dev = dev_priv->dev; | 1406 | struct drm_device *dev = dev_priv->dev; |
1442 | struct intel_ring_buffer *ring; | 1407 | struct intel_ring_buffer *ring; |
1443 | char *error_event[] = { "ERROR=1", NULL }; | 1408 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; |
1444 | char *reset_event[] = { "RESET=1", NULL }; | 1409 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; |
1445 | char *reset_done_event[] = { "ERROR=0", NULL }; | 1410 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; |
1446 | int i, ret; | 1411 | int i, ret; |
1447 | 1412 | ||
1448 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); | 1413 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
@@ -1696,29 +1661,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe) | |||
1696 | { | 1661 | { |
1697 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1662 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1698 | unsigned long irqflags; | 1663 | unsigned long irqflags; |
1664 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : | ||
1665 | DE_PIPE_VBLANK_ILK(pipe); | ||
1699 | 1666 | ||
1700 | if (!i915_pipe_enabled(dev, pipe)) | 1667 | if (!i915_pipe_enabled(dev, pipe)) |
1701 | return -EINVAL; | 1668 | return -EINVAL; |
1702 | 1669 | ||
1703 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1670 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1704 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | 1671 | ironlake_enable_display_irq(dev_priv, bit); |
1705 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); | ||
1706 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1707 | |||
1708 | return 0; | ||
1709 | } | ||
1710 | |||
1711 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) | ||
1712 | { | ||
1713 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1714 | unsigned long irqflags; | ||
1715 | |||
1716 | if (!i915_pipe_enabled(dev, pipe)) | ||
1717 | return -EINVAL; | ||
1718 | |||
1719 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1720 | ironlake_enable_display_irq(dev_priv, | ||
1721 | DE_PIPEA_VBLANK_IVB << (5 * pipe)); | ||
1722 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1672 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1723 | 1673 | ||
1724 | return 0; | 1674 | return 0; |
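The vblank hunks above fold the ivybridge variants into the ironlake ones by computing the per-pipe bit up front: on gen7+ the bits sit 5 apart (pipe A's bit shifted by 5 * pipe), on older parts pipe A and B have their own bits. A sketch of that selection; the bit positions used here are placeholders, not the register definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define DE_PIPEA_VBLANK      (1u << 2)    /* placeholder bit positions */
	#define DE_PIPEB_VBLANK      (1u << 3)
	#define DE_PIPEA_VBLANK_IVB  (1u << 0)

	static uint32_t pipe_vblank_bit(int gen, int pipe)
	{
		if (gen >= 7)
			return DE_PIPEA_VBLANK_IVB << (5 * pipe);  /* pipes 5 bits apart */

		return pipe == 0 ? DE_PIPEA_VBLANK : DE_PIPEB_VBLANK;
	}

	int main(void)
	{
		printf("ivb pipe 2 bit = 0x%x\n", pipe_vblank_bit(7, 2));
		printf("ilk pipe 1 bit = 0x%x\n", pipe_vblank_bit(5, 1));
		return 0;
	}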
@@ -1769,21 +1719,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe) | |||
1769 | { | 1719 | { |
1770 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1720 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1771 | unsigned long irqflags; | 1721 | unsigned long irqflags; |
1722 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : | ||
1723 | DE_PIPE_VBLANK_ILK(pipe); | ||
1772 | 1724 | ||
1773 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | 1725 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
1774 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | 1726 | ironlake_disable_display_irq(dev_priv, bit); |
1775 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); | ||
1776 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
1777 | } | ||
1778 | |||
1779 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) | ||
1780 | { | ||
1781 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1782 | unsigned long irqflags; | ||
1783 | |||
1784 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
1785 | ironlake_disable_display_irq(dev_priv, | ||
1786 | DE_PIPEA_VBLANK_IVB << (pipe * 5)); | ||
1787 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 1727 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1788 | } | 1728 | } |
1789 | 1729 | ||
@@ -2030,9 +1970,17 @@ void i915_hangcheck_elapsed(unsigned long data) | |||
2030 | if (busy_count) | 1970 | if (busy_count) |
2031 | /* Reset timer case chip hangs without another request | 1971 | /* Reset timer case chip hangs without another request |
2032 | * being added */ | 1972 | * being added */ |
2033 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | 1973 | i915_queue_hangcheck(dev); |
2034 | round_jiffies_up(jiffies + | 1974 | } |
2035 | DRM_I915_HANGCHECK_JIFFIES)); | 1975 | |
1976 | void i915_queue_hangcheck(struct drm_device *dev) | ||
1977 | { | ||
1978 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1979 | if (!i915_enable_hangcheck) | ||
1980 | return; | ||
1981 | |||
1982 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | ||
1983 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | ||
2036 | } | 1984 | } |
2037 | 1985 | ||
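i915_queue_hangcheck() above centralises the "if hangcheck is enabled, re-arm the timer" step that notify_ring() and the hangcheck timer used to open-code. A sketch of that factoring; the timer call is a userspace stand-in, not the kernel's mod_timer():

	#include <stdbool.h>
	#include <stdio.h>

	static bool hangcheck_enabled = true;     /* stand-in for i915_enable_hangcheck */

	static void mod_timer_stub(unsigned long expires)
	{
		printf("hangcheck re-armed, expires at %lu\n", expires);
	}

	static void queue_hangcheck(unsigned long now, unsigned long period)
	{
		if (!hangcheck_enabled)
			return;                   /* single place that honours the knob */

		mod_timer_stub(now + period);
	}

	int main(void)
	{
		queue_hangcheck(1000, 1500);      /* both the irq and timer paths call this */
		return 0;
	}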
2038 | static void ibx_irq_preinstall(struct drm_device *dev) | 1986 | static void ibx_irq_preinstall(struct drm_device *dev) |
@@ -2054,31 +2002,26 @@ static void ibx_irq_preinstall(struct drm_device *dev) | |||
2054 | POSTING_READ(SDEIER); | 2002 | POSTING_READ(SDEIER); |
2055 | } | 2003 | } |
2056 | 2004 | ||
2057 | /* drm_dma.h hooks | 2005 | static void gen5_gt_irq_preinstall(struct drm_device *dev) |
2058 | */ | ||
2059 | static void ironlake_irq_preinstall(struct drm_device *dev) | ||
2060 | { | 2006 | { |
2061 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2007 | struct drm_i915_private *dev_priv = dev->dev_private; |
2062 | |||
2063 | atomic_set(&dev_priv->irq_received, 0); | ||
2064 | |||
2065 | I915_WRITE(HWSTAM, 0xeffe); | ||
2066 | |||
2067 | /* XXX hotplug from PCH */ | ||
2068 | |||
2069 | I915_WRITE(DEIMR, 0xffffffff); | ||
2070 | I915_WRITE(DEIER, 0x0); | ||
2071 | POSTING_READ(DEIER); | ||
2072 | 2008 | ||
2073 | /* and GT */ | 2009 | /* and GT */ |
2074 | I915_WRITE(GTIMR, 0xffffffff); | 2010 | I915_WRITE(GTIMR, 0xffffffff); |
2075 | I915_WRITE(GTIER, 0x0); | 2011 | I915_WRITE(GTIER, 0x0); |
2076 | POSTING_READ(GTIER); | 2012 | POSTING_READ(GTIER); |
2077 | 2013 | ||
2078 | ibx_irq_preinstall(dev); | 2014 | if (INTEL_INFO(dev)->gen >= 6) { |
2015 | /* and PM */ | ||
2016 | I915_WRITE(GEN6_PMIMR, 0xffffffff); | ||
2017 | I915_WRITE(GEN6_PMIER, 0x0); | ||
2018 | POSTING_READ(GEN6_PMIER); | ||
2019 | } | ||
2079 | } | 2020 | } |
2080 | 2021 | ||
2081 | static void ivybridge_irq_preinstall(struct drm_device *dev) | 2022 | /* drm_dma.h hooks |
2023 | */ | ||
2024 | static void ironlake_irq_preinstall(struct drm_device *dev) | ||
2082 | { | 2025 | { |
2083 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2026 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2084 | 2027 | ||
@@ -2086,21 +2029,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev) | |||
2086 | 2029 | ||
2087 | I915_WRITE(HWSTAM, 0xeffe); | 2030 | I915_WRITE(HWSTAM, 0xeffe); |
2088 | 2031 | ||
2089 | /* XXX hotplug from PCH */ | ||
2090 | |||
2091 | I915_WRITE(DEIMR, 0xffffffff); | 2032 | I915_WRITE(DEIMR, 0xffffffff); |
2092 | I915_WRITE(DEIER, 0x0); | 2033 | I915_WRITE(DEIER, 0x0); |
2093 | POSTING_READ(DEIER); | 2034 | POSTING_READ(DEIER); |
2094 | 2035 | ||
2095 | /* and GT */ | 2036 | gen5_gt_irq_preinstall(dev); |
2096 | I915_WRITE(GTIMR, 0xffffffff); | ||
2097 | I915_WRITE(GTIER, 0x0); | ||
2098 | POSTING_READ(GTIER); | ||
2099 | |||
2100 | /* Power management */ | ||
2101 | I915_WRITE(GEN6_PMIMR, 0xffffffff); | ||
2102 | I915_WRITE(GEN6_PMIER, 0x0); | ||
2103 | POSTING_READ(GEN6_PMIER); | ||
2104 | 2037 | ||
2105 | ibx_irq_preinstall(dev); | 2038 | ibx_irq_preinstall(dev); |
2106 | } | 2039 | } |
@@ -2121,9 +2054,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev) | |||
2121 | /* and GT */ | 2054 | /* and GT */ |
2122 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2055 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
2123 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2056 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
2124 | I915_WRITE(GTIMR, 0xffffffff); | 2057 | |
2125 | I915_WRITE(GTIER, 0x0); | 2058 | gen5_gt_irq_preinstall(dev); |
2126 | POSTING_READ(GTIER); | ||
2127 | 2059 | ||
2128 | I915_WRITE(DPINVGTT, 0xff); | 2060 | I915_WRITE(DPINVGTT, 0xff); |
2129 | 2061 | ||
@@ -2193,120 +2125,99 @@ static void ibx_irq_postinstall(struct drm_device *dev) | |||
2193 | I915_WRITE(SDEIMR, ~mask); | 2125 | I915_WRITE(SDEIMR, ~mask); |
2194 | } | 2126 | } |
2195 | 2127 | ||
2196 | static int ironlake_irq_postinstall(struct drm_device *dev) | 2128 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
2197 | { | 2129 | { |
2198 | unsigned long irqflags; | 2130 | struct drm_i915_private *dev_priv = dev->dev_private; |
2199 | 2131 | u32 pm_irqs, gt_irqs; | |
2200 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
2201 | /* enable kind of interrupts always enabled */ | ||
2202 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | ||
2203 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | ||
2204 | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | | ||
2205 | DE_PIPEA_FIFO_UNDERRUN | DE_POISON; | ||
2206 | u32 gt_irqs; | ||
2207 | |||
2208 | dev_priv->irq_mask = ~display_mask; | ||
2209 | 2132 | ||
2210 | /* should always can generate irq */ | 2133 | pm_irqs = gt_irqs = 0; |
2211 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | ||
2212 | I915_WRITE(DEIMR, dev_priv->irq_mask); | ||
2213 | I915_WRITE(DEIER, display_mask | | ||
2214 | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); | ||
2215 | POSTING_READ(DEIER); | ||
2216 | 2134 | ||
2217 | dev_priv->gt_irq_mask = ~0; | 2135 | dev_priv->gt_irq_mask = ~0; |
2136 | if (HAS_L3_GPU_CACHE(dev)) { | ||
2137 | /* L3 parity interrupt is always unmasked. */ | ||
2138 | dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | ||
2139 | gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | ||
2140 | } | ||
2218 | 2141 | ||
2219 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2142 | gt_irqs |= GT_RENDER_USER_INTERRUPT; |
2220 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | 2143 | if (IS_GEN5(dev)) { |
2221 | |||
2222 | gt_irqs = GT_RENDER_USER_INTERRUPT; | ||
2223 | |||
2224 | if (IS_GEN6(dev)) | ||
2225 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; | ||
2226 | else | ||
2227 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | | 2144 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | |
2228 | ILK_BSD_USER_INTERRUPT; | 2145 | ILK_BSD_USER_INTERRUPT; |
2146 | } else { | ||
2147 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; | ||
2148 | } | ||
2229 | 2149 | ||
2150 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||
2151 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
2230 | I915_WRITE(GTIER, gt_irqs); | 2152 | I915_WRITE(GTIER, gt_irqs); |
2231 | POSTING_READ(GTIER); | 2153 | POSTING_READ(GTIER); |
2232 | 2154 | ||
2233 | ibx_irq_postinstall(dev); | 2155 | if (INTEL_INFO(dev)->gen >= 6) { |
2156 | pm_irqs |= GEN6_PM_RPS_EVENTS; | ||
2234 | 2157 | ||
2235 | if (IS_IRONLAKE_M(dev)) { | 2158 | if (HAS_VEBOX(dev)) |
2236 | /* Enable PCU event interrupts | 2159 | pm_irqs |= PM_VEBOX_USER_INTERRUPT; |
2237 | * | ||
2238 | * spinlocking not required here for correctness since interrupt | ||
2239 | * setup is guaranteed to run in single-threaded context. But we | ||
2240 | * need it to make the assert_spin_locked happy. */ | ||
2241 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
2242 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); | ||
2243 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
2244 | } | ||
2245 | 2160 | ||
2246 | return 0; | 2161 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); |
2162 | I915_WRITE(GEN6_PMIMR, 0xffffffff); | ||
2163 | I915_WRITE(GEN6_PMIER, pm_irqs); | ||
2164 | POSTING_READ(GEN6_PMIER); | ||
2165 | } | ||
2247 | } | 2166 | } |
2248 | 2167 | ||
2249 | static int ivybridge_irq_postinstall(struct drm_device *dev) | 2168 | static int ironlake_irq_postinstall(struct drm_device *dev) |
2250 | { | 2169 | { |
2170 | unsigned long irqflags; | ||
2251 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2171 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2252 | /* enable kind of interrupts always enabled */ | 2172 | u32 display_mask, extra_mask; |
2253 | u32 display_mask = | 2173 | |
2254 | DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | | 2174 | if (INTEL_INFO(dev)->gen >= 7) { |
2255 | DE_PLANEC_FLIP_DONE_IVB | | 2175 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | |
2256 | DE_PLANEB_FLIP_DONE_IVB | | 2176 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | |
2257 | DE_PLANEA_FLIP_DONE_IVB | | 2177 | DE_PLANEB_FLIP_DONE_IVB | |
2258 | DE_AUX_CHANNEL_A_IVB | | 2178 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | |
2259 | DE_ERR_INT_IVB; | 2179 | DE_ERR_INT_IVB); |
2260 | u32 pm_irqs = GEN6_PM_RPS_EVENTS; | 2180 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | |
2261 | u32 gt_irqs; | 2181 | DE_PIPEA_VBLANK_IVB); |
2182 | |||
2183 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | ||
2184 | } else { | ||
2185 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | ||
2186 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | ||
2187 | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | | ||
2188 | DE_PIPEA_FIFO_UNDERRUN | DE_POISON); | ||
2189 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; | ||
2190 | } | ||
2262 | 2191 | ||
2263 | dev_priv->irq_mask = ~display_mask; | 2192 | dev_priv->irq_mask = ~display_mask; |
2264 | 2193 | ||
2265 | /* should always can generate irq */ | 2194 | /* should always can generate irq */ |
2266 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | ||
2267 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | 2195 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
2268 | I915_WRITE(DEIMR, dev_priv->irq_mask); | 2196 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
2269 | I915_WRITE(DEIER, | 2197 | I915_WRITE(DEIER, display_mask | extra_mask); |
2270 | display_mask | | ||
2271 | DE_PIPEC_VBLANK_IVB | | ||
2272 | DE_PIPEB_VBLANK_IVB | | ||
2273 | DE_PIPEA_VBLANK_IVB); | ||
2274 | POSTING_READ(DEIER); | 2198 | POSTING_READ(DEIER); |
2275 | 2199 | ||
2276 | dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | 2200 | gen5_gt_irq_postinstall(dev); |
2277 | |||
2278 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | ||
2279 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
2280 | |||
2281 | gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | | ||
2282 | GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | ||
2283 | I915_WRITE(GTIER, gt_irqs); | ||
2284 | POSTING_READ(GTIER); | ||
2285 | |||
2286 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | ||
2287 | if (HAS_VEBOX(dev)) | ||
2288 | pm_irqs |= PM_VEBOX_USER_INTERRUPT; | ||
2289 | |||
2290 | /* Our enable/disable rps functions may touch these registers so | ||
2291 | * make sure to set a known state for only the non-RPS bits. | ||
2292 | * The RMW is extra paranoia since this should be called after being set | ||
2293 | * to a known state in preinstall. | ||
2294 | * */ | ||
2295 | I915_WRITE(GEN6_PMIMR, | ||
2296 | (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs); | ||
2297 | I915_WRITE(GEN6_PMIER, | ||
2298 | (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs); | ||
2299 | POSTING_READ(GEN6_PMIER); | ||
2300 | 2201 | ||
2301 | ibx_irq_postinstall(dev); | 2202 | ibx_irq_postinstall(dev); |
2302 | 2203 | ||
2204 | if (IS_IRONLAKE_M(dev)) { | ||
2205 | /* Enable PCU event interrupts | ||
2206 | * | ||
2207 | * spinlocking not required here for correctness since interrupt | ||
2208 | * setup is guaranteed to run in single-threaded context. But we | ||
2209 | * need it to make the assert_spin_locked happy. */ | ||
2210 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
2211 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); | ||
2212 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
2213 | } | ||
2214 | |||
2303 | return 0; | 2215 | return 0; |
2304 | } | 2216 | } |
2305 | 2217 | ||
2306 | static int valleyview_irq_postinstall(struct drm_device *dev) | 2218 | static int valleyview_irq_postinstall(struct drm_device *dev) |
2307 | { | 2219 | { |
2308 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 2220 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2309 | u32 gt_irqs; | ||
2310 | u32 enable_mask; | 2221 | u32 enable_mask; |
2311 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; | 2222 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
2312 | unsigned long irqflags; | 2223 | unsigned long irqflags; |
@@ -2346,13 +2257,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev) | |||
2346 | I915_WRITE(VLV_IIR, 0xffffffff); | 2257 | I915_WRITE(VLV_IIR, 0xffffffff); |
2347 | I915_WRITE(VLV_IIR, 0xffffffff); | 2258 | I915_WRITE(VLV_IIR, 0xffffffff); |
2348 | 2259 | ||
2349 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2260 | gen5_gt_irq_postinstall(dev); |
2350 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | ||
2351 | |||
2352 | gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | | ||
2353 | GT_BLT_USER_INTERRUPT; | ||
2354 | I915_WRITE(GTIER, gt_irqs); | ||
2355 | POSTING_READ(GTIER); | ||
2356 | 2261 | ||
2357 | /* ack & enable invalid PTE error interrupts */ | 2262 | /* ack & enable invalid PTE error interrupts */ |
2358 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | 2263 | #if 0 /* FIXME: add support to irq handler for checking these bits */ |
@@ -3118,15 +3023,6 @@ void intel_irq_init(struct drm_device *dev) | |||
3118 | dev->driver->enable_vblank = valleyview_enable_vblank; | 3023 | dev->driver->enable_vblank = valleyview_enable_vblank; |
3119 | dev->driver->disable_vblank = valleyview_disable_vblank; | 3024 | dev->driver->disable_vblank = valleyview_disable_vblank; |
3120 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; | 3025 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
3121 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { | ||
3122 | /* Share uninstall handlers with ILK/SNB */ | ||
3123 | dev->driver->irq_handler = ivybridge_irq_handler; | ||
3124 | dev->driver->irq_preinstall = ivybridge_irq_preinstall; | ||
3125 | dev->driver->irq_postinstall = ivybridge_irq_postinstall; | ||
3126 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | ||
3127 | dev->driver->enable_vblank = ivybridge_enable_vblank; | ||
3128 | dev->driver->disable_vblank = ivybridge_disable_vblank; | ||
3129 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; | ||
3130 | } else if (HAS_PCH_SPLIT(dev)) { | 3026 | } else if (HAS_PCH_SPLIT(dev)) { |
3131 | dev->driver->irq_handler = ironlake_irq_handler; | 3027 | dev->driver->irq_handler = ironlake_irq_handler; |
3132 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | 3028 | dev->driver->irq_preinstall = ironlake_irq_preinstall; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index dc3d6a74f391..3aebe5dee4df 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -61,6 +61,12 @@ | |||
61 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) | 61 | #define GC_LOW_FREQUENCY_ENABLE (1 << 7) |
62 | #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) | 62 | #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) |
63 | #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) | 63 | #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) |
64 | #define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) | ||
65 | #define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) | ||
66 | #define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) | ||
67 | #define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) | ||
68 | #define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) | ||
69 | #define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) | ||
64 | #define GC_DISPLAY_CLOCK_MASK (7 << 4) | 70 | #define GC_DISPLAY_CLOCK_MASK (7 << 4) |
65 | #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) | 71 | #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) |
66 | #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) | 72 | #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) |
@@ -1779,6 +1785,71 @@ | |||
1779 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) | 1785 | #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) |
1780 | #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) | 1786 | #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) |
1781 | 1787 | ||
1788 | /* HSW eDP PSR registers */ | ||
1789 | #define EDP_PSR_CTL 0x64800 | ||
1790 | #define EDP_PSR_ENABLE (1<<31) | ||
1791 | #define EDP_PSR_LINK_DISABLE (0<<27) | ||
1792 | #define EDP_PSR_LINK_STANDBY (1<<27) | ||
1793 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) | ||
1794 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25) | ||
1795 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25) | ||
1796 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25) | ||
1797 | #define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25) | ||
1798 | #define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 | ||
1799 | #define EDP_PSR_SKIP_AUX_EXIT (1<<12) | ||
1800 | #define EDP_PSR_TP1_TP2_SEL (0<<11) | ||
1801 | #define EDP_PSR_TP1_TP3_SEL (1<<11) | ||
1802 | #define EDP_PSR_TP2_TP3_TIME_500us (0<<8) | ||
1803 | #define EDP_PSR_TP2_TP3_TIME_100us (1<<8) | ||
1804 | #define EDP_PSR_TP2_TP3_TIME_2500us (2<<8) | ||
1805 | #define EDP_PSR_TP2_TP3_TIME_0us (3<<8) | ||
1806 | #define EDP_PSR_TP1_TIME_500us (0<<4) | ||
1807 | #define EDP_PSR_TP1_TIME_100us (1<<4) | ||
1808 | #define EDP_PSR_TP1_TIME_2500us (2<<4) | ||
1809 | #define EDP_PSR_TP1_TIME_0us (3<<4) | ||
1810 | #define EDP_PSR_IDLE_FRAME_SHIFT 0 | ||
1811 | |||
1812 | #define EDP_PSR_AUX_CTL 0x64810 | ||
1813 | #define EDP_PSR_AUX_DATA1 0x64814 | ||
1814 | #define EDP_PSR_DPCD_COMMAND 0x80060000 | ||
1815 | #define EDP_PSR_AUX_DATA2 0x64818 | ||
1816 | #define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) | ||
1817 | #define EDP_PSR_AUX_DATA3 0x6481c | ||
1818 | #define EDP_PSR_AUX_DATA4 0x64820 | ||
1819 | #define EDP_PSR_AUX_DATA5 0x64824 | ||
1820 | |||
1821 | #define EDP_PSR_STATUS_CTL 0x64840 | ||
1822 | #define EDP_PSR_STATUS_STATE_MASK (7<<29) | ||
1823 | #define EDP_PSR_STATUS_STATE_IDLE (0<<29) | ||
1824 | #define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) | ||
1825 | #define EDP_PSR_STATUS_STATE_SRDENT (2<<29) | ||
1826 | #define EDP_PSR_STATUS_STATE_BUFOFF (3<<29) | ||
1827 | #define EDP_PSR_STATUS_STATE_BUFON (4<<29) | ||
1828 | #define EDP_PSR_STATUS_STATE_AUXACK (5<<29) | ||
1829 | #define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29) | ||
1830 | #define EDP_PSR_STATUS_LINK_MASK (3<<26) | ||
1831 | #define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26) | ||
1832 | #define EDP_PSR_STATUS_LINK_FULL_ON (1<<26) | ||
1833 | #define EDP_PSR_STATUS_LINK_STANDBY (2<<26) | ||
1834 | #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 | ||
1835 | #define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f | ||
1836 | #define EDP_PSR_STATUS_COUNT_SHIFT 16 | ||
1837 | #define EDP_PSR_STATUS_COUNT_MASK 0xf | ||
1838 | #define EDP_PSR_STATUS_AUX_ERROR (1<<15) | ||
1839 | #define EDP_PSR_STATUS_AUX_SENDING (1<<12) | ||
1840 | #define EDP_PSR_STATUS_SENDING_IDLE (1<<9) | ||
1841 | #define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8) | ||
1842 | #define EDP_PSR_STATUS_SENDING_TP1 (1<<4) | ||
1843 | #define EDP_PSR_STATUS_IDLE_MASK 0xf | ||
1844 | |||
1845 | #define EDP_PSR_PERF_CNT 0x64844 | ||
1846 | #define EDP_PSR_PERF_CNT_MASK 0xffffff | ||
1847 | |||
1848 | #define EDP_PSR_DEBUG_CTL 0x64860 | ||
1849 | #define EDP_PSR_DEBUG_MASK_LPSP (1<<27) | ||
1850 | #define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) | ||
1851 | #define EDP_PSR_DEBUG_MASK_HPD (1<<25) | ||
1852 | |||
1782 | /* VGA port control */ | 1853 | /* VGA port control */ |
1783 | #define ADPA 0x61100 | 1854 | #define ADPA 0x61100 |
1784 | #define PCH_ADPA 0xe1100 | 1855 | #define PCH_ADPA 0xe1100 |
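The PSR hunk above only adds register definitions; the code driving them lives in the new intel_edp_psr_* functions referenced from intel_ddi.c further down. As a reading aid, this is roughly how the EDP_PSR_CTL fields combine into a single write; the timing and frame-count values below are placeholders, not the driver's defaults.

/* Illustrative composition of EDP_PSR_CTL; the values chosen here are made up. */
uint32_t max_sleep_time = 0x1f;	/* 5-bit field starting at bit 20 */
uint32_t idle_frames = 1;	/* frames of inactivity before PSR entry */
uint32_t val;

val = EDP_PSR_ENABLE |
      EDP_PSR_LINK_STANDBY |			/* keep the main link in standby */
      EDP_PSR_TP1_TIME_100us |			/* training pattern 1 duration */
      EDP_PSR_TP2_TP3_TIME_100us |		/* TP2/TP3 duration */
      (max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT) |
      (idle_frames << EDP_PSR_IDLE_FRAME_SHIFT);
I915_WRITE(EDP_PSR_CTL, val);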
@@ -2048,6 +2119,7 @@ | |||
2048 | * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte | 2119 | * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte |
2049 | * of the infoframe structure specified by CEA-861. */ | 2120 | * of the infoframe structure specified by CEA-861. */ |
2050 | #define VIDEO_DIP_DATA_SIZE 32 | 2121 | #define VIDEO_DIP_DATA_SIZE 32 |
2122 | #define VIDEO_DIP_VSC_DATA_SIZE 36 | ||
2051 | #define VIDEO_DIP_CTL 0x61170 | 2123 | #define VIDEO_DIP_CTL 0x61170 |
2052 | /* Pre HSW: */ | 2124 | /* Pre HSW: */ |
2053 | #define VIDEO_DIP_ENABLE (1 << 31) | 2125 | #define VIDEO_DIP_ENABLE (1 << 31) |
@@ -2195,6 +2267,8 @@ | |||
2195 | #define BLC_PWM_CPU_CTL2 0x48250 | 2267 | #define BLC_PWM_CPU_CTL2 0x48250 |
2196 | #define BLC_PWM_CPU_CTL 0x48254 | 2268 | #define BLC_PWM_CPU_CTL 0x48254 |
2197 | 2269 | ||
2270 | #define HSW_BLC_PWM2_CTL 0x48350 | ||
2271 | |||
2198 | /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is | 2272 | /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is |
2199 | * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ | 2273 | * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ |
2200 | #define BLC_PWM_PCH_CTL1 0xc8250 | 2274 | #define BLC_PWM_PCH_CTL1 0xc8250 |
@@ -2203,6 +2277,12 @@ | |||
2203 | #define BLM_PCH_POLARITY (1 << 29) | 2277 | #define BLM_PCH_POLARITY (1 << 29) |
2204 | #define BLC_PWM_PCH_CTL2 0xc8254 | 2278 | #define BLC_PWM_PCH_CTL2 0xc8254 |
2205 | 2279 | ||
2280 | #define UTIL_PIN_CTL 0x48400 | ||
2281 | #define UTIL_PIN_ENABLE (1 << 31) | ||
2282 | |||
2283 | #define PCH_GTC_CTL 0xe7000 | ||
2284 | #define PCH_GTC_ENABLE (1 << 31) | ||
2285 | |||
2206 | /* TV port control */ | 2286 | /* TV port control */ |
2207 | #define TV_CTL 0x68000 | 2287 | #define TV_CTL 0x68000 |
2208 | /** Enables the TV encoder */ | 2288 | /** Enables the TV encoder */ |
@@ -3721,6 +3801,9 @@ | |||
3721 | #define DE_PLANEA_FLIP_DONE_IVB (1<<3) | 3801 | #define DE_PLANEA_FLIP_DONE_IVB (1<<3) |
3722 | #define DE_PIPEA_VBLANK_IVB (1<<0) | 3802 | #define DE_PIPEA_VBLANK_IVB (1<<0) |
3723 | 3803 | ||
3804 | #define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7)) | ||
3805 | #define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) | ||
3806 | |||
3724 | #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ | 3807 | #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ |
3725 | #define MASTER_INTERRUPT_ENABLE (1<<31) | 3808 | #define MASTER_INTERRUPT_ENABLE (1<<31) |
3726 | 3809 | ||
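The two new DE_PIPE_VBLANK helpers just encode the existing per-pipe bit layouts parametrically: ILK/SNB give each pipe an 8-bit group with vblank at bit 7 of the group, while IVB/HSW use a 5-bit stride with vblank at bit 0 (matching DE_PIPEA_VBLANK_IVB above). A small self-contained check, plain userspace C rather than driver code:

#include <assert.h>
#include <stdio.h>

#define DE_PIPE_VBLANK_ILK(pipe)	(1 << ((pipe * 8) + 7))
#define DE_PIPE_VBLANK_IVB(pipe)	(1 << (pipe * 5))

int main(void)
{
	/* ILK/SNB: pipe A -> bit 7, pipe B -> bit 15. */
	assert(DE_PIPE_VBLANK_ILK(0) == (1 << 7));
	assert(DE_PIPE_VBLANK_ILK(1) == (1 << 15));
	/* IVB/HSW: pipe A -> bit 0, pipe C -> bit 10. */
	assert(DE_PIPE_VBLANK_IVB(0) == (1 << 0));
	assert(DE_PIPE_VBLANK_IVB(2) == (1 << 10));
	printf("DE vblank bit layouts check out\n");
	return 0;
}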
@@ -4084,6 +4167,13 @@ | |||
4084 | #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ | 4167 | #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ |
4085 | _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) | 4168 | _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) |
4086 | 4169 | ||
4170 | #define HSW_STEREO_3D_CTL_A 0x70020 | ||
4171 | #define S3D_ENABLE (1<<31) | ||
4172 | #define HSW_STEREO_3D_CTL_B 0x71020 | ||
4173 | |||
4174 | #define HSW_STEREO_3D_CTL(trans) \ | ||
4175 | _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A) | ||
4176 | |||
4087 | #define _PCH_TRANS_HTOTAL_B 0xe1000 | 4177 | #define _PCH_TRANS_HTOTAL_B 0xe1000 |
4088 | #define _PCH_TRANS_HBLANK_B 0xe1004 | 4178 | #define _PCH_TRANS_HBLANK_B 0xe1004 |
4089 | #define _PCH_TRANS_HSYNC_B 0xe1008 | 4179 | #define _PCH_TRANS_HSYNC_B 0xe1008 |
@@ -4472,6 +4562,10 @@ | |||
4472 | #define GT_FIFO_FREE_ENTRIES 0x120008 | 4562 | #define GT_FIFO_FREE_ENTRIES 0x120008 |
4473 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 | 4563 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 |
4474 | 4564 | ||
4565 | #define HSW_IDICR 0x9008 | ||
4566 | #define IDIHASHMSK(x) (((x) & 0x3f) << 16) | ||
4567 | #define HSW_EDRAM_PRESENT 0x120010 | ||
4568 | |||
4475 | #define GEN6_UCGCTL1 0x9400 | 4569 | #define GEN6_UCGCTL1 0x9400 |
4476 | # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) | 4570 | # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) |
4477 | # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) | 4571 | # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) |
@@ -4862,7 +4956,8 @@ | |||
4862 | #define SBI_SSCAUXDIV6 0x0610 | 4956 | #define SBI_SSCAUXDIV6 0x0610 |
4863 | #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) | 4957 | #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) |
4864 | #define SBI_DBUFF0 0x2a00 | 4958 | #define SBI_DBUFF0 0x2a00 |
4865 | #define SBI_DBUFF0_ENABLE (1<<0) | 4959 | #define SBI_GEN0 0x1f00 |
4960 | #define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) | ||
4866 | 4961 | ||
4867 | /* LPT PIXCLK_GATE */ | 4962 | /* LPT PIXCLK_GATE */ |
4868 | #define PIXCLK_GATE 0xC6020 | 4963 | #define PIXCLK_GATE 0xC6020 |
@@ -4928,7 +5023,14 @@ | |||
4928 | #define LCPLL_CLK_FREQ_450 (0<<26) | 5023 | #define LCPLL_CLK_FREQ_450 (0<<26) |
4929 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) | 5024 | #define LCPLL_CD_CLOCK_DISABLE (1<<25) |
4930 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) | 5025 | #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) |
5026 | #define LCPLL_POWER_DOWN_ALLOW (1<<22) | ||
4931 | #define LCPLL_CD_SOURCE_FCLK (1<<21) | 5027 | #define LCPLL_CD_SOURCE_FCLK (1<<21) |
5028 | #define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) | ||
5029 | |||
5030 | #define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) | ||
5031 | #define D_COMP_RCOMP_IN_PROGRESS (1<<9) | ||
5032 | #define D_COMP_COMP_FORCE (1<<8) | ||
5033 | #define D_COMP_COMP_DISABLE (1<<0) | ||
4932 | 5034 | ||
4933 | /* Pipe WM_LINETIME - watermark line time */ | 5035 | /* Pipe WM_LINETIME - watermark line time */ |
4934 | #define PIPE_WM_LINETIME_A 0x45270 | 5036 | #define PIPE_WM_LINETIME_A 0x45270 |
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 7d283b5fcbf9..2933e2ffeaa4 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -406,10 +406,12 @@ TRACE_EVENT(i915_flip_complete, | |||
406 | TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) | 406 | TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) |
407 | ); | 407 | ); |
408 | 408 | ||
409 | TRACE_EVENT(i915_reg_rw, | 409 | TRACE_EVENT_CONDITION(i915_reg_rw, |
410 | TP_PROTO(bool write, u32 reg, u64 val, int len), | 410 | TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace), |
411 | 411 | ||
412 | TP_ARGS(write, reg, val, len), | 412 | TP_ARGS(write, reg, val, len, trace), |
413 | |||
414 | TP_CONDITION(trace), | ||
413 | 415 | ||
414 | TP_STRUCT__entry( | 416 | TP_STRUCT__entry( |
415 | __field(u64, val) | 417 | __field(u64, val) |
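TRACE_EVENT_CONDITION is the stock tracepoint variant that takes an extra TP_CONDITION expression: when it evaluates false the event body is skipped entirely, so untraced register accesses no longer pay for argument marshalling. A hedged sketch of a call site after the conversion; the wrapper below is hypothetical and not the driver's actual read path:

/* Hypothetical wrapper only; it shows how the new `trace` argument is threaded. */
static inline u32 reg_read_sketch(struct drm_i915_private *dev_priv,
				  u32 reg, bool trace)
{
	u32 val = I915_READ_NOTRACE(reg);

	/* write=false for a read; the tracepoint fires only when trace is true. */
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
	return val;
}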
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 3acec8c48166..0c0d4e8d768e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -613,6 +613,10 @@ intel_crt_detect(struct drm_connector *connector, bool force) | |||
613 | enum drm_connector_status status; | 613 | enum drm_connector_status status; |
614 | struct intel_load_detect_pipe tmp; | 614 | struct intel_load_detect_pipe tmp; |
615 | 615 | ||
616 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", | ||
617 | connector->base.id, drm_get_connector_name(connector), | ||
618 | force); | ||
619 | |||
616 | if (I915_HAS_HOTPLUG(dev)) { | 620 | if (I915_HAS_HOTPLUG(dev)) { |
617 | /* We can not rely on the HPD pin always being correctly wired | 621 | /* We can not rely on the HPD pin always being correctly wired |
618 | * up, for example many KVM do not pass it through, and so | 622 | * up, for example many KVM do not pass it through, and so |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 324211ac9c55..931b4bb1f9dc 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, | |||
301 | struct intel_digital_port *intel_dig_port = | 301 | struct intel_digital_port *intel_dig_port = |
302 | enc_to_dig_port(encoder); | 302 | enc_to_dig_port(encoder); |
303 | 303 | ||
304 | intel_dp->DP = intel_dig_port->port_reversal | | 304 | intel_dp->DP = intel_dig_port->saved_port_bits | |
305 | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; | 305 | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; |
306 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); | 306 | intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); |
307 | 307 | ||
@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) | |||
1109 | * enabling the port. | 1109 | * enabling the port. |
1110 | */ | 1110 | */ |
1111 | I915_WRITE(DDI_BUF_CTL(port), | 1111 | I915_WRITE(DDI_BUF_CTL(port), |
1112 | intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); | 1112 | intel_dig_port->saved_port_bits | |
1113 | DDI_BUF_CTL_ENABLE); | ||
1113 | } else if (type == INTEL_OUTPUT_EDP) { | 1114 | } else if (type == INTEL_OUTPUT_EDP) { |
1114 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1115 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1115 | 1116 | ||
@@ -1117,6 +1118,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) | |||
1117 | intel_dp_stop_link_train(intel_dp); | 1118 | intel_dp_stop_link_train(intel_dp); |
1118 | 1119 | ||
1119 | ironlake_edp_backlight_on(intel_dp); | 1120 | ironlake_edp_backlight_on(intel_dp); |
1121 | intel_edp_psr_enable(intel_dp); | ||
1120 | } | 1122 | } |
1121 | 1123 | ||
1122 | if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) { | 1124 | if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) { |
@@ -1147,6 +1149,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) | |||
1147 | if (type == INTEL_OUTPUT_EDP) { | 1149 | if (type == INTEL_OUTPUT_EDP) { |
1148 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1150 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1149 | 1151 | ||
1152 | intel_edp_psr_disable(intel_dp); | ||
1150 | ironlake_edp_backlight_off(intel_dp); | 1153 | ironlake_edp_backlight_off(intel_dp); |
1151 | } | 1154 | } |
1152 | } | 1155 | } |
@@ -1347,8 +1350,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
1347 | intel_encoder->get_config = intel_ddi_get_config; | 1350 | intel_encoder->get_config = intel_ddi_get_config; |
1348 | 1351 | ||
1349 | intel_dig_port->port = port; | 1352 | intel_dig_port->port = port; |
1350 | intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & | 1353 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
1351 | DDI_BUF_PORT_REVERSAL; | 1354 | (DDI_BUF_PORT_REVERSAL | |
1355 | DDI_A_4_LANES); | ||
1352 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); | 1356 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); |
1353 | 1357 | ||
1354 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; | 1358 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
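The saved_port_bits rename above is more than cosmetic: besides DDI_BUF_PORT_REVERSAL, the BIOS-programmed DDI_A_4_LANES bit is now captured at init and folded into every later DDI_BUF_CTL write, so a mode set cannot silently drop it. Condensed from the two hunks; the helper name and the PRESERVED_DDI_BITS macro are invented for illustration:

/* Which BIOS-programmed bits survive a port re-enable. */
#define PRESERVED_DDI_BITS	(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES)

static void ddi_reenable_sketch(struct drm_i915_private *dev_priv,
				struct intel_digital_port *intel_dig_port,
				enum port port)
{
	/* At init time: remember what the BIOS left in DDI_BUF_CTL. */
	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
					  PRESERVED_DDI_BITS;

	/* At enable time: carry those bits into the new register value. */
	I915_WRITE(DDI_BUF_CTL(port),
		   intel_dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
}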
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ae3dc5d1ff52..3e66f05ea342 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2274,6 +2274,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2274 | } | 2274 | } |
2275 | 2275 | ||
2276 | intel_update_fbc(dev); | 2276 | intel_update_fbc(dev); |
2277 | intel_edp_psr_update(dev); | ||
2277 | mutex_unlock(&dev->struct_mutex); | 2278 | mutex_unlock(&dev->struct_mutex); |
2278 | 2279 | ||
2279 | intel_crtc_update_sarea_pos(crtc, x, y); | 2280 | intel_crtc_update_sarea_pos(crtc, x, y); |
@@ -4162,6 +4163,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) | |||
4162 | return 200000; | 4163 | return 200000; |
4163 | } | 4164 | } |
4164 | 4165 | ||
4166 | static int pnv_get_display_clock_speed(struct drm_device *dev) | ||
4167 | { | ||
4168 | u16 gcfgc = 0; | ||
4169 | |||
4170 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); | ||
4171 | |||
4172 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { | ||
4173 | case GC_DISPLAY_CLOCK_267_MHZ_PNV: | ||
4174 | return 267000; | ||
4175 | case GC_DISPLAY_CLOCK_333_MHZ_PNV: | ||
4176 | return 333000; | ||
4177 | case GC_DISPLAY_CLOCK_444_MHZ_PNV: | ||
4178 | return 444000; | ||
4179 | case GC_DISPLAY_CLOCK_200_MHZ_PNV: | ||
4180 | return 200000; | ||
4181 | default: | ||
4182 | DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); | ||
4183 | case GC_DISPLAY_CLOCK_133_MHZ_PNV: | ||
4184 | return 133000; | ||
4185 | case GC_DISPLAY_CLOCK_167_MHZ_PNV: | ||
4186 | return 167000; | ||
4187 | } | ||
4188 | } | ||
4189 | |||
4165 | static int i915gm_get_display_clock_speed(struct drm_device *dev) | 4190 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
4166 | { | 4191 | { |
4167 | u16 gcfgc = 0; | 4192 | u16 gcfgc = 0; |
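One note on pnv_get_display_clock_speed() above: the default case logs the unknown GCFGC value and then falls through to the 133 MHz case, so an unrecognised strap still yields a conservative answer. The decode itself is plain bit masking; a self-contained userspace example with a made-up config value:

#include <stdio.h>

#define GC_DISPLAY_CLOCK_MASK		(7 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ_PNV	(1 << 4)

int main(void)
{
	unsigned short gcfgc = 0x0012;	/* made-up GCFGC PCI config value */

	/* Only bits 6:4 select the Pineview display core clock. */
	if ((gcfgc & GC_DISPLAY_CLOCK_MASK) == GC_DISPLAY_CLOCK_333_MHZ_PNV)
		printf("display core clock: 333000 kHz\n");
	return 0;
}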
@@ -4946,22 +4971,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, | |||
4946 | uint32_t tmp; | 4971 | uint32_t tmp; |
4947 | 4972 | ||
4948 | tmp = I915_READ(PFIT_CONTROL); | 4973 | tmp = I915_READ(PFIT_CONTROL); |
4974 | if (!(tmp & PFIT_ENABLE)) | ||
4975 | return; | ||
4949 | 4976 | ||
4977 | /* Check whether the pfit is attached to our pipe. */ | ||
4950 | if (INTEL_INFO(dev)->gen < 4) { | 4978 | if (INTEL_INFO(dev)->gen < 4) { |
4951 | if (crtc->pipe != PIPE_B) | 4979 | if (crtc->pipe != PIPE_B) |
4952 | return; | 4980 | return; |
4953 | |||
4954 | /* gen2/3 store dither state in pfit control, needs to match */ | ||
4955 | pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE; | ||
4956 | } else { | 4981 | } else { |
4957 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) | 4982 | if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) |
4958 | return; | 4983 | return; |
4959 | } | 4984 | } |
4960 | 4985 | ||
4961 | if (!(tmp & PFIT_ENABLE)) | 4986 | pipe_config->gmch_pfit.control = tmp; |
4962 | return; | ||
4963 | |||
4964 | pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL); | ||
4965 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); | 4987 | pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); |
4966 | if (INTEL_INFO(dev)->gen < 5) | 4988 | if (INTEL_INFO(dev)->gen < 5) |
4967 | pipe_config->gmch_pfit.lvds_border_bits = | 4989 | pipe_config->gmch_pfit.lvds_border_bits = |
@@ -5166,74 +5188,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
5166 | BUG_ON(val != final); | 5188 | BUG_ON(val != final); |
5167 | } | 5189 | } |
5168 | 5190 | ||
5169 | /* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ | 5191 | static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) |
5170 | static void lpt_init_pch_refclk(struct drm_device *dev) | ||
5171 | { | 5192 | { |
5172 | struct drm_i915_private *dev_priv = dev->dev_private; | 5193 | uint32_t tmp; |
5173 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
5174 | struct intel_encoder *encoder; | ||
5175 | bool has_vga = false; | ||
5176 | bool is_sdv = false; | ||
5177 | u32 tmp; | ||
5178 | |||
5179 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
5180 | switch (encoder->type) { | ||
5181 | case INTEL_OUTPUT_ANALOG: | ||
5182 | has_vga = true; | ||
5183 | break; | ||
5184 | } | ||
5185 | } | ||
5186 | |||
5187 | if (!has_vga) | ||
5188 | return; | ||
5189 | |||
5190 | mutex_lock(&dev_priv->dpio_lock); | ||
5191 | |||
5192 | /* XXX: Rip out SDV support once Haswell ships for real. */ | ||
5193 | if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) | ||
5194 | is_sdv = true; | ||
5195 | 5194 | ||
5196 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); | 5195 | tmp = I915_READ(SOUTH_CHICKEN2); |
5197 | tmp &= ~SBI_SSCCTL_DISABLE; | 5196 | tmp |= FDI_MPHY_IOSFSB_RESET_CTL; |
5198 | tmp |= SBI_SSCCTL_PATHALT; | 5197 | I915_WRITE(SOUTH_CHICKEN2, tmp); |
5199 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | ||
5200 | 5198 | ||
5201 | udelay(24); | 5199 | if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & |
5200 | FDI_MPHY_IOSFSB_RESET_STATUS, 100)) | ||
5201 | DRM_ERROR("FDI mPHY reset assert timeout\n"); | ||
5202 | 5202 | ||
5203 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); | 5203 | tmp = I915_READ(SOUTH_CHICKEN2); |
5204 | tmp &= ~SBI_SSCCTL_PATHALT; | 5204 | tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; |
5205 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | 5205 | I915_WRITE(SOUTH_CHICKEN2, tmp); |
5206 | 5206 | ||
5207 | if (!is_sdv) { | 5207 | if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & |
5208 | tmp = I915_READ(SOUTH_CHICKEN2); | 5208 | FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) |
5209 | tmp |= FDI_MPHY_IOSFSB_RESET_CTL; | 5209 | DRM_ERROR("FDI mPHY reset de-assert timeout\n"); |
5210 | I915_WRITE(SOUTH_CHICKEN2, tmp); | 5210 | } |
5211 | |||
5212 | if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & | ||
5213 | FDI_MPHY_IOSFSB_RESET_STATUS, 100)) | ||
5214 | DRM_ERROR("FDI mPHY reset assert timeout\n"); | ||
5215 | |||
5216 | tmp = I915_READ(SOUTH_CHICKEN2); | ||
5217 | tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; | ||
5218 | I915_WRITE(SOUTH_CHICKEN2, tmp); | ||
5219 | 5211 | ||
5220 | if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & | 5212 | /* WaMPhyProgramming:hsw */ |
5221 | FDI_MPHY_IOSFSB_RESET_STATUS) == 0, | 5213 | static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) |
5222 | 100)) | 5214 | { |
5223 | DRM_ERROR("FDI mPHY reset de-assert timeout\n"); | 5215 | uint32_t tmp; |
5224 | } | ||
5225 | 5216 | ||
5226 | tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); | 5217 | tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); |
5227 | tmp &= ~(0xFF << 24); | 5218 | tmp &= ~(0xFF << 24); |
5228 | tmp |= (0x12 << 24); | 5219 | tmp |= (0x12 << 24); |
5229 | intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); | 5220 | intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); |
5230 | 5221 | ||
5231 | if (is_sdv) { | ||
5232 | tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); | ||
5233 | tmp |= 0x7FFF; | ||
5234 | intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY); | ||
5235 | } | ||
5236 | |||
5237 | tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); | 5222 | tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); |
5238 | tmp |= (1 << 11); | 5223 | tmp |= (1 << 11); |
5239 | intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); | 5224 | intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); |
@@ -5242,24 +5227,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
5242 | tmp |= (1 << 11); | 5227 | tmp |= (1 << 11); |
5243 | intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); | 5228 | intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); |
5244 | 5229 | ||
5245 | if (is_sdv) { | ||
5246 | tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY); | ||
5247 | tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); | ||
5248 | intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY); | ||
5249 | |||
5250 | tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY); | ||
5251 | tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); | ||
5252 | intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY); | ||
5253 | |||
5254 | tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY); | ||
5255 | tmp |= (0x3F << 8); | ||
5256 | intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY); | ||
5257 | |||
5258 | tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY); | ||
5259 | tmp |= (0x3F << 8); | ||
5260 | intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY); | ||
5261 | } | ||
5262 | |||
5263 | tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); | 5230 | tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); |
5264 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); | 5231 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); |
5265 | intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); | 5232 | intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); |
@@ -5268,17 +5235,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
5268 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); | 5235 | tmp |= (1 << 24) | (1 << 21) | (1 << 18); |
5269 | intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); | 5236 | intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); |
5270 | 5237 | ||
5271 | if (!is_sdv) { | 5238 | tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); |
5272 | tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); | 5239 | tmp &= ~(7 << 13); |
5273 | tmp &= ~(7 << 13); | 5240 | tmp |= (5 << 13); |
5274 | tmp |= (5 << 13); | 5241 | intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); |
5275 | intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); | ||
5276 | 5242 | ||
5277 | tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); | 5243 | tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); |
5278 | tmp &= ~(7 << 13); | 5244 | tmp &= ~(7 << 13); |
5279 | tmp |= (5 << 13); | 5245 | tmp |= (5 << 13); |
5280 | intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); | 5246 | intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); |
5281 | } | ||
5282 | 5247 | ||
5283 | tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); | 5248 | tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); |
5284 | tmp &= ~0xFF; | 5249 | tmp &= ~0xFF; |
@@ -5300,34 +5265,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev) | |||
5300 | tmp |= (0x1C << 16); | 5265 | tmp |= (0x1C << 16); |
5301 | intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); | 5266 | intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); |
5302 | 5267 | ||
5303 | if (!is_sdv) { | 5268 | tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); |
5304 | tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); | 5269 | tmp |= (1 << 27); |
5305 | tmp |= (1 << 27); | 5270 | intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); |
5306 | intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); | 5271 | |
5272 | tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); | ||
5273 | tmp |= (1 << 27); | ||
5274 | intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); | ||
5275 | |||
5276 | tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); | ||
5277 | tmp &= ~(0xF << 28); | ||
5278 | tmp |= (4 << 28); | ||
5279 | intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); | ||
5280 | |||
5281 | tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); | ||
5282 | tmp &= ~(0xF << 28); | ||
5283 | tmp |= (4 << 28); | ||
5284 | intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); | ||
5285 | } | ||
5286 | |||
5287 | /* Implements 3 different sequences from BSpec chapter "Display iCLK | ||
5288 | * Programming" based on the parameters passed: | ||
5289 | * - Sequence to enable CLKOUT_DP | ||
5290 | * - Sequence to enable CLKOUT_DP without spread | ||
5291 | * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O | ||
5292 | */ | ||
5293 | static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, | ||
5294 | bool with_fdi) | ||
5295 | { | ||
5296 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5297 | uint32_t reg, tmp; | ||
5298 | |||
5299 | if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) | ||
5300 | with_spread = true; | ||
5301 | if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && | ||
5302 | with_fdi, "LP PCH doesn't have FDI\n")) | ||
5303 | with_fdi = false; | ||
5307 | 5304 | ||
5308 | tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); | 5305 | mutex_lock(&dev_priv->dpio_lock); |
5309 | tmp |= (1 << 27); | ||
5310 | intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); | ||
5311 | 5306 | ||
5312 | tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); | 5307 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); |
5313 | tmp &= ~(0xF << 28); | 5308 | tmp &= ~SBI_SSCCTL_DISABLE; |
5314 | tmp |= (4 << 28); | 5309 | tmp |= SBI_SSCCTL_PATHALT; |
5315 | intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); | 5310 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); |
5311 | |||
5312 | udelay(24); | ||
5316 | 5313 | ||
5317 | tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); | 5314 | if (with_spread) { |
5318 | tmp &= ~(0xF << 28); | 5315 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); |
5319 | tmp |= (4 << 28); | 5316 | tmp &= ~SBI_SSCCTL_PATHALT; |
5320 | intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); | 5317 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); |
5318 | |||
5319 | if (with_fdi) { | ||
5320 | lpt_reset_fdi_mphy(dev_priv); | ||
5321 | lpt_program_fdi_mphy(dev_priv); | ||
5322 | } | ||
5321 | } | 5323 | } |
5322 | 5324 | ||
5323 | /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ | 5325 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? |
5324 | tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); | 5326 | SBI_GEN0 : SBI_DBUFF0; |
5325 | tmp |= SBI_DBUFF0_ENABLE; | 5327 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); |
5326 | intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); | 5328 | tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; |
5329 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); | ||
5330 | |||
5331 | mutex_unlock(&dev_priv->dpio_lock); | ||
5332 | } | ||
5333 | |||
5334 | /* Sequence to disable CLKOUT_DP */ | ||
5335 | static void lpt_disable_clkout_dp(struct drm_device *dev) | ||
5336 | { | ||
5337 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5338 | uint32_t reg, tmp; | ||
5339 | |||
5340 | mutex_lock(&dev_priv->dpio_lock); | ||
5341 | |||
5342 | reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? | ||
5343 | SBI_GEN0 : SBI_DBUFF0; | ||
5344 | tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); | ||
5345 | tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; | ||
5346 | intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); | ||
5347 | |||
5348 | tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); | ||
5349 | if (!(tmp & SBI_SSCCTL_DISABLE)) { | ||
5350 | if (!(tmp & SBI_SSCCTL_PATHALT)) { | ||
5351 | tmp |= SBI_SSCCTL_PATHALT; | ||
5352 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | ||
5353 | udelay(32); | ||
5354 | } | ||
5355 | tmp |= SBI_SSCCTL_DISABLE; | ||
5356 | intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); | ||
5357 | } | ||
5327 | 5358 | ||
5328 | mutex_unlock(&dev_priv->dpio_lock); | 5359 | mutex_unlock(&dev_priv->dpio_lock); |
5329 | } | 5360 | } |
5330 | 5361 | ||
5362 | static void lpt_init_pch_refclk(struct drm_device *dev) | ||
5363 | { | ||
5364 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
5365 | struct intel_encoder *encoder; | ||
5366 | bool has_vga = false; | ||
5367 | |||
5368 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
5369 | switch (encoder->type) { | ||
5370 | case INTEL_OUTPUT_ANALOG: | ||
5371 | has_vga = true; | ||
5372 | break; | ||
5373 | } | ||
5374 | } | ||
5375 | |||
5376 | if (has_vga) | ||
5377 | lpt_enable_clkout_dp(dev, true, true); | ||
5378 | else | ||
5379 | lpt_disable_clkout_dp(dev); | ||
5380 | } | ||
5381 | |||
5331 | /* | 5382 | /* |
5332 | * Initialize reference clocks when the driver loads | 5383 | * Initialize reference clocks when the driver loads |
5333 | */ | 5384 | */ |
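A quick reading aid for the two flags of lpt_enable_clkout_dp(); only the VGA/FDI combination and the full disable are exercised by lpt_init_pch_refclk() above, and the call shown here is purely illustrative:

/* with_spread=true,  with_fdi=true  -> CLKOUT_DP with spread + FDI mPHY setup
 * with_spread=true,  with_fdi=false -> CLKOUT_DP with spread, no FDI programming
 * with_spread=false, with_fdi=false -> CLKOUT_DP without downspread
 * with_spread=false, with_fdi=true  -> caught by the WARN, spread is forced on */
lpt_enable_clkout_dp(dev, true, false);	/* example: spread, but no FDI I/O */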
@@ -5895,6 +5946,142 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, | |||
5895 | return true; | 5946 | return true; |
5896 | } | 5947 | } |
5897 | 5948 | ||
5949 | static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | ||
5950 | { | ||
5951 | struct drm_device *dev = dev_priv->dev; | ||
5952 | struct intel_ddi_plls *plls = &dev_priv->ddi_plls; | ||
5953 | struct intel_crtc *crtc; | ||
5954 | unsigned long irqflags; | ||
5955 | uint32_t val, pch_hpd_mask; | ||
5956 | |||
5957 | pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT; | ||
5958 | if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)) | ||
5959 | pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT; | ||
5960 | |||
5961 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) | ||
5962 | WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", | ||
5963 | pipe_name(crtc->pipe)); | ||
5964 | |||
5965 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); | ||
5966 | WARN(plls->spll_refcount, "SPLL enabled\n"); | ||
5967 | WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); | ||
5968 | WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); | ||
5969 | WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); | ||
5970 | WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, | ||
5971 | "CPU PWM1 enabled\n"); | ||
5972 | WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, | ||
5973 | "CPU PWM2 enabled\n"); | ||
5974 | WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, | ||
5975 | "PCH PWM1 enabled\n"); | ||
5976 | WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, | ||
5977 | "Utility pin enabled\n"); | ||
5978 | WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); | ||
5979 | |||
5980 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
5981 | val = I915_READ(DEIMR); | ||
5982 | WARN((val & ~DE_PCH_EVENT_IVB) != val, | ||
5983 | "Unexpected DEIMR bits enabled: 0x%x\n", val); | ||
5984 | val = I915_READ(SDEIMR); | ||
5985 | WARN((val & ~pch_hpd_mask) != val, | ||
5986 | "Unexpected SDEIMR bits enabled: 0x%x\n", val); | ||
5987 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
5988 | } | ||
5989 | |||
5990 | /* | ||
5991 | * This function implements pieces of two sequences from BSpec: | ||
5992 | * - Sequence for display software to disable LCPLL | ||
5993 | * - Sequence for display software to allow package C8+ | ||
5994 | * The steps implemented here are just the steps that actually touch the LCPLL | ||
5995 | * register. Callers should take care of disabling all the display engine | ||
5996 | * functions, doing the mode unset, fixing interrupts, etc. | ||
5997 | */ | ||
5998 | void hsw_disable_lcpll(struct drm_i915_private *dev_priv, | ||
5999 | bool switch_to_fclk, bool allow_power_down) | ||
6000 | { | ||
6001 | uint32_t val; | ||
6002 | |||
6003 | assert_can_disable_lcpll(dev_priv); | ||
6004 | |||
6005 | val = I915_READ(LCPLL_CTL); | ||
6006 | |||
6007 | if (switch_to_fclk) { | ||
6008 | val |= LCPLL_CD_SOURCE_FCLK; | ||
6009 | I915_WRITE(LCPLL_CTL, val); | ||
6010 | |||
6011 | if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & | ||
6012 | LCPLL_CD_SOURCE_FCLK_DONE, 1)) | ||
6013 | DRM_ERROR("Switching to FCLK failed\n"); | ||
6014 | |||
6015 | val = I915_READ(LCPLL_CTL); | ||
6016 | } | ||
6017 | |||
6018 | val |= LCPLL_PLL_DISABLE; | ||
6019 | I915_WRITE(LCPLL_CTL, val); | ||
6020 | POSTING_READ(LCPLL_CTL); | ||
6021 | |||
6022 | if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) | ||
6023 | DRM_ERROR("LCPLL still locked\n"); | ||
6024 | |||
6025 | val = I915_READ(D_COMP); | ||
6026 | val |= D_COMP_COMP_DISABLE; | ||
6027 | I915_WRITE(D_COMP, val); | ||
6028 | POSTING_READ(D_COMP); | ||
6029 | ndelay(100); | ||
6030 | |||
6031 | if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) | ||
6032 | DRM_ERROR("D_COMP RCOMP still in progress\n"); | ||
6033 | |||
6034 | if (allow_power_down) { | ||
6035 | val = I915_READ(LCPLL_CTL); | ||
6036 | val |= LCPLL_POWER_DOWN_ALLOW; | ||
6037 | I915_WRITE(LCPLL_CTL, val); | ||
6038 | POSTING_READ(LCPLL_CTL); | ||
6039 | } | ||
6040 | } | ||
6041 | |||
6042 | /* | ||
6043 | * Fully restores LCPLL, disallowing power down and switching back to LCPLL | ||
6044 | * source. | ||
6045 | */ | ||
6046 | void hsw_restore_lcpll(struct drm_i915_private *dev_priv) | ||
6047 | { | ||
6048 | uint32_t val; | ||
6049 | |||
6050 | val = I915_READ(LCPLL_CTL); | ||
6051 | |||
6052 | if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | | ||
6053 | LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) | ||
6054 | return; | ||
6055 | |||
6056 | if (val & LCPLL_POWER_DOWN_ALLOW) { | ||
6057 | val &= ~LCPLL_POWER_DOWN_ALLOW; | ||
6058 | I915_WRITE(LCPLL_CTL, val); | ||
6059 | } | ||
6060 | |||
6061 | val = I915_READ(D_COMP); | ||
6062 | val |= D_COMP_COMP_FORCE; | ||
6063 | val &= ~D_COMP_COMP_DISABLE; | ||
6064 | I915_WRITE(D_COMP, val); | ||
6065 | I915_READ(D_COMP); | ||
6066 | |||
6067 | val = I915_READ(LCPLL_CTL); | ||
6068 | val &= ~LCPLL_PLL_DISABLE; | ||
6069 | I915_WRITE(LCPLL_CTL, val); | ||
6070 | |||
6071 | if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) | ||
6072 | DRM_ERROR("LCPLL not locked yet\n"); | ||
6073 | |||
6074 | if (val & LCPLL_CD_SOURCE_FCLK) { | ||
6075 | val = I915_READ(LCPLL_CTL); | ||
6076 | val &= ~LCPLL_CD_SOURCE_FCLK; | ||
6077 | I915_WRITE(LCPLL_CTL, val); | ||
6078 | |||
6079 | if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & | ||
6080 | LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) | ||
6081 | DRM_ERROR("Switching back to LCPLL failed\n"); | ||
6082 | } | ||
6083 | } | ||
6084 | |||
5898 | static void haswell_modeset_global_resources(struct drm_device *dev) | 6085 | static void haswell_modeset_global_resources(struct drm_device *dev) |
5899 | { | 6086 | { |
5900 | bool enable = false; | 6087 | bool enable = false; |
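The LCPLL pair above is the low-level half of the pc8+ refclock work mentioned in the merge summary. A hedged sketch of how the two entry points are meant to bracket a package C8 window; the wrapper below is hypothetical, and all the higher-level bookkeeping the comment block demands (mode unset, interrupt masking, and so on) is elided:

/* Hypothetical caller; hsw_disable_lcpll() itself asserts the preconditions
 * via assert_can_disable_lcpll(). */
static void pc8_window_sketch(struct drm_i915_private *dev_priv)
{
	/* Switch CDCLK to FCLK first, then allow the PLL to power down. */
	hsw_disable_lcpll(dev_priv, true, true);

	/* ... package C8+ residency would happen here ... */

	/* Undo in reverse: disallow power-down, relock, switch back to LCPLL. */
	hsw_restore_lcpll(dev_priv);
}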
@@ -8434,6 +8621,8 @@ check_shared_dpll_state(struct drm_device *dev) | |||
8434 | pll->active, pll->refcount); | 8621 | pll->active, pll->refcount); |
8435 | WARN(pll->active && !pll->on, | 8622 | WARN(pll->active && !pll->on, |
8436 | "pll in active use but not on in sw tracking\n"); | 8623 | "pll in active use but not on in sw tracking\n"); |
8624 | WARN(pll->on && !pll->active, | ||
8625 | "pll in on but not on in use in sw tracking\n"); | ||
8437 | WARN(pll->on != active, | 8626 | WARN(pll->on != active, |
8438 | "pll on state mismatch (expected %i, found %i)\n", | 8627 | "pll on state mismatch (expected %i, found %i)\n", |
8439 | pll->on, active); | 8628 | pll->on, active); |
@@ -8658,15 +8847,20 @@ static void intel_set_config_restore_state(struct drm_device *dev, | |||
8658 | } | 8847 | } |
8659 | 8848 | ||
8660 | static bool | 8849 | static bool |
8661 | is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors, | 8850 | is_crtc_connector_off(struct drm_mode_set *set) |
8662 | int num_connectors) | ||
8663 | { | 8851 | { |
8664 | int i; | 8852 | int i; |
8665 | 8853 | ||
8666 | for (i = 0; i < num_connectors; i++) | 8854 | if (set->num_connectors == 0) |
8667 | if (connectors[i].encoder && | 8855 | return false; |
8668 | connectors[i].encoder->crtc == crtc && | 8856 | |
8669 | connectors[i].dpms != DRM_MODE_DPMS_ON) | 8857 | if (WARN_ON(set->connectors == NULL)) |
8858 | return false; | ||
8859 | |||
8860 | for (i = 0; i < set->num_connectors; i++) | ||
8861 | if (set->connectors[i]->encoder && | ||
8862 | set->connectors[i]->encoder->crtc == set->crtc && | ||
8863 | set->connectors[i]->dpms != DRM_MODE_DPMS_ON) | ||
8670 | return true; | 8864 | return true; |
8671 | 8865 | ||
8672 | return false; | 8866 | return false; |
@@ -8679,10 +8873,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, | |||
8679 | 8873 | ||
8680 | /* We should be able to check here if the fb has the same properties | 8874 | /* We should be able to check here if the fb has the same properties |
8681 | * and then just flip_or_move it */ | 8875 | * and then just flip_or_move it */ |
8682 | if (set->connectors != NULL && | 8876 | if (is_crtc_connector_off(set)) { |
8683 | is_crtc_connector_off(set->crtc, *set->connectors, | 8877 | config->mode_changed = true; |
8684 | set->num_connectors)) { | ||
8685 | config->mode_changed = true; | ||
8686 | } else if (set->crtc->fb != set->fb) { | 8878 | } else if (set->crtc->fb != set->fb) { |
8687 | /* If we have no fb then treat it as a full mode set */ | 8879 | /* If we have no fb then treat it as a full mode set */ |
8688 | if (set->crtc->fb == NULL) { | 8880 | if (set->crtc->fb == NULL) { |
@@ -9437,9 +9629,12 @@ static void intel_init_display(struct drm_device *dev) | |||
9437 | else if (IS_I915G(dev)) | 9629 | else if (IS_I915G(dev)) |
9438 | dev_priv->display.get_display_clock_speed = | 9630 | dev_priv->display.get_display_clock_speed = |
9439 | i915_get_display_clock_speed; | 9631 | i915_get_display_clock_speed; |
9440 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) | 9632 | else if (IS_I945GM(dev) || IS_845G(dev)) |
9441 | dev_priv->display.get_display_clock_speed = | 9633 | dev_priv->display.get_display_clock_speed = |
9442 | i9xx_misc_get_display_clock_speed; | 9634 | i9xx_misc_get_display_clock_speed; |
9635 | else if (IS_PINEVIEW(dev)) | ||
9636 | dev_priv->display.get_display_clock_speed = | ||
9637 | pnv_get_display_clock_speed; | ||
9443 | else if (IS_I915GM(dev)) | 9638 | else if (IS_I915GM(dev)) |
9444 | dev_priv->display.get_display_clock_speed = | 9639 | dev_priv->display.get_display_clock_speed = |
9445 | i915gm_get_display_clock_speed; | 9640 | i915gm_get_display_clock_speed; |
@@ -9536,6 +9731,17 @@ static void quirk_invert_brightness(struct drm_device *dev) | |||
9536 | DRM_INFO("applying inverted panel brightness quirk\n"); | 9731 | DRM_INFO("applying inverted panel brightness quirk\n"); |
9537 | } | 9732 | } |
9538 | 9733 | ||
9734 | /* | ||
9735 | * Some machines (Dell XPS13) suffer broken backlight controls if | ||
9736 | * BLM_PCH_PWM_ENABLE is set. | ||
9737 | */ | ||
9738 | static void quirk_no_pcm_pwm_enable(struct drm_device *dev) | ||
9739 | { | ||
9740 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9741 | dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE; | ||
9742 | DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n"); | ||
9743 | } | ||
9744 | |||
9539 | struct intel_quirk { | 9745 | struct intel_quirk { |
9540 | int device; | 9746 | int device; |
9541 | int subsystem_vendor; | 9747 | int subsystem_vendor; |
@@ -9605,6 +9811,11 @@ static struct intel_quirk intel_quirks[] = { | |||
9605 | 9811 | ||
9606 | /* Acer Aspire 4736Z */ | 9812 | /* Acer Aspire 4736Z */ |
9607 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | 9813 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
9814 | |||
9815 | /* Dell XPS13 HD Sandy Bridge */ | ||
9816 | { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, | ||
9817 | /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */ | ||
9818 | { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable }, | ||
9608 | }; | 9819 | }; |
9609 | 9820 | ||
9610 | static void intel_init_quirks(struct drm_device *dev) | 9821 | static void intel_init_quirks(struct drm_device *dev) |
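intel_init_quirks() (its body is outside this excerpt) walks the intel_quirks[] table and fires the hook for matching PCI IDs. The field names beyond device and subsystem_vendor and the exact match logic are assumed here, so treat this as a sketch rather than the in-tree loop:

/* Sketch of the quirk table walk; member names beyond `device` and
 * `subsystem_vendor` are assumed. */
struct pci_dev *d = dev->pdev;
int i;

for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
	struct intel_quirk *q = &intel_quirks[i];

	if (d->device == q->device &&
	    d->subsystem_vendor == q->subsystem_vendor &&
	    d->subsystem_device == q->subsystem_device)
		q->hook(dev);	/* e.g. quirk_no_pcm_pwm_enable for the XPS13s */
}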
@@ -9955,8 +10166,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
9955 | } | 10166 | } |
9956 | pll->refcount = pll->active; | 10167 | pll->refcount = pll->active; |
9957 | 10168 | ||
9958 | DRM_DEBUG_KMS("%s hw state readout: refcount %i\n", | 10169 | DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", |
9959 | pll->name, pll->refcount); | 10170 | pll->name, pll->refcount, pll->on); |
9960 | } | 10171 | } |
9961 | 10172 | ||
9962 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 10173 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
@@ -10016,6 +10227,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
10016 | struct drm_plane *plane; | 10227 | struct drm_plane *plane; |
10017 | struct intel_crtc *crtc; | 10228 | struct intel_crtc *crtc; |
10018 | struct intel_encoder *encoder; | 10229 | struct intel_encoder *encoder; |
10230 | int i; | ||
10019 | 10231 | ||
10020 | intel_modeset_readout_hw_state(dev); | 10232 | intel_modeset_readout_hw_state(dev); |
10021 | 10233 | ||
@@ -10047,6 +10259,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
10047 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); | 10259 | intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); |
10048 | } | 10260 | } |
10049 | 10261 | ||
10262 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | ||
10263 | struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; | ||
10264 | |||
10265 | if (!pll->on || pll->active) | ||
10266 | continue; | ||
10267 | |||
10268 | DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); | ||
10269 | |||
10270 | pll->disable(dev_priv, pll); | ||
10271 | pll->on = false; | ||
10272 | } | ||
10273 | |||
10050 | if (force_restore) { | 10274 | if (force_restore) { |
10051 | /* | 10275 | /* |
10052 | * We need to use raw interfaces for restoring state to avoid | 10276 | * We need to use raw interfaces for restoring state to avoid |
@@ -10256,8 +10480,7 @@ intel_display_capture_error_state(struct drm_device *dev) | |||
10256 | * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to | 10480 | * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to |
10257 | * prevent the next I915_WRITE from detecting it and printing an error | 10481 | * prevent the next I915_WRITE from detecting it and printing an error |
10258 | * message. */ | 10482 | * message. */ |
10259 | if (HAS_POWER_WELL(dev)) | 10483 | intel_uncore_clear_errors(dev); |
10260 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
10261 | 10484 | ||
10262 | return error; | 10485 | return error; |
10263 | } | 10486 | } |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 7db2cd76786d..d0c3f9b08387 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) | |||
276 | return status; | 276 | return status; |
277 | } | 277 | } |
278 | 278 | ||
279 | static int | 279 | static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, |
280 | intel_dp_aux_ch(struct intel_dp *intel_dp, | 280 | int index) |
281 | uint8_t *send, int send_bytes, | ||
282 | uint8_t *recv, int recv_size) | ||
283 | { | 281 | { |
284 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 282 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
285 | struct drm_device *dev = intel_dig_port->base.base.dev; | 283 | struct drm_device *dev = intel_dig_port->base.base.dev; |
286 | struct drm_i915_private *dev_priv = dev->dev_private; | 284 | struct drm_i915_private *dev_priv = dev->dev_private; |
287 | uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; | ||
288 | uint32_t ch_data = ch_ctl + 4; | ||
289 | int i, ret, recv_bytes; | ||
290 | uint32_t status; | ||
291 | uint32_t aux_clock_divider; | ||
292 | int try, precharge; | ||
293 | bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); | ||
294 | 285 | ||
295 | /* dp aux is extremely sensitive to irq latency, hence request the | ||
296 | * lowest possible wakeup latency and so prevent the cpu from going into | ||
297 | * deep sleep states. | ||
298 | */ | ||
299 | pm_qos_update_request(&dev_priv->pm_qos, 0); | ||
300 | |||
301 | intel_dp_check_edp(intel_dp); | ||
302 | /* The clock divider is based off the hrawclk, | 286 | /* The clock divider is based off the hrawclk, |
303 | * and would like to run at 2MHz. So, take the | 287 | * and would like to run at 2MHz. So, take the |
304 | * hrawclk value and divide by 2 and use that | 288 | * hrawclk value and divide by 2 and use that |
@@ -307,23 +291,53 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
307 | * clock divider. | 291 | * clock divider. |
308 | */ | 292 | */ |
309 | if (IS_VALLEYVIEW(dev)) { | 293 | if (IS_VALLEYVIEW(dev)) { |
310 | aux_clock_divider = 100; | 294 | return index ? 0 : 100; |
311 | } else if (intel_dig_port->port == PORT_A) { | 295 | } else if (intel_dig_port->port == PORT_A) { |
296 | if (index) | ||
297 | return 0; | ||
312 | if (HAS_DDI(dev)) | 298 | if (HAS_DDI(dev)) |
313 | aux_clock_divider = DIV_ROUND_CLOSEST( | 299 | return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); |
314 | intel_ddi_get_cdclk_freq(dev_priv), 2000); | ||
315 | else if (IS_GEN6(dev) || IS_GEN7(dev)) | 300 | else if (IS_GEN6(dev) || IS_GEN7(dev)) |
316 | aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ | 301 | return 200; /* SNB & IVB eDP input clock at 400Mhz */ |
317 | else | 302 | else |
318 | aux_clock_divider = 225; /* eDP input clock at 450Mhz */ | 303 | return 225; /* eDP input clock at 450Mhz */ |
319 | } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { | 304 | } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { |
320 | /* Workaround for non-ULT HSW */ | 305 | /* Workaround for non-ULT HSW */ |
321 | aux_clock_divider = 74; | 306 | switch (index) { |
307 | case 0: return 63; | ||
308 | case 1: return 72; | ||
309 | default: return 0; | ||
310 | } | ||
322 | } else if (HAS_PCH_SPLIT(dev)) { | 311 | } else if (HAS_PCH_SPLIT(dev)) { |
323 | aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); | 312 | return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); |
324 | } else { | 313 | } else { |
325 | aux_clock_divider = intel_hrawclk(dev) / 2; | 314 | return index ? 0 : intel_hrawclk(dev) / 2; |
326 | } | 315 | } |
316 | } | ||
317 | |||
318 | static int | ||
319 | intel_dp_aux_ch(struct intel_dp *intel_dp, | ||
320 | uint8_t *send, int send_bytes, | ||
321 | uint8_t *recv, int recv_size) | ||
322 | { | ||
323 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
324 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
325 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
326 | uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; | ||
327 | uint32_t ch_data = ch_ctl + 4; | ||
328 | uint32_t aux_clock_divider; | ||
329 | int i, ret, recv_bytes; | ||
330 | uint32_t status; | ||
331 | int try, precharge, clock = 0; | ||
332 | bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); | ||
333 | |||
334 | /* dp aux is extremely sensitive to irq latency, hence request the | ||
335 | * lowest possible wakeup latency and so prevent the cpu from going into | ||
336 | * deep sleep states. | ||
337 | */ | ||
338 | pm_qos_update_request(&dev_priv->pm_qos, 0); | ||
339 | |||
340 | intel_dp_check_edp(intel_dp); | ||
327 | 341 | ||
328 | if (IS_GEN6(dev)) | 342 | if (IS_GEN6(dev)) |
329 | precharge = 3; | 343 | precharge = 3; |
@@ -345,37 +359,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
345 | goto out; | 359 | goto out; |
346 | } | 360 | } |
347 | 361 | ||
348 | /* Must try at least 3 times according to DP spec */ | 362 | while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { |
349 | for (try = 0; try < 5; try++) { | 363 | /* Must try at least 3 times according to DP spec */ |
350 | /* Load the send data into the aux channel data registers */ | 364 | for (try = 0; try < 5; try++) { |
351 | for (i = 0; i < send_bytes; i += 4) | 365 | /* Load the send data into the aux channel data registers */ |
352 | I915_WRITE(ch_data + i, | 366 | for (i = 0; i < send_bytes; i += 4) |
353 | pack_aux(send + i, send_bytes - i)); | 367 | I915_WRITE(ch_data + i, |
354 | 368 | pack_aux(send + i, send_bytes - i)); | |
355 | /* Send the command and wait for it to complete */ | 369 | |
356 | I915_WRITE(ch_ctl, | 370 | /* Send the command and wait for it to complete */ |
357 | DP_AUX_CH_CTL_SEND_BUSY | | 371 | I915_WRITE(ch_ctl, |
358 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | | 372 | DP_AUX_CH_CTL_SEND_BUSY | |
359 | DP_AUX_CH_CTL_TIME_OUT_400us | | 373 | (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | |
360 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | 374 | DP_AUX_CH_CTL_TIME_OUT_400us | |
361 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | 375 | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | |
362 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | | 376 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | |
363 | DP_AUX_CH_CTL_DONE | | 377 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | |
364 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 378 | DP_AUX_CH_CTL_DONE | |
365 | DP_AUX_CH_CTL_RECEIVE_ERROR); | 379 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
366 | 380 | DP_AUX_CH_CTL_RECEIVE_ERROR); | |
367 | status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); | 381 | |
368 | 382 | status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); | |
369 | /* Clear done status and any errors */ | 383 | |
370 | I915_WRITE(ch_ctl, | 384 | /* Clear done status and any errors */ |
371 | status | | 385 | I915_WRITE(ch_ctl, |
372 | DP_AUX_CH_CTL_DONE | | 386 | status | |
373 | DP_AUX_CH_CTL_TIME_OUT_ERROR | | 387 | DP_AUX_CH_CTL_DONE | |
374 | DP_AUX_CH_CTL_RECEIVE_ERROR); | 388 | DP_AUX_CH_CTL_TIME_OUT_ERROR | |
375 | 389 | DP_AUX_CH_CTL_RECEIVE_ERROR); | |
376 | if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | | 390 | |
377 | DP_AUX_CH_CTL_RECEIVE_ERROR)) | 391 | if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | |
378 | continue; | 392 | DP_AUX_CH_CTL_RECEIVE_ERROR)) |
393 | continue; | ||
394 | if (status & DP_AUX_CH_CTL_DONE) | ||
395 | break; | ||
396 | } | ||
379 | if (status & DP_AUX_CH_CTL_DONE) | 397 | if (status & DP_AUX_CH_CTL_DONE) |
380 | break; | 398 | break; |
381 | } | 399 | } |
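For reference, a minimal sketch of the retry structure introduced above: intel_dp_aux_ch() now asks get_aux_clock_divider() for successive candidate dividers (index 0, 1, ...) and retries the whole transfer with each one until the lookup returns 0. get_divider() and try_transfer() below are hypothetical stand-ins for get_aux_clock_divider() and a single AUX send/receive attempt; this is an illustration of the loop shape, not the driver code itself.

    /* Walk candidate bit-clock dividers until the lookup returns 0,
     * retrying the transfer a few times at each divider. */
    static int aux_transfer(void *dp,
                            unsigned int (*get_divider)(void *dp, int index),
                            int (*try_transfer)(void *dp, unsigned int divider))
    {
            unsigned int divider;
            int clock = 0, try;

            while ((divider = get_divider(dp, clock++))) {
                    /* DP spec asks for at least 3 retries; the driver uses 5. */
                    for (try = 0; try < 5; try++)
                            if (try_transfer(dp, divider) == 0)
                                    return 0;
            }
            return -1;      /* all dividers and retries exhausted */
    }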
@@ -710,8 +728,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
710 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 | 728 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 |
711 | * bpc in between. */ | 729 | * bpc in between. */ |
712 | bpp = pipe_config->pipe_bpp; | 730 | bpp = pipe_config->pipe_bpp; |
713 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) | 731 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { |
732 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", | ||
733 | dev_priv->vbt.edp_bpp); | ||
714 | bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); | 734 | bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); |
735 | } | ||
715 | 736 | ||
716 | for (; bpp >= 6*3; bpp -= 2*3) { | 737 | for (; bpp >= 6*3; bpp -= 2*3) { |
717 | mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); | 738 | mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); |
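A condensed sketch of the bpp selection touched by this hunk, under the same logic as the code above: start from the pipe bpp, clamp it to the VBT-provided eDP limit when one exists (now with a debug message), then step down by 2 bits per channel (2*3 = 6 bpp) until the mode fits the link. required_rate() and available_rate are hypothetical stand-ins for intel_dp_link_required() and the link capacity.

    static int pick_bpp(int pipe_bpp, int vbt_edp_bpp,
                        int (*required_rate)(int bpp),
                        int available_rate)
    {
            int bpp = pipe_bpp;

            if (vbt_edp_bpp)                        /* BIOS-provided clamp */
                    bpp = bpp < vbt_edp_bpp ? bpp : vbt_edp_bpp;

            for (; bpp >= 6 * 3; bpp -= 2 * 3)      /* down to 6 bpc */
                    if (required_rate(bpp) <= available_rate)
                            return bpp;

            return -1;                              /* mode does not fit the link */
    }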
@@ -1369,6 +1390,268 @@ static void intel_dp_get_config(struct intel_encoder *encoder, | |||
1369 | } | 1390 | } |
1370 | } | 1391 | } |
1371 | 1392 | ||
1393 | static bool is_edp_psr(struct intel_dp *intel_dp) | ||
1394 | { | ||
1395 | return is_edp(intel_dp) && | ||
1396 | intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; | ||
1397 | } | ||
1398 | |||
1399 | static bool intel_edp_is_psr_enabled(struct drm_device *dev) | ||
1400 | { | ||
1401 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1402 | |||
1403 | if (!IS_HASWELL(dev)) | ||
1404 | return false; | ||
1405 | |||
1406 | return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; | ||
1407 | } | ||
1408 | |||
1409 | static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, | ||
1410 | struct edp_vsc_psr *vsc_psr) | ||
1411 | { | ||
1412 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
1413 | struct drm_device *dev = dig_port->base.base.dev; | ||
1414 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1415 | struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); | ||
1416 | u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); | ||
1417 | u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); | ||
1418 | uint32_t *data = (uint32_t *) vsc_psr; | ||
1419 | unsigned int i; | ||
1420 | |||
1421 | /* As per BSpec (Pipe Video Data Island Packet), we need to disable | ||
1422 | the video DIP being updated before programming the video DIP data | ||
1423 | buffer registers for the DIP being updated. */ | ||
1424 | I915_WRITE(ctl_reg, 0); | ||
1425 | POSTING_READ(ctl_reg); | ||
1426 | |||
1427 | for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { | ||
1428 | if (i < sizeof(struct edp_vsc_psr)) | ||
1429 | I915_WRITE(data_reg + i, *data++); | ||
1430 | else | ||
1431 | I915_WRITE(data_reg + i, 0); | ||
1432 | } | ||
1433 | |||
1434 | I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); | ||
1435 | POSTING_READ(ctl_reg); | ||
1436 | } | ||
1437 | |||
1438 | static void intel_edp_psr_setup(struct intel_dp *intel_dp) | ||
1439 | { | ||
1440 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1441 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1442 | struct edp_vsc_psr psr_vsc; | ||
1443 | |||
1444 | if (intel_dp->psr_setup_done) | ||
1445 | return; | ||
1446 | |||
1447 | /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ | ||
1448 | memset(&psr_vsc, 0, sizeof(psr_vsc)); | ||
1449 | psr_vsc.sdp_header.HB0 = 0; | ||
1450 | psr_vsc.sdp_header.HB1 = 0x7; | ||
1451 | psr_vsc.sdp_header.HB2 = 0x2; | ||
1452 | psr_vsc.sdp_header.HB3 = 0x8; | ||
1453 | intel_edp_psr_write_vsc(intel_dp, &psr_vsc); | ||
1454 | |||
1455 | /* Avoid continuous PSR exit by masking memup and hpd */ | ||
1456 | I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | | ||
1457 | EDP_PSR_DEBUG_MASK_HPD); | ||
1458 | |||
1459 | intel_dp->psr_setup_done = true; | ||
1460 | } | ||
1461 | |||
1462 | static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) | ||
1463 | { | ||
1464 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1465 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1466 | uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); | ||
1467 | int precharge = 0x3; | ||
1468 | int msg_size = 5; /* Header(4) + Message(1) */ | ||
1469 | |||
1470 | /* Enable PSR in sink */ | ||
1471 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) | ||
1472 | intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, | ||
1473 | DP_PSR_ENABLE & | ||
1474 | ~DP_PSR_MAIN_LINK_ACTIVE); | ||
1475 | else | ||
1476 | intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, | ||
1477 | DP_PSR_ENABLE | | ||
1478 | DP_PSR_MAIN_LINK_ACTIVE); | ||
1479 | |||
1480 | /* Setup AUX registers */ | ||
1481 | I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); | ||
1482 | I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); | ||
1483 | I915_WRITE(EDP_PSR_AUX_CTL, | ||
1484 | DP_AUX_CH_CTL_TIME_OUT_400us | | ||
1485 | (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | | ||
1486 | (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | | ||
1487 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); | ||
1488 | } | ||
1489 | |||
1490 | static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) | ||
1491 | { | ||
1492 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1493 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1494 | uint32_t max_sleep_time = 0x1f; | ||
1495 | uint32_t idle_frames = 1; | ||
1496 | uint32_t val = 0x0; | ||
1497 | |||
1498 | if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { | ||
1499 | val |= EDP_PSR_LINK_STANDBY; | ||
1500 | val |= EDP_PSR_TP2_TP3_TIME_0us; | ||
1501 | val |= EDP_PSR_TP1_TIME_0us; | ||
1502 | val |= EDP_PSR_SKIP_AUX_EXIT; | ||
1503 | } else | ||
1504 | val |= EDP_PSR_LINK_DISABLE; | ||
1505 | |||
1506 | I915_WRITE(EDP_PSR_CTL, val | | ||
1507 | EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | | ||
1508 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | | ||
1509 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | | ||
1510 | EDP_PSR_ENABLE); | ||
1511 | } | ||
1512 | |||
1513 | static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) | ||
1514 | { | ||
1515 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
1516 | struct drm_device *dev = dig_port->base.base.dev; | ||
1517 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1518 | struct drm_crtc *crtc = dig_port->base.base.crtc; | ||
1519 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1520 | struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; | ||
1521 | struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; | ||
1522 | |||
1523 | if (!IS_HASWELL(dev)) { | ||
1524 | DRM_DEBUG_KMS("PSR not supported on this platform\n"); | ||
1525 | dev_priv->no_psr_reason = PSR_NO_SOURCE; | ||
1526 | return false; | ||
1527 | } | ||
1528 | |||
1529 | if ((intel_encoder->type != INTEL_OUTPUT_EDP) || | ||
1530 | (dig_port->port != PORT_A)) { | ||
1531 | DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); | ||
1532 | dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA; | ||
1533 | return false; | ||
1534 | } | ||
1535 | |||
1536 | if (!is_edp_psr(intel_dp)) { | ||
1537 | DRM_DEBUG_KMS("PSR not supported by this panel\n"); | ||
1538 | dev_priv->no_psr_reason = PSR_NO_SINK; | ||
1539 | return false; | ||
1540 | } | ||
1541 | |||
1542 | if (!i915_enable_psr) { | ||
1543 | DRM_DEBUG_KMS("PSR disabled by flag\n"); | ||
1544 | dev_priv->no_psr_reason = PSR_MODULE_PARAM; | ||
1545 | return false; | ||
1546 | } | ||
1547 | |||
1548 | crtc = dig_port->base.base.crtc; | ||
1549 | if (crtc == NULL) { | ||
1550 | DRM_DEBUG_KMS("crtc not active for PSR\n"); | ||
1551 | dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; | ||
1552 | return false; | ||
1553 | } | ||
1554 | |||
1555 | intel_crtc = to_intel_crtc(crtc); | ||
1556 | if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { | ||
1557 | DRM_DEBUG_KMS("crtc not active for PSR\n"); | ||
1558 | dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; | ||
1559 | return false; | ||
1560 | } | ||
1561 | |||
1562 | obj = to_intel_framebuffer(crtc->fb)->obj; | ||
1563 | if (obj->tiling_mode != I915_TILING_X || | ||
1564 | obj->fence_reg == I915_FENCE_REG_NONE) { | ||
1565 | DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); | ||
1566 | dev_priv->no_psr_reason = PSR_NOT_TILED; | ||
1567 | return false; | ||
1568 | } | ||
1569 | |||
1570 | if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { | ||
1571 | DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); | ||
1572 | dev_priv->no_psr_reason = PSR_SPRITE_ENABLED; | ||
1573 | return false; | ||
1574 | } | ||
1575 | |||
1576 | if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & | ||
1577 | S3D_ENABLE) { | ||
1578 | DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); | ||
1579 | dev_priv->no_psr_reason = PSR_S3D_ENABLED; | ||
1580 | return false; | ||
1581 | } | ||
1582 | |||
1583 | if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { | ||
1584 | DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); | ||
1585 | dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED; | ||
1586 | return false; | ||
1587 | } | ||
1588 | |||
1589 | return true; | ||
1590 | } | ||
1591 | |||
1592 | static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) | ||
1593 | { | ||
1594 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1595 | |||
1596 | if (!intel_edp_psr_match_conditions(intel_dp) || | ||
1597 | intel_edp_is_psr_enabled(dev)) | ||
1598 | return; | ||
1599 | |||
1600 | /* Setup PSR once */ | ||
1601 | intel_edp_psr_setup(intel_dp); | ||
1602 | |||
1603 | /* Enable PSR on the panel */ | ||
1604 | intel_edp_psr_enable_sink(intel_dp); | ||
1605 | |||
1606 | /* Enable PSR on the host */ | ||
1607 | intel_edp_psr_enable_source(intel_dp); | ||
1608 | } | ||
1609 | |||
1610 | void intel_edp_psr_enable(struct intel_dp *intel_dp) | ||
1611 | { | ||
1612 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1613 | |||
1614 | if (intel_edp_psr_match_conditions(intel_dp) && | ||
1615 | !intel_edp_is_psr_enabled(dev)) | ||
1616 | intel_edp_psr_do_enable(intel_dp); | ||
1617 | } | ||
1618 | |||
1619 | void intel_edp_psr_disable(struct intel_dp *intel_dp) | ||
1620 | { | ||
1621 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
1622 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1623 | |||
1624 | if (!intel_edp_is_psr_enabled(dev)) | ||
1625 | return; | ||
1626 | |||
1627 | I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); | ||
1628 | |||
1629 | /* Wait till PSR is idle */ | ||
1630 | if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & | ||
1631 | EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) | ||
1632 | DRM_ERROR("Timed out waiting for PSR Idle State\n"); | ||
1633 | } | ||
1634 | |||
1635 | void intel_edp_psr_update(struct drm_device *dev) | ||
1636 | { | ||
1637 | struct intel_encoder *encoder; | ||
1638 | struct intel_dp *intel_dp = NULL; | ||
1639 | |||
1640 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) | ||
1641 | if (encoder->type == INTEL_OUTPUT_EDP) { | ||
1642 | intel_dp = enc_to_intel_dp(&encoder->base); | ||
1643 | |||
1644 | if (!is_edp_psr(intel_dp)) | ||
1645 | return; | ||
1646 | |||
1647 | if (!intel_edp_psr_match_conditions(intel_dp)) | ||
1648 | intel_edp_psr_disable(intel_dp); | ||
1649 | else | ||
1650 | if (!intel_edp_is_psr_enabled(dev)) | ||
1651 | intel_edp_psr_do_enable(intel_dp); | ||
1652 | } | ||
1653 | } | ||
1654 | |||
1372 | static void intel_disable_dp(struct intel_encoder *encoder) | 1655 | static void intel_disable_dp(struct intel_encoder *encoder) |
1373 | { | 1656 | { |
1374 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1657 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
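To summarize the gatekeeping above in one place, the conditions checked by intel_edp_psr_match_conditions() boil down to the checklist below. This is a condensed restatement for readability, not additional driver code; psr_state is a hypothetical struct collecting the same facts the driver reads from dev_priv, the crtc and the framebuffer.

    struct psr_state {
            bool is_haswell, is_ddi_a_edp, sink_supports_psr, module_param_on;
            bool crtc_active, fb_x_tiled_and_fenced;
            bool sprite_enabled, stereo_3d_enabled, interlaced;
    };

    static bool psr_may_enable(const struct psr_state *s)
    {
            return s->is_haswell && s->is_ddi_a_edp && s->sink_supports_psr &&
                   s->module_param_on && s->crtc_active &&
                   s->fb_x_tiled_and_fenced &&
                   !s->sprite_enabled && !s->stereo_3d_enabled && !s->interlaced;
    }

When all of these hold, intel_edp_psr_do_enable() performs the one-time VSC/DIP setup, enables PSR in the sink via DPCD, and finally enables the source side through EDP_PSR_CTL, as shown in the hunk above.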
@@ -2282,6 +2565,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
2282 | if (intel_dp->dpcd[DP_DPCD_REV] == 0) | 2565 | if (intel_dp->dpcd[DP_DPCD_REV] == 0) |
2283 | return false; /* DPCD not present */ | 2566 | return false; /* DPCD not present */ |
2284 | 2567 | ||
2568 | /* Check if the panel supports PSR */ | ||
2569 | memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); | ||
2570 | intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, | ||
2571 | intel_dp->psr_dpcd, | ||
2572 | sizeof(intel_dp->psr_dpcd)); | ||
2573 | if (is_edp_psr(intel_dp)) | ||
2574 | DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); | ||
2285 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & | 2575 | if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & |
2286 | DP_DWN_STRM_PORT_PRESENT)) | 2576 | DP_DWN_STRM_PORT_PRESENT)) |
2287 | return true; /* native DP sink */ | 2577 | return true; /* native DP sink */ |
@@ -2549,6 +2839,9 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
2549 | enum drm_connector_status status; | 2839 | enum drm_connector_status status; |
2550 | struct edid *edid = NULL; | 2840 | struct edid *edid = NULL; |
2551 | 2841 | ||
2842 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | ||
2843 | connector->base.id, drm_get_connector_name(connector)); | ||
2844 | |||
2552 | intel_dp->has_audio = false; | 2845 | intel_dp->has_audio = false; |
2553 | 2846 | ||
2554 | if (HAS_PCH_SPLIT(dev)) | 2847 | if (HAS_PCH_SPLIT(dev)) |
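The series also adds a uniform "[CONNECTOR:%d:%s]" breadcrumb at the top of every ->detect() callback (DP here, and DVO/HDMI/LVDS/SDVO/TV further below), which makes it easier to correlate hotplug-storm logs with the connector being probed. For illustration, the pattern in a hypothetical detect() implementation:

    static enum drm_connector_status
    example_detect(struct drm_connector *connector, bool force)
    {
            DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                          connector->base.id, drm_get_connector_name(connector));

            /* ... actual probing happens here ... */
            return connector_status_unknown;
    }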
@@ -3173,6 +3466,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
3173 | WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", | 3466 | WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", |
3174 | error, port_name(port)); | 3467 | error, port_name(port)); |
3175 | 3468 | ||
3469 | intel_dp->psr_setup_done = false; | ||
3470 | |||
3176 | if (!intel_edp_init_connector(intel_dp, intel_connector)) { | 3471 | if (!intel_edp_init_connector(intel_dp, intel_connector)) { |
3177 | i2c_del_adapter(&intel_dp->adapter); | 3472 | i2c_del_adapter(&intel_dp->adapter); |
3178 | if (is_edp(intel_dp)) { | 3473 | if (is_edp(intel_dp)) { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5dfc1a0f2351..d9f50e368fe9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -487,6 +487,7 @@ struct intel_dp { | |||
487 | uint8_t link_bw; | 487 | uint8_t link_bw; |
488 | uint8_t lane_count; | 488 | uint8_t lane_count; |
489 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 489 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
490 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; | ||
490 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; | 491 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; |
491 | struct i2c_adapter adapter; | 492 | struct i2c_adapter adapter; |
492 | struct i2c_algo_dp_aux_data algo; | 493 | struct i2c_algo_dp_aux_data algo; |
@@ -498,13 +499,14 @@ struct intel_dp { | |||
498 | int backlight_off_delay; | 499 | int backlight_off_delay; |
499 | struct delayed_work panel_vdd_work; | 500 | struct delayed_work panel_vdd_work; |
500 | bool want_panel_vdd; | 501 | bool want_panel_vdd; |
502 | bool psr_setup_done; | ||
501 | struct intel_connector *attached_connector; | 503 | struct intel_connector *attached_connector; |
502 | }; | 504 | }; |
503 | 505 | ||
504 | struct intel_digital_port { | 506 | struct intel_digital_port { |
505 | struct intel_encoder base; | 507 | struct intel_encoder base; |
506 | enum port port; | 508 | enum port port; |
507 | u32 port_reversal; | 509 | u32 saved_port_bits; |
508 | struct intel_dp dp; | 510 | struct intel_dp dp; |
509 | struct intel_hdmi hdmi; | 511 | struct intel_hdmi hdmi; |
510 | }; | 512 | }; |
@@ -804,7 +806,6 @@ extern void intel_init_power_well(struct drm_device *dev); | |||
804 | extern void intel_set_power_well(struct drm_device *dev, bool enable); | 806 | extern void intel_set_power_well(struct drm_device *dev, bool enable); |
805 | extern void intel_enable_gt_powersave(struct drm_device *dev); | 807 | extern void intel_enable_gt_powersave(struct drm_device *dev); |
806 | extern void intel_disable_gt_powersave(struct drm_device *dev); | 808 | extern void intel_disable_gt_powersave(struct drm_device *dev); |
807 | extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); | ||
808 | extern void ironlake_teardown_rc6(struct drm_device *dev); | 809 | extern void ironlake_teardown_rc6(struct drm_device *dev); |
809 | 810 | ||
810 | extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | 811 | extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, |
@@ -833,4 +834,11 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, | |||
833 | enum transcoder pch_transcoder, | 834 | enum transcoder pch_transcoder, |
834 | bool enable); | 835 | bool enable); |
835 | 836 | ||
837 | extern void intel_edp_psr_enable(struct intel_dp *intel_dp); | ||
838 | extern void intel_edp_psr_disable(struct intel_dp *intel_dp); | ||
839 | extern void intel_edp_psr_update(struct drm_device *dev); | ||
840 | extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv, | ||
841 | bool switch_to_fclk, bool allow_power_down); | ||
842 | extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv); | ||
843 | |||
836 | #endif /* __INTEL_DRV_H__ */ | 844 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index cbbc49dc03be..8b4ad27791f3 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -332,6 +332,8 @@ static enum drm_connector_status | |||
332 | intel_dvo_detect(struct drm_connector *connector, bool force) | 332 | intel_dvo_detect(struct drm_connector *connector, bool force) |
333 | { | 333 | { |
334 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); | 334 | struct intel_dvo *intel_dvo = intel_attached_dvo(connector); |
335 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | ||
336 | connector->base.id, drm_get_connector_name(connector)); | ||
335 | return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); | 337 | return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); |
336 | } | 338 | } |
337 | 339 | ||
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 98df2a0c85bd..af18da76c04b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -866,6 +866,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
866 | struct edid *edid; | 866 | struct edid *edid; |
867 | enum drm_connector_status status = connector_status_disconnected; | 867 | enum drm_connector_status status = connector_status_disconnected; |
868 | 868 | ||
869 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | ||
870 | connector->base.id, drm_get_connector_name(connector)); | ||
871 | |||
869 | intel_hdmi->has_hdmi_sink = false; | 872 | intel_hdmi->has_hdmi_sink = false; |
870 | intel_hdmi->has_audio = false; | 873 | intel_hdmi->has_audio = false; |
871 | intel_hdmi->rgb_quant_range_selectable = false; | 874 | intel_hdmi->rgb_quant_range_selectable = false; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index a0745d143902..2110df24454b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, | |||
109 | flags |= DRM_MODE_FLAG_PVSYNC; | 109 | flags |= DRM_MODE_FLAG_PVSYNC; |
110 | 110 | ||
111 | pipe_config->adjusted_mode.flags |= flags; | 111 | pipe_config->adjusted_mode.flags |= flags; |
112 | |||
113 | /* gen2/3 store dither state in pfit control, needs to match */ | ||
114 | if (INTEL_INFO(dev)->gen < 4) { | ||
115 | tmp = I915_READ(PFIT_CONTROL); | ||
116 | |||
117 | pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; | ||
118 | } | ||
112 | } | 119 | } |
113 | 120 | ||
114 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 121 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
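A small sketch of the readout fixup above, assuming the usual state cross-check semantics: on gen2/3 the dither enable lives in PFIT_CONTROL, so the hardware state readout has to fold that bit into the software pfit state or the later comparison would flag a spurious mismatch. read_pfit_control() is a hypothetical register accessor; PANEL_8TO6_DITHER_ENABLE is the bit used in the hunk.

    static unsigned int readout_gmch_pfit(int gen, unsigned int sw_pfit_control,
                                          unsigned int (*read_pfit_control)(void))
    {
            if (gen < 4)
                    sw_pfit_control |= read_pfit_control() & PANEL_8TO6_DITHER_ENABLE;
            return sw_pfit_control;
    }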
@@ -297,14 +304,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, | |||
297 | 304 | ||
298 | intel_pch_panel_fitting(intel_crtc, pipe_config, | 305 | intel_pch_panel_fitting(intel_crtc, pipe_config, |
299 | intel_connector->panel.fitting_mode); | 306 | intel_connector->panel.fitting_mode); |
300 | return true; | ||
301 | } else { | 307 | } else { |
302 | intel_gmch_panel_fitting(intel_crtc, pipe_config, | 308 | intel_gmch_panel_fitting(intel_crtc, pipe_config, |
303 | intel_connector->panel.fitting_mode); | 309 | intel_connector->panel.fitting_mode); |
304 | } | ||
305 | 310 | ||
306 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 311 | } |
307 | pipe_config->timings_set = true; | ||
308 | 312 | ||
309 | /* | 313 | /* |
310 | * XXX: It would be nice to support lower refresh rates on the | 314 | * XXX: It would be nice to support lower refresh rates on the |
@@ -339,6 +343,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force) | |||
339 | struct drm_device *dev = connector->dev; | 343 | struct drm_device *dev = connector->dev; |
340 | enum drm_connector_status status; | 344 | enum drm_connector_status status; |
341 | 345 | ||
346 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | ||
347 | connector->base.id, drm_get_connector_name(connector)); | ||
348 | |||
342 | status = intel_panel_detect(dev); | 349 | status = intel_panel_detect(dev); |
343 | if (status != connector_status_unknown) | 350 | if (status != connector_status_unknown) |
344 | return status; | 351 | return status; |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 2abb53e6f1e0..9ec5a4e12af2 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev) | |||
1333 | 1333 | ||
1334 | overlay->dev = dev; | 1334 | overlay->dev = dev; |
1335 | 1335 | ||
1336 | reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); | 1336 | reg_bo = NULL; |
1337 | if (!OVERLAY_NEEDS_PHYSICAL(dev)) | ||
1338 | reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); | ||
1337 | if (reg_bo == NULL) | 1339 | if (reg_bo == NULL) |
1338 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); | 1340 | reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); |
1339 | if (reg_bo == NULL) | 1341 | if (reg_bo == NULL) |
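The allocation order above can be read as: prefer stolen memory for the overlay register BO unless the platform needs the registers physically addressed, and in either case fall back to a regular GEM object if nothing was allocated. A hedged sketch of that fallback shape, with hypothetical alloc_stolen()/alloc_gem() helpers standing in for the i915 calls:

    static void *alloc_overlay_regs(bool needs_physical, size_t size,
                                    void *(*alloc_stolen)(size_t),
                                    void *(*alloc_gem)(size_t))
    {
            void *bo = NULL;

            if (!needs_physical)
                    bo = alloc_stolen(size);        /* may return NULL */
            if (!bo)
                    bo = alloc_gem(size);           /* regular shmem-backed object */
            return bo;
    }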
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 80bea1d3209f..67e2c1f1c9a8 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc, | |||
194 | adjusted_mode->vdisplay == mode->vdisplay) | 194 | adjusted_mode->vdisplay == mode->vdisplay) |
195 | goto out; | 195 | goto out; |
196 | 196 | ||
197 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
198 | pipe_config->timings_set = true; | ||
199 | |||
197 | switch (fitting_mode) { | 200 | switch (fitting_mode) { |
198 | case DRM_MODE_SCALE_CENTER: | 201 | case DRM_MODE_SCALE_CENTER: |
199 | /* | 202 | /* |
@@ -580,7 +583,8 @@ void intel_panel_enable_backlight(struct drm_device *dev, | |||
580 | POSTING_READ(reg); | 583 | POSTING_READ(reg); |
581 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); | 584 | I915_WRITE(reg, tmp | BLM_PWM_ENABLE); |
582 | 585 | ||
583 | if (HAS_PCH_SPLIT(dev)) { | 586 | if (HAS_PCH_SPLIT(dev) && |
587 | !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) { | ||
584 | tmp = I915_READ(BLC_PWM_PCH_CTL1); | 588 | tmp = I915_READ(BLC_PWM_PCH_CTL1); |
585 | tmp |= BLM_PCH_PWM_ENABLE; | 589 | tmp |= BLM_PCH_PWM_ENABLE; |
586 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; | 590 | tmp &= ~BLM_PCH_OVERRIDE_ENABLE; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index fb4afaa8036f..0a5ba92a4b12 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -32,8 +32,6 @@ | |||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <drm/i915_powerwell.h> | 33 | #include <drm/i915_powerwell.h> |
34 | 34 | ||
35 | #define FORCEWAKE_ACK_TIMEOUT_MS 2 | ||
36 | |||
37 | /* FBC, or Frame Buffer Compression, is a technique employed to compress the | 35 | /* FBC, or Frame Buffer Compression, is a technique employed to compress the |
38 | * framebuffer contents in-memory, aiming at reducing the required bandwidth | 36 | * framebuffer contents in-memory, aiming at reducing the required bandwidth |
39 | * during in-memory transfers and, therefore, reduce the power packet. | 37 | * during in-memory transfers and, therefore, reduce the power packet. |
@@ -404,6 +402,8 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | |||
404 | * following the termination of the page-flipping sequence | 402 | * following the termination of the page-flipping sequence |
405 | * and indeed performing the enable as a co-routine and not | 403 | * and indeed performing the enable as a co-routine and not |
406 | * waiting synchronously upon the vblank. | 404 | * waiting synchronously upon the vblank. |
405 | * | ||
406 | * WaFbcWaitForVBlankBeforeEnable:ilk,snb | ||
407 | */ | 407 | */ |
408 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); | 408 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); |
409 | } | 409 | } |
@@ -3121,13 +3121,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val) | |||
3121 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); | 3121 | trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val)); |
3122 | } | 3122 | } |
3123 | 3123 | ||
3124 | 3124 | static void gen6_disable_rps_interrupts(struct drm_device *dev) | |
3125 | static void gen6_disable_rps(struct drm_device *dev) | ||
3126 | { | 3125 | { |
3127 | struct drm_i915_private *dev_priv = dev->dev_private; | 3126 | struct drm_i915_private *dev_priv = dev->dev_private; |
3128 | 3127 | ||
3129 | I915_WRITE(GEN6_RC_CONTROL, 0); | ||
3130 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | ||
3131 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | 3128 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); |
3132 | I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); | 3129 | I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); |
3133 | /* Complete PM interrupt masking here doesn't race with the rps work | 3130 | /* Complete PM interrupt masking here doesn't race with the rps work |
@@ -3142,23 +3139,23 @@ static void gen6_disable_rps(struct drm_device *dev) | |||
3142 | I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); | 3139 | I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); |
3143 | } | 3140 | } |
3144 | 3141 | ||
3145 | static void valleyview_disable_rps(struct drm_device *dev) | 3142 | static void gen6_disable_rps(struct drm_device *dev) |
3146 | { | 3143 | { |
3147 | struct drm_i915_private *dev_priv = dev->dev_private; | 3144 | struct drm_i915_private *dev_priv = dev->dev_private; |
3148 | 3145 | ||
3149 | I915_WRITE(GEN6_RC_CONTROL, 0); | 3146 | I915_WRITE(GEN6_RC_CONTROL, 0); |
3150 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | 3147 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); |
3151 | I915_WRITE(GEN6_PMIER, 0); | ||
3152 | /* Complete PM interrupt masking here doesn't race with the rps work | ||
3153 | * item again unmasking PM interrupts because that is using a different | ||
3154 | * register (PMIMR) to mask PM interrupts. The only risk is in leaving | ||
3155 | * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ | ||
3156 | 3148 | ||
3157 | spin_lock_irq(&dev_priv->irq_lock); | 3149 | gen6_disable_rps_interrupts(dev); |
3158 | dev_priv->rps.pm_iir = 0; | 3150 | } |
3159 | spin_unlock_irq(&dev_priv->irq_lock); | 3151 | |
3152 | static void valleyview_disable_rps(struct drm_device *dev) | ||
3153 | { | ||
3154 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3155 | |||
3156 | I915_WRITE(GEN6_RC_CONTROL, 0); | ||
3160 | 3157 | ||
3161 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); | 3158 | gen6_disable_rps_interrupts(dev); |
3162 | 3159 | ||
3163 | if (dev_priv->vlv_pctx) { | 3160 | if (dev_priv->vlv_pctx) { |
3164 | drm_gem_object_unreference(&dev_priv->vlv_pctx->base); | 3161 | drm_gem_object_unreference(&dev_priv->vlv_pctx->base); |
@@ -3168,6 +3165,10 @@ static void valleyview_disable_rps(struct drm_device *dev) | |||
3168 | 3165 | ||
3169 | int intel_enable_rc6(const struct drm_device *dev) | 3166 | int intel_enable_rc6(const struct drm_device *dev) |
3170 | { | 3167 | { |
3168 | /* No RC6 before Ironlake */ | ||
3169 | if (INTEL_INFO(dev)->gen < 5) | ||
3170 | return 0; | ||
3171 | |||
3171 | /* Respect the kernel parameter if it is set */ | 3172 | /* Respect the kernel parameter if it is set */ |
3172 | if (i915_enable_rc6 >= 0) | 3173 | if (i915_enable_rc6 >= 0) |
3173 | return i915_enable_rc6; | 3174 | return i915_enable_rc6; |
@@ -3191,6 +3192,19 @@ int intel_enable_rc6(const struct drm_device *dev) | |||
3191 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); | 3192 | return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); |
3192 | } | 3193 | } |
3193 | 3194 | ||
3195 | static void gen6_enable_rps_interrupts(struct drm_device *dev) | ||
3196 | { | ||
3197 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3198 | |||
3199 | spin_lock_irq(&dev_priv->irq_lock); | ||
3200 | WARN_ON(dev_priv->rps.pm_iir); | ||
3201 | I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); | ||
3202 | I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); | ||
3203 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3204 | /* unmask all PM interrupts */ | ||
3205 | I915_WRITE(GEN6_PMINTRMSK, 0); | ||
3206 | } | ||
3207 | |||
3194 | static void gen6_enable_rps(struct drm_device *dev) | 3208 | static void gen6_enable_rps(struct drm_device *dev) |
3195 | { | 3209 | { |
3196 | struct drm_i915_private *dev_priv = dev->dev_private; | 3210 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3319,17 +3333,7 @@ static void gen6_enable_rps(struct drm_device *dev) | |||
3319 | 3333 | ||
3320 | gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); | 3334 | gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); |
3321 | 3335 | ||
3322 | /* requires MSI enabled */ | 3336 | gen6_enable_rps_interrupts(dev); |
3323 | I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS); | ||
3324 | spin_lock_irq(&dev_priv->irq_lock); | ||
3325 | /* FIXME: Our interrupt enabling sequence is bonghits. | ||
3326 | * dev_priv->rps.pm_iir really should be 0 here. */ | ||
3327 | dev_priv->rps.pm_iir = 0; | ||
3328 | I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); | ||
3329 | I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); | ||
3330 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3331 | /* unmask all PM interrupts */ | ||
3332 | I915_WRITE(GEN6_PMINTRMSK, 0); | ||
3333 | 3337 | ||
3334 | rc6vids = 0; | 3338 | rc6vids = 0; |
3335 | ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); | 3339 | ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); |
@@ -3599,14 +3603,7 @@ static void valleyview_enable_rps(struct drm_device *dev) | |||
3599 | 3603 | ||
3600 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); | 3604 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); |
3601 | 3605 | ||
3602 | /* requires MSI enabled */ | 3606 | gen6_enable_rps_interrupts(dev); |
3603 | I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); | ||
3604 | spin_lock_irq(&dev_priv->irq_lock); | ||
3605 | WARN_ON(dev_priv->rps.pm_iir != 0); | ||
3606 | I915_WRITE(GEN6_PMIMR, 0); | ||
3607 | spin_unlock_irq(&dev_priv->irq_lock); | ||
3608 | /* enable all PM interrupts */ | ||
3609 | I915_WRITE(GEN6_PMINTRMSK, 0); | ||
3610 | 3607 | ||
3611 | gen6_gt_force_wake_put(dev_priv); | 3608 | gen6_gt_force_wake_put(dev_priv); |
3612 | } | 3609 | } |
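The net effect of the RPS hunks above is that the duplicated PM-interrupt setup and teardown in the SNB/IVB/HSW and Valleyview paths is pulled into shared gen6_enable_rps_interrupts()/gen6_disable_rps_interrupts() helpers. As a sketch of the resulting call shape (enable_rps_snb()/enable_rps_vlv() are hypothetical condensations of gen6_enable_rps() and valleyview_enable_rps()):

    static void enable_rps_snb(struct drm_device *dev)
    {
            /* ... SNB/IVB/HSW RC6 and RPS register programming ... */
            gen6_enable_rps_interrupts(dev);        /* shared unmask sequence */
    }

    static void enable_rps_vlv(struct drm_device *dev)
    {
            /* ... Valleyview-specific Punit/RPS programming ... */
            gen6_enable_rps_interrupts(dev);        /* same shared helper */
    }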
@@ -4421,7 +4418,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
4421 | struct drm_i915_private *dev_priv = dev->dev_private; | 4418 | struct drm_i915_private *dev_priv = dev->dev_private; |
4422 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; | 4419 | uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; |
4423 | 4420 | ||
4424 | /* Required for FBC */ | 4421 | /* |
4422 | * Required for FBC | ||
4423 | * WaFbcDisableDpfcClockGating:ilk | ||
4424 | */ | ||
4425 | dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | | 4425 | dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | |
4426 | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | | 4426 | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | |
4427 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; | 4427 | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; |
@@ -4458,6 +4458,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev) | |||
4458 | * The bit 7,8,9 of 0x42020. | 4458 | * The bit 7,8,9 of 0x42020. |
4459 | */ | 4459 | */ |
4460 | if (IS_IRONLAKE_M(dev)) { | 4460 | if (IS_IRONLAKE_M(dev)) { |
4461 | /* WaFbcAsynchFlipDisableFbcQueue:ilk */ | ||
4461 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | 4462 | I915_WRITE(ILK_DISPLAY_CHICKEN1, |
4462 | I915_READ(ILK_DISPLAY_CHICKEN1) | | 4463 | I915_READ(ILK_DISPLAY_CHICKEN1) | |
4463 | ILK_FBCQ_DIS); | 4464 | ILK_FBCQ_DIS); |
@@ -4594,6 +4595,8 @@ static void gen6_init_clock_gating(struct drm_device *dev) | |||
4594 | * The bit5 and bit7 of 0x42020 | 4595 | * The bit5 and bit7 of 0x42020 |
4595 | * The bit14 of 0x70180 | 4596 | * The bit14 of 0x70180 |
4596 | * The bit14 of 0x71180 | 4597 | * The bit14 of 0x71180 |
4598 | * | ||
4599 | * WaFbcAsynchFlipDisableFbcQueue:snb | ||
4597 | */ | 4600 | */ |
4598 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | 4601 | I915_WRITE(ILK_DISPLAY_CHICKEN1, |
4599 | I915_READ(ILK_DISPLAY_CHICKEN1) | | 4602 | I915_READ(ILK_DISPLAY_CHICKEN1) | |
@@ -5284,254 +5287,6 @@ void intel_init_pm(struct drm_device *dev) | |||
5284 | } | 5287 | } |
5285 | } | 5288 | } |
5286 | 5289 | ||
5287 | static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) | ||
5288 | { | ||
5289 | u32 gt_thread_status_mask; | ||
5290 | |||
5291 | if (IS_HASWELL(dev_priv->dev)) | ||
5292 | gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; | ||
5293 | else | ||
5294 | gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; | ||
5295 | |||
5296 | /* w/a for a sporadic read returning 0 by waiting for the GT | ||
5297 | * thread to wake up. | ||
5298 | */ | ||
5299 | if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) | ||
5300 | DRM_ERROR("GT thread status wait timed out\n"); | ||
5301 | } | ||
5302 | |||
5303 | static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) | ||
5304 | { | ||
5305 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||
5306 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | ||
5307 | } | ||
5308 | |||
5309 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||
5310 | { | ||
5311 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, | ||
5312 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
5313 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | ||
5314 | |||
5315 | I915_WRITE_NOTRACE(FORCEWAKE, 1); | ||
5316 | POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ | ||
5317 | |||
5318 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), | ||
5319 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
5320 | DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); | ||
5321 | |||
5322 | /* WaRsForcewakeWaitTC0:snb */ | ||
5323 | __gen6_gt_wait_for_thread_c0(dev_priv); | ||
5324 | } | ||
5325 | |||
5326 | static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) | ||
5327 | { | ||
5328 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); | ||
5329 | /* something from same cacheline, but !FORCEWAKE_MT */ | ||
5330 | POSTING_READ(ECOBUS); | ||
5331 | } | ||
5332 | |||
5333 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | ||
5334 | { | ||
5335 | u32 forcewake_ack; | ||
5336 | |||
5337 | if (IS_HASWELL(dev_priv->dev)) | ||
5338 | forcewake_ack = FORCEWAKE_ACK_HSW; | ||
5339 | else | ||
5340 | forcewake_ack = FORCEWAKE_MT_ACK; | ||
5341 | |||
5342 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0, | ||
5343 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
5344 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | ||
5345 | |||
5346 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
5347 | /* something from same cacheline, but !FORCEWAKE_MT */ | ||
5348 | POSTING_READ(ECOBUS); | ||
5349 | |||
5350 | if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL), | ||
5351 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
5352 | DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); | ||
5353 | |||
5354 | /* WaRsForcewakeWaitTC0:ivb,hsw */ | ||
5355 | __gen6_gt_wait_for_thread_c0(dev_priv); | ||
5356 | } | ||
5357 | |||
5358 | /* | ||
5359 | * Generally this is called implicitly by the register read function. However, | ||
5360 | * if some sequence requires the GT to not power down then this function should | ||
5361 | * be called at the beginning of the sequence followed by a call to | ||
5362 | * gen6_gt_force_wake_put() at the end of the sequence. | ||
5363 | */ | ||
5364 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||
5365 | { | ||
5366 | unsigned long irqflags; | ||
5367 | |||
5368 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | ||
5369 | if (dev_priv->forcewake_count++ == 0) | ||
5370 | dev_priv->gt.force_wake_get(dev_priv); | ||
5371 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | ||
5372 | } | ||
5373 | |||
5374 | void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | ||
5375 | { | ||
5376 | u32 gtfifodbg; | ||
5377 | gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); | ||
5378 | if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, | ||
5379 | "MMIO read or write has been dropped %x\n", gtfifodbg)) | ||
5380 | I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); | ||
5381 | } | ||
5382 | |||
5383 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||
5384 | { | ||
5385 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | ||
5386 | /* something from same cacheline, but !FORCEWAKE */ | ||
5387 | POSTING_READ(ECOBUS); | ||
5388 | gen6_gt_check_fifodbg(dev_priv); | ||
5389 | } | ||
5390 | |||
5391 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | ||
5392 | { | ||
5393 | I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
5394 | /* something from same cacheline, but !FORCEWAKE_MT */ | ||
5395 | POSTING_READ(ECOBUS); | ||
5396 | gen6_gt_check_fifodbg(dev_priv); | ||
5397 | } | ||
5398 | |||
5399 | /* | ||
5400 | * see gen6_gt_force_wake_get() | ||
5401 | */ | ||
5402 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||
5403 | { | ||
5404 | unsigned long irqflags; | ||
5405 | |||
5406 | spin_lock_irqsave(&dev_priv->gt_lock, irqflags); | ||
5407 | if (--dev_priv->forcewake_count == 0) | ||
5408 | dev_priv->gt.force_wake_put(dev_priv); | ||
5409 | spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); | ||
5410 | } | ||
5411 | |||
5412 | int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
5413 | { | ||
5414 | int ret = 0; | ||
5415 | |||
5416 | if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { | ||
5417 | int loop = 500; | ||
5418 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
5419 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { | ||
5420 | udelay(10); | ||
5421 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
5422 | } | ||
5423 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) | ||
5424 | ++ret; | ||
5425 | dev_priv->gt_fifo_count = fifo; | ||
5426 | } | ||
5427 | dev_priv->gt_fifo_count--; | ||
5428 | |||
5429 | return ret; | ||
5430 | } | ||
5431 | |||
5432 | static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | ||
5433 | { | ||
5434 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); | ||
5435 | /* something from same cacheline, but !FORCEWAKE_VLV */ | ||
5436 | POSTING_READ(FORCEWAKE_ACK_VLV); | ||
5437 | } | ||
5438 | |||
5439 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | ||
5440 | { | ||
5441 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, | ||
5442 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
5443 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | ||
5444 | |||
5445 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
5446 | I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, | ||
5447 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
5448 | |||
5449 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), | ||
5450 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
5451 | DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); | ||
5452 | |||
5453 | if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) & | ||
5454 | FORCEWAKE_KERNEL), | ||
5455 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
5456 | DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); | ||
5457 | |||
5458 | /* WaRsForcewakeWaitTC0:vlv */ | ||
5459 | __gen6_gt_wait_for_thread_c0(dev_priv); | ||
5460 | } | ||
5461 | |||
5462 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | ||
5463 | { | ||
5464 | I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
5465 | I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, | ||
5466 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
5467 | /* The below doubles as a POSTING_READ */ | ||
5468 | gen6_gt_check_fifodbg(dev_priv); | ||
5469 | } | ||
5470 | |||
5471 | void intel_gt_reset(struct drm_device *dev) | ||
5472 | { | ||
5473 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5474 | |||
5475 | if (IS_VALLEYVIEW(dev)) { | ||
5476 | vlv_force_wake_reset(dev_priv); | ||
5477 | } else if (INTEL_INFO(dev)->gen >= 6) { | ||
5478 | __gen6_gt_force_wake_reset(dev_priv); | ||
5479 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | ||
5480 | __gen6_gt_force_wake_mt_reset(dev_priv); | ||
5481 | } | ||
5482 | } | ||
5483 | |||
5484 | void intel_gt_init(struct drm_device *dev) | ||
5485 | { | ||
5486 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5487 | |||
5488 | spin_lock_init(&dev_priv->gt_lock); | ||
5489 | |||
5490 | intel_gt_reset(dev); | ||
5491 | |||
5492 | if (IS_VALLEYVIEW(dev)) { | ||
5493 | dev_priv->gt.force_wake_get = vlv_force_wake_get; | ||
5494 | dev_priv->gt.force_wake_put = vlv_force_wake_put; | ||
5495 | } else if (IS_HASWELL(dev)) { | ||
5496 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; | ||
5497 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; | ||
5498 | } else if (IS_IVYBRIDGE(dev)) { | ||
5499 | u32 ecobus; | ||
5500 | |||
5501 | /* IVB configs may use multi-threaded forcewake */ | ||
5502 | |||
5503 | /* A small trick here - if the bios hasn't configured | ||
5504 | * MT forcewake, and if the device is in RC6, then | ||
5505 | * force_wake_mt_get will not wake the device and the | ||
5506 | * ECOBUS read will return zero. Which will be | ||
5507 | * (correctly) interpreted by the test below as MT | ||
5508 | * forcewake being disabled. | ||
5509 | */ | ||
5510 | mutex_lock(&dev->struct_mutex); | ||
5511 | __gen6_gt_force_wake_mt_get(dev_priv); | ||
5512 | ecobus = I915_READ_NOTRACE(ECOBUS); | ||
5513 | __gen6_gt_force_wake_mt_put(dev_priv); | ||
5514 | mutex_unlock(&dev->struct_mutex); | ||
5515 | |||
5516 | if (ecobus & FORCEWAKE_MT_ENABLE) { | ||
5517 | dev_priv->gt.force_wake_get = | ||
5518 | __gen6_gt_force_wake_mt_get; | ||
5519 | dev_priv->gt.force_wake_put = | ||
5520 | __gen6_gt_force_wake_mt_put; | ||
5521 | } else { | ||
5522 | DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); | ||
5523 | DRM_INFO("when using vblank-synced partial screen updates.\n"); | ||
5524 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | ||
5525 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | ||
5526 | } | ||
5527 | } else if (IS_GEN6(dev)) { | ||
5528 | dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; | ||
5529 | dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; | ||
5530 | } | ||
5531 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | ||
5532 | intel_gen6_powersave_work); | ||
5533 | } | ||
5534 | |||
5535 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) | 5290 | int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) |
5536 | { | 5291 | { |
5537 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 5292 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
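All of the forcewake and GT register plumbing removed from intel_pm.c here reappears in the new intel_uncore.c below; the external contract stays what the removed comment described: a caller that needs the GT to stay powered across a register sequence brackets it with gen6_gt_force_wake_get()/gen6_gt_force_wake_put(). A minimal sketch of that usage:

    static void read_gt_registers(struct drm_i915_private *dev_priv)
    {
            gen6_gt_force_wake_get(dev_priv);       /* keep the GT awake */

            /* ... a sequence of GT register reads/writes ... */

            gen6_gt_force_wake_put(dev_priv);       /* allow power-down again */
    }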
@@ -5634,3 +5389,11 @@ int vlv_freq_opcode(int ddr_freq, int val) | |||
5634 | return val; | 5389 | return val; |
5635 | } | 5390 | } |
5636 | 5391 | ||
5392 | void intel_pm_init(struct drm_device *dev) | ||
5393 | { | ||
5394 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5395 | |||
5396 | INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, | ||
5397 | intel_gen6_powersave_work); | ||
5398 | } | ||
5399 | |||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 798df114cfd3..c3b59b8593b9 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1696,6 +1696,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1696 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1696 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1697 | enum drm_connector_status ret; | 1697 | enum drm_connector_status ret; |
1698 | 1698 | ||
1699 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | ||
1700 | connector->base.id, drm_get_connector_name(connector)); | ||
1701 | |||
1699 | if (!intel_sdvo_get_value(intel_sdvo, | 1702 | if (!intel_sdvo_get_value(intel_sdvo, |
1700 | SDVO_CMD_GET_ATTACHED_DISPLAYS, | 1703 | SDVO_CMD_GET_ATTACHED_DISPLAYS, |
1701 | &response, 2)) | 1704 | &response, 2)) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 39debd80d190..b0b446f630f7 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1305,6 +1305,10 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1305 | struct intel_tv *intel_tv = intel_attached_tv(connector); | 1305 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
1306 | int type; | 1306 | int type; |
1307 | 1307 | ||
1308 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", | ||
1309 | connector->base.id, drm_get_connector_name(connector), | ||
1310 | force); | ||
1311 | |||
1308 | mode = reported_modes[0]; | 1312 | mode = reported_modes[0]; |
1309 | 1313 | ||
1310 | if (force) { | 1314 | if (force) { |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c new file mode 100644 index 000000000000..8f5bc869c023 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -0,0 +1,595 @@ | |||
1 | /* | ||
2 | * Copyright © 2013 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | */ | ||
23 | |||
24 | #include "i915_drv.h" | ||
25 | #include "intel_drv.h" | ||
26 | |||
27 | #define FORCEWAKE_ACK_TIMEOUT_MS 2 | ||
28 | |||
29 | #define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) | ||
30 | #define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) | ||
31 | |||
32 | #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) | ||
33 | #define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__)) | ||
34 | |||
35 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) | ||
36 | #define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__)) | ||
37 | |||
38 | #define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__)) | ||
39 | #define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__)) | ||
40 | |||
41 | #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) | ||
42 | |||
43 | |||
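The __raw_i915_read/write macros above poke the ioremapped MMIO BAR directly through readl()/writel() and friends, presumably so the forcewake code itself never recurses into the forcewake-aware or traced accessors. As a restatement of what a 32-bit raw read expands to (not additional API), assuming dev_priv->regs is the MMIO base:

    static inline u32 raw_read32(struct drm_i915_private *dev_priv, u32 reg)
    {
            return readl(dev_priv->regs + reg);     /* no tracing, no forcewake */
    }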
44 | static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) | ||
45 | { | ||
46 | u32 gt_thread_status_mask; | ||
47 | |||
48 | if (IS_HASWELL(dev_priv->dev)) | ||
49 | gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; | ||
50 | else | ||
51 | gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; | ||
52 | |||
53 | /* w/a for a sporadic read returning 0 by waiting for the GT | ||
54 | * thread to wake up. | ||
55 | */ | ||
56 | if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) | ||
57 | DRM_ERROR("GT thread status wait timed out\n"); | ||
58 | } | ||
59 | |||
60 | static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) | ||
61 | { | ||
62 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | ||
63 | /* something from same cacheline, but !FORCEWAKE */ | ||
64 | __raw_posting_read(dev_priv, ECOBUS); | ||
65 | } | ||
66 | |||
67 | static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||
68 | { | ||
69 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, | ||
70 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
71 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | ||
72 | |||
73 | __raw_i915_write32(dev_priv, FORCEWAKE, 1); | ||
74 | /* something from same cacheline, but !FORCEWAKE */ | ||
75 | __raw_posting_read(dev_priv, ECOBUS); | ||
76 | |||
77 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1), | ||
78 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
79 | DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); | ||
80 | |||
81 | /* WaRsForcewakeWaitTC0:snb */ | ||
82 | __gen6_gt_wait_for_thread_c0(dev_priv); | ||
83 | } | ||
84 | |||
85 | static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) | ||
86 | { | ||
87 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); | ||
88 | /* something from same cacheline, but !FORCEWAKE_MT */ | ||
89 | __raw_posting_read(dev_priv, ECOBUS); | ||
90 | } | ||
91 | |||
92 | static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) | ||
93 | { | ||
94 | u32 forcewake_ack; | ||
95 | |||
96 | if (IS_HASWELL(dev_priv->dev)) | ||
97 | forcewake_ack = FORCEWAKE_ACK_HSW; | ||
98 | else | ||
99 | forcewake_ack = FORCEWAKE_MT_ACK; | ||
100 | |||
101 | if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0, | ||
102 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
103 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | ||
104 | |||
105 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, | ||
106 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
107 | /* something from same cacheline, but !FORCEWAKE_MT */ | ||
108 | __raw_posting_read(dev_priv, ECOBUS); | ||
109 | |||
110 | if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL), | ||
111 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
112 | DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); | ||
113 | |||
114 | /* WaRsForcewakeWaitTC0:ivb,hsw */ | ||
115 | __gen6_gt_wait_for_thread_c0(dev_priv); | ||
116 | } | ||
117 | |||
118 | static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) | ||
119 | { | ||
120 | u32 gtfifodbg; | ||
121 | |||
122 | gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); | ||
123 | if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, | ||
124 | "MMIO read or write has been dropped %x\n", gtfifodbg)) | ||
125 | __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); | ||
126 | } | ||
127 | |||
128 | static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||
129 | { | ||
130 | __raw_i915_write32(dev_priv, FORCEWAKE, 0); | ||
131 | /* something from same cacheline, but !FORCEWAKE */ | ||
132 | __raw_posting_read(dev_priv, ECOBUS); | ||
133 | gen6_gt_check_fifodbg(dev_priv); | ||
134 | } | ||
135 | |||
136 | static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) | ||
137 | { | ||
138 | __raw_i915_write32(dev_priv, FORCEWAKE_MT, | ||
139 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
140 | /* something from same cacheline, but !FORCEWAKE_MT */ | ||
141 | __raw_posting_read(dev_priv, ECOBUS); | ||
142 | gen6_gt_check_fifodbg(dev_priv); | ||
143 | } | ||
144 | |||
145 | static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
146 | { | ||
147 | int ret = 0; | ||
148 | |||
149 | if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { | ||
150 | int loop = 500; | ||
151 | u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); | ||
152 | while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { | ||
153 | udelay(10); | ||
154 | fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); | ||
155 | } | ||
156 | if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) | ||
157 | ++ret; | ||
158 | dev_priv->uncore.fifo_count = fifo; | ||
159 | } | ||
160 | dev_priv->uncore.fifo_count--; | ||
161 | |||
162 | return ret; | ||
163 | } | ||
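/* Illustration only, not part of this patch: a worked example of the FIFO
 * bookkeeping above, assuming GT_FIFO_NUM_RESERVED_ENTRIES is 20 (its usual
 * value) -
 *
 *	dev_priv->uncore.fifo_count = 19;   // below the reserved floor
 *	__gen6_gt_wait_for_fifo(dev_priv);  // polls GT_FIFO_FREE_ENTRIES every
 *	                                    // 10us, up to 500 times (~5ms),
 *	                                    // before WARNing and giving up
 *
 * Each posted write then consumes one entry from the cached fifo_count.
 */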
164 | |||
165 | static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) | ||
166 | { | ||
167 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, | ||
168 | _MASKED_BIT_DISABLE(0xffff)); | ||
169 | /* something from same cacheline, but !FORCEWAKE_VLV */ | ||
170 | __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); | ||
171 | } | ||
172 | |||
173 | static void vlv_force_wake_get(struct drm_i915_private *dev_priv) | ||
174 | { | ||
175 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, | ||
176 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
177 | DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); | ||
178 | |||
179 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, | ||
180 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
181 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | ||
182 | _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); | ||
183 | |||
184 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), | ||
185 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
186 | DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); | ||
187 | |||
188 | if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) & | ||
189 | FORCEWAKE_KERNEL), | ||
190 | FORCEWAKE_ACK_TIMEOUT_MS)) | ||
191 | DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); | ||
192 | |||
193 | /* WaRsForcewakeWaitTC0:vlv */ | ||
194 | __gen6_gt_wait_for_thread_c0(dev_priv); | ||
195 | } | ||
196 | |||
197 | static void vlv_force_wake_put(struct drm_i915_private *dev_priv) | ||
198 | { | ||
199 | __raw_i915_write32(dev_priv, FORCEWAKE_VLV, | ||
200 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
201 | __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, | ||
202 | _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); | ||
203 | /* The below doubles as a POSTING_READ */ | ||
204 | gen6_gt_check_fifodbg(dev_priv); | ||
205 | } | ||
206 | |||
207 | void intel_uncore_early_sanitize(struct drm_device *dev) | ||
208 | { | ||
209 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
210 | |||
211 | if (HAS_FPGA_DBG_UNCLAIMED(dev)) | ||
212 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
213 | } | ||
214 | |||
215 | void intel_uncore_init(struct drm_device *dev) | ||
216 | { | ||
217 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
218 | |||
219 | if (IS_VALLEYVIEW(dev)) { | ||
220 | dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; | ||
221 | dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; | ||
222 | } else if (IS_HASWELL(dev)) { | ||
223 | dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; | ||
224 | dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; | ||
225 | } else if (IS_IVYBRIDGE(dev)) { | ||
226 | u32 ecobus; | ||
227 | |||
228 | /* IVB configs may use multi-threaded forcewake */ | ||
229 | |||
230 | /* A small trick here - if the BIOS hasn't configured | ||
231 | * MT forcewake, and if the device is in RC6, then | ||
232 | * force_wake_mt_get will not wake the device and the | ||
233 | * ECOBUS read will return zero, which the test below | ||
234 | * will (correctly) interpret as MT forcewake being | ||
235 | * disabled. | ||
236 | */ | ||
237 | mutex_lock(&dev->struct_mutex); | ||
238 | __gen6_gt_force_wake_mt_get(dev_priv); | ||
239 | ecobus = __raw_i915_read32(dev_priv, ECOBUS); | ||
240 | __gen6_gt_force_wake_mt_put(dev_priv); | ||
241 | mutex_unlock(&dev->struct_mutex); | ||
242 | |||
243 | if (ecobus & FORCEWAKE_MT_ENABLE) { | ||
244 | dev_priv->uncore.funcs.force_wake_get = | ||
245 | __gen6_gt_force_wake_mt_get; | ||
246 | dev_priv->uncore.funcs.force_wake_put = | ||
247 | __gen6_gt_force_wake_mt_put; | ||
248 | } else { | ||
249 | DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); | ||
250 | DRM_INFO("when using vblank-synced partial screen updates.\n"); | ||
251 | dev_priv->uncore.funcs.force_wake_get = | ||
252 | __gen6_gt_force_wake_get; | ||
253 | dev_priv->uncore.funcs.force_wake_put = | ||
254 | __gen6_gt_force_wake_put; | ||
255 | } | ||
256 | } else if (IS_GEN6(dev)) { | ||
257 | dev_priv->uncore.funcs.force_wake_get = | ||
258 | __gen6_gt_force_wake_get; | ||
259 | dev_priv->uncore.funcs.force_wake_put = | ||
260 | __gen6_gt_force_wake_put; | ||
261 | } | ||
262 | } | ||
263 | |||
264 | void intel_uncore_sanitize(struct drm_device *dev) | ||
265 | { | ||
266 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
267 | |||
268 | if (IS_VALLEYVIEW(dev)) { | ||
269 | vlv_force_wake_reset(dev_priv); | ||
270 | } else if (INTEL_INFO(dev)->gen >= 6) { | ||
271 | __gen6_gt_force_wake_reset(dev_priv); | ||
272 | if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | ||
273 | __gen6_gt_force_wake_mt_reset(dev_priv); | ||
274 | } | ||
275 | |||
276 | /* BIOS often leaves RC6 enabled, but disable it for hw init */ | ||
277 | intel_disable_gt_powersave(dev); | ||
278 | } | ||
279 | |||
280 | /* | ||
281 | * Generally this is called implicitly by the register read function. However, | ||
282 | * if some sequence requires the GT to not power down then this function should | ||
283 | * be called at the beginning of the sequence followed by a call to | ||
284 | * gen6_gt_force_wake_put() at the end of the sequence. | ||
285 | */ | ||
286 | void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | ||
287 | { | ||
288 | unsigned long irqflags; | ||
289 | |||
290 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
291 | if (dev_priv->uncore.forcewake_count++ == 0) | ||
292 | dev_priv->uncore.funcs.force_wake_get(dev_priv); | ||
293 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * see gen6_gt_force_wake_get() | ||
298 | */ | ||
299 | void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | ||
300 | { | ||
301 | unsigned long irqflags; | ||
302 | |||
303 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
304 | if (--dev_priv->uncore.forcewake_count == 0) | ||
305 | dev_priv->uncore.funcs.force_wake_put(dev_priv); | ||
306 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
307 | } | ||
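/* Illustration only, not part of this patch: code that must keep the GT awake
 * across a whole sequence brackets it with the refcounted helpers above, e.g.
 * a hypothetical read-modify-write of a GT register:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(GEN6_RP_CONTROL);
 *	I915_WRITE(GEN6_RP_CONTROL, val | GEN6_RP_ENABLE);
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * Get/put pairs may nest; the hardware handshake only runs when
 * forcewake_count transitions between 0 and 1.
 */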
308 | |||
309 | /* We give fast paths for the really cool registers */ | ||
310 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | ||
311 | ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ | ||
312 | ((reg) < 0x40000) && \ | ||
313 | ((reg) != FORCEWAKE)) | ||
314 | |||
315 | static void | ||
316 | ilk_dummy_write(struct drm_i915_private *dev_priv) | ||
317 | { | ||
318 | /* WaIssueDummyWriteToWakeupFromRC6:ilk - issue a dummy write to wake up | ||
319 | * the chip from rc6 before touching it for real. MI_MODE is masked, | ||
320 | * hence harmless to write 0 into. */ | ||
321 | __raw_i915_write32(dev_priv, MI_MODE, 0); | ||
322 | } | ||
323 | |||
324 | static void | ||
325 | hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) | ||
326 | { | ||
327 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && | ||
328 | (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
329 | DRM_ERROR("Unknown unclaimed register before writing to %x\n", | ||
330 | reg); | ||
331 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
332 | } | ||
333 | } | ||
334 | |||
335 | static void | ||
336 | hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) | ||
337 | { | ||
338 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && | ||
339 | (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
340 | DRM_ERROR("Unclaimed write to %x\n", reg); | ||
341 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
342 | } | ||
343 | } | ||
344 | |||
345 | #define __i915_read(x) \ | ||
346 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ | ||
347 | unsigned long irqflags; \ | ||
348 | u##x val = 0; \ | ||
349 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ | ||
350 | if (dev_priv->info->gen == 5) \ | ||
351 | ilk_dummy_write(dev_priv); \ | ||
352 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
353 | if (dev_priv->uncore.forcewake_count == 0) \ | ||
354 | dev_priv->uncore.funcs.force_wake_get(dev_priv); \ | ||
355 | val = __raw_i915_read##x(dev_priv, reg); \ | ||
356 | if (dev_priv->uncore.forcewake_count == 0) \ | ||
357 | dev_priv->uncore.funcs.force_wake_put(dev_priv); \ | ||
358 | } else { \ | ||
359 | val = __raw_i915_read##x(dev_priv, reg); \ | ||
360 | } \ | ||
361 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | ||
362 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ | ||
363 | return val; \ | ||
364 | } | ||
365 | |||
366 | __i915_read(8) | ||
367 | __i915_read(16) | ||
368 | __i915_read(32) | ||
369 | __i915_read(64) | ||
370 | #undef __i915_read | ||
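/* Illustration only, not part of this patch: each __i915_read(x) expansion
 * above emits one accessor, e.g. __i915_read(32) produces
 *
 *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg, bool trace);
 *
 * which the driver's I915_READ()-style wrappers are expected to call with
 * trace set (and the _NOTRACE variants with trace cleared).
 */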
371 | |||
372 | #define __i915_write(x) \ | ||
373 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \ | ||
374 | unsigned long irqflags; \ | ||
375 | u32 __fifo_ret = 0; \ | ||
376 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ | ||
377 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ | ||
378 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
379 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | ||
380 | } \ | ||
381 | if (dev_priv->info->gen == 5) \ | ||
382 | ilk_dummy_write(dev_priv); \ | ||
383 | hsw_unclaimed_reg_clear(dev_priv, reg); \ | ||
384 | __raw_i915_write##x(dev_priv, reg, val); \ | ||
385 | if (unlikely(__fifo_ret)) { \ | ||
386 | gen6_gt_check_fifodbg(dev_priv); \ | ||
387 | } \ | ||
388 | hsw_unclaimed_reg_check(dev_priv, reg); \ | ||
389 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | ||
390 | } | ||
391 | __i915_write(8) | ||
392 | __i915_write(16) | ||
393 | __i915_write(32) | ||
394 | __i915_write(64) | ||
395 | #undef __i915_write | ||
396 | |||
397 | static const struct register_whitelist { | ||
398 | uint64_t offset; | ||
399 | uint32_t size; | ||
400 | uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ | ||
401 | } whitelist[] = { | ||
402 | { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, | ||
403 | }; | ||
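/* Illustration only, not part of this patch: the lookup below tests the mask
 * with (1 << gen) & gen_bitmask, so the single entry above (0xF0) admits
 * gens 4-7:
 *
 *	gen 6:  1 << 6 == 0x40,  0x40 & 0xF0 != 0  ->  whitelisted
 *	gen 3:  1 << 3 == 0x08,  0x08 & 0xF0 == 0  ->  -EINVAL
 */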
404 | |||
405 | int i915_reg_read_ioctl(struct drm_device *dev, | ||
406 | void *data, struct drm_file *file) | ||
407 | { | ||
408 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
409 | struct drm_i915_reg_read *reg = data; | ||
410 | struct register_whitelist const *entry = whitelist; | ||
411 | int i; | ||
412 | |||
413 | for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { | ||
414 | if (entry->offset == reg->offset && | ||
415 | (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) | ||
416 | break; | ||
417 | } | ||
418 | |||
419 | if (i == ARRAY_SIZE(whitelist)) | ||
420 | return -EINVAL; | ||
421 | |||
422 | switch (entry->size) { | ||
423 | case 8: | ||
424 | reg->val = I915_READ64(reg->offset); | ||
425 | break; | ||
426 | case 4: | ||
427 | reg->val = I915_READ(reg->offset); | ||
428 | break; | ||
429 | case 2: | ||
430 | reg->val = I915_READ16(reg->offset); | ||
431 | break; | ||
432 | case 1: | ||
433 | reg->val = I915_READ8(reg->offset); | ||
434 | break; | ||
435 | default: | ||
436 | WARN_ON(1); | ||
437 | return -EINVAL; | ||
438 | } | ||
439 | |||
440 | return 0; | ||
441 | } | ||
442 | |||
443 | static int i8xx_do_reset(struct drm_device *dev) | ||
444 | { | ||
445 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
446 | |||
447 | if (IS_I85X(dev)) | ||
448 | return -ENODEV; | ||
449 | |||
450 | I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); | ||
451 | POSTING_READ(D_STATE); | ||
452 | |||
453 | if (IS_I830(dev) || IS_845G(dev)) { | ||
454 | I915_WRITE(DEBUG_RESET_I830, | ||
455 | DEBUG_RESET_DISPLAY | | ||
456 | DEBUG_RESET_RENDER | | ||
457 | DEBUG_RESET_FULL); | ||
458 | POSTING_READ(DEBUG_RESET_I830); | ||
459 | msleep(1); | ||
460 | |||
461 | I915_WRITE(DEBUG_RESET_I830, 0); | ||
462 | POSTING_READ(DEBUG_RESET_I830); | ||
463 | } | ||
464 | |||
465 | msleep(1); | ||
466 | |||
467 | I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); | ||
468 | POSTING_READ(D_STATE); | ||
469 | |||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | static int i965_reset_complete(struct drm_device *dev) | ||
474 | { | ||
475 | u8 gdrst; | ||
476 | pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); | ||
477 | return (gdrst & GRDOM_RESET_ENABLE) == 0; | ||
478 | } | ||
479 | |||
480 | static int i965_do_reset(struct drm_device *dev) | ||
481 | { | ||
482 | int ret; | ||
483 | |||
484 | /* | ||
485 | * Set the domains we want to reset (GRDOM/bits 2 and 3) as | ||
486 | * well as the reset bit (GR/bit 0). Setting the GR bit | ||
487 | * triggers the reset; when done, the hardware will clear it. | ||
488 | */ | ||
489 | pci_write_config_byte(dev->pdev, I965_GDRST, | ||
490 | GRDOM_RENDER | GRDOM_RESET_ENABLE); | ||
491 | ret = wait_for(i965_reset_complete(dev), 500); | ||
492 | if (ret) | ||
493 | return ret; | ||
494 | |||
495 | /* We can't reset render&media without also resetting display ... */ | ||
496 | pci_write_config_byte(dev->pdev, I965_GDRST, | ||
497 | GRDOM_MEDIA | GRDOM_RESET_ENABLE); | ||
498 | |||
499 | ret = wait_for(i965_reset_complete(dev), 500); | ||
500 | if (ret) | ||
501 | return ret; | ||
502 | |||
503 | pci_write_config_byte(dev->pdev, I965_GDRST, 0); | ||
504 | |||
505 | return 0; | ||
506 | } | ||
507 | |||
508 | static int ironlake_do_reset(struct drm_device *dev) | ||
509 | { | ||
510 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
511 | u32 gdrst; | ||
512 | int ret; | ||
513 | |||
514 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); | ||
515 | gdrst &= ~GRDOM_MASK; | ||
516 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, | ||
517 | gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); | ||
518 | ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); | ||
519 | if (ret) | ||
520 | return ret; | ||
521 | |||
522 | /* We can't reset render&media without also resetting display ... */ | ||
523 | gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); | ||
524 | gdrst &= ~GRDOM_MASK; | ||
525 | I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, | ||
526 | gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); | ||
527 | return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); | ||
528 | } | ||
529 | |||
530 | static int gen6_do_reset(struct drm_device *dev) | ||
531 | { | ||
532 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
533 | int ret; | ||
534 | unsigned long irqflags; | ||
535 | |||
536 | /* Hold uncore.lock across reset to prevent any register access | ||
537 | * with forcewake not set correctly | ||
538 | */ | ||
539 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | ||
540 | |||
541 | /* Reset the chip */ | ||
542 | |||
543 | /* GEN6_GDRST is not in the GT power well, so there is no need | ||
544 | * to check for FIFO space for the write or to forcewake the | ||
545 | * chip for the read. | ||
546 | */ | ||
547 | __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL); | ||
548 | |||
549 | /* Spin waiting for the device to ack the reset request */ | ||
550 | ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); | ||
551 | |||
552 | /* If the reset happened with a user forcewake held, try to restore it; otherwise turn it off */ | ||
553 | if (dev_priv->uncore.forcewake_count) | ||
554 | dev_priv->uncore.funcs.force_wake_get(dev_priv); | ||
555 | else | ||
556 | dev_priv->uncore.funcs.force_wake_put(dev_priv); | ||
557 | |||
558 | /* Restore fifo count */ | ||
559 | dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); | ||
560 | |||
561 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | ||
562 | return ret; | ||
563 | } | ||
564 | |||
565 | int intel_gpu_reset(struct drm_device *dev) | ||
566 | { | ||
567 | switch (INTEL_INFO(dev)->gen) { | ||
568 | case 7: | ||
569 | case 6: return gen6_do_reset(dev); | ||
570 | case 5: return ironlake_do_reset(dev); | ||
571 | case 4: return i965_do_reset(dev); | ||
572 | case 2: return i8xx_do_reset(dev); | ||
573 | default: return -ENODEV; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | void intel_uncore_clear_errors(struct drm_device *dev) | ||
578 | { | ||
579 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
580 | |||
581 | /* XXX needs spinlock around caller's grouping */ | ||
582 | if (HAS_FPGA_DBG_UNCLAIMED(dev)) | ||
583 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
584 | } | ||
585 | |||
586 | void intel_uncore_check_errors(struct drm_device *dev) | ||
587 | { | ||
588 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
589 | |||
590 | if (HAS_FPGA_DBG_UNCLAIMED(dev) && | ||
591 | (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
592 | DRM_ERROR("Unclaimed register before interrupt\n"); | ||
593 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | ||
594 | } | ||
595 | } | ||
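Illustration (not part of this patch): the register-read ioctl added above is
reachable from userspace as DRM_IOCTL_I915_REG_READ. A minimal sketch of a
caller, assuming the standard i915 uapi headers and that 0x2358 is
RING_TIMESTAMP(RENDER_RING_BASE), the only whitelisted offset here:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	int main(void)
	{
		/* assumed offset: RING_TIMESTAMP(RENDER_RING_BASE) == 0x2358 */
		struct drm_i915_reg_read rr = { .offset = 0x2358 };
		int fd = open("/dev/dri/card0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
			printf("render ring timestamp: 0x%llx\n",
			       (unsigned long long)rr.val);
		close(fd);
		return 0;
	}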
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c index 262c9f5f5f60..ce860de43e61 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | |||
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_bsp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_bsp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c index c46882c83982..ba6aeca0285e 100644 --- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | |||
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00008000; | 92 | nv_subdev(priv)->unit = 0x00008000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nve0_bsp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_bsp_cclass; |
94 | nv_engine(priv)->sclass = nve0_bsp_sclass; | 95 | nv_engine(priv)->sclass = nve0_bsp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c index 3c7a31f7590e..e03fc8e4dc1d 100644 --- a/drivers/gpu/drm/nouveau/core/engine/falcon.c +++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c | |||
@@ -23,6 +23,25 @@ | |||
23 | #include <engine/falcon.h> | 23 | #include <engine/falcon.h> |
24 | #include <subdev/timer.h> | 24 | #include <subdev/timer.h> |
25 | 25 | ||
26 | void | ||
27 | nouveau_falcon_intr(struct nouveau_subdev *subdev) | ||
28 | { | ||
29 | struct nouveau_falcon *falcon = (void *)subdev; | ||
30 | u32 dispatch = nv_ro32(falcon, 0x01c); | ||
31 | u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16); | ||
32 | |||
33 | if (intr & 0x00000010) { | ||
34 | nv_debug(falcon, "ucode halted\n"); | ||
35 | nv_wo32(falcon, 0x004, 0x00000010); | ||
36 | intr &= ~0x00000010; | ||
37 | } | ||
38 | |||
39 | if (intr) { | ||
40 | nv_error(falcon, "unhandled intr 0x%08x\n", intr); | ||
41 | nv_wo32(falcon, 0x004, intr); | ||
42 | } | ||
43 | } | ||
44 | |||
26 | u32 | 45 | u32 |
27 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) | 46 | _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr) |
28 | { | 47 | { |
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c index 98072c1ff360..73719aaa62d6 100644 --- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | |||
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00000002; | 92 | nv_subdev(priv)->unit = 0x00000002; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_ppp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_ppp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_ppp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c index 1879229b60eb..ac1f62aace72 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | |||
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nvc0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nvc0_vp_cclass; |
94 | nv_engine(priv)->sclass = nvc0_vp_sclass; | 95 | nv_engine(priv)->sclass = nvc0_vp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c index d28ecbf7bc49..d4c3108479c9 100644 --- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | |||
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, | |||
90 | return ret; | 90 | return ret; |
91 | 91 | ||
92 | nv_subdev(priv)->unit = 0x00020000; | 92 | nv_subdev(priv)->unit = 0x00020000; |
93 | nv_subdev(priv)->intr = nouveau_falcon_intr; | ||
93 | nv_engine(priv)->cclass = &nve0_vp_cclass; | 94 | nv_engine(priv)->cclass = &nve0_vp_cclass; |
94 | nv_engine(priv)->sclass = nve0_vp_sclass; | 95 | nv_engine(priv)->sclass = nve0_vp_sclass; |
95 | return 0; | 96 | return 0; |
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h index 1edec386ab36..181aa7da524d 100644 --- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h | |||
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *, | |||
72 | struct nouveau_oclass *, u32, bool, const char *, | 72 | struct nouveau_oclass *, u32, bool, const char *, |
73 | const char *, int, void **); | 73 | const char *, int, void **); |
74 | 74 | ||
75 | void nouveau_falcon_intr(struct nouveau_subdev *subdev); | ||
76 | |||
75 | #define _nouveau_falcon_dtor _nouveau_engine_dtor | 77 | #define _nouveau_falcon_dtor _nouveau_engine_dtor |
76 | int _nouveau_falcon_init(struct nouveau_object *); | 78 | int _nouveau_falcon_init(struct nouveau_object *); |
77 | int _nouveau_falcon_fini(struct nouveau_object *, bool); | 79 | int _nouveau_falcon_fini(struct nouveau_object *, bool); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4b1afb131380..4e7ee5f4155c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
148 | 148 | ||
149 | if (unlikely(nvbo->gem)) | 149 | if (unlikely(nvbo->gem)) |
150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 150 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
151 | WARN_ON(nvbo->pin_refcnt > 0); | ||
151 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); | 152 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
152 | kfree(nvbo); | 153 | kfree(nvbo); |
153 | } | 154 | } |
@@ -197,6 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, | |||
197 | size_t acc_size; | 198 | size_t acc_size; |
198 | int ret; | 199 | int ret; |
199 | int type = ttm_bo_type_device; | 200 | int type = ttm_bo_type_device; |
201 | int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1); | ||
202 | |||
203 | if (size <= 0 || size > max_size) { | ||
204 | nv_warn(drm, "skipped size %x\n", (u32)size); | ||
205 | return -EINVAL; | ||
206 | } | ||
200 | 207 | ||
201 | if (sg) | 208 | if (sg) |
202 | type = ttm_bo_type_sg; | 209 | type = ttm_bo_type_sg; |
@@ -340,13 +347,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) | |||
340 | { | 347 | { |
341 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); | 348 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
342 | struct ttm_buffer_object *bo = &nvbo->bo; | 349 | struct ttm_buffer_object *bo = &nvbo->bo; |
343 | int ret; | 350 | int ret, ref; |
344 | 351 | ||
345 | ret = ttm_bo_reserve(bo, false, false, false, 0); | 352 | ret = ttm_bo_reserve(bo, false, false, false, 0); |
346 | if (ret) | 353 | if (ret) |
347 | return ret; | 354 | return ret; |
348 | 355 | ||
349 | if (--nvbo->pin_refcnt) | 356 | ref = --nvbo->pin_refcnt; |
357 | WARN_ON_ONCE(ref < 0); | ||
358 | if (ref) | ||
350 | goto out; | 359 | goto out; |
351 | 360 | ||
352 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); | 361 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
@@ -578,7 +587,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) | |||
578 | int ret = RING_SPACE(chan, 2); | 587 | int ret = RING_SPACE(chan, 2); |
579 | if (ret == 0) { | 588 | if (ret == 0) { |
580 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); | 589 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
581 | OUT_RING (chan, handle); | 590 | OUT_RING (chan, handle & 0x0000ffff); |
582 | FIRE_RING (chan); | 591 | FIRE_RING (chan); |
583 | } | 592 | } |
584 | return ret; | 593 | return ret; |
@@ -973,7 +982,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, | |||
973 | struct ttm_mem_reg *old_mem = &bo->mem; | 982 | struct ttm_mem_reg *old_mem = &bo->mem; |
974 | int ret; | 983 | int ret; |
975 | 984 | ||
976 | mutex_lock(&chan->cli->mutex); | 985 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); |
977 | 986 | ||
978 | /* create temporary vmas for the transfer and attach them to the | 987 | /* create temporary vmas for the transfer and attach them to the |
979 | * old nouveau_mem node, these will get cleaned up after ttm has | 988 | * old nouveau_mem node, these will get cleaned up after ttm has |
@@ -1014,7 +1023,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
1014 | struct ttm_mem_reg *, struct ttm_mem_reg *); | 1023 | struct ttm_mem_reg *, struct ttm_mem_reg *); |
1015 | int (*init)(struct nouveau_channel *, u32 handle); | 1024 | int (*init)(struct nouveau_channel *, u32 handle); |
1016 | } _methods[] = { | 1025 | } _methods[] = { |
1017 | { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, | 1026 | { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, |
1018 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, | 1027 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
1019 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1028 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, |
1020 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, | 1029 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, |
@@ -1034,7 +1043,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) | |||
1034 | struct nouveau_channel *chan; | 1043 | struct nouveau_channel *chan; |
1035 | u32 handle = (mthd->engine << 16) | mthd->oclass; | 1044 | u32 handle = (mthd->engine << 16) | mthd->oclass; |
1036 | 1045 | ||
1037 | if (mthd->init == nve0_bo_move_init) | 1046 | if (mthd->engine) |
1038 | chan = drm->cechan; | 1047 | chan = drm->cechan; |
1039 | else | 1048 | else |
1040 | chan = drm->channel; | 1049 | chan = drm->channel; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index c8ffba24720a..78637afb9b94 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
138 | { | 138 | { |
139 | struct nouveau_framebuffer *nouveau_fb; | 139 | struct nouveau_framebuffer *nouveau_fb; |
140 | struct drm_gem_object *gem; | 140 | struct drm_gem_object *gem; |
141 | int ret; | 141 | int ret = -ENOMEM; |
142 | 142 | ||
143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | 143 | gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); |
144 | if (!gem) | 144 | if (!gem) |
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev, | |||
146 | 146 | ||
147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); | 147 | nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); |
148 | if (!nouveau_fb) | 148 | if (!nouveau_fb) |
149 | return ERR_PTR(-ENOMEM); | 149 | goto err_unref; |
150 | 150 | ||
151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); | 151 | ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); |
152 | if (ret) { | 152 | if (ret) |
153 | drm_gem_object_unreference(gem); | 153 | goto err; |
154 | return ERR_PTR(ret); | ||
155 | } | ||
156 | 154 | ||
157 | return &nouveau_fb->base; | 155 | return &nouveau_fb->base; |
156 | |||
157 | err: | ||
158 | kfree(nouveau_fb); | ||
159 | err_unref: | ||
160 | drm_gem_object_unreference(gem); | ||
161 | return ERR_PTR(ret); | ||
158 | } | 162 | } |
159 | 163 | ||
160 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { | 164 | static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { |
@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
524 | struct nouveau_page_flip_state *s; | 528 | struct nouveau_page_flip_state *s; |
525 | struct nouveau_channel *chan = NULL; | 529 | struct nouveau_channel *chan = NULL; |
526 | struct nouveau_fence *fence; | 530 | struct nouveau_fence *fence; |
527 | struct list_head res; | 531 | struct ttm_validate_buffer resv[2] = { |
528 | struct ttm_validate_buffer res_val[2]; | 532 | { .bo = &old_bo->bo }, |
533 | { .bo = &new_bo->bo }, | ||
534 | }; | ||
529 | struct ww_acquire_ctx ticket; | 535 | struct ww_acquire_ctx ticket; |
536 | LIST_HEAD(res); | ||
530 | int ret; | 537 | int ret; |
531 | 538 | ||
532 | if (!drm->channel) | 539 | if (!drm->channel) |
@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
545 | chan = drm->channel; | 552 | chan = drm->channel; |
546 | spin_unlock(&old_bo->bo.bdev->fence_lock); | 553 | spin_unlock(&old_bo->bo.bdev->fence_lock); |
547 | 554 | ||
548 | mutex_lock(&chan->cli->mutex); | ||
549 | |||
550 | if (new_bo != old_bo) { | 555 | if (new_bo != old_bo) { |
551 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); | 556 | ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); |
552 | if (likely(!ret)) { | 557 | if (ret) |
553 | res_val[0].bo = &old_bo->bo; | 558 | goto fail_free; |
554 | res_val[1].bo = &new_bo->bo; | ||
555 | INIT_LIST_HEAD(&res); | ||
556 | list_add_tail(&res_val[0].head, &res); | ||
557 | list_add_tail(&res_val[1].head, &res); | ||
558 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
559 | if (ret) | ||
560 | nouveau_bo_unpin(new_bo); | ||
561 | } | ||
562 | } else | ||
563 | ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); | ||
564 | 559 | ||
565 | if (ret) { | 560 | list_add(&resv[1].head, &res); |
566 | mutex_unlock(&chan->cli->mutex); | ||
567 | goto fail_free; | ||
568 | } | 561 | } |
562 | list_add(&resv[0].head, &res); | ||
563 | |||
564 | mutex_lock(&chan->cli->mutex); | ||
565 | ret = ttm_eu_reserve_buffers(&ticket, &res); | ||
566 | if (ret) | ||
567 | goto fail_unpin; | ||
569 | 568 | ||
570 | /* Initialize a page flip struct */ | 569 | /* Initialize a page flip struct */ |
571 | *s = (struct nouveau_page_flip_state) | 570 | *s = (struct nouveau_page_flip_state) |
@@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
576 | /* Emit a page flip */ | 575 | /* Emit a page flip */ |
577 | if (nv_device(drm->device)->card_type >= NV_50) { | 576 | if (nv_device(drm->device)->card_type >= NV_50) { |
578 | ret = nv50_display_flip_next(crtc, fb, chan, 0); | 577 | ret = nv50_display_flip_next(crtc, fb, chan, 0); |
579 | if (ret) { | 578 | if (ret) |
580 | mutex_unlock(&chan->cli->mutex); | ||
581 | goto fail_unreserve; | 579 | goto fail_unreserve; |
582 | } | ||
583 | } | 580 | } |
584 | 581 | ||
585 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); | 582 | ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); |
@@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
590 | /* Update the crtc struct and cleanup */ | 587 | /* Update the crtc struct and cleanup */ |
591 | crtc->fb = fb; | 588 | crtc->fb = fb; |
592 | 589 | ||
593 | if (old_bo != new_bo) { | 590 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); |
594 | ttm_eu_fence_buffer_objects(&ticket, &res, fence); | 591 | if (old_bo != new_bo) |
595 | nouveau_bo_unpin(old_bo); | 592 | nouveau_bo_unpin(old_bo); |
596 | } else { | ||
597 | nouveau_bo_fence(new_bo, fence); | ||
598 | ttm_bo_unreserve(&new_bo->bo); | ||
599 | } | ||
600 | nouveau_fence_unref(&fence); | 593 | nouveau_fence_unref(&fence); |
601 | return 0; | 594 | return 0; |
602 | 595 | ||
603 | fail_unreserve: | 596 | fail_unreserve: |
604 | if (old_bo != new_bo) { | 597 | ttm_eu_backoff_reservation(&ticket, &res); |
605 | ttm_eu_backoff_reservation(&ticket, &res); | 598 | fail_unpin: |
599 | mutex_unlock(&chan->cli->mutex); | ||
600 | if (old_bo != new_bo) | ||
606 | nouveau_bo_unpin(new_bo); | 601 | nouveau_bo_unpin(new_bo); |
607 | } else | ||
608 | ttm_bo_unreserve(&new_bo->bo); | ||
609 | fail_free: | 602 | fail_free: |
610 | kfree(s); | 603 | kfree(s); |
611 | return ret; | 604 | return ret; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 2c2097af2378..1faa75f42393 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm) | |||
192 | 192 | ||
193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; | 193 | arg0 = NVE0_CHANNEL_IND_ENGINE_GR; |
194 | arg1 = 1; | 194 | arg1 = 1; |
195 | } else | ||
196 | if (device->chipset >= 0xa3 && | ||
197 | device->chipset != 0xaa && | ||
198 | device->chipset != 0xac) { | ||
199 | ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, | ||
200 | NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, | ||
201 | &drm->cechan); | ||
202 | if (ret) | ||
203 | NV_ERROR(drm, "failed to create ce channel, %d\n", ret); | ||
204 | |||
205 | arg0 = NvDmaFB; | ||
206 | arg1 = NvDmaTT; | ||
195 | } else { | 207 | } else { |
196 | arg0 = NvDmaFB; | 208 | arg0 = NvDmaFB; |
197 | arg1 = NvDmaTT; | 209 | arg1 = NvDmaTT; |
@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev, | |||
284 | return 0; | 296 | return 0; |
285 | } | 297 | } |
286 | 298 | ||
287 | static struct lock_class_key drm_client_lock_class_key; | ||
288 | |||
289 | static int | 299 | static int |
290 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) | 300 | nouveau_drm_load(struct drm_device *dev, unsigned long flags) |
291 | { | 301 | { |
@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
297 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); | 307 | ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); |
298 | if (ret) | 308 | if (ret) |
299 | return ret; | 309 | return ret; |
300 | lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); | ||
301 | 310 | ||
302 | dev->dev_private = drm; | 311 | dev->dev_private = drm; |
303 | drm->dev = dev; | 312 | drm->dev = dev; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 9352010030e9..4c1bc061fae2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -385,6 +385,7 @@ out_unlock: | |||
385 | mutex_unlock(&dev->struct_mutex); | 385 | mutex_unlock(&dev->struct_mutex); |
386 | if (chan) | 386 | if (chan) |
387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); | 387 | nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); |
388 | nouveau_bo_unmap(nvbo); | ||
388 | out_unpin: | 389 | out_unpin: |
389 | nouveau_bo_unpin(nvbo); | 390 | nouveau_bo_unpin(nvbo); |
390 | out_unref: | 391 | out_unref: |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 1680d9187bab..be3149932c2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c | |||
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) | |||
143 | int ret; | 143 | int ret; |
144 | 144 | ||
145 | fence->channel = chan; | 145 | fence->channel = chan; |
146 | fence->timeout = jiffies + (3 * DRM_HZ); | 146 | fence->timeout = jiffies + (15 * DRM_HZ); |
147 | fence->sequence = ++fctx->sequence; | 147 | fence->sequence = ++fctx->sequence; |
148 | 148 | ||
149 | ret = fctx->emit(fence); | 149 | ret = fctx->emit(fence); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 86597ebf8c98..487242fb3fdc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c | |||
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) | |||
50 | return; | 50 | return; |
51 | nvbo->gem = NULL; | 51 | nvbo->gem = NULL; |
52 | 52 | ||
53 | /* Lockdep hates you for doing reserve with gem object lock held */ | ||
54 | if (WARN_ON_ONCE(nvbo->pin_refcnt)) { | ||
55 | nvbo->pin_refcnt = 1; | ||
56 | nouveau_bo_unpin(nvbo); | ||
57 | } | ||
58 | |||
59 | if (gem->import_attach) | 53 | if (gem->import_attach) |
60 | drm_prime_gem_destroy(gem, nvbo->bo.sg); | 54 | drm_prime_gem_destroy(gem, nvbo->bo.sg); |
61 | 55 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 54dc6355b0c2..8b40a36c1b57 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -355,6 +355,7 @@ struct nv50_oimm { | |||
355 | 355 | ||
356 | struct nv50_head { | 356 | struct nv50_head { |
357 | struct nouveau_crtc base; | 357 | struct nouveau_crtc base; |
358 | struct nouveau_bo *image; | ||
358 | struct nv50_curs curs; | 359 | struct nv50_curs curs; |
359 | struct nv50_sync sync; | 360 | struct nv50_sync sync; |
360 | struct nv50_ovly ovly; | 361 | struct nv50_ovly ovly; |
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
517 | { | 518 | { |
518 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); | 519 | struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); |
519 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 520 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
521 | struct nv50_head *head = nv50_head(crtc); | ||
520 | struct nv50_sync *sync = nv50_sync(crtc); | 522 | struct nv50_sync *sync = nv50_sync(crtc); |
521 | int head = nv_crtc->index, ret; | ||
522 | u32 *push; | 523 | u32 *push; |
524 | int ret; | ||
523 | 525 | ||
524 | swap_interval <<= 4; | 526 | swap_interval <<= 4; |
525 | if (swap_interval == 0) | 527 | if (swap_interval == 0) |
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
537 | return ret; | 539 | return ret; |
538 | 540 | ||
539 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); | 541 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); |
540 | OUT_RING (chan, NvEvoSema0 + head); | 542 | OUT_RING (chan, NvEvoSema0 + nv_crtc->index); |
541 | OUT_RING (chan, sync->addr ^ 0x10); | 543 | OUT_RING (chan, sync->addr ^ 0x10); |
542 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); | 544 | BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); |
543 | OUT_RING (chan, sync->data + 1); | 545 | OUT_RING (chan, sync->data + 1); |
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
546 | OUT_RING (chan, sync->data); | 548 | OUT_RING (chan, sync->data); |
547 | } else | 549 | } else |
548 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { | 550 | if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { |
549 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 551 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
550 | ret = RING_SPACE(chan, 12); | 552 | ret = RING_SPACE(chan, 12); |
551 | if (ret) | 553 | if (ret) |
552 | return ret; | 554 | return ret; |
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
565 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); | 567 | OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); |
566 | } else | 568 | } else |
567 | if (chan) { | 569 | if (chan) { |
568 | u64 addr = nv84_fence_crtc(chan, head) + sync->addr; | 570 | u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; |
569 | ret = RING_SPACE(chan, 10); | 571 | ret = RING_SPACE(chan, 10); |
570 | if (ret) | 572 | if (ret) |
571 | return ret; | 573 | return ret; |
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
630 | evo_mthd(push, 0x0080, 1); | 632 | evo_mthd(push, 0x0080, 1); |
631 | evo_data(push, 0x00000000); | 633 | evo_data(push, 0x00000000); |
632 | evo_kick(push, sync); | 634 | evo_kick(push, sync); |
635 | |||
636 | nouveau_bo_ref(nv_fb->nvbo, &head->image); | ||
633 | return 0; | 637 | return 0; |
634 | } | 638 | } |
635 | 639 | ||
@@ -1038,18 +1042,17 @@ static int | |||
1038 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) | 1042 | nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) |
1039 | { | 1043 | { |
1040 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); | 1044 | struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); |
1045 | struct nv50_head *head = nv50_head(crtc); | ||
1041 | int ret; | 1046 | int ret; |
1042 | 1047 | ||
1043 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); | 1048 | ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); |
1044 | if (ret) | 1049 | if (ret == 0) { |
1045 | return ret; | 1050 | if (head->image) |
1046 | 1051 | nouveau_bo_unpin(head->image); | |
1047 | if (old_fb) { | 1052 | nouveau_bo_ref(nvfb->nvbo, &head->image); |
1048 | nvfb = nouveau_framebuffer(old_fb); | ||
1049 | nouveau_bo_unpin(nvfb->nvbo); | ||
1050 | } | 1053 | } |
1051 | 1054 | ||
1052 | return 0; | 1055 | return ret; |
1053 | } | 1056 | } |
1054 | 1057 | ||
1055 | static int | 1058 | static int |
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) | |||
1198 | } | 1201 | } |
1199 | } | 1202 | } |
1200 | 1203 | ||
1204 | static void | ||
1205 | nv50_crtc_disable(struct drm_crtc *crtc) | ||
1206 | { | ||
1207 | struct nv50_head *head = nv50_head(crtc); | ||
1208 | if (head->image) | ||
1209 | nouveau_bo_unpin(head->image); | ||
1210 | nouveau_bo_ref(NULL, &head->image); | ||
1211 | } | ||
1212 | |||
1201 | static int | 1213 | static int |
1202 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 1214 | nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, |
1203 | uint32_t handle, uint32_t width, uint32_t height) | 1215 | uint32_t handle, uint32_t width, uint32_t height) |
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
1271 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 1283 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
1272 | struct nv50_disp *disp = nv50_disp(crtc->dev); | 1284 | struct nv50_disp *disp = nv50_disp(crtc->dev); |
1273 | struct nv50_head *head = nv50_head(crtc); | 1285 | struct nv50_head *head = nv50_head(crtc); |
1286 | |||
1274 | nv50_dmac_destroy(disp->core, &head->ovly.base); | 1287 | nv50_dmac_destroy(disp->core, &head->ovly.base); |
1275 | nv50_pioc_destroy(disp->core, &head->oimm.base); | 1288 | nv50_pioc_destroy(disp->core, &head->oimm.base); |
1276 | nv50_dmac_destroy(disp->core, &head->sync.base); | 1289 | nv50_dmac_destroy(disp->core, &head->sync.base); |
1277 | nv50_pioc_destroy(disp->core, &head->curs.base); | 1290 | nv50_pioc_destroy(disp->core, &head->curs.base); |
1291 | |||
1292 | /*XXX: this shouldn't be necessary, but the core doesn't call | ||
1293 | * disconnect() during the cleanup paths | ||
1294 | */ | ||
1295 | if (head->image) | ||
1296 | nouveau_bo_unpin(head->image); | ||
1297 | nouveau_bo_ref(NULL, &head->image); | ||
1298 | |||
1278 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); | 1299 | nouveau_bo_unmap(nv_crtc->cursor.nvbo); |
1279 | if (nv_crtc->cursor.nvbo) | 1300 | if (nv_crtc->cursor.nvbo) |
1280 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); | 1301 | nouveau_bo_unpin(nv_crtc->cursor.nvbo); |
1281 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); | 1302 | nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); |
1303 | |||
1282 | nouveau_bo_unmap(nv_crtc->lut.nvbo); | 1304 | nouveau_bo_unmap(nv_crtc->lut.nvbo); |
1283 | if (nv_crtc->lut.nvbo) | 1305 | if (nv_crtc->lut.nvbo) |
1284 | nouveau_bo_unpin(nv_crtc->lut.nvbo); | 1306 | nouveau_bo_unpin(nv_crtc->lut.nvbo); |
1285 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); | 1307 | nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); |
1308 | |||
1286 | drm_crtc_cleanup(crtc); | 1309 | drm_crtc_cleanup(crtc); |
1287 | kfree(crtc); | 1310 | kfree(crtc); |
1288 | } | 1311 | } |
@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { | |||
1296 | .mode_set_base = nv50_crtc_mode_set_base, | 1319 | .mode_set_base = nv50_crtc_mode_set_base, |
1297 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, | 1320 | .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, |
1298 | .load_lut = nv50_crtc_lut_load, | 1321 | .load_lut = nv50_crtc_lut_load, |
1322 | .disable = nv50_crtc_disable, | ||
1299 | }; | 1323 | }; |
1300 | 1324 | ||
1301 | static const struct drm_crtc_funcs nv50_crtc_func = { | 1325 | static const struct drm_crtc_funcs nv50_crtc_func = { |