author     Linus Torvalds <torvalds@linux-foundation.org>   2011-07-12 17:18:00 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-07-12 17:18:00 -0400
commit     d1ca1a004822983e2fc702d5382b4b9a5527cfbe
tree       ced77f6c6a5f98bb8d441030813d5e336dd24fa4
parent     620917de59eeb934b9f8cf35cc2d95c1ac8ed0fc
parent     a94919eaddaa3fede1df8563ce4d761a75374645
Merge branch 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6
* 'drm-intel-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/keithp/linux-2.6:
drm/i915/ringbuffer: Idling requires waiting for the ring to be empty
Revert "drm/i915: enable rc6 by default"
drm/i915: Clean up i915_driver_load failure path
drm/i915: Enable GPU reset on Ivybridge.
drm/i915/dp: manage sink power state if possible
drm/i915/dp: consolidate AUX retry code
drm/i915/dp: remove DPMS mode tracking from DP
drm/i915/dp: try to read receiver capabilities 3 times when detecting
drm/i915/dp: read more receiver capability bits on hotplug
drm/i915/dp: use DP DPCD defines when looking at DPCD values
drm/i915/dp: retry link status read 3 times on failure
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c         |  14
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c         | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h |   2
4 files changed, 105 insertions, 32 deletions
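
Several of the DP fixes in this pull ("try to read receiver capabilities 3 times when detecting", "retry link status read 3 times on failure", "manage sink power state if possible") revolve around one pattern: an AUX transaction against a sink that is still waking up may fail, so the source retries up to three times with a ~1 ms pause between attempts. Below is a minimal, stand-alone C sketch of that pattern, not the driver code itself; dp_aux_read() is a hypothetical stand-in for the real AUX transfer helper.

/*
 * Illustrative sketch (userspace C): retry an AUX read up to three times,
 * sleeping 1 ms between attempts, mirroring what the patches below add as
 * intel_dp_aux_native_read_retry().  dp_aux_read() is hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Pretend AUX channel: fails twice, then answers, like a sink waking up. */
static int dp_aux_read(uint16_t address, uint8_t *buf, size_t len)
{
        static int attempts;

        (void)address;
        if (++attempts < 3)
                return -1;              /* sink not awake yet */
        for (size_t i = 0; i < len; i++)
                buf[i] = 0x11;          /* fake DPCD revision 1.1 */
        return (int)len;
}

/* Retry wrapper: true only if the full-length read eventually succeeds. */
static bool dp_aux_read_retry(uint16_t address, uint8_t *buf, size_t len)
{
        for (int i = 0; i < 3; i++) {
                if (dp_aux_read(address, buf, len) == (int)len)
                        return true;
                usleep(1000);           /* give the sink ~1 ms to wake */
        }
        return false;
}

int main(void)
{
        uint8_t dpcd[4];

        if (dp_aux_read_retry(0x000, dpcd, sizeof(dpcd)))
                printf("DPCD rev %#x read after retries\n", dpcd[0]);
        else
                printf("sink did not respond\n");
        return 0;
}
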
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e1787022d6c8..296fbd66f0e1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1943,7 +1943,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         if (!dev_priv->mm.gtt) {
                 DRM_ERROR("Failed to initialize GTT\n");
                 ret = -ENODEV;
-                goto out_iomapfree;
+                goto out_rmmap;
         }
 
         agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -1987,7 +1987,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         if (dev_priv->wq == NULL) {
                 DRM_ERROR("Failed to create our workqueue.\n");
                 ret = -ENOMEM;
-                goto out_iomapfree;
+                goto out_mtrrfree;
         }
 
         /* enable GEM by default */
@@ -2074,13 +2074,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         return 0;
 
 out_gem_unload:
+        if (dev_priv->mm.inactive_shrinker.shrink)
+                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
         if (dev->pdev->msi_enabled)
                 pci_disable_msi(dev->pdev);
 
         intel_teardown_gmbus(dev);
         intel_teardown_mchbar(dev);
         destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+        if (dev_priv->mm.gtt_mtrr >= 0) {
+                mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+                         dev->agp->agp_info.aper_size * 1024 * 1024);
+                dev_priv->mm.gtt_mtrr = -1;
+        }
         io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
         pci_iounmap(dev->pdev, dev_priv->regs);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 013d304455b9..eb91e2dd7914 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -52,7 +52,7 @@ module_param_named(powersave, i915_powersave, int, 0600);
 unsigned int i915_semaphores = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 = 0;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 
 unsigned int i915_enable_fbc = 0;
@@ -577,6 +577,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
         if (get_seconds() - dev_priv->last_gpu_reset < 5) {
                 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
         } else switch (INTEL_INFO(dev)->gen) {
+        case 7:
         case 6:
                 ret = gen6_do_reset(dev, flags);
                 /* If reset with a user forcewake, try to restore */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 391b55f1cc74..e2aced6eec4c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,7 +50,6 @@ struct intel_dp {
         bool has_audio;
         int force_audio;
         uint32_t color_range;
-        int dpms_mode;
         uint8_t link_bw;
         uint8_t lane_count;
         uint8_t dpcd[4];
@@ -138,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
         int max_lane_count = 4;
 
-        if (intel_dp->dpcd[0] >= 0x11) {
-                max_lane_count = intel_dp->dpcd[2] & 0x1f;
+        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+                max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
                 switch (max_lane_count) {
                 case 1: case 2: case 4:
                         break;
@@ -153,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
 static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
-        int max_link_bw = intel_dp->dpcd[1];
+        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
 
         switch (max_link_bw) {
         case DP_LINK_BW_1_62:
@@ -774,7 +773,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
         /*
          * Check for DPCD version > 1.1 and enhanced framing support
          */
-        if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+            (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
                 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
                 intel_dp->DP |= DP_ENHANCED_FRAMING;
         }
@@ -942,11 +942,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
         udelay(200);
 }
 
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+        int ret, i;
+
+        /* Should have a valid DPCD by this point */
+        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+                return;
+
+        if (mode != DRM_MODE_DPMS_ON) {
+                ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+                                                  DP_SET_POWER_D3);
+                if (ret != 1)
+                        DRM_DEBUG_DRIVER("failed to write sink power state\n");
+        } else {
+                /*
+                 * When turning on, we need to retry for 1ms to give the sink
+                 * time to wake up.
+                 */
+                for (i = 0; i < 3; i++) {
+                        ret = intel_dp_aux_native_write_1(intel_dp,
+                                                          DP_SET_POWER,
+                                                          DP_SET_POWER_D0);
+                        if (ret == 1)
+                                break;
+                        msleep(1);
+                }
+        }
+}
+
 static void intel_dp_prepare(struct drm_encoder *encoder)
 {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
         struct drm_device *dev = encoder->dev;
 
+        /* Wake up the sink first */
+        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
         if (is_edp(intel_dp)) {
                 ironlake_edp_backlight_off(dev);
                 ironlake_edp_panel_off(dev);
@@ -990,6 +1023,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
         if (mode != DRM_MODE_DPMS_ON) {
                 if (is_edp(intel_dp))
                         ironlake_edp_backlight_off(dev);
+                intel_dp_sink_dpms(intel_dp, mode);
                 intel_dp_link_down(intel_dp);
                 if (is_edp(intel_dp))
                         ironlake_edp_panel_off(dev);
@@ -998,6 +1032,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
         } else {
                 if (is_edp(intel_dp))
                         ironlake_edp_panel_vdd_on(intel_dp);
+                intel_dp_sink_dpms(intel_dp, mode);
                 if (!(dp_reg & DP_PORT_EN)) {
                         intel_dp_start_link_train(intel_dp);
                         if (is_edp(intel_dp)) {
@@ -1009,7 +1044,31 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
                 if (is_edp(intel_dp))
                         ironlake_edp_backlight_on(dev);
         }
-        intel_dp->dpms_mode = mode;
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+                               uint8_t *recv, int recv_bytes)
+{
+        int ret, i;
+
+        /*
+         * Sinks are *supposed* to come up within 1ms from an off state,
+         * but we're also supposed to retry 3 times per the spec.
+         */
+        for (i = 0; i < 3; i++) {
+                ret = intel_dp_aux_native_read(intel_dp, address, recv,
+                                               recv_bytes);
+                if (ret == recv_bytes)
+                        return true;
+                msleep(1);
+        }
+
+        return false;
 }
 
 /*
@@ -1019,14 +1078,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 static bool
 intel_dp_get_link_status(struct intel_dp *intel_dp)
 {
-        int ret;
-
-        ret = intel_dp_aux_native_read(intel_dp,
-                                       DP_LANE0_1_STATUS,
-                                       intel_dp->link_status, DP_LINK_STATUS_SIZE);
-        if (ret != DP_LINK_STATUS_SIZE)
-                return false;
-        return true;
+        return intel_dp_aux_native_read_retry(intel_dp,
+                                              DP_LANE0_1_STATUS,
+                                              intel_dp->link_status,
+                                              DP_LINK_STATUS_SIZE);
 }
 
 static uint8_t
@@ -1515,6 +1570,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+        int ret;
+
         if (!intel_dp->base.base.crtc)
                 return;
 
@@ -1523,6 +1580,15 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
                 return;
         }
 
+        /* Try to read receiver status if the link appears to be up */
+        ret = intel_dp_aux_native_read(intel_dp,
+                                       0x000, intel_dp->dpcd,
+                                       sizeof (intel_dp->dpcd));
+        if (ret != sizeof(intel_dp->dpcd)) {
+                intel_dp_link_down(intel_dp);
+                return;
+        }
+
         if (!intel_channel_eq_ok(intel_dp)) {
                 intel_dp_start_link_train(intel_dp);
                 intel_dp_complete_link_train(intel_dp);
@@ -1533,6 +1599,7 @@ static enum drm_connector_status
 ironlake_dp_detect(struct intel_dp *intel_dp)
 {
         enum drm_connector_status status;
+        bool ret;
 
         /* Can't disconnect eDP, but you can close the lid... */
         if (is_edp(intel_dp)) {
@@ -1543,13 +1610,11 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
         }
 
         status = connector_status_disconnected;
-        if (intel_dp_aux_native_read(intel_dp,
-                                     0x000, intel_dp->dpcd,
-                                     sizeof (intel_dp->dpcd))
-            == sizeof(intel_dp->dpcd)) {
-                if (intel_dp->dpcd[0] != 0)
-                        status = connector_status_connected;
-        }
+        ret = intel_dp_aux_native_read_retry(intel_dp,
+                                             0x000, intel_dp->dpcd,
+                                             sizeof (intel_dp->dpcd));
+        if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
+                status = connector_status_connected;
         DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
                       intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
         return status;
@@ -1586,7 +1651,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
         if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
                                      sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
         {
-                if (intel_dp->dpcd[0] != 0)
+                if (intel_dp->dpcd[DP_DPCD_REV] != 0)
                         status = connector_status_connected;
         }
 
@@ -1790,8 +1855,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
         struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
-        if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
-                intel_dp_check_link_status(intel_dp);
+        intel_dp_check_link_status(intel_dp);
 }
 
 /* Return which DP Port should be selected for Transcoder DP control */
@@ -1859,7 +1923,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                 return;
 
         intel_dp->output_reg = output_reg;
-        intel_dp->dpms_mode = -1;
 
         intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
         if (!intel_connector) {
@@ -1954,8 +2017,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
                                                sizeof(intel_dp->dpcd));
                 ironlake_edp_panel_vdd_off(intel_dp);
                 if (ret == sizeof(intel_dp->dpcd)) {
-                        if (intel_dp->dpcd[0] >= 0x11)
-                                dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+                        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+                                dev_priv->no_aux_handshake =
+                                        intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                         DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
                 } else {
                         /* if this fails, presume the device is a ghost */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c0e0ee63fbf4..39ac2b634ae5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
 static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
 {
-        return intel_wait_ring_buffer(ring, ring->space - 8);
+        return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
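
A note on the intel_ringbuffer.h one-liner above: the ring bookkeeping in this driver keeps a small gap (8 bytes here) between tail and head, so the free space can never exceed size - 8. Waiting for "space - 8" free bytes (the old code) is therefore already satisfied and waits for nothing, while waiting for "size - 8" really does wait until the GPU has drained the ring, hence "Idling requires waiting for the ring to be empty". Below is a hedged, self-contained C sketch of that accounting; the field and function names are illustrative and only loosely mirror struct intel_ring_buffer, not the driver's actual code.

/*
 * Sketch only: simplified ring-space accounting showing why idling must
 * wait for "size - 8" free bytes rather than "space - 8".
 */
#include <assert.h>
#include <stdbool.h>

struct ring {
        int head;       /* GPU read offset into the ring, in bytes */
        int tail;       /* CPU write offset into the ring, in bytes */
        int size;       /* total ring size, in bytes */
};

/* Free bytes; the 8-byte gap keeps tail from ever catching up with head. */
static int ring_space(const struct ring *ring)
{
        int space = ring->head - (ring->tail + 8);

        if (space < 0)
                space += ring->size;
        return space;
}

/* Idle: the GPU has consumed everything, so space is back at its maximum. */
static bool ring_is_idle(const struct ring *ring)
{
        return ring_space(ring) >= ring->size - 8;
}

int main(void)
{
        struct ring busy = { .head = 0,   .tail = 256, .size = 4096 };
        struct ring idle = { .head = 256, .tail = 256, .size = 4096 };

        /* The old condition, space >= space - 8, holds even while busy. */
        assert(ring_space(&busy) >= ring_space(&busy) - 8);
        assert(!ring_is_idle(&busy));
        assert(ring_is_idle(&idle));
        return 0;
}
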