aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2013-10-15 04:04:08 -0400
committerDave Airlie <airlied@redhat.com>2013-10-15 04:04:08 -0400
commit5259c522a0b2e827b402c2993b8088071a87d7e2 (patch)
tree812608fd6efcfe81096bd51b1ec1c2a4167385f6
parent6aba5b6cf098ba305fc31b23cc14114a16768d22 (diff)
parent967ad7f1489da7babbe0746f81c283458ecd3f84 (diff)
Merge branch 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
New feature pile for 3.12! Highlights: - Stereo/3d support for hdmi from Damien, both the drm core bits and the i915 integration. - Manual boost/deboost logic for gpu turbo (Chris) - Fixed up clock readout support for vlv (Chris). - Tons of little fixes and improvements for vlv in general (Chon Ming Lee and Jesse Barnes). - Power well support for the legacy vga plane (Ville). - DP improvements from Jani. - Improvements to the Haswell modeset sequence (Ville+Paulo). - Haswell DDI improvements, using the VBT for some tuning values and to check the configuration (Paulo). - Tons of other small improvements and fixups. * 'drm-intel-next' of git://people.freedesktop.org/~danvet/drm-intel: (92 commits) drm/i915: Use adjusted_mode in the fastboot hack to disable pfit drm/i915: Add a more detailed comment about the set_base() fastboot hack drm/i915/vlv: Turn off power gate for BIOS-less system. drm/i915/vlv: reset DPIO on load and resume v2 drm/i915: Simplify PSR debugfs drm/i915: Tweak RPS thresholds to more aggressively downclock drm/i915: Boost RPS frequency for CPU stalls drm/i915: Fix __wait_seqno to use true infinite timeouts drm/i915: Add some missing steps to i915_driver_load error path drm/i915: Clean up the ring scaling calculations drm/i915: Don't populate pipe_src_{w,h} multiple times drm/i915: implement the Haswell mode set sequence workaround drm/i915: Disable/enable planes as the first/last thing during modeset on HSW i915/vlv: untangle integrated clock source handling v4 drm/i915: fix typo s/PatherPoint/PantherPoint/ drm/i915: Make intel_resume_power_well() static drm/i915: destroy connector sysfs files earlier drm/i915/dp: do not write DP_TRAINING_PATTERN_SET all the time drm/i915/dp: retry i2c-over-aux seven times on AUX DEFER drm/i915/vlv: reduce GT FIFO error info to a debug message ...
-rw-r--r--drivers/gpu/drm/drm_crtc.c100
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c8
-rw-r--r--drivers/gpu/drm/drm_drv.c1
-rw-r--r--drivers/gpu/drm/drm_edid.c176
-rw-r--r--drivers/gpu/drm/drm_ioctl.c21
-rw-r--r--drivers/gpu/drm/drm_modes.c41
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c198
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c64
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c8
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h74
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c283
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c70
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h51
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c22
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h62
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c179
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h46
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c39
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c54
-rw-r--r--drivers/gpu/drm/i915/intel_display.c557
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c227
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h510
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c7
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c25
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c12
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c64
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c10
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c19
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c235
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c39
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c15
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c49
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c2
-rw-r--r--include/drm/drmP.h5
-rw-r--r--include/drm/drm_crtc.h17
-rw-r--r--include/uapi/drm/drm.h37
-rw-r--r--include/uapi/drm/drm_mode.h43
46 files changed, 2268 insertions, 1150 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e79577cb4665..d7a8370e3cdc 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1319,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
1319 if (in->clock > INT_MAX || in->vrefresh > INT_MAX) 1319 if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
1320 return -ERANGE; 1320 return -ERANGE;
1321 1321
1322 if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
1323 return -EINVAL;
1324
1322 out->clock = in->clock; 1325 out->clock = in->clock;
1323 out->hdisplay = in->hdisplay; 1326 out->hdisplay = in->hdisplay;
1324 out->hsync_start = in->hsync_start; 1327 out->hsync_start = in->hsync_start;
@@ -1581,6 +1584,19 @@ out:
1581 return ret; 1584 return ret;
1582} 1585}
1583 1586
1587static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
1588 const struct drm_file *file_priv)
1589{
1590 /*
1591 * If user-space hasn't configured the driver to expose the stereo 3D
1592 * modes, don't expose them.
1593 */
1594 if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
1595 return false;
1596
1597 return true;
1598}
1599
1584/** 1600/**
1585 * drm_mode_getconnector - get connector configuration 1601 * drm_mode_getconnector - get connector configuration
1586 * @dev: drm device for the ioctl 1602 * @dev: drm device for the ioctl
@@ -1646,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1646 1662
1647 /* delayed so we get modes regardless of pre-fill_modes state */ 1663 /* delayed so we get modes regardless of pre-fill_modes state */
1648 list_for_each_entry(mode, &connector->modes, head) 1664 list_for_each_entry(mode, &connector->modes, head)
1649 mode_count++; 1665 if (drm_mode_expose_to_userspace(mode, file_priv))
1666 mode_count++;
1650 1667
1651 out_resp->connector_id = connector->base.id; 1668 out_resp->connector_id = connector->base.id;
1652 out_resp->connector_type = connector->connector_type; 1669 out_resp->connector_type = connector->connector_type;
@@ -1668,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1668 copied = 0; 1685 copied = 0;
1669 mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; 1686 mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
1670 list_for_each_entry(mode, &connector->modes, head) { 1687 list_for_each_entry(mode, &connector->modes, head) {
1688 if (!drm_mode_expose_to_userspace(mode, file_priv))
1689 continue;
1690
1671 drm_crtc_convert_to_umode(&u_mode, mode); 1691 drm_crtc_convert_to_umode(&u_mode, mode);
1672 if (copy_to_user(mode_ptr + copied, 1692 if (copy_to_user(mode_ptr + copied,
1673 &u_mode, sizeof(u_mode))) { 1693 &u_mode, sizeof(u_mode))) {
@@ -2042,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
2042} 2062}
2043EXPORT_SYMBOL(drm_mode_set_config_internal); 2063EXPORT_SYMBOL(drm_mode_set_config_internal);
2044 2064
2065/*
2066 * Checks that the framebuffer is big enough for the CRTC viewport
2067 * (x, y, hdisplay, vdisplay)
2068 */
2069static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
2070 int x, int y,
2071 const struct drm_display_mode *mode,
2072 const struct drm_framebuffer *fb)
2073
2074{
2075 int hdisplay, vdisplay;
2076
2077 hdisplay = mode->hdisplay;
2078 vdisplay = mode->vdisplay;
2079
2080 if (drm_mode_is_stereo(mode)) {
2081 struct drm_display_mode adjusted = *mode;
2082
2083 drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
2084 hdisplay = adjusted.crtc_hdisplay;
2085 vdisplay = adjusted.crtc_vdisplay;
2086 }
2087
2088 if (crtc->invert_dimensions)
2089 swap(hdisplay, vdisplay);
2090
2091 if (hdisplay > fb->width ||
2092 vdisplay > fb->height ||
2093 x > fb->width - hdisplay ||
2094 y > fb->height - vdisplay) {
2095 DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
2096 fb->width, fb->height, hdisplay, vdisplay, x, y,
2097 crtc->invert_dimensions ? " (inverted)" : "");
2098 return -ENOSPC;
2099 }
2100
2101 return 0;
2102}
2103
2045/** 2104/**
2046 * drm_mode_setcrtc - set CRTC configuration 2105 * drm_mode_setcrtc - set CRTC configuration
2047 * @dev: drm device for the ioctl 2106 * @dev: drm device for the ioctl
@@ -2089,7 +2148,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2089 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 2148 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
2090 2149
2091 if (crtc_req->mode_valid) { 2150 if (crtc_req->mode_valid) {
2092 int hdisplay, vdisplay;
2093 /* If we have a mode we need a framebuffer. */ 2151 /* If we have a mode we need a framebuffer. */
2094 /* If we pass -1, set the mode with the currently bound fb */ 2152 /* If we pass -1, set the mode with the currently bound fb */
2095 if (crtc_req->fb_id == -1) { 2153 if (crtc_req->fb_id == -1) {
@@ -2125,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2125 2183
2126 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); 2184 drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
2127 2185
2128 hdisplay = mode->hdisplay; 2186 ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
2129 vdisplay = mode->vdisplay; 2187 mode, fb);
2130 2188 if (ret)
2131 if (crtc->invert_dimensions)
2132 swap(hdisplay, vdisplay);
2133
2134 if (hdisplay > fb->width ||
2135 vdisplay > fb->height ||
2136 crtc_req->x > fb->width - hdisplay ||
2137 crtc_req->y > fb->height - vdisplay) {
2138 DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
2139 fb->width, fb->height,
2140 hdisplay, vdisplay, crtc_req->x, crtc_req->y,
2141 crtc->invert_dimensions ? " (inverted)" : "");
2142 ret = -ENOSPC;
2143 goto out; 2189 goto out;
2144 } 2190
2145 } 2191 }
2146 2192
2147 if (crtc_req->count_connectors == 0 && mode) { 2193 if (crtc_req->count_connectors == 0 && mode) {
@@ -3558,7 +3604,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3558 struct drm_framebuffer *fb = NULL, *old_fb = NULL; 3604 struct drm_framebuffer *fb = NULL, *old_fb = NULL;
3559 struct drm_pending_vblank_event *e = NULL; 3605 struct drm_pending_vblank_event *e = NULL;
3560 unsigned long flags; 3606 unsigned long flags;
3561 int hdisplay, vdisplay;
3562 int ret = -EINVAL; 3607 int ret = -EINVAL;
3563 3608
3564 if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || 3609 if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3590,22 +3635,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3590 if (!fb) 3635 if (!fb)
3591 goto out; 3636 goto out;
3592 3637
3593 hdisplay = crtc->mode.hdisplay; 3638 ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
3594 vdisplay = crtc->mode.vdisplay; 3639 if (ret)
3595
3596 if (crtc->invert_dimensions)
3597 swap(hdisplay, vdisplay);
3598
3599 if (hdisplay > fb->width ||
3600 vdisplay > fb->height ||
3601 crtc->x > fb->width - hdisplay ||
3602 crtc->y > fb->height - vdisplay) {
3603 DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
3604 fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
3605 crtc->invert_dimensions ? " (inverted)" : "");
3606 ret = -ENOSPC;
3607 goto out; 3640 goto out;
3608 }
3609 3641
3610 if (crtc->fb->pixel_format != fb->pixel_format) { 3642 if (crtc->fb->pixel_format != fb->pixel_format) {
3611 DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n"); 3643 DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index c2a525dcf0d4..5fcb9d487672 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -76,7 +76,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
76{ 76{
77 struct drm_display_mode *mode; 77 struct drm_display_mode *mode;
78 78
79 if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE)) 79 if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
80 DRM_MODE_FLAG_3D_MASK))
80 return; 81 return;
81 82
82 list_for_each_entry(mode, &connector->modes, head) { 83 list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +87,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
86 if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) && 87 if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
87 !(flags & DRM_MODE_FLAG_DBLSCAN)) 88 !(flags & DRM_MODE_FLAG_DBLSCAN))
88 mode->status = MODE_NO_DBLESCAN; 89 mode->status = MODE_NO_DBLESCAN;
90 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
91 !(flags & DRM_MODE_FLAG_3D_MASK))
92 mode->status = MODE_NO_STEREO;
89 } 93 }
90 94
91 return; 95 return;
@@ -175,6 +179,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
175 mode_flags |= DRM_MODE_FLAG_INTERLACE; 179 mode_flags |= DRM_MODE_FLAG_INTERLACE;
176 if (connector->doublescan_allowed) 180 if (connector->doublescan_allowed)
177 mode_flags |= DRM_MODE_FLAG_DBLSCAN; 181 mode_flags |= DRM_MODE_FLAG_DBLSCAN;
182 if (connector->stereo_allowed)
183 mode_flags |= DRM_MODE_FLAG_3D_MASK;
178 drm_mode_validate_flag(connector, mode_flags); 184 drm_mode_validate_flag(connector, mode_flags);
179 185
180 list_for_each_entry(mode, &connector->modes, head) { 186 list_for_each_entry(mode, &connector->modes, head) {
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 6495bdfd35dc..b55f138bd990 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 69 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), 70 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), 71 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
72 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 73 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
73 74
74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 75 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9173be34b4c2..9e81609b1e29 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2416,7 +2416,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
2416 2416
2417 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || 2417 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2418 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && 2418 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2419 drm_mode_equal_no_clocks(to_match, cea_mode)) 2419 drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
2420 return mode + 1; 2420 return mode + 1;
2421 } 2421 }
2422 return 0; 2422 return 0;
@@ -2465,7 +2465,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
2465 2465
2466 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || 2466 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
2467 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && 2467 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
2468 drm_mode_equal_no_clocks(to_match, hdmi_mode)) 2468 drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
2469 return mode + 1; 2469 return mode + 1;
2470 } 2470 }
2471 return 0; 2471 return 0;
@@ -2519,6 +2519,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
2519 if (!newmode) 2519 if (!newmode)
2520 continue; 2520 continue;
2521 2521
2522 /* Carry over the stereo flags */
2523 newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
2524
2522 /* 2525 /*
2523 * The current mode could be either variant. Make 2526 * The current mode could be either variant. Make
2524 * sure to pick the "other" clock for the new mode. 2527 * sure to pick the "other" clock for the new mode.
@@ -2565,18 +2568,102 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
2565 return modes; 2568 return modes;
2566} 2569}
2567 2570
2571struct stereo_mandatory_mode {
2572 int width, height, vrefresh;
2573 unsigned int flags;
2574};
2575
2576static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
2577 { 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
2578 { 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
2579 { 1920, 1080, 50,
2580 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
2581 { 1920, 1080, 60,
2582 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
2583 { 1280, 720, 50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
2584 { 1280, 720, 50, DRM_MODE_FLAG_3D_FRAME_PACKING },
2585 { 1280, 720, 60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
2586 { 1280, 720, 60, DRM_MODE_FLAG_3D_FRAME_PACKING }
2587};
2588
2589static bool
2590stereo_match_mandatory(const struct drm_display_mode *mode,
2591 const struct stereo_mandatory_mode *stereo_mode)
2592{
2593 unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
2594
2595 return mode->hdisplay == stereo_mode->width &&
2596 mode->vdisplay == stereo_mode->height &&
2597 interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
2598 drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
2599}
2600
2601static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
2602{
2603 struct drm_device *dev = connector->dev;
2604 const struct drm_display_mode *mode;
2605 struct list_head stereo_modes;
2606 int modes = 0, i;
2607
2608 INIT_LIST_HEAD(&stereo_modes);
2609
2610 list_for_each_entry(mode, &connector->probed_modes, head) {
2611 for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
2612 const struct stereo_mandatory_mode *mandatory;
2613 struct drm_display_mode *new_mode;
2614
2615 if (!stereo_match_mandatory(mode,
2616 &stereo_mandatory_modes[i]))
2617 continue;
2618
2619 mandatory = &stereo_mandatory_modes[i];
2620 new_mode = drm_mode_duplicate(dev, mode);
2621 if (!new_mode)
2622 continue;
2623
2624 new_mode->flags |= mandatory->flags;
2625 list_add_tail(&new_mode->head, &stereo_modes);
2626 modes++;
2627 }
2628 }
2629
2630 list_splice_tail(&stereo_modes, &connector->probed_modes);
2631
2632 return modes;
2633}
2634
2635static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
2636{
2637 struct drm_device *dev = connector->dev;
2638 struct drm_display_mode *newmode;
2639
2640 vic--; /* VICs start at 1 */
2641 if (vic >= ARRAY_SIZE(edid_4k_modes)) {
2642 DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
2643 return 0;
2644 }
2645
2646 newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
2647 if (!newmode)
2648 return 0;
2649
2650 drm_mode_probed_add(connector, newmode);
2651
2652 return 1;
2653}
2654
2568/* 2655/*
2569 * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block 2656 * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
2570 * @connector: connector corresponding to the HDMI sink 2657 * @connector: connector corresponding to the HDMI sink
2571 * @db: start of the CEA vendor specific block 2658 * @db: start of the CEA vendor specific block
2572 * @len: length of the CEA block payload, ie. one can access up to db[len] 2659 * @len: length of the CEA block payload, ie. one can access up to db[len]
2573 * 2660 *
2574 * Parses the HDMI VSDB looking for modes to add to @connector. 2661 * Parses the HDMI VSDB looking for modes to add to @connector. This function
2662 * also adds the stereo 3d modes when applicable.
2575 */ 2663 */
2576static int 2664static int
2577do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len) 2665do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
2578{ 2666{
2579 struct drm_device *dev = connector->dev;
2580 int modes = 0, offset = 0, i; 2667 int modes = 0, offset = 0, i;
2581 u8 vic_len; 2668 u8 vic_len;
2582 2669
@@ -2597,30 +2684,22 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
2597 2684
2598 /* the declared length is not long enough for the 2 first bytes 2685 /* the declared length is not long enough for the 2 first bytes
2599 * of additional video format capabilities */ 2686 * of additional video format capabilities */
2600 offset += 2; 2687 if (len < (8 + offset + 2))
2601 if (len < (8 + offset))
2602 goto out; 2688 goto out;
2603 2689
2690 /* 3D_Present */
2691 offset++;
2692 if (db[8 + offset] & (1 << 7))
2693 modes += add_hdmi_mandatory_stereo_modes(connector);
2694
2695 offset++;
2604 vic_len = db[8 + offset] >> 5; 2696 vic_len = db[8 + offset] >> 5;
2605 2697
2606 for (i = 0; i < vic_len && len >= (9 + offset + i); i++) { 2698 for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
2607 struct drm_display_mode *newmode;
2608 u8 vic; 2699 u8 vic;
2609 2700
2610 vic = db[9 + offset + i]; 2701 vic = db[9 + offset + i];
2611 2702 modes += add_hdmi_mode(connector, vic);
2612 vic--; /* VICs start at 1 */
2613 if (vic >= ARRAY_SIZE(edid_4k_modes)) {
2614 DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
2615 continue;
2616 }
2617
2618 newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
2619 if (!newmode)
2620 continue;
2621
2622 drm_mode_probed_add(connector, newmode);
2623 modes++;
2624 } 2703 }
2625 2704
2626out: 2705out:
@@ -2680,8 +2759,8 @@ static int
2680add_cea_modes(struct drm_connector *connector, struct edid *edid) 2759add_cea_modes(struct drm_connector *connector, struct edid *edid)
2681{ 2760{
2682 const u8 *cea = drm_find_cea_extension(edid); 2761 const u8 *cea = drm_find_cea_extension(edid);
2683 const u8 *db; 2762 const u8 *db, *hdmi = NULL;
2684 u8 dbl; 2763 u8 dbl, hdmi_len;
2685 int modes = 0; 2764 int modes = 0;
2686 2765
2687 if (cea && cea_revision(cea) >= 3) { 2766 if (cea && cea_revision(cea) >= 3) {
@@ -2696,11 +2775,20 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
2696 2775
2697 if (cea_db_tag(db) == VIDEO_BLOCK) 2776 if (cea_db_tag(db) == VIDEO_BLOCK)
2698 modes += do_cea_modes(connector, db + 1, dbl); 2777 modes += do_cea_modes(connector, db + 1, dbl);
2699 else if (cea_db_is_hdmi_vsdb(db)) 2778 else if (cea_db_is_hdmi_vsdb(db)) {
2700 modes += do_hdmi_vsdb_modes(connector, db, dbl); 2779 hdmi = db;
2780 hdmi_len = dbl;
2781 }
2701 } 2782 }
2702 } 2783 }
2703 2784
2785 /*
2786 * We parse the HDMI VSDB after having added the cea modes as we will
2787 * be patching their flags when the sink supports stereo 3D.
2788 */
2789 if (hdmi)
2790 modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len);
2791
2704 return modes; 2792 return modes;
2705} 2793}
2706 2794
@@ -3333,6 +3421,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
3333} 3421}
3334EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); 3422EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
3335 3423
3424static enum hdmi_3d_structure
3425s3d_structure_from_display_mode(const struct drm_display_mode *mode)
3426{
3427 u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
3428
3429 switch (layout) {
3430 case DRM_MODE_FLAG_3D_FRAME_PACKING:
3431 return HDMI_3D_STRUCTURE_FRAME_PACKING;
3432 case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
3433 return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
3434 case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
3435 return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
3436 case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
3437 return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
3438 case DRM_MODE_FLAG_3D_L_DEPTH:
3439 return HDMI_3D_STRUCTURE_L_DEPTH;
3440 case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
3441 return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
3442 case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
3443 return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
3444 case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
3445 return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
3446 default:
3447 return HDMI_3D_STRUCTURE_INVALID;
3448 }
3449}
3450
3336/** 3451/**
3337 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with 3452 * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
3338 * data from a DRM display mode 3453 * data from a DRM display mode
@@ -3350,20 +3465,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
3350 const struct drm_display_mode *mode) 3465 const struct drm_display_mode *mode)
3351{ 3466{
3352 int err; 3467 int err;
3468 u32 s3d_flags;
3353 u8 vic; 3469 u8 vic;
3354 3470
3355 if (!frame || !mode) 3471 if (!frame || !mode)
3356 return -EINVAL; 3472 return -EINVAL;
3357 3473
3358 vic = drm_match_hdmi_mode(mode); 3474 vic = drm_match_hdmi_mode(mode);
3359 if (!vic) 3475 s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
3476
3477 if (!vic && !s3d_flags)
3478 return -EINVAL;
3479
3480 if (vic && s3d_flags)
3360 return -EINVAL; 3481 return -EINVAL;
3361 3482
3362 err = hdmi_vendor_infoframe_init(frame); 3483 err = hdmi_vendor_infoframe_init(frame);
3363 if (err < 0) 3484 if (err < 0)
3364 return err; 3485 return err;
3365 3486
3366 frame->vic = vic; 3487 if (vic)
3488 frame->vic = vic;
3489 else
3490 frame->s3d_struct = s3d_structure_from_display_mode(mode);
3367 3491
3368 return 0; 3492 return 0;
3369} 3493}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 07247e2855a2..dffc836144cc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -303,6 +303,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
303} 303}
304 304
305/** 305/**
306 * Set device/driver capabilities
307 */
308int
309drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
310{
311 struct drm_set_client_cap *req = data;
312
313 switch (req->capability) {
314 case DRM_CLIENT_CAP_STEREO_3D:
315 if (req->value > 1)
316 return -EINVAL;
317 file_priv->stereo_allowed = req->value;
318 break;
319 default:
320 return -EINVAL;
321 }
322
323 return 0;
324}
325
326/**
306 * Setversion ioctl. 327 * Setversion ioctl.
307 * 328 *
308 * \param inode device inode. 329 * \param inode device inode.
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index fc2adb62b757..b0733153dfd2 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
707/** 707/**
708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters 708 * drm_mode_set_crtcinfo - set CRTC modesetting parameters
709 * @p: mode 709 * @p: mode
710 * @adjust_flags: unused? (FIXME) 710 * @adjust_flags: a combination of adjustment flags
711 * 711 *
712 * LOCKING: 712 * LOCKING:
713 * None. 713 * None.
714 * 714 *
715 * Setup the CRTC modesetting parameters for @p, adjusting if necessary. 715 * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
716 *
717 * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
718 * interlaced modes.
719 * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
720 * buffers containing two eyes (only adjust the timings when needed, eg. for
721 * "frame packing" or "side by side full").
716 */ 722 */
717void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) 723void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
718{ 724{
719 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN)) 725 if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
720 return; 726 return;
721 727
728 p->crtc_clock = p->clock;
722 p->crtc_hdisplay = p->hdisplay; 729 p->crtc_hdisplay = p->hdisplay;
723 p->crtc_hsync_start = p->hsync_start; 730 p->crtc_hsync_start = p->hsync_start;
724 p->crtc_hsync_end = p->hsync_end; 731 p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
752 p->crtc_vtotal *= p->vscan; 759 p->crtc_vtotal *= p->vscan;
753 } 760 }
754 761
762 if (adjust_flags & CRTC_STEREO_DOUBLE) {
763 unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
764
765 switch (layout) {
766 case DRM_MODE_FLAG_3D_FRAME_PACKING:
767 p->crtc_clock *= 2;
768 p->crtc_vdisplay += p->crtc_vtotal;
769 p->crtc_vsync_start += p->crtc_vtotal;
770 p->crtc_vsync_end += p->crtc_vtotal;
771 p->crtc_vtotal += p->crtc_vtotal;
772 break;
773 }
774 }
775
755 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay); 776 p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
756 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); 777 p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
757 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); 778 p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
830 } else if (mode1->clock != mode2->clock) 851 } else if (mode1->clock != mode2->clock)
831 return false; 852 return false;
832 853
833 return drm_mode_equal_no_clocks(mode1, mode2); 854 if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
855 (mode2->flags & DRM_MODE_FLAG_3D_MASK))
856 return false;
857
858 return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
834} 859}
835EXPORT_SYMBOL(drm_mode_equal); 860EXPORT_SYMBOL(drm_mode_equal);
836 861
837/** 862/**
838 * drm_mode_equal_no_clocks - test modes for equality 863 * drm_mode_equal_no_clocks_no_stereo - test modes for equality
839 * @mode1: first mode 864 * @mode1: first mode
840 * @mode2: second mode 865 * @mode2: second mode
841 * 866 *
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
843 * None. 868 * None.
844 * 869 *
845 * Check to see if @mode1 and @mode2 are equivalent, but 870 * Check to see if @mode1 and @mode2 are equivalent, but
846 * don't check the pixel clocks. 871 * don't check the pixel clocks nor the stereo layout.
847 * 872 *
848 * RETURNS: 873 * RETURNS:
849 * True if the modes are equal, false otherwise. 874 * True if the modes are equal, false otherwise.
850 */ 875 */
851bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) 876bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
877 const struct drm_display_mode *mode2)
852{ 878{
853 if (mode1->hdisplay == mode2->hdisplay && 879 if (mode1->hdisplay == mode2->hdisplay &&
854 mode1->hsync_start == mode2->hsync_start && 880 mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
860 mode1->vsync_end == mode2->vsync_end && 886 mode1->vsync_end == mode2->vsync_end &&
861 mode1->vtotal == mode2->vtotal && 887 mode1->vtotal == mode2->vtotal &&
862 mode1->vscan == mode2->vscan && 888 mode1->vscan == mode2->vscan &&
863 mode1->flags == mode2->flags) 889 (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
890 (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
864 return true; 891 return true;
865 892
866 return false; 893 return false;
867} 894}
868EXPORT_SYMBOL(drm_mode_equal_no_clocks); 895EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
869 896
870/** 897/**
871 * drm_mode_validate_size - make sure modes adhere to size constraints 898 * drm_mode_validate_size - make sure modes adhere to size constraints
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 80bed69fe5b7..61fd61969e21 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1666,126 +1666,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
1666 struct drm_info_node *node = m->private; 1666 struct drm_info_node *node = m->private;
1667 struct drm_device *dev = node->minor->dev; 1667 struct drm_device *dev = node->minor->dev;
1668 struct drm_i915_private *dev_priv = dev->dev_private; 1668 struct drm_i915_private *dev_priv = dev->dev_private;
1669 u32 psrstat, psrperf; 1669 u32 psrperf = 0;
1670 bool enabled = false;
1670 1671
1671 if (!IS_HASWELL(dev)) { 1672 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1672 seq_puts(m, "PSR not supported on this platform\n"); 1673 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1673 } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
1674 seq_puts(m, "PSR enabled\n");
1675 } else {
1676 seq_puts(m, "PSR disabled: ");
1677 switch (dev_priv->no_psr_reason) {
1678 case PSR_NO_SOURCE:
1679 seq_puts(m, "not supported on this platform");
1680 break;
1681 case PSR_NO_SINK:
1682 seq_puts(m, "not supported by panel");
1683 break;
1684 case PSR_MODULE_PARAM:
1685 seq_puts(m, "disabled by flag");
1686 break;
1687 case PSR_CRTC_NOT_ACTIVE:
1688 seq_puts(m, "crtc not active");
1689 break;
1690 case PSR_PWR_WELL_ENABLED:
1691 seq_puts(m, "power well enabled");
1692 break;
1693 case PSR_NOT_TILED:
1694 seq_puts(m, "not tiled");
1695 break;
1696 case PSR_SPRITE_ENABLED:
1697 seq_puts(m, "sprite enabled");
1698 break;
1699 case PSR_S3D_ENABLED:
1700 seq_puts(m, "stereo 3d enabled");
1701 break;
1702 case PSR_INTERLACED_ENABLED:
1703 seq_puts(m, "interlaced enabled");
1704 break;
1705 case PSR_HSW_NOT_DDIA:
1706 seq_puts(m, "HSW ties PSR to DDI A (eDP)");
1707 break;
1708 default:
1709 seq_puts(m, "unknown reason");
1710 }
1711 seq_puts(m, "\n");
1712 return 0;
1713 }
1714
1715 psrstat = I915_READ(EDP_PSR_STATUS_CTL);
1716
1717 seq_puts(m, "PSR Current State: ");
1718 switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
1719 case EDP_PSR_STATUS_STATE_IDLE:
1720 seq_puts(m, "Reset state\n");
1721 break;
1722 case EDP_PSR_STATUS_STATE_SRDONACK:
1723 seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
1724 break;
1725 case EDP_PSR_STATUS_STATE_SRDENT:
1726 seq_puts(m, "SRD entry\n");
1727 break;
1728 case EDP_PSR_STATUS_STATE_BUFOFF:
1729 seq_puts(m, "Wait for buffer turn off\n");
1730 break;
1731 case EDP_PSR_STATUS_STATE_BUFON:
1732 seq_puts(m, "Wait for buffer turn on\n");
1733 break;
1734 case EDP_PSR_STATUS_STATE_AUXACK:
1735 seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
1736 break;
1737 case EDP_PSR_STATUS_STATE_SRDOFFACK:
1738 seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
1739 break;
1740 default:
1741 seq_puts(m, "Unknown\n");
1742 break;
1743 }
1744
1745 seq_puts(m, "Link Status: ");
1746 switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
1747 case EDP_PSR_STATUS_LINK_FULL_OFF:
1748 seq_puts(m, "Link is fully off\n");
1749 break;
1750 case EDP_PSR_STATUS_LINK_FULL_ON:
1751 seq_puts(m, "Link is fully on\n");
1752 break;
1753 case EDP_PSR_STATUS_LINK_STANDBY:
1754 seq_puts(m, "Link is in standby\n");
1755 break;
1756 default:
1757 seq_puts(m, "Unknown\n");
1758 break;
1759 }
1760
1761 seq_printf(m, "PSR Entry Count: %u\n",
1762 psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
1763 EDP_PSR_STATUS_COUNT_MASK);
1764
1765 seq_printf(m, "Max Sleep Timer Counter: %u\n",
1766 psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
1767 EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
1768
1769 seq_printf(m, "Had AUX error: %s\n",
1770 yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
1771
1772 seq_printf(m, "Sending AUX: %s\n",
1773 yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
1774 1674
1775 seq_printf(m, "Sending Idle: %s\n", 1675 enabled = HAS_PSR(dev) &&
1776 yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE)); 1676 I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1677 seq_printf(m, "Enabled: %s\n", yesno(enabled));
1777 1678
1778 seq_printf(m, "Sending TP2 TP3: %s\n", 1679 if (HAS_PSR(dev))
1779 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3)); 1680 psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1780 1681 EDP_PSR_PERF_CNT_MASK;
1781 seq_printf(m, "Sending TP1: %s\n", 1682 seq_printf(m, "Performance_Counter: %u\n", psrperf);
1782 yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
1783
1784 seq_printf(m, "Idle Count: %u\n",
1785 psrstat & EDP_PSR_STATUS_IDLE_MASK);
1786
1787 psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
1788 seq_printf(m, "Performance Counter: %u\n", psrperf);
1789 1683
1790 return 0; 1684 return 0;
1791} 1685}
@@ -1896,6 +1790,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
1896 i915_ring_stop_get, i915_ring_stop_set, 1790 i915_ring_stop_get, i915_ring_stop_set,
1897 "0x%08llx\n"); 1791 "0x%08llx\n");
1898 1792
1793static int
1794i915_ring_missed_irq_get(void *data, u64 *val)
1795{
1796 struct drm_device *dev = data;
1797 struct drm_i915_private *dev_priv = dev->dev_private;
1798
1799 *val = dev_priv->gpu_error.missed_irq_rings;
1800 return 0;
1801}
1802
1803static int
1804i915_ring_missed_irq_set(void *data, u64 val)
1805{
1806 struct drm_device *dev = data;
1807 struct drm_i915_private *dev_priv = dev->dev_private;
1808 int ret;
1809
1810 /* Lock against concurrent debugfs callers */
1811 ret = mutex_lock_interruptible(&dev->struct_mutex);
1812 if (ret)
1813 return ret;
1814 dev_priv->gpu_error.missed_irq_rings = val;
1815 mutex_unlock(&dev->struct_mutex);
1816
1817 return 0;
1818}
1819
1820DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
1821 i915_ring_missed_irq_get, i915_ring_missed_irq_set,
1822 "0x%08llx\n");
1823
1824static int
1825i915_ring_test_irq_get(void *data, u64 *val)
1826{
1827 struct drm_device *dev = data;
1828 struct drm_i915_private *dev_priv = dev->dev_private;
1829
1830 *val = dev_priv->gpu_error.test_irq_rings;
1831
1832 return 0;
1833}
1834
1835static int
1836i915_ring_test_irq_set(void *data, u64 val)
1837{
1838 struct drm_device *dev = data;
1839 struct drm_i915_private *dev_priv = dev->dev_private;
1840 int ret;
1841
1842 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
1843
1844 /* Lock against concurrent debugfs callers */
1845 ret = mutex_lock_interruptible(&dev->struct_mutex);
1846 if (ret)
1847 return ret;
1848
1849 dev_priv->gpu_error.test_irq_rings = val;
1850 mutex_unlock(&dev->struct_mutex);
1851
1852 return 0;
1853}
1854
1855DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
1856 i915_ring_test_irq_get, i915_ring_test_irq_set,
1857 "0x%08llx\n");
1858
1899#define DROP_UNBOUND 0x1 1859#define DROP_UNBOUND 0x1
1900#define DROP_BOUND 0x2 1860#define DROP_BOUND 0x2
1901#define DROP_RETIRE 0x4 1861#define DROP_RETIRE 0x4
@@ -2156,7 +2116,7 @@ drm_add_fake_info_node(struct drm_minor *minor,
2156{ 2116{
2157 struct drm_info_node *node; 2117 struct drm_info_node *node;
2158 2118
2159 node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL); 2119 node = kmalloc(sizeof(*node), GFP_KERNEL);
2160 if (node == NULL) { 2120 if (node == NULL) {
2161 debugfs_remove(ent); 2121 debugfs_remove(ent);
2162 return -ENOMEM; 2122 return -ENOMEM;
@@ -2289,6 +2249,8 @@ static struct i915_debugfs_files {
2289 {"i915_min_freq", &i915_min_freq_fops}, 2249 {"i915_min_freq", &i915_min_freq_fops},
2290 {"i915_cache_sharing", &i915_cache_sharing_fops}, 2250 {"i915_cache_sharing", &i915_cache_sharing_fops},
2291 {"i915_ring_stop", &i915_ring_stop_fops}, 2251 {"i915_ring_stop", &i915_ring_stop_fops},
2252 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
2253 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
2292 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 2254 {"i915_gem_drop_caches", &i915_drop_caches_fops},
2293 {"i915_error_state", &i915_error_state_fops}, 2255 {"i915_error_state", &i915_error_state_fops},
2294 {"i915_next_seqno", &i915_next_seqno_fops}, 2256 {"i915_next_seqno", &i915_next_seqno_fops},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 637b695eafbd..b3873c945d1b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
641 641
642 if (batch->num_cliprects) { 642 if (batch->num_cliprects) {
643 cliprects = kcalloc(batch->num_cliprects, 643 cliprects = kcalloc(batch->num_cliprects,
644 sizeof(struct drm_clip_rect), 644 sizeof(*cliprects),
645 GFP_KERNEL); 645 GFP_KERNEL);
646 if (cliprects == NULL) 646 if (cliprects == NULL)
647 return -ENOMEM; 647 return -ENOMEM;
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
703 703
704 if (cmdbuf->num_cliprects) { 704 if (cmdbuf->num_cliprects) {
705 cliprects = kcalloc(cmdbuf->num_cliprects, 705 cliprects = kcalloc(cmdbuf->num_cliprects,
706 sizeof(struct drm_clip_rect), GFP_KERNEL); 706 sizeof(*cliprects), GFP_KERNEL);
707 if (cliprects == NULL) { 707 if (cliprects == NULL) {
708 ret = -ENOMEM; 708 ret = -ENOMEM;
709 goto fail_batch_free; 709 goto fail_batch_free;
@@ -1314,25 +1314,30 @@ static int i915_load_modeset_init(struct drm_device *dev)
1314 if (ret) 1314 if (ret)
1315 goto cleanup_gem_stolen; 1315 goto cleanup_gem_stolen;
1316 1316
1317 intel_init_power_well(dev);
1318
1319 /* Keep VGA alive until i915_disable_vga_mem() */
1320 intel_display_power_get(dev, POWER_DOMAIN_VGA);
1321
1317 /* Important: The output setup functions called by modeset_init need 1322 /* Important: The output setup functions called by modeset_init need
1318 * working irqs for e.g. gmbus and dp aux transfers. */ 1323 * working irqs for e.g. gmbus and dp aux transfers. */
1319 intel_modeset_init(dev); 1324 intel_modeset_init(dev);
1320 1325
1321 ret = i915_gem_init(dev); 1326 ret = i915_gem_init(dev);
1322 if (ret) 1327 if (ret)
1323 goto cleanup_irq; 1328 goto cleanup_power;
1324 1329
1325 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); 1330 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1326 1331
1327 intel_init_power_well(dev);
1328
1329 intel_modeset_gem_init(dev); 1332 intel_modeset_gem_init(dev);
1330 1333
1331 /* Always safe in the mode setting case. */ 1334 /* Always safe in the mode setting case. */
1332 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1335 /* FIXME: do pre/post-mode set stuff in core KMS code */
1333 dev->vblank_disable_allowed = true; 1336 dev->vblank_disable_allowed = true;
1334 if (INTEL_INFO(dev)->num_pipes == 0) 1337 if (INTEL_INFO(dev)->num_pipes == 0) {
1338 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1335 return 0; 1339 return 0;
1340 }
1336 1341
1337 ret = intel_fbdev_init(dev); 1342 ret = intel_fbdev_init(dev);
1338 if (ret) 1343 if (ret)
@@ -1358,6 +1363,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1358 * vgacon_save_screen() works during the handover. 1363 * vgacon_save_screen() works during the handover.
1359 */ 1364 */
1360 i915_disable_vga_mem(dev); 1365 i915_disable_vga_mem(dev);
1366 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1361 1367
1362 /* Only enable hotplug handling once the fbdev is fully set up. */ 1368 /* Only enable hotplug handling once the fbdev is fully set up. */
1363 dev_priv->enable_hotplug_processing = true; 1369 dev_priv->enable_hotplug_processing = true;
@@ -1373,7 +1379,8 @@ cleanup_gem:
1373 mutex_unlock(&dev->struct_mutex); 1379 mutex_unlock(&dev->struct_mutex);
1374 i915_gem_cleanup_aliasing_ppgtt(dev); 1380 i915_gem_cleanup_aliasing_ppgtt(dev);
1375 drm_mm_takedown(&dev_priv->gtt.base.mm); 1381 drm_mm_takedown(&dev_priv->gtt.base.mm);
1376cleanup_irq: 1382cleanup_power:
1383 intel_display_power_put(dev, POWER_DOMAIN_VGA);
1377 drm_irq_uninstall(dev); 1384 drm_irq_uninstall(dev);
1378cleanup_gem_stolen: 1385cleanup_gem_stolen:
1379 i915_gem_cleanup_stolen(dev); 1386 i915_gem_cleanup_stolen(dev);
@@ -1473,7 +1480,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1473 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) 1480 if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
1474 return -ENODEV; 1481 return -ENODEV;
1475 1482
1476 dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); 1483 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1477 if (dev_priv == NULL) 1484 if (dev_priv == NULL)
1478 return -ENOMEM; 1485 return -ENOMEM;
1479 1486
@@ -1547,7 +1554,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1547 1554
1548 ret = i915_gem_gtt_init(dev); 1555 ret = i915_gem_gtt_init(dev);
1549 if (ret) 1556 if (ret)
1550 goto put_bridge; 1557 goto out_regs;
1551 1558
1552 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1559 if (drm_core_check_feature(dev, DRIVER_MODESET))
1553 i915_kick_out_firmware_fb(dev_priv); 1560 i915_kick_out_firmware_fb(dev_priv);
@@ -1576,7 +1583,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1576 aperture_size); 1583 aperture_size);
1577 if (dev_priv->gtt.mappable == NULL) { 1584 if (dev_priv->gtt.mappable == NULL) {
1578 ret = -EIO; 1585 ret = -EIO;
1579 goto out_rmmap; 1586 goto out_gtt;
1580 } 1587 }
1581 1588
1582 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, 1589 dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1650,7 +1657,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1650 ret = i915_load_modeset_init(dev); 1657 ret = i915_load_modeset_init(dev);
1651 if (ret < 0) { 1658 if (ret < 0) {
1652 DRM_ERROR("failed to init modeset\n"); 1659 DRM_ERROR("failed to init modeset\n");
1653 goto out_gem_unload; 1660 goto out_power_well;
1654 } 1661 }
1655 } else { 1662 } else {
1656 /* Start out suspended in ums mode. */ 1663 /* Start out suspended in ums mode. */
@@ -1670,6 +1677,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1670 1677
1671 return 0; 1678 return 0;
1672 1679
1680out_power_well:
1681 if (HAS_POWER_WELL(dev))
1682 i915_remove_power_well(dev);
1683 drm_vblank_cleanup(dev);
1673out_gem_unload: 1684out_gem_unload:
1674 if (dev_priv->mm.inactive_shrinker.scan_objects) 1685 if (dev_priv->mm.inactive_shrinker.scan_objects)
1675 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 1686 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1683,12 +1694,17 @@ out_gem_unload:
1683out_mtrrfree: 1694out_mtrrfree:
1684 arch_phys_wc_del(dev_priv->gtt.mtrr); 1695 arch_phys_wc_del(dev_priv->gtt.mtrr);
1685 io_mapping_free(dev_priv->gtt.mappable); 1696 io_mapping_free(dev_priv->gtt.mappable);
1697out_gtt:
1698 list_del(&dev_priv->gtt.base.global_link);
1699 drm_mm_takedown(&dev_priv->gtt.base.mm);
1686 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1700 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1687out_rmmap: 1701out_regs:
1688 pci_iounmap(dev->pdev, dev_priv->regs); 1702 pci_iounmap(dev->pdev, dev_priv->regs);
1689put_bridge: 1703put_bridge:
1690 pci_dev_put(dev_priv->bridge_dev); 1704 pci_dev_put(dev_priv->bridge_dev);
1691free_priv: 1705free_priv:
1706 if (dev_priv->slab)
1707 kmem_cache_destroy(dev_priv->slab);
1692 kfree(dev_priv); 1708 kfree(dev_priv);
1693 return ret; 1709 return ret;
1694} 1710}
@@ -1778,8 +1794,8 @@ int i915_driver_unload(struct drm_device *dev)
1778 list_del(&dev_priv->gtt.base.global_link); 1794 list_del(&dev_priv->gtt.base.global_link);
1779 WARN_ON(!list_empty(&dev_priv->vm_list)); 1795 WARN_ON(!list_empty(&dev_priv->vm_list));
1780 drm_mm_takedown(&dev_priv->gtt.base.mm); 1796 drm_mm_takedown(&dev_priv->gtt.base.mm);
1781 if (dev_priv->regs != NULL) 1797
1782 pci_iounmap(dev->pdev, dev_priv->regs); 1798 drm_vblank_cleanup(dev);
1783 1799
1784 intel_teardown_gmbus(dev); 1800 intel_teardown_gmbus(dev);
1785 intel_teardown_mchbar(dev); 1801 intel_teardown_mchbar(dev);
@@ -1789,6 +1805,10 @@ int i915_driver_unload(struct drm_device *dev)
1789 1805
1790 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1806 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
1791 1807
1808 intel_uncore_fini(dev);
1809 if (dev_priv->regs != NULL)
1810 pci_iounmap(dev->pdev, dev_priv->regs);
1811
1792 if (dev_priv->slab) 1812 if (dev_priv->slab)
1793 kmem_cache_destroy(dev_priv->slab); 1813 kmem_cache_destroy(dev_priv->slab);
1794 1814
@@ -1800,19 +1820,11 @@ int i915_driver_unload(struct drm_device *dev)
1800 1820
1801int i915_driver_open(struct drm_device *dev, struct drm_file *file) 1821int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1802{ 1822{
1803 struct drm_i915_file_private *file_priv; 1823 int ret;
1804
1805 DRM_DEBUG_DRIVER("\n");
1806 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
1807 if (!file_priv)
1808 return -ENOMEM;
1809
1810 file->driver_priv = file_priv;
1811
1812 spin_lock_init(&file_priv->mm.lock);
1813 INIT_LIST_HEAD(&file_priv->mm.request_list);
1814 1824
1815 idr_init(&file_priv->context_idr); 1825 ret = i915_gem_open(dev, file);
1826 if (ret)
1827 return ret;
1816 1828
1817 return 0; 1829 return 0;
1818} 1830}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5f424899009b..96f230497cbe 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -416,7 +416,7 @@ void intel_detect_pch(struct drm_device *dev)
416 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 416 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
417 /* PantherPoint is CPT compatible */ 417 /* PantherPoint is CPT compatible */
418 dev_priv->pch_type = PCH_CPT; 418 dev_priv->pch_type = PCH_CPT;
419 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 419 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
420 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 420 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
421 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 421 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
422 dev_priv->pch_type = PCH_LPT; 422 dev_priv->pch_type = PCH_LPT;
@@ -581,6 +581,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
581 struct drm_i915_private *dev_priv = dev->dev_private; 581 struct drm_i915_private *dev_priv = dev->dev_private;
582 int error = 0; 582 int error = 0;
583 583
584 intel_uncore_early_sanitize(dev);
585
584 intel_uncore_sanitize(dev); 586 intel_uncore_sanitize(dev);
585 587
586 if (drm_core_check_feature(dev, DRIVER_MODESET) && 588 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
@@ -590,6 +592,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
590 mutex_unlock(&dev->struct_mutex); 592 mutex_unlock(&dev->struct_mutex);
591 } 593 }
592 594
595 intel_init_power_well(dev);
596
593 i915_restore_state(dev); 597 i915_restore_state(dev);
594 intel_opregion_setup(dev); 598 intel_opregion_setup(dev);
595 599
@@ -605,8 +609,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
605 /* We need working interrupts for modeset enabling ... */ 609 /* We need working interrupts for modeset enabling ... */
606 drm_irq_install(dev); 610 drm_irq_install(dev);
607 611
608 intel_init_power_well(dev);
609
610 intel_modeset_init_hw(dev); 612 intel_modeset_init_hw(dev);
611 613
612 drm_modeset_lock_all(dev); 614 drm_modeset_lock_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a404e7dc065a..6a5b7ab0c3fa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -324,7 +324,7 @@ struct drm_i915_error_state {
324 u32 dirty:1; 324 u32 dirty:1;
325 u32 purgeable:1; 325 u32 purgeable:1;
326 s32 ring:4; 326 s32 ring:4;
327 u32 cache_level:2; 327 u32 cache_level:3;
328 } **active_bo, **pinned_bo; 328 } **active_bo, **pinned_bo;
329 u32 *active_bo_count, *pinned_bo_count; 329 u32 *active_bo_count, *pinned_bo_count;
330 struct intel_overlay_error_state *overlay; 330 struct intel_overlay_error_state *overlay;
@@ -408,6 +408,8 @@ struct intel_uncore {
408 408
409 unsigned fifo_count; 409 unsigned fifo_count;
410 unsigned forcewake_count; 410 unsigned forcewake_count;
411
412 struct delayed_work force_wake_work;
411}; 413};
412 414
413#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 415#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -642,17 +644,9 @@ struct i915_fbc {
642 } no_fbc_reason; 644 } no_fbc_reason;
643}; 645};
644 646
645enum no_psr_reason { 647struct i915_psr {
646 PSR_NO_SOURCE, /* Not supported on platform */ 648 bool sink_support;
647 PSR_NO_SINK, /* Not supported by panel */ 649 bool source_ok;
648 PSR_MODULE_PARAM,
649 PSR_CRTC_NOT_ACTIVE,
650 PSR_PWR_WELL_ENABLED,
651 PSR_NOT_TILED,
652 PSR_SPRITE_ENABLED,
653 PSR_S3D_ENABLED,
654 PSR_INTERLACED_ENABLED,
655 PSR_HSW_NOT_DDIA,
656}; 650};
657 651
658enum intel_pch { 652enum intel_pch {
@@ -842,17 +836,19 @@ struct intel_gen6_power_mgmt {
842 struct work_struct work; 836 struct work_struct work;
843 u32 pm_iir; 837 u32 pm_iir;
844 838
845 /* On vlv we need to manually drop to Vmin with a delayed work. */
846 struct delayed_work vlv_work;
847
848 /* The below variables an all the rps hw state are protected by 839 /* The below variables an all the rps hw state are protected by
849 * dev->struct mutext. */ 840 * dev->struct mutext. */
850 u8 cur_delay; 841 u8 cur_delay;
851 u8 min_delay; 842 u8 min_delay;
852 u8 max_delay; 843 u8 max_delay;
853 u8 rpe_delay; 844 u8 rpe_delay;
845 u8 rp1_delay;
846 u8 rp0_delay;
854 u8 hw_max; 847 u8 hw_max;
855 848
849 int last_adj;
850 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
851
856 struct delayed_work delayed_resume_work; 852 struct delayed_work delayed_resume_work;
857 853
858 /* 854 /*
@@ -963,6 +959,15 @@ struct i915_gem_mm {
963 struct delayed_work retire_work; 959 struct delayed_work retire_work;
964 960
965 /** 961 /**
962 * When we detect an idle GPU, we want to turn on
963 * powersaving features. So once we see that there
964 * are no more requests outstanding and no more
965 * arrive within a small period of time, we fire
966 * off the idle_work.
967 */
968 struct delayed_work idle_work;
969
970 /**
966 * Are we in a non-interruptible section of code like 971 * Are we in a non-interruptible section of code like
967 * modesetting? 972 * modesetting?
968 */ 973 */
@@ -1011,6 +1016,9 @@ struct i915_gpu_error {
1011 struct drm_i915_error_state *first_error; 1016 struct drm_i915_error_state *first_error;
1012 struct work_struct work; 1017 struct work_struct work;
1013 1018
1019
1020 unsigned long missed_irq_rings;
1021
1014 /** 1022 /**
1015 * State variable and reset counter controlling the reset flow 1023 * State variable and reset counter controlling the reset flow
1016 * 1024 *
@@ -1049,6 +1057,9 @@ struct i915_gpu_error {
1049 1057
1050 /* For gpu hang simulation. */ 1058 /* For gpu hang simulation. */
1051 unsigned int stop_rings; 1059 unsigned int stop_rings;
1060
1061 /* For missed irq/seqno simulation. */
1062 unsigned int test_irq_rings;
1052}; 1063};
1053 1064
1054enum modeset_restore { 1065enum modeset_restore {
@@ -1057,6 +1068,14 @@ enum modeset_restore {
1057 MODESET_SUSPENDED, 1068 MODESET_SUSPENDED,
1058}; 1069};
1059 1070
1071struct ddi_vbt_port_info {
1072 uint8_t hdmi_level_shift;
1073
1074 uint8_t supports_dvi:1;
1075 uint8_t supports_hdmi:1;
1076 uint8_t supports_dp:1;
1077};
1078
1060struct intel_vbt_data { 1079struct intel_vbt_data {
1061 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1080 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1062 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1081 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1090,7 +1109,9 @@ struct intel_vbt_data {
1090 int crt_ddc_pin; 1109 int crt_ddc_pin;
1091 1110
1092 int child_dev_num; 1111 int child_dev_num;
1093 struct child_device_config *child_dev; 1112 union child_device_config *child_dev;
1113
1114 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1094}; 1115};
1095 1116
1096enum intel_ddb_partitioning { 1117enum intel_ddb_partitioning {
@@ -1327,7 +1348,7 @@ typedef struct drm_i915_private {
1327 /* Haswell power well */ 1348 /* Haswell power well */
1328 struct i915_power_well power_well; 1349 struct i915_power_well power_well;
1329 1350
1330 enum no_psr_reason no_psr_reason; 1351 struct i915_psr psr;
1331 1352
1332 struct i915_gpu_error gpu_error; 1353 struct i915_gpu_error gpu_error;
1333 1354
@@ -1579,13 +1600,17 @@ struct drm_i915_gem_request {
1579}; 1600};
1580 1601
1581struct drm_i915_file_private { 1602struct drm_i915_file_private {
1603 struct drm_i915_private *dev_priv;
1604
1582 struct { 1605 struct {
1583 spinlock_t lock; 1606 spinlock_t lock;
1584 struct list_head request_list; 1607 struct list_head request_list;
1608 struct delayed_work idle_work;
1585 } mm; 1609 } mm;
1586 struct idr context_idr; 1610 struct idr context_idr;
1587 1611
1588 struct i915_ctx_hang_stats hang_stats; 1612 struct i915_ctx_hang_stats hang_stats;
1613 atomic_t rps_wait_boost;
1589}; 1614};
1590 1615
1591#define INTEL_INFO(dev) (to_i915(dev)->info) 1616#define INTEL_INFO(dev) (to_i915(dev)->info)
@@ -1662,7 +1687,6 @@ struct drm_i915_file_private {
1662#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 1687#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1663#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1688#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1664#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1689#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1665#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1666#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 1690#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1667#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1691#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1668 1692
@@ -1675,6 +1699,7 @@ struct drm_i915_file_private {
1675#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 1699#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1676#define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1700#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
1677#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1701#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1702#define HAS_PSR(dev) (IS_HASWELL(dev))
1678 1703
1679#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1704#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1680#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1705#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -1791,6 +1816,7 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
1791extern void intel_uncore_init(struct drm_device *dev); 1816extern void intel_uncore_init(struct drm_device *dev);
1792extern void intel_uncore_clear_errors(struct drm_device *dev); 1817extern void intel_uncore_clear_errors(struct drm_device *dev);
1793extern void intel_uncore_check_errors(struct drm_device *dev); 1818extern void intel_uncore_check_errors(struct drm_device *dev);
1819extern void intel_uncore_fini(struct drm_device *dev);
1794 1820
1795void 1821void
1796i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1822i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1891,9 +1917,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1891int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1917int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1892int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1918int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1893 struct intel_ring_buffer *to); 1919 struct intel_ring_buffer *to);
1894void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1920void i915_vma_move_to_active(struct i915_vma *vma,
1895 struct intel_ring_buffer *ring); 1921 struct intel_ring_buffer *ring);
1896
1897int i915_gem_dumb_create(struct drm_file *file_priv, 1922int i915_gem_dumb_create(struct drm_file *file_priv,
1898 struct drm_device *dev, 1923 struct drm_device *dev,
1899 struct drm_mode_create_dumb *args); 1924 struct drm_mode_create_dumb *args);
@@ -1934,7 +1959,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1934 } 1959 }
1935} 1960}
1936 1961
1937void i915_gem_retire_requests(struct drm_device *dev); 1962bool i915_gem_retire_requests(struct drm_device *dev);
1938void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); 1963void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1939int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 1964int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
1940 bool interruptible); 1965 bool interruptible);
@@ -1985,6 +2010,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
1985void i915_gem_detach_phys_object(struct drm_device *dev, 2010void i915_gem_detach_phys_object(struct drm_device *dev,
1986 struct drm_i915_gem_object *obj); 2011 struct drm_i915_gem_object *obj);
1987void i915_gem_free_all_phys_object(struct drm_device *dev); 2012void i915_gem_free_all_phys_object(struct drm_device *dev);
2013int i915_gem_open(struct drm_device *dev, struct drm_file *file);
1988void i915_gem_release(struct drm_device *dev, struct drm_file *file); 2014void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1989 2015
1990uint32_t 2016uint32_t
@@ -2016,6 +2042,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
2016struct i915_vma * 2042struct i915_vma *
2017i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, 2043i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2018 struct i915_address_space *vm); 2044 struct i915_address_space *vm);
2045
2046struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
2047
2019/* Some GGTT VM helpers */ 2048/* Some GGTT VM helpers */
2020#define obj_to_ggtt(obj) \ 2049#define obj_to_ggtt(obj) \
2021 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 2050 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2052,7 +2081,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2052 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, 2081 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
2053 map_and_fenceable, nonblocking); 2082 map_and_fenceable, nonblocking);
2054} 2083}
2055#undef obj_to_ggtt
2056 2084
2057/* i915_gem_context.c */ 2085/* i915_gem_context.c */
2058void i915_gem_context_init(struct drm_device *dev); 2086void i915_gem_context_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf57276ce9aa..13c885d66383 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -971,6 +971,25 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
971 return ret; 971 return ret;
972} 972}
973 973
974static void fake_irq(unsigned long data)
975{
976 wake_up_process((struct task_struct *)data);
977}
978
979static bool missed_irq(struct drm_i915_private *dev_priv,
980 struct intel_ring_buffer *ring)
981{
982 return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
983}
984
985static bool can_wait_boost(struct drm_i915_file_private *file_priv)
986{
987 if (file_priv == NULL)
988 return true;
989
990 return !atomic_xchg(&file_priv->rps_wait_boost, true);
991}
992
974/** 993/**
975 * __wait_seqno - wait until execution of seqno has finished 994 * __wait_seqno - wait until execution of seqno has finished
976 * @ring: the ring expected to report seqno 995 * @ring: the ring expected to report seqno
@@ -991,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
991 */ 1010 */
992static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, 1011static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
993 unsigned reset_counter, 1012 unsigned reset_counter,
994 bool interruptible, struct timespec *timeout) 1013 bool interruptible,
1014 struct timespec *timeout,
1015 struct drm_i915_file_private *file_priv)
995{ 1016{
996 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1017 drm_i915_private_t *dev_priv = ring->dev->dev_private;
997 struct timespec before, now, wait_time={1,0}; 1018 struct timespec before, now;
998 unsigned long timeout_jiffies; 1019 DEFINE_WAIT(wait);
999 long end; 1020 long timeout_jiffies;
1000 bool wait_forever = true;
1001 int ret; 1021 int ret;
1002 1022
1003 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); 1023 WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1005,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1005 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) 1025 if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1006 return 0; 1026 return 0;
1007 1027
1008 trace_i915_gem_request_wait_begin(ring, seqno); 1028 timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
1009 1029
1010 if (timeout != NULL) { 1030 if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
1011 wait_time = *timeout; 1031 gen6_rps_boost(dev_priv);
1012 wait_forever = false; 1032 if (file_priv)
1033 mod_delayed_work(dev_priv->wq,
1034 &file_priv->mm.idle_work,
1035 msecs_to_jiffies(100));
1013 } 1036 }
1014 1037
1015 timeout_jiffies = timespec_to_jiffies_timeout(&wait_time); 1038 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
1016 1039 WARN_ON(!ring->irq_get(ring)))
1017 if (WARN_ON(!ring->irq_get(ring)))
1018 return -ENODEV; 1040 return -ENODEV;
1019 1041
1020 /* Record current time in case interrupted by signal, or wedged * */ 1042 /* Record current time in case interrupted by signal, or wedged */
1043 trace_i915_gem_request_wait_begin(ring, seqno);
1021 getrawmonotonic(&before); 1044 getrawmonotonic(&before);
1045 for (;;) {
1046 struct timer_list timer;
1047 unsigned long expire;
1022 1048
1023#define EXIT_COND \ 1049 prepare_to_wait(&ring->irq_queue, &wait,
1024 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ 1050 interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1025 i915_reset_in_progress(&dev_priv->gpu_error) || \
1026 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1027 do {
1028 if (interruptible)
1029 end = wait_event_interruptible_timeout(ring->irq_queue,
1030 EXIT_COND,
1031 timeout_jiffies);
1032 else
1033 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1034 timeout_jiffies);
1035 1051
1036 /* We need to check whether any gpu reset happened in between 1052 /* We need to check whether any gpu reset happened in between
1037 * the caller grabbing the seqno and now ... */ 1053 * the caller grabbing the seqno and now ... */
1038 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 1054 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
1039 end = -EAGAIN; 1055 /* ... but upgrade the -EAGAIN to an -EIO if the gpu
1056 * is truely gone. */
1057 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1058 if (ret == 0)
1059 ret = -EAGAIN;
1060 break;
1061 }
1040 1062
1041 /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely 1063 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
1042 * gone. */ 1064 ret = 0;
1043 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); 1065 break;
1044 if (ret) 1066 }
1045 end = ret; 1067
1046 } while (end == 0 && wait_forever); 1068 if (interruptible && signal_pending(current)) {
1069 ret = -ERESTARTSYS;
1070 break;
1071 }
1072
1073 if (timeout_jiffies <= 0) {
1074 ret = -ETIME;
1075 break;
1076 }
1077
1078 timer.function = NULL;
1079 if (timeout || missed_irq(dev_priv, ring)) {
1080 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
1081 expire = jiffies + (missed_irq(dev_priv, ring) ? 1: timeout_jiffies);
1082 mod_timer(&timer, expire);
1083 }
1084
1085 schedule();
1047 1086
1087 if (timeout)
1088 timeout_jiffies = expire - jiffies;
1089
1090 if (timer.function) {
1091 del_singleshot_timer_sync(&timer);
1092 destroy_timer_on_stack(&timer);
1093 }
1094 }
1048 getrawmonotonic(&now); 1095 getrawmonotonic(&now);
1096 trace_i915_gem_request_wait_end(ring, seqno);
1049 1097
1050 ring->irq_put(ring); 1098 ring->irq_put(ring);
1051 trace_i915_gem_request_wait_end(ring, seqno); 1099
1052#undef EXIT_COND 1100 finish_wait(&ring->irq_queue, &wait);
1053 1101
1054 if (timeout) { 1102 if (timeout) {
1055 struct timespec sleep_time = timespec_sub(now, before); 1103 struct timespec sleep_time = timespec_sub(now, before);
@@ -1058,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1058 set_normalized_timespec(timeout, 0, 0); 1106 set_normalized_timespec(timeout, 0, 0);
1059 } 1107 }
1060 1108
1061 switch (end) { 1109 return ret;
1062 case -EIO:
1063 case -EAGAIN: /* Wedged */
1064 case -ERESTARTSYS: /* Signal */
1065 return (int)end;
1066 case 0: /* Timeout */
1067 return -ETIME;
1068 default: /* Completed */
1069 WARN_ON(end < 0); /* We're not aware of other errors */
1070 return 0;
1071 }
1072} 1110}
1073 1111
1074/** 1112/**
@@ -1096,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1096 1134
1097 return __wait_seqno(ring, seqno, 1135 return __wait_seqno(ring, seqno,
1098 atomic_read(&dev_priv->gpu_error.reset_counter), 1136 atomic_read(&dev_priv->gpu_error.reset_counter),
1099 interruptible, NULL); 1137 interruptible, NULL, NULL);
1100} 1138}
1101 1139
1102static int 1140static int
@@ -1146,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1146 */ 1184 */
1147static __must_check int 1185static __must_check int
1148i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, 1186i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1187 struct drm_file *file,
1149 bool readonly) 1188 bool readonly)
1150{ 1189{
1151 struct drm_device *dev = obj->base.dev; 1190 struct drm_device *dev = obj->base.dev;
@@ -1172,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1172 1211
1173 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 1212 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1174 mutex_unlock(&dev->struct_mutex); 1213 mutex_unlock(&dev->struct_mutex);
1175 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 1214 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
1176 mutex_lock(&dev->struct_mutex); 1215 mutex_lock(&dev->struct_mutex);
1177 if (ret) 1216 if (ret)
1178 return ret; 1217 return ret;
@@ -1221,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1221 * We will repeat the flush holding the lock in the normal manner 1260 * We will repeat the flush holding the lock in the normal manner
1222 * to catch cases where we are gazumped. 1261 * to catch cases where we are gazumped.
1223 */ 1262 */
1224 ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); 1263 ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
1225 if (ret) 1264 if (ret)
1226 goto unref; 1265 goto unref;
1227 1266
@@ -1917,7 +1956,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1917 return 0; 1956 return 0;
1918} 1957}
1919 1958
1920void 1959static void
1921i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1960i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1922 struct intel_ring_buffer *ring) 1961 struct intel_ring_buffer *ring)
1923{ 1962{
@@ -1956,6 +1995,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1956 } 1995 }
1957} 1996}
1958 1997
1998void i915_vma_move_to_active(struct i915_vma *vma,
1999 struct intel_ring_buffer *ring)
2000{
2001 list_move_tail(&vma->mm_list, &vma->vm->active_list);
2002 return i915_gem_object_move_to_active(vma->obj, ring);
2003}
2004
1959static void 2005static void
1960i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) 2006i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1961{ 2007{
@@ -2135,6 +2181,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2135 i915_queue_hangcheck(ring->dev); 2181 i915_queue_hangcheck(ring->dev);
2136 2182
2137 if (was_empty) { 2183 if (was_empty) {
2184 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2138 queue_delayed_work(dev_priv->wq, 2185 queue_delayed_work(dev_priv->wq,
2139 &dev_priv->mm.retire_work, 2186 &dev_priv->mm.retire_work,
2140 round_jiffies_up_relative(HZ)); 2187 round_jiffies_up_relative(HZ));
@@ -2156,10 +2203,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2156 return; 2203 return;
2157 2204
2158 spin_lock(&file_priv->mm.lock); 2205 spin_lock(&file_priv->mm.lock);
2159 if (request->file_priv) { 2206 list_del(&request->client_list);
2160 list_del(&request->client_list); 2207 request->file_priv = NULL;
2161 request->file_priv = NULL;
2162 }
2163 spin_unlock(&file_priv->mm.lock); 2208 spin_unlock(&file_priv->mm.lock);
2164} 2209}
2165 2210
@@ -2423,57 +2468,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2423 WARN_ON(i915_verify_lists(ring->dev)); 2468 WARN_ON(i915_verify_lists(ring->dev));
2424} 2469}
2425 2470
2426void 2471bool
2427i915_gem_retire_requests(struct drm_device *dev) 2472i915_gem_retire_requests(struct drm_device *dev)
2428{ 2473{
2429 drm_i915_private_t *dev_priv = dev->dev_private; 2474 drm_i915_private_t *dev_priv = dev->dev_private;
2430 struct intel_ring_buffer *ring; 2475 struct intel_ring_buffer *ring;
2476 bool idle = true;
2431 int i; 2477 int i;
2432 2478
2433 for_each_ring(ring, dev_priv, i) 2479 for_each_ring(ring, dev_priv, i) {
2434 i915_gem_retire_requests_ring(ring); 2480 i915_gem_retire_requests_ring(ring);
2481 idle &= list_empty(&ring->request_list);
2482 }
2483
2484 if (idle)
2485 mod_delayed_work(dev_priv->wq,
2486 &dev_priv->mm.idle_work,
2487 msecs_to_jiffies(100));
2488
2489 return idle;
2435} 2490}
2436 2491
2437static void 2492static void
2438i915_gem_retire_work_handler(struct work_struct *work) 2493i915_gem_retire_work_handler(struct work_struct *work)
2439{ 2494{
2440 drm_i915_private_t *dev_priv; 2495 struct drm_i915_private *dev_priv =
2441 struct drm_device *dev; 2496 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2442 struct intel_ring_buffer *ring; 2497 struct drm_device *dev = dev_priv->dev;
2443 bool idle; 2498 bool idle;
2444 int i;
2445
2446 dev_priv = container_of(work, drm_i915_private_t,
2447 mm.retire_work.work);
2448 dev = dev_priv->dev;
2449 2499
2450 /* Come back later if the device is busy... */ 2500 /* Come back later if the device is busy... */
2451 if (!mutex_trylock(&dev->struct_mutex)) { 2501 idle = false;
2452 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2502 if (mutex_trylock(&dev->struct_mutex)) {
2453 round_jiffies_up_relative(HZ)); 2503 idle = i915_gem_retire_requests(dev);
2454 return; 2504 mutex_unlock(&dev->struct_mutex);
2455 }
2456
2457 i915_gem_retire_requests(dev);
2458
2459 /* Send a periodic flush down the ring so we don't hold onto GEM
2460 * objects indefinitely.
2461 */
2462 idle = true;
2463 for_each_ring(ring, dev_priv, i) {
2464 if (ring->gpu_caches_dirty)
2465 i915_add_request(ring, NULL);
2466
2467 idle &= list_empty(&ring->request_list);
2468 } 2505 }
2469 2506 if (!idle)
2470 if (!dev_priv->ums.mm_suspended && !idle)
2471 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 2507 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2472 round_jiffies_up_relative(HZ)); 2508 round_jiffies_up_relative(HZ));
2473 if (idle) 2509}
2474 intel_mark_idle(dev);
2475 2510
2476 mutex_unlock(&dev->struct_mutex); 2511static void
2512i915_gem_idle_work_handler(struct work_struct *work)
2513{
2514 struct drm_i915_private *dev_priv =
2515 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2516
2517 intel_mark_idle(dev_priv->dev);
2477} 2518}
2478 2519
2479/** 2520/**
@@ -2571,7 +2612,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2571 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 2612 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2572 mutex_unlock(&dev->struct_mutex); 2613 mutex_unlock(&dev->struct_mutex);
2573 2614
2574 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); 2615 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
2575 if (timeout) 2616 if (timeout)
2576 args->timeout_ns = timespec_to_ns(timeout); 2617 args->timeout_ns = timespec_to_ns(timeout);
2577 return ret; 2618 return ret;
@@ -2618,6 +2659,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2618 if (ret) 2659 if (ret)
2619 return ret; 2660 return ret;
2620 2661
2662 trace_i915_gem_ring_sync_to(from, to, seqno);
2621 ret = to->sync_to(to, from, seqno); 2663 ret = to->sync_to(to, from, seqno);
2622 if (!ret) 2664 if (!ret)
2623 /* We use last_read_seqno because sync_to() 2665 /* We use last_read_seqno because sync_to()
@@ -3410,8 +3452,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3410 3452
3411 /* And bump the LRU for this access */ 3453 /* And bump the LRU for this access */
3412 if (i915_gem_object_is_inactive(obj)) { 3454 if (i915_gem_object_is_inactive(obj)) {
3413 struct i915_vma *vma = i915_gem_obj_to_vma(obj, 3455 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3414 &dev_priv->gtt.base);
3415 if (vma) 3456 if (vma)
3416 list_move_tail(&vma->mm_list, 3457 list_move_tail(&vma->mm_list,
3417 &dev_priv->gtt.base.inactive_list); 3458 &dev_priv->gtt.base.inactive_list);
@@ -3782,7 +3823,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3782 if (seqno == 0) 3823 if (seqno == 0)
3783 return 0; 3824 return 0;
3784 3825
3785 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); 3826 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
3786 if (ret == 0) 3827 if (ret == 0)
3787 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3828 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3788 3829
@@ -4225,16 +4266,13 @@ i915_gem_idle(struct drm_device *dev)
4225 drm_i915_private_t *dev_priv = dev->dev_private; 4266 drm_i915_private_t *dev_priv = dev->dev_private;
4226 int ret; 4267 int ret;
4227 4268
4228 if (dev_priv->ums.mm_suspended) { 4269 if (dev_priv->ums.mm_suspended)
4229 mutex_unlock(&dev->struct_mutex);
4230 return 0; 4270 return 0;
4231 }
4232 4271
4233 ret = i915_gpu_idle(dev); 4272 ret = i915_gpu_idle(dev);
4234 if (ret) { 4273 if (ret)
4235 mutex_unlock(&dev->struct_mutex);
4236 return ret; 4274 return ret;
4237 } 4275
4238 i915_gem_retire_requests(dev); 4276 i915_gem_retire_requests(dev);
4239 4277
4240 /* Under UMS, be paranoid and evict. */ 4278 /* Under UMS, be paranoid and evict. */
@@ -4248,6 +4286,7 @@ i915_gem_idle(struct drm_device *dev)
4248 4286
4249 /* Cancel the retire work handler, which should be idle now. */ 4287 /* Cancel the retire work handler, which should be idle now. */
4250 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 4288 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4289 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
4251 4290
4252 return 0; 4291 return 0;
4253} 4292}
@@ -4581,6 +4620,8 @@ i915_gem_load(struct drm_device *dev)
4581 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4620 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4582 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4621 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4583 i915_gem_retire_work_handler); 4622 i915_gem_retire_work_handler);
4623 INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
4624 i915_gem_idle_work_handler);
4584 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 4625 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4585 4626
4586 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 4627 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4631,7 +4672,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
4631 if (dev_priv->mm.phys_objs[id - 1] || !size) 4672 if (dev_priv->mm.phys_objs[id - 1] || !size)
4632 return 0; 4673 return 0;
4633 4674
4634 phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); 4675 phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
4635 if (!phys_obj) 4676 if (!phys_obj)
4636 return -ENOMEM; 4677 return -ENOMEM;
4637 4678
@@ -4805,6 +4846,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4805{ 4846{
4806 struct drm_i915_file_private *file_priv = file->driver_priv; 4847 struct drm_i915_file_private *file_priv = file->driver_priv;
4807 4848
4849 cancel_delayed_work_sync(&file_priv->mm.idle_work);
4850
4808 /* Clean up our request list when the client is going away, so that 4851 /* Clean up our request list when the client is going away, so that
4809 * later retire_requests won't dereference our soon-to-be-gone 4852 * later retire_requests won't dereference our soon-to-be-gone
4810 * file_priv. 4853 * file_priv.
@@ -4822,6 +4865,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4822 spin_unlock(&file_priv->mm.lock); 4865 spin_unlock(&file_priv->mm.lock);
4823} 4866}
4824 4867
4868static void
4869i915_gem_file_idle_work_handler(struct work_struct *work)
4870{
4871 struct drm_i915_file_private *file_priv =
4872 container_of(work, typeof(*file_priv), mm.idle_work.work);
4873
4874 atomic_set(&file_priv->rps_wait_boost, false);
4875}
4876
4877int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4878{
4879 struct drm_i915_file_private *file_priv;
4880
4881 DRM_DEBUG_DRIVER("\n");
4882
4883 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4884 if (!file_priv)
4885 return -ENOMEM;
4886
4887 file->driver_priv = file_priv;
4888 file_priv->dev_priv = dev->dev_private;
4889
4890 spin_lock_init(&file_priv->mm.lock);
4891 INIT_LIST_HEAD(&file_priv->mm.request_list);
4892 INIT_DELAYED_WORK(&file_priv->mm.idle_work,
4893 i915_gem_file_idle_work_handler);
4894
4895 idr_init(&file_priv->context_idr);
4896
4897 return 0;
4898}
4899
4825static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) 4900static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4826{ 4901{
4827 if (!mutex_is_locked(mutex)) 4902 if (!mutex_is_locked(mutex))
@@ -4968,3 +5043,17 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
4968 mutex_unlock(&dev->struct_mutex); 5043 mutex_unlock(&dev->struct_mutex);
4969 return freed; 5044 return freed;
4970} 5045}
5046
5047struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5048{
5049 struct i915_vma *vma;
5050
5051 if (WARN_ON(list_empty(&obj->vma_list)))
5052 return NULL;
5053
5054 vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5055 if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
5056 return NULL;
5057
5058 return vma;
5059}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 9af3fe7e42b0..1a877a547290 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -453,11 +453,8 @@ static int do_switch(struct i915_hw_context *to)
453 * MI_SET_CONTEXT instead of when the next seqno has completed. 453 * MI_SET_CONTEXT instead of when the next seqno has completed.
454 */ 454 */
455 if (from != NULL) { 455 if (from != NULL) {
456 struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
457 struct i915_address_space *ggtt = &dev_priv->gtt.base;
458 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 456 from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
459 list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list); 457 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
460 i915_gem_object_move_to_active(from->obj, ring);
461 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 458 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
462 * whole damn pipeline, we don't need to explicitly mark the 459 * whole damn pipeline, we don't need to explicitly mark the
463 * object dirty. The only exception is that the context must be 460 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3a3981eb3012..b7376533633d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -175,6 +175,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
175 struct i915_vma *vma, *next; 175 struct i915_vma *vma, *next;
176 int ret; 176 int ret;
177 177
178 trace_i915_gem_evict_vm(vm);
179
178 if (do_idle) { 180 if (do_idle) {
179 ret = i915_gpu_idle(vm->dev); 181 ret = i915_gpu_idle(vm->dev);
180 if (ret) 182 if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ee933572bdc1..0ce0d47e4b0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -48,15 +48,15 @@ eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
48 struct eb_vmas *eb = NULL; 48 struct eb_vmas *eb = NULL;
49 49
50 if (args->flags & I915_EXEC_HANDLE_LUT) { 50 if (args->flags & I915_EXEC_HANDLE_LUT) {
51 int size = args->buffer_count; 51 unsigned size = args->buffer_count;
52 size *= sizeof(struct i915_vma *); 52 size *= sizeof(struct i915_vma *);
53 size += sizeof(struct eb_vmas); 53 size += sizeof(struct eb_vmas);
54 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 54 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
55 } 55 }
56 56
57 if (eb == NULL) { 57 if (eb == NULL) {
58 int size = args->buffer_count; 58 unsigned size = args->buffer_count;
59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 59 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); 60 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
61 while (count > 2*size) 61 while (count > 2*size)
62 count >>= 1; 62 count >>= 1;
@@ -667,7 +667,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
667 bool need_relocs; 667 bool need_relocs;
668 int *reloc_offset; 668 int *reloc_offset;
669 int i, total, ret; 669 int i, total, ret;
670 int count = args->buffer_count; 670 unsigned count = args->buffer_count;
671 671
672 if (WARN_ON(list_empty(&eb->vmas))) 672 if (WARN_ON(list_empty(&eb->vmas)))
673 return 0; 673 return 0;
@@ -818,8 +818,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
818 int count) 818 int count)
819{ 819{
820 int i; 820 int i;
821 int relocs_total = 0; 821 unsigned relocs_total = 0;
822 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 822 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
823 823
824 for (i = 0; i < count; i++) { 824 for (i = 0; i < count; i++) {
825 char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 825 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -872,8 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
872 obj->base.read_domains = obj->base.pending_read_domains; 872 obj->base.read_domains = obj->base.pending_read_domains;
873 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 873 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
874 874
875 list_move_tail(&vma->mm_list, &vma->vm->active_list); 875 i915_vma_move_to_active(vma, ring);
876 i915_gem_object_move_to_active(obj, ring);
877 if (obj->base.write_domain) { 876 if (obj->base.write_domain) {
878 obj->dirty = 1; 877 obj->dirty = 1;
879 obj->last_write_seqno = intel_ring_get_seqno(ring); 878 obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -1047,7 +1046,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1047 return -EINVAL; 1046 return -EINVAL;
1048 } 1047 }
1049 1048
1050 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), 1049 cliprects = kcalloc(args->num_cliprects,
1050 sizeof(*cliprects),
1051 GFP_KERNEL); 1051 GFP_KERNEL);
1052 if (cliprects == NULL) { 1052 if (cliprects == NULL) {
1053 ret = -ENOMEM; 1053 ret = -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 212f6d8c35ec..e999496532c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -336,7 +336,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
336 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; 336 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
337 ppgtt->base.cleanup = gen6_ppgtt_cleanup; 337 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
338 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 338 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
339 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 339 ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
340 GFP_KERNEL); 340 GFP_KERNEL);
341 if (!ppgtt->pt_pages) 341 if (!ppgtt->pt_pages)
342 return -ENOMEM; 342 return -ENOMEM;
@@ -347,7 +347,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
347 goto err_pt_alloc; 347 goto err_pt_alloc;
348 } 348 }
349 349
350 ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, 350 ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
351 GFP_KERNEL); 351 GFP_KERNEL);
352 if (!ppgtt->pt_dma_addr) 352 if (!ppgtt->pt_dma_addr)
353 goto err_pt_alloc; 353 goto err_pt_alloc;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 032e9ef9c896..ac9ebe98f8b0 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
393 /* Try to preallocate memory required to save swizzling on put-pages */ 393 /* Try to preallocate memory required to save swizzling on put-pages */
394 if (i915_gem_object_needs_bit17_swizzle(obj)) { 394 if (i915_gem_object_needs_bit17_swizzle(obj)) {
395 if (obj->bit_17 == NULL) { 395 if (obj->bit_17 == NULL) {
396 obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * 396 obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
397 sizeof(long), GFP_KERNEL); 397 sizeof(long), GFP_KERNEL);
398 } 398 }
399 } else { 399 } else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
504 int i; 504 int i;
505 505
506 if (obj->bit_17 == NULL) { 506 if (obj->bit_17 == NULL) {
507 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 507 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
508 sizeof(long), GFP_KERNEL); 508 sizeof(long), GFP_KERNEL);
509 if (obj->bit_17 == NULL) { 509 if (obj->bit_17 == NULL) {
510 DRM_ERROR("Failed to allocate memory for bit 17 " 510 DRM_ERROR("Failed to allocate memory for bit 17 "
511 "record\n"); 511 "record\n");
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3858825e0978..915c8ca08969 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -311,6 +311,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
311 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); 311 err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
312 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); 312 err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
313 err_printf(m, "CCID: 0x%08x\n", error->ccid); 313 err_printf(m, "CCID: 0x%08x\n", error->ccid);
314 err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
314 315
315 for (i = 0; i < dev_priv->num_fence_regs; i++) 316 for (i = 0; i < dev_priv->num_fence_regs; i++)
316 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 317 err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -793,7 +794,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
793 794
794 error->ring[i].num_requests = count; 795 error->ring[i].num_requests = count;
795 error->ring[i].requests = 796 error->ring[i].requests =
796 kmalloc(count*sizeof(struct drm_i915_error_request), 797 kcalloc(count, sizeof(*error->ring[i].requests),
797 GFP_ATOMIC); 798 GFP_ATOMIC);
798 if (error->ring[i].requests == NULL) { 799 if (error->ring[i].requests == NULL) {
799 error->ring[i].num_requests = 0; 800 error->ring[i].num_requests = 0;
@@ -835,7 +836,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
835 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 836 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
836 837
837 if (i) { 838 if (i) {
838 active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC); 839 active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
839 if (active_bo) 840 if (active_bo)
840 pinned_bo = active_bo + error->active_bo_count[ndx]; 841 pinned_bo = active_bo + error->active_bo_count[ndx];
841 } 842 }
@@ -1012,6 +1013,7 @@ const char *i915_cache_level_str(int type)
1012 case I915_CACHE_NONE: return " uncached"; 1013 case I915_CACHE_NONE: return " uncached";
1013 case I915_CACHE_LLC: return " snooped or LLC"; 1014 case I915_CACHE_LLC: return " snooped or LLC";
1014 case I915_CACHE_L3_LLC: return " L3+LLC"; 1015 case I915_CACHE_L3_LLC: return " L3+LLC";
1016 case I915_CACHE_WT: return " WT";
1015 default: return ""; 1017 default: return "";
1016 } 1018 }
1017} 1019}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b356dc15adda..418ad642c742 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -807,7 +807,7 @@ static void notify_ring(struct drm_device *dev,
807 if (ring->obj == NULL) 807 if (ring->obj == NULL)
808 return; 808 return;
809 809
810 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 810 trace_i915_gem_request_complete(ring);
811 811
812 wake_up_all(&ring->irq_queue); 812 wake_up_all(&ring->irq_queue);
813 i915_queue_hangcheck(dev); 813 i915_queue_hangcheck(dev);
@@ -818,7 +818,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
818 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 818 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
819 rps.work); 819 rps.work);
820 u32 pm_iir; 820 u32 pm_iir;
821 u8 new_delay; 821 int new_delay, adj;
822 822
823 spin_lock_irq(&dev_priv->irq_lock); 823 spin_lock_irq(&dev_priv->irq_lock);
824 pm_iir = dev_priv->rps.pm_iir; 824 pm_iir = dev_priv->rps.pm_iir;
@@ -835,40 +835,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
835 835
836 mutex_lock(&dev_priv->rps.hw_lock); 836 mutex_lock(&dev_priv->rps.hw_lock);
837 837
838 adj = dev_priv->rps.last_adj;
838 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 839 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
839 new_delay = dev_priv->rps.cur_delay + 1; 840 if (adj > 0)
841 adj *= 2;
842 else
843 adj = 1;
844 new_delay = dev_priv->rps.cur_delay + adj;
840 845
841 /* 846 /*
842 * For better performance, jump directly 847 * For better performance, jump directly
843 * to RPe if we're below it. 848 * to RPe if we're below it.
844 */ 849 */
845 if (IS_VALLEYVIEW(dev_priv->dev) && 850 if (new_delay < dev_priv->rps.rpe_delay)
846 dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
847 new_delay = dev_priv->rps.rpe_delay; 851 new_delay = dev_priv->rps.rpe_delay;
848 } else 852 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
849 new_delay = dev_priv->rps.cur_delay - 1; 853 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
854 new_delay = dev_priv->rps.rpe_delay;
855 else
856 new_delay = dev_priv->rps.min_delay;
857 adj = 0;
858 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
859 if (adj < 0)
860 adj *= 2;
861 else
862 adj = -1;
863 new_delay = dev_priv->rps.cur_delay + adj;
864 } else { /* unknown event */
865 new_delay = dev_priv->rps.cur_delay;
866 }
850 867
851 /* sysfs frequency interfaces may have snuck in while servicing the 868 /* sysfs frequency interfaces may have snuck in while servicing the
852 * interrupt 869 * interrupt
853 */ 870 */
854 if (new_delay >= dev_priv->rps.min_delay && 871 if (new_delay < (int)dev_priv->rps.min_delay)
855 new_delay <= dev_priv->rps.max_delay) { 872 new_delay = dev_priv->rps.min_delay;
856 if (IS_VALLEYVIEW(dev_priv->dev)) 873 if (new_delay > (int)dev_priv->rps.max_delay)
857 valleyview_set_rps(dev_priv->dev, new_delay); 874 new_delay = dev_priv->rps.max_delay;
858 else 875 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
859 gen6_set_rps(dev_priv->dev, new_delay); 876
860 } 877 if (IS_VALLEYVIEW(dev_priv->dev))
861 878 valleyview_set_rps(dev_priv->dev, new_delay);
862 if (IS_VALLEYVIEW(dev_priv->dev)) { 879 else
863 /* 880 gen6_set_rps(dev_priv->dev, new_delay);
864 * On VLV, when we enter RC6 we may not be at the minimum
865 * voltage level, so arm a timer to check. It should only
866 * fire when there's activity or once after we've entered
867 * RC6, and then won't be re-armed until the next RPS interrupt.
868 */
869 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
870 msecs_to_jiffies(100));
871 }
872 881
873 mutex_unlock(&dev_priv->rps.hw_lock); 882 mutex_unlock(&dev_priv->rps.hw_lock);
874} 883}
@@ -2039,10 +2048,13 @@ static void i915_hangcheck_elapsed(unsigned long data)
2039 2048
2040 if (waitqueue_active(&ring->irq_queue)) { 2049 if (waitqueue_active(&ring->irq_queue)) {
2041 /* Issue a wake-up to catch stuck h/w. */ 2050 /* Issue a wake-up to catch stuck h/w. */
2042 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2051 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2043 ring->name); 2052 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2044 wake_up_all(&ring->irq_queue); 2053 ring->name);
2045 ring->hangcheck.score += HUNG; 2054 wake_up_all(&ring->irq_queue);
2055 }
2056 /* Safeguard against driver failure */
2057 ring->hangcheck.score += BUSY;
2046 } else 2058 } else
2047 busy = false; 2059 busy = false;
2048 } else { 2060 } else {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c4f9bef6d073..95385023e0ba 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -361,6 +361,15 @@
361#define PUNIT_OPCODE_REG_READ 6 361#define PUNIT_OPCODE_REG_READ 6
362#define PUNIT_OPCODE_REG_WRITE 7 362#define PUNIT_OPCODE_REG_WRITE 7
363 363
364#define PUNIT_REG_PWRGT_CTRL 0x60
365#define PUNIT_REG_PWRGT_STATUS 0x61
366#define PUNIT_CLK_GATE 1
367#define PUNIT_PWR_RESET 2
368#define PUNIT_PWR_GATE 3
369#define RENDER_PWRGT (PUNIT_PWR_GATE << 0)
370#define MEDIA_PWRGT (PUNIT_PWR_GATE << 2)
371#define DISP2D_PWRGT (PUNIT_PWR_GATE << 6)
372
364#define PUNIT_REG_GPU_LFM 0xd3 373#define PUNIT_REG_GPU_LFM 0xd3
365#define PUNIT_REG_GPU_FREQ_REQ 0xd4 374#define PUNIT_REG_GPU_FREQ_REQ 0xd4
366#define PUNIT_REG_GPU_FREQ_STS 0xd8 375#define PUNIT_REG_GPU_FREQ_STS 0xd8
@@ -382,6 +391,8 @@
382#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 391#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
383 392
384/* vlv2 north clock has */ 393/* vlv2 north clock has */
394#define CCK_FUSE_REG 0x8
395#define CCK_FUSE_HPLL_FREQ_MASK 0x3
385#define CCK_REG_DSI_PLL_FUSE 0x44 396#define CCK_REG_DSI_PLL_FUSE 0x44
386#define CCK_REG_DSI_PLL_CONTROL 0x48 397#define CCK_REG_DSI_PLL_CONTROL 0x48
387#define DSI_PLL_VCO_EN (1 << 31) 398#define DSI_PLL_VCO_EN (1 << 31)
@@ -428,7 +439,7 @@
428#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ 439#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
429#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ 440#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
430#define DPIO_SFR_BYPASS (1<<1) 441#define DPIO_SFR_BYPASS (1<<1)
431#define DPIO_RESET (1<<0) 442#define DPIO_CMNRST (1<<0)
432 443
433#define _DPIO_TX3_SWING_CTL4_A 0x690 444#define _DPIO_TX3_SWING_CTL4_A 0x690
434#define _DPIO_TX3_SWING_CTL4_B 0x2a90 445#define _DPIO_TX3_SWING_CTL4_B 0x2a90
@@ -940,7 +951,7 @@
940 951
941#define GT_PARITY_ERROR(dev) \ 952#define GT_PARITY_ERROR(dev) \
942 (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \ 953 (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
943 IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0) 954 (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
944 955
945/* These are all the "old" interrupts */ 956/* These are all the "old" interrupts */
946#define ILK_BSD_USER_INTERRUPT (1<<5) 957#define ILK_BSD_USER_INTERRUPT (1<<5)
@@ -1429,6 +1440,12 @@
1429 1440
1430#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) 1441#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504)
1431 1442
1443#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
1444#define CDCLK_FREQ_SHIFT 4
1445#define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT)
1446#define CZCLK_FREQ_MASK 0xf
1447#define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510)
1448
1432/* 1449/*
1433 * Palette regs 1450 * Palette regs
1434 */ 1451 */
@@ -1797,6 +1814,9 @@
1797 */ 1814 */
1798#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 1815#define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE)
1799 1816
1817#define VLV_CLK_CTL2 0x101104
1818#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
1819
1800/* 1820/*
1801 * Overlay regs 1821 * Overlay regs
1802 */ 1822 */
@@ -1848,7 +1868,8 @@
1848#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1868#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1849 1869
1850/* HSW eDP PSR registers */ 1870/* HSW eDP PSR registers */
1851#define EDP_PSR_CTL 0x64800 1871#define EDP_PSR_BASE(dev) 0x64800
1872#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
1852#define EDP_PSR_ENABLE (1<<31) 1873#define EDP_PSR_ENABLE (1<<31)
1853#define EDP_PSR_LINK_DISABLE (0<<27) 1874#define EDP_PSR_LINK_DISABLE (0<<27)
1854#define EDP_PSR_LINK_STANDBY (1<<27) 1875#define EDP_PSR_LINK_STANDBY (1<<27)
@@ -1871,16 +1892,16 @@
1871#define EDP_PSR_TP1_TIME_0us (3<<4) 1892#define EDP_PSR_TP1_TIME_0us (3<<4)
1872#define EDP_PSR_IDLE_FRAME_SHIFT 0 1893#define EDP_PSR_IDLE_FRAME_SHIFT 0
1873 1894
1874#define EDP_PSR_AUX_CTL 0x64810 1895#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
1875#define EDP_PSR_AUX_DATA1 0x64814 1896#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
1876#define EDP_PSR_DPCD_COMMAND 0x80060000 1897#define EDP_PSR_DPCD_COMMAND 0x80060000
1877#define EDP_PSR_AUX_DATA2 0x64818 1898#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
1878#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) 1899#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
1879#define EDP_PSR_AUX_DATA3 0x6481c 1900#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
1880#define EDP_PSR_AUX_DATA4 0x64820 1901#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
1881#define EDP_PSR_AUX_DATA5 0x64824 1902#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
1882 1903
1883#define EDP_PSR_STATUS_CTL 0x64840 1904#define EDP_PSR_STATUS_CTL(dev) (EDP_PSR_BASE(dev) + 0x40)
1884#define EDP_PSR_STATUS_STATE_MASK (7<<29) 1905#define EDP_PSR_STATUS_STATE_MASK (7<<29)
1885#define EDP_PSR_STATUS_STATE_IDLE (0<<29) 1906#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
1886#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) 1907#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
@@ -1904,10 +1925,10 @@
1904#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) 1925#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
1905#define EDP_PSR_STATUS_IDLE_MASK 0xf 1926#define EDP_PSR_STATUS_IDLE_MASK 0xf
1906 1927
1907#define EDP_PSR_PERF_CNT 0x64844 1928#define EDP_PSR_PERF_CNT(dev) (EDP_PSR_BASE(dev) + 0x44)
1908#define EDP_PSR_PERF_CNT_MASK 0xffffff 1929#define EDP_PSR_PERF_CNT_MASK 0xffffff
1909 1930
1910#define EDP_PSR_DEBUG_CTL 0x64860 1931#define EDP_PSR_DEBUG_CTL(dev) (EDP_PSR_BASE(dev) + 0x60)
1911#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) 1932#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
1912#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) 1933#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
1913#define EDP_PSR_DEBUG_MASK_HPD (1<<25) 1934#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
@@ -4675,7 +4696,7 @@
4675#define GEN6_RP_UP_IDLE_MIN (0x1<<3) 4696#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
4676#define GEN6_RP_UP_BUSY_AVG (0x2<<3) 4697#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
4677#define GEN6_RP_UP_BUSY_CONT (0x4<<3) 4698#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
4678#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0) 4699#define GEN6_RP_DOWN_IDLE_AVG (0x2<<0)
4679#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0) 4700#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
4680#define GEN6_RP_UP_THRESHOLD 0xA02C 4701#define GEN6_RP_UP_THRESHOLD 0xA02C
4681#define GEN6_RP_DOWN_THRESHOLD 0xA030 4702#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4720,6 +4741,10 @@
4720 GEN6_PM_RP_DOWN_TIMEOUT) 4741 GEN6_PM_RP_DOWN_TIMEOUT)
4721 4742
4722#define GEN6_GT_GFX_RC6_LOCKED 0x138104 4743#define GEN6_GT_GFX_RC6_LOCKED 0x138104
4744#define VLV_COUNTER_CONTROL 0x138104
4745#define VLV_COUNT_RANGE_HIGH (1<<15)
4746#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
4747#define VLV_RENDER_RC6_COUNT_EN (1<<0)
4723#define GEN6_GT_GFX_RC6 0x138108 4748#define GEN6_GT_GFX_RC6 0x138108
4724#define GEN6_GT_GFX_RC6p 0x13810C 4749#define GEN6_GT_GFX_RC6p 0x13810C
4725#define GEN6_GT_GFX_RC6pp 0x138110 4750#define GEN6_GT_GFX_RC6pp 0x138110
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 44f4c1a6f7b1..8003886361b8 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -37,12 +37,30 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
37{ 37{
38 struct drm_i915_private *dev_priv = dev->dev_private; 38 struct drm_i915_private *dev_priv = dev->dev_private;
39 u64 raw_time; /* 32b value may overflow during fixed point math */ 39 u64 raw_time; /* 32b value may overflow during fixed point math */
40 u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
40 41
41 if (!intel_enable_rc6(dev)) 42 if (!intel_enable_rc6(dev))
42 return 0; 43 return 0;
43 44
44 raw_time = I915_READ(reg) * 128ULL; 45 /* On VLV, residency time is in CZ units rather than 1.28us */
45 return DIV_ROUND_UP_ULL(raw_time, 100000); 46 if (IS_VALLEYVIEW(dev)) {
47 u32 clkctl2;
48
49 clkctl2 = I915_READ(VLV_CLK_CTL2) >>
50 CLK_CTL2_CZCOUNT_30NS_SHIFT;
51 if (!clkctl2) {
52 WARN(!clkctl2, "bogus CZ count value");
53 return 0;
54 }
55 units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
56 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
57 units <<= 8;
58
59 div = 1000000ULL * bias;
60 }
61
62 raw_time = I915_READ(reg) * units;
63 return DIV_ROUND_UP_ULL(raw_time, div);
46} 64}
47 65
48static ssize_t 66static ssize_t
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index e2c5ee6f6194..6e580c98dede 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
233 TP_printk("dev=%d", __entry->dev) 233 TP_printk("dev=%d", __entry->dev)
234); 234);
235 235
236TRACE_EVENT(i915_gem_evict_vm,
237 TP_PROTO(struct i915_address_space *vm),
238 TP_ARGS(vm),
239
240 TP_STRUCT__entry(
241 __field(struct i915_address_space *, vm)
242 ),
243
244 TP_fast_assign(
245 __entry->vm = vm;
246 ),
247
248 TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
249);
250
251TRACE_EVENT(i915_gem_ring_sync_to,
252 TP_PROTO(struct intel_ring_buffer *from,
253 struct intel_ring_buffer *to,
254 u32 seqno),
255 TP_ARGS(from, to, seqno),
256
257 TP_STRUCT__entry(
258 __field(u32, dev)
259 __field(u32, sync_from)
260 __field(u32, sync_to)
261 __field(u32, seqno)
262 ),
263
264 TP_fast_assign(
265 __entry->dev = from->dev->primary->index;
266 __entry->sync_from = from->id;
267 __entry->sync_to = to->id;
268 __entry->seqno = seqno;
269 ),
270
271 TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
272 __entry->dev,
273 __entry->sync_from, __entry->sync_to,
274 __entry->seqno)
275);
276
236TRACE_EVENT(i915_gem_ring_dispatch, 277TRACE_EVENT(i915_gem_ring_dispatch,
237 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags), 278 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
238 TP_ARGS(ring, seqno, flags), 279 TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
304 TP_ARGS(ring, seqno) 345 TP_ARGS(ring, seqno)
305); 346);
306 347
307DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, 348TRACE_EVENT(i915_gem_request_complete,
308 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 349 TP_PROTO(struct intel_ring_buffer *ring),
309 TP_ARGS(ring, seqno) 350 TP_ARGS(ring),
351
352 TP_STRUCT__entry(
353 __field(u32, dev)
354 __field(u32, ring)
355 __field(u32, seqno)
356 ),
357
358 TP_fast_assign(
359 __entry->dev = ring->dev->primary->index;
360 __entry->ring = ring->id;
361 __entry->seqno = ring->get_seqno(ring, false);
362 ),
363
364 TP_printk("dev=%u, ring=%u, seqno=%u",
365 __entry->dev, __entry->ring, __entry->seqno)
310); 366);
311 367
312DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, 368DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6668873fb3a8..e29bcae1ef81 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
389{ 389{
390 struct sdvo_device_mapping *p_mapping; 390 struct sdvo_device_mapping *p_mapping;
391 struct bdb_general_definitions *p_defs; 391 struct bdb_general_definitions *p_defs;
392 struct child_device_config *p_child; 392 union child_device_config *p_child;
393 int i, child_device_num, count; 393 int i, child_device_num, count;
394 u16 block_size; 394 u16 block_size;
395 395
@@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
416 count = 0; 416 count = 0;
417 for (i = 0; i < child_device_num; i++) { 417 for (i = 0; i < child_device_num; i++) {
418 p_child = &(p_defs->devices[i]); 418 p_child = &(p_defs->devices[i]);
419 if (!p_child->device_type) { 419 if (!p_child->old.device_type) {
420 /* skip the device block if device type is invalid */ 420 /* skip the device block if device type is invalid */
421 continue; 421 continue;
422 } 422 }
423 if (p_child->slave_addr != SLAVE_ADDR1 && 423 if (p_child->old.slave_addr != SLAVE_ADDR1 &&
424 p_child->slave_addr != SLAVE_ADDR2) { 424 p_child->old.slave_addr != SLAVE_ADDR2) {
425 /* 425 /*
426 * If the slave address is neither 0x70 nor 0x72, 426 * If the slave address is neither 0x70 nor 0x72,
427 * it is not a SDVO device. Skip it. 427 * it is not a SDVO device. Skip it.
428 */ 428 */
429 continue; 429 continue;
430 } 430 }
431 if (p_child->dvo_port != DEVICE_PORT_DVOB && 431 if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
432 p_child->dvo_port != DEVICE_PORT_DVOC) { 432 p_child->old.dvo_port != DEVICE_PORT_DVOC) {
433 /* skip the incorrect SDVO port */ 433 /* skip the incorrect SDVO port */
434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n"); 434 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
435 continue; 435 continue;
436 } 436 }
437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on" 437 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
438 " %s port\n", 438 " %s port\n",
439 p_child->slave_addr, 439 p_child->old.slave_addr,
440 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 440 (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
441 "SDVOB" : "SDVOC"); 441 "SDVOB" : "SDVOC");
442 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); 442 p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
443 if (!p_mapping->initialized) { 443 if (!p_mapping->initialized) {
444 p_mapping->dvo_port = p_child->dvo_port; 444 p_mapping->dvo_port = p_child->old.dvo_port;
445 p_mapping->slave_addr = p_child->slave_addr; 445 p_mapping->slave_addr = p_child->old.slave_addr;
446 p_mapping->dvo_wiring = p_child->dvo_wiring; 446 p_mapping->dvo_wiring = p_child->old.dvo_wiring;
447 p_mapping->ddc_pin = p_child->ddc_pin; 447 p_mapping->ddc_pin = p_child->old.ddc_pin;
448 p_mapping->i2c_pin = p_child->i2c_pin; 448 p_mapping->i2c_pin = p_child->old.i2c_pin;
449 p_mapping->initialized = 1; 449 p_mapping->initialized = 1;
450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", 450 DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
451 p_mapping->dvo_port, 451 p_mapping->dvo_port,
@@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by " 457 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
458 "two SDVO device.\n"); 458 "two SDVO device.\n");
459 } 459 }
460 if (p_child->slave2_addr) { 460 if (p_child->old.slave2_addr) {
461 /* Maybe this is a SDVO device with multiple inputs */ 461 /* Maybe this is a SDVO device with multiple inputs */
462 /* And the mapping info is not added */ 462 /* And the mapping info is not added */
463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this" 463 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@ static void
477parse_driver_features(struct drm_i915_private *dev_priv, 477parse_driver_features(struct drm_i915_private *dev_priv,
478 struct bdb_header *bdb) 478 struct bdb_header *bdb)
479{ 479{
480 struct drm_device *dev = dev_priv->dev;
481 struct bdb_driver_features *driver; 480 struct bdb_driver_features *driver;
482 481
483 driver = find_section(bdb, BDB_DRIVER_FEATURES); 482 driver = find_section(bdb, BDB_DRIVER_FEATURES);
484 if (!driver) 483 if (!driver)
485 return; 484 return;
486 485
487 if (SUPPORTS_EDP(dev) && 486 if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
488 driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
489 dev_priv->vbt.edp_support = 1; 487 dev_priv->vbt.edp_support = 1;
490 488
491 if (driver->dual_frequency) 489 if (driver->dual_frequency)
@@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
501 499
502 edp = find_section(bdb, BDB_EDP); 500 edp = find_section(bdb, BDB_EDP);
503 if (!edp) { 501 if (!edp) {
504 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support) 502 if (dev_priv->vbt.edp_support)
505 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); 503 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
506 return; 504 return;
507 } 505 }
@@ -583,12 +581,135 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
583 dev_priv->vbt.dsi.panel_id = mipi->panel_id; 581 dev_priv->vbt.dsi.panel_id = mipi->panel_id;
584} 582}
585 583
584static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
585 struct bdb_header *bdb)
586{
587 union child_device_config *it, *child = NULL;
588 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
589 uint8_t hdmi_level_shift;
590 int i, j;
591 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
592 uint8_t aux_channel;
593 /* Each DDI port can have more than one value on the "DVO Port" field,
594 * so look for all the possible values for each port and abort if more
595 * than one is found. */
596 int dvo_ports[][2] = {
597 {DVO_PORT_HDMIA, DVO_PORT_DPA},
598 {DVO_PORT_HDMIB, DVO_PORT_DPB},
599 {DVO_PORT_HDMIC, DVO_PORT_DPC},
600 {DVO_PORT_HDMID, DVO_PORT_DPD},
601 {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
602 };
603
604 /* Find the child device to use, abort if more than one found. */
605 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
606 it = dev_priv->vbt.child_dev + i;
607
608 for (j = 0; j < 2; j++) {
609 if (dvo_ports[port][j] == -1)
610 break;
611
612 if (it->common.dvo_port == dvo_ports[port][j]) {
613 if (child) {
614 DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
615 port_name(port));
616 return;
617 }
618 child = it;
619 }
620 }
621 }
622 if (!child)
623 return;
624
625 aux_channel = child->raw[25];
626
627 is_dvi = child->common.device_type & (1 << 4);
628 is_dp = child->common.device_type & (1 << 2);
629 is_crt = child->common.device_type & (1 << 0);
630 is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0;
631 is_edp = is_dp && (child->common.device_type & (1 << 12));
632
633 info->supports_dvi = is_dvi;
634 info->supports_hdmi = is_hdmi;
635 info->supports_dp = is_dp;
636
637 DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
638 port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
639
640 if (is_edp && is_dvi)
641 DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
642 port_name(port));
643 if (is_crt && port != PORT_E)
644 DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
645 if (is_crt && (is_dvi || is_dp))
646 DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
647 port_name(port));
648 if (is_dvi && (port == PORT_A || port == PORT_E))
649 DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
650 if (!is_dvi && !is_dp && !is_crt)
651 DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
652 port_name(port));
653 if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
654 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
655
656 if (is_dvi) {
657 if (child->common.ddc_pin == 0x05 && port != PORT_B)
658 DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
659 if (child->common.ddc_pin == 0x04 && port != PORT_C)
660 DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
661 if (child->common.ddc_pin == 0x06 && port != PORT_D)
662 DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
663 }
664
665 if (is_dp) {
666 if (aux_channel == 0x40 && port != PORT_A)
667 DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
668 if (aux_channel == 0x10 && port != PORT_B)
669 DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
670 if (aux_channel == 0x20 && port != PORT_C)
671 DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
672 if (aux_channel == 0x30 && port != PORT_D)
673 DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
674 }
675
676 if (bdb->version >= 158) {
677 /* The VBT HDMI level shift values match the table we have. */
678 hdmi_level_shift = child->raw[7] & 0xF;
679 if (hdmi_level_shift < 0xC) {
680 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
681 port_name(port),
682 hdmi_level_shift);
683 info->hdmi_level_shift = hdmi_level_shift;
684 }
685 }
686}
687
688static void parse_ddi_ports(struct drm_i915_private *dev_priv,
689 struct bdb_header *bdb)
690{
691 struct drm_device *dev = dev_priv->dev;
692 enum port port;
693
694 if (!HAS_DDI(dev))
695 return;
696
697 if (!dev_priv->vbt.child_dev_num)
698 return;
699
700 if (bdb->version < 155)
701 return;
702
703 for (port = PORT_A; port < I915_MAX_PORTS; port++)
704 parse_ddi_port(dev_priv, port, bdb);
705}
706
586static void 707static void
587parse_device_mapping(struct drm_i915_private *dev_priv, 708parse_device_mapping(struct drm_i915_private *dev_priv,
588 struct bdb_header *bdb) 709 struct bdb_header *bdb)
589{ 710{
590 struct bdb_general_definitions *p_defs; 711 struct bdb_general_definitions *p_defs;
591 struct child_device_config *p_child, *child_dev_ptr; 712 union child_device_config *p_child, *child_dev_ptr;
592 int i, child_device_num, count; 713 int i, child_device_num, count;
593 u16 block_size; 714 u16 block_size;
594 715
@@ -616,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
616 /* get the number of child device that is present */ 737 /* get the number of child device that is present */
617 for (i = 0; i < child_device_num; i++) { 738 for (i = 0; i < child_device_num; i++) {
618 p_child = &(p_defs->devices[i]); 739 p_child = &(p_defs->devices[i]);
619 if (!p_child->device_type) { 740 if (!p_child->common.device_type) {
620 /* skip the device block if device type is invalid */ 741 /* skip the device block if device type is invalid */
621 continue; 742 continue;
622 } 743 }
@@ -636,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
636 count = 0; 757 count = 0;
637 for (i = 0; i < child_device_num; i++) { 758 for (i = 0; i < child_device_num; i++) {
638 p_child = &(p_defs->devices[i]); 759 p_child = &(p_defs->devices[i]);
639 if (!p_child->device_type) { 760 if (!p_child->common.device_type) {
640 /* skip the device block if device type is invalid */ 761 /* skip the device block if device type is invalid */
641 continue; 762 continue;
642 } 763 }
@@ -652,6 +773,7 @@ static void
652init_vbt_defaults(struct drm_i915_private *dev_priv) 773init_vbt_defaults(struct drm_i915_private *dev_priv)
653{ 774{
654 struct drm_device *dev = dev_priv->dev; 775 struct drm_device *dev = dev_priv->dev;
776 enum port port;
655 777
656 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC; 778 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
657 779
@@ -670,6 +792,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
670 dev_priv->vbt.lvds_use_ssc = 1; 792 dev_priv->vbt.lvds_use_ssc = 1;
671 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 793 dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
672 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq); 794 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
795
796 for (port = PORT_A; port < I915_MAX_PORTS; port++) {
797 struct ddi_vbt_port_info *info =
798 &dev_priv->vbt.ddi_port_info[port];
799
800 /* Recommended BSpec default: 800mV 0dB. */
801 info->hdmi_level_shift = 6;
802
803 info->supports_dvi = (port != PORT_A && port != PORT_E);
804 info->supports_hdmi = info->supports_dvi;
805 info->supports_dp = (port != PORT_E);
806 }
673} 807}
674 808
675static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) 809static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -761,6 +895,7 @@ intel_parse_bios(struct drm_device *dev)
761 parse_driver_features(dev_priv, bdb); 895 parse_driver_features(dev_priv, bdb);
762 parse_edp(dev_priv, bdb); 896 parse_edp(dev_priv, bdb);
763 parse_mipi(dev_priv, bdb); 897 parse_mipi(dev_priv, bdb);
898 parse_ddi_ports(dev_priv, bdb);
764 899
765 if (bios) 900 if (bios)
766 pci_unmap_rom(pdev, bios); 901 pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 6e9250eb9c2c..287cc5a21c2e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -202,7 +202,10 @@ struct bdb_general_features {
202#define DEVICE_PORT_DVOB 0x01 202#define DEVICE_PORT_DVOB 0x01
203#define DEVICE_PORT_DVOC 0x02 203#define DEVICE_PORT_DVOC 0x02
204 204
205struct child_device_config { 205/* We used to keep this struct but without any version control. We should avoid
206 * using it in the future, but it should be safe to keep using it in the old
207 * code. */
208struct old_child_dev_config {
206 u16 handle; 209 u16 handle;
207 u16 device_type; 210 u16 device_type;
208 u8 device_id[10]; /* ascii string */ 211 u8 device_id[10]; /* ascii string */
@@ -224,6 +227,32 @@ struct child_device_config {
224 u8 dvo_function; 227 u8 dvo_function;
225} __attribute__((packed)); 228} __attribute__((packed));
226 229
230/* This one contains field offsets that are known to be common for all BDB
231 * versions. Notice that the meaning of the contents contents may still change,
232 * but at least the offsets are consistent. */
233struct common_child_dev_config {
234 u16 handle;
235 u16 device_type;
236 u8 not_common1[12];
237 u8 dvo_port;
238 u8 not_common2[2];
239 u8 ddc_pin;
240 u16 edid_ptr;
241} __attribute__((packed));
242
243/* This field changes depending on the BDB version, so the most reliable way to
244 * read it is by checking the BDB version and reading the raw pointer. */
245union child_device_config {
246 /* This one is safe to be used anywhere, but the code should still check
247 * the BDB version. */
248 u8 raw[33];
249 /* This one should only be kept for legacy code. */
250 struct old_child_dev_config old;
251 /* This one should also be safe to use anywhere, even without version
252 * checks. */
253 struct common_child_dev_config common;
254};
255
227struct bdb_general_definitions { 256struct bdb_general_definitions {
228 /* DDC GPIO */ 257 /* DDC GPIO */
229 u8 crt_ddc_gmbus_pin; 258 u8 crt_ddc_gmbus_pin;
@@ -249,7 +278,7 @@ struct bdb_general_definitions {
249 * number = (block_size - sizeof(bdb_general_definitions))/ 278 * number = (block_size - sizeof(bdb_general_definitions))/
250 * sizeof(child_device_config); 279 * sizeof(child_device_config);
251 */ 280 */
252 struct child_device_config devices[0]; 281 union child_device_config devices[0];
253} __attribute__((packed)); 282} __attribute__((packed));
254 283
255struct bdb_lvds_options { 284struct bdb_lvds_options {
@@ -619,6 +648,19 @@ int intel_parse_bios(struct drm_device *dev);
619#define PORT_IDPC 8 648#define PORT_IDPC 8
620#define PORT_IDPD 9 649#define PORT_IDPD 9
621 650
651/* Possible values for the "DVO Port" field for versions >= 155: */
652#define DVO_PORT_HDMIA 0
653#define DVO_PORT_HDMIB 1
654#define DVO_PORT_HDMIC 2
655#define DVO_PORT_HDMID 3
656#define DVO_PORT_LVDS 4
657#define DVO_PORT_TV 5
658#define DVO_PORT_CRT 6
659#define DVO_PORT_DPB 7
660#define DVO_PORT_DPC 8
661#define DVO_PORT_DPD 9
662#define DVO_PORT_DPA 10
663
622/* MIPI DSI panel info */ 664/* MIPI DSI panel info */
623struct bdb_mipi { 665struct bdb_mipi {
624 u16 panel_id; 666 u16 panel_id;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6f101d5620e4..942b9acb0d8e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -83,13 +83,11 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
83 return true; 83 return true;
84} 84}
85 85
86static void intel_crt_get_config(struct intel_encoder *encoder, 86static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
87 struct intel_crtc_config *pipe_config)
88{ 87{
89 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 88 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
90 struct intel_crt *crt = intel_encoder_to_crt(encoder); 89 struct intel_crt *crt = intel_encoder_to_crt(encoder);
91 u32 tmp, flags = 0; 90 u32 tmp, flags = 0;
92 int dotclock;
93 91
94 tmp = I915_READ(crt->adpa_reg); 92 tmp = I915_READ(crt->adpa_reg);
95 93
@@ -103,14 +101,35 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
103 else 101 else
104 flags |= DRM_MODE_FLAG_NVSYNC; 102 flags |= DRM_MODE_FLAG_NVSYNC;
105 103
106 pipe_config->adjusted_mode.flags |= flags; 104 return flags;
105}
106
107static void intel_crt_get_config(struct intel_encoder *encoder,
108 struct intel_crtc_config *pipe_config)
109{
110 struct drm_device *dev = encoder->base.dev;
111 int dotclock;
112
113 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
107 114
108 dotclock = pipe_config->port_clock; 115 dotclock = pipe_config->port_clock;
109 116
110 if (HAS_PCH_SPLIT(dev_priv->dev)) 117 if (HAS_PCH_SPLIT(dev))
111 ironlake_check_encoder_dotclock(pipe_config, dotclock); 118 ironlake_check_encoder_dotclock(pipe_config, dotclock);
112 119
113 pipe_config->adjusted_mode.clock = dotclock; 120 pipe_config->adjusted_mode.crtc_clock = dotclock;
121}
122
123static void hsw_crt_get_config(struct intel_encoder *encoder,
124 struct intel_crtc_config *pipe_config)
125{
126 intel_ddi_get_config(encoder, pipe_config);
127
128 pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
129 DRM_MODE_FLAG_NHSYNC |
130 DRM_MODE_FLAG_PVSYNC |
131 DRM_MODE_FLAG_NVSYNC);
132 pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
114} 133}
115 134
116/* Note: The caller is required to filter out dpms modes not supported by the 135/* Note: The caller is required to filter out dpms modes not supported by the
@@ -658,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
658 677
659static void intel_crt_destroy(struct drm_connector *connector) 678static void intel_crt_destroy(struct drm_connector *connector)
660{ 679{
661 drm_sysfs_connector_remove(connector);
662 drm_connector_cleanup(connector); 680 drm_connector_cleanup(connector);
663 kfree(connector); 681 kfree(connector);
664} 682}
@@ -764,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
764 if (!crt) 782 if (!crt)
765 return; 783 return;
766 784
767 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 785 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
768 if (!intel_connector) { 786 if (!intel_connector) {
769 kfree(crt); 787 kfree(crt);
770 return; 788 return;
@@ -804,7 +822,10 @@ void intel_crt_init(struct drm_device *dev)
804 crt->base.mode_set = intel_crt_mode_set; 822 crt->base.mode_set = intel_crt_mode_set;
805 crt->base.disable = intel_disable_crt; 823 crt->base.disable = intel_disable_crt;
806 crt->base.enable = intel_enable_crt; 824 crt->base.enable = intel_enable_crt;
807 crt->base.get_config = intel_crt_get_config; 825 if (IS_HASWELL(dev))
826 crt->base.get_config = hsw_crt_get_config;
827 else
828 crt->base.get_config = intel_crt_get_config;
808 if (I915_HAS_HOTPLUG(dev)) 829 if (I915_HAS_HOTPLUG(dev))
809 crt->base.hpd_pin = HPD_CRT; 830 crt->base.hpd_pin = HPD_CRT;
810 if (HAS_DDI(dev)) 831 if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 9792ea8df13e..6d335f8ca343 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
42 0x80C30FFF, 0x000B0000, 42 0x80C30FFF, 0x000B0000,
43 0x00FFFFFF, 0x00040006, 43 0x00FFFFFF, 0x00040006,
44 0x80D75FFF, 0x000B0000, 44 0x80D75FFF, 0x000B0000,
45 0x00FFFFFF, 0x00040006 /* HDMI parameters */
46}; 45};
47 46
48static const u32 hsw_ddi_translations_fdi[] = { 47static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,7 +54,22 @@ static const u32 hsw_ddi_translations_fdi[] = {
55 0x00C30FFF, 0x001E0000, 54 0x00C30FFF, 0x001E0000,
56 0x00FFFFFF, 0x00060006, 55 0x00FFFFFF, 0x00060006,
57 0x00D75FFF, 0x001E0000, 56 0x00D75FFF, 0x001E0000,
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */ 57};
58
59static const u32 hsw_ddi_translations_hdmi[] = {
60 /* Idx NT mV diff T mV diff db */
61 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
62 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
63 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
64 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
65 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
66 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
67 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
68 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
69 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
70 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
71 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
59}; 73};
60 74
61enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 75enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
@@ -92,12 +106,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
92 const u32 *ddi_translations = (port == PORT_E) ? 106 const u32 *ddi_translations = (port == PORT_E) ?
93 hsw_ddi_translations_fdi : 107 hsw_ddi_translations_fdi :
94 hsw_ddi_translations_dp; 108 hsw_ddi_translations_dp;
109 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
95 110
96 for (i = 0, reg = DDI_BUF_TRANS(port); 111 for (i = 0, reg = DDI_BUF_TRANS(port);
97 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { 112 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
98 I915_WRITE(reg, ddi_translations[i]); 113 I915_WRITE(reg, ddi_translations[i]);
99 reg += 4; 114 reg += 4;
100 } 115 }
116 /* Entry 9 is for HDMI: */
117 for (i = 0; i < 2; i++) {
118 I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
119 reg += 4;
120 }
101} 121}
102 122
103/* Program DDI buffers translations for DP. By default, program ports A-D in DP 123/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -1246,8 +1266,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1246 intel_dp_check_link_status(intel_dp); 1266 intel_dp_check_link_status(intel_dp);
1247} 1267}
1248 1268
1249static void intel_ddi_get_config(struct intel_encoder *encoder, 1269void intel_ddi_get_config(struct intel_encoder *encoder,
1250 struct intel_crtc_config *pipe_config) 1270 struct intel_crtc_config *pipe_config)
1251{ 1271{
1252 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1272 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1253 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1273 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1333,12 +1353,23 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1333 struct drm_encoder *encoder; 1353 struct drm_encoder *encoder;
1334 struct intel_connector *hdmi_connector = NULL; 1354 struct intel_connector *hdmi_connector = NULL;
1335 struct intel_connector *dp_connector = NULL; 1355 struct intel_connector *dp_connector = NULL;
1356 bool init_hdmi, init_dp;
1357
1358 init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
1359 dev_priv->vbt.ddi_port_info[port].supports_hdmi);
1360 init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
1361 if (!init_dp && !init_hdmi) {
1362 DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
1363 port_name(port));
1364 init_hdmi = true;
1365 init_dp = true;
1366 }
1336 1367
1337 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1368 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1338 if (!intel_dig_port) 1369 if (!intel_dig_port)
1339 return; 1370 return;
1340 1371
1341 dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1372 dp_connector = kzalloc(sizeof(*dp_connector), GFP_KERNEL);
1342 if (!dp_connector) { 1373 if (!dp_connector) {
1343 kfree(intel_dig_port); 1374 kfree(intel_dig_port);
1344 return; 1375 return;
@@ -1370,19 +1401,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1370 intel_encoder->cloneable = false; 1401 intel_encoder->cloneable = false;
1371 intel_encoder->hot_plug = intel_ddi_hot_plug; 1402 intel_encoder->hot_plug = intel_ddi_hot_plug;
1372 1403
1373 if (!intel_dp_init_connector(intel_dig_port, dp_connector)) { 1404 if (init_dp && !intel_dp_init_connector(intel_dig_port, dp_connector)) {
1374 drm_encoder_cleanup(encoder); 1405 drm_encoder_cleanup(encoder);
1375 kfree(intel_dig_port); 1406 kfree(intel_dig_port);
1376 kfree(dp_connector); 1407 kfree(dp_connector);
1377 return; 1408 return;
1378 } 1409 }
1379 1410
1380 if (intel_encoder->type != INTEL_OUTPUT_EDP) { 1411 /* In theory we don't need the encoder->type check, but leave it just in
1381 hdmi_connector = kzalloc(sizeof(struct intel_connector), 1412 * case we have some really bad VBTs... */
1413 if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
1414 hdmi_connector = kzalloc(sizeof(*hdmi_connector),
1382 GFP_KERNEL); 1415 GFP_KERNEL);
1383 if (!hdmi_connector) { 1416 if (!hdmi_connector)
1384 return; 1417 return;
1385 }
1386 1418
1387 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); 1419 intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
1388 intel_hdmi_init_connector(intel_dig_port, hdmi_connector); 1420 intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bef786461a3f..617b963dfb67 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,7 +41,6 @@
41#include <drm/drm_crtc_helper.h> 41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h> 42#include <linux/dma_remapping.h>
43 43
44bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
45static void intel_increase_pllclock(struct drm_crtc *crtc); 44static void intel_increase_pllclock(struct drm_crtc *crtc);
46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 45static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 46
@@ -336,6 +335,21 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
336 .p2_slow = 2, .p2_fast = 20 }, 335 .p2_slow = 2, .p2_fast = 20 },
337}; 336};
338 337
338/**
339 * Returns whether any output on the specified pipe is of the specified type
340 */
341static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
342{
343 struct drm_device *dev = crtc->dev;
344 struct intel_encoder *encoder;
345
346 for_each_encoder_on_crtc(dev, crtc, encoder)
347 if (encoder->type == type)
348 return true;
349
350 return false;
351}
352
339static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 353static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
340 int refclk) 354 int refclk)
341{ 355{
@@ -438,21 +452,6 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
438 clock->dot = clock->vco / clock->p; 452 clock->dot = clock->vco / clock->p;
439} 453}
440 454
441/**
442 * Returns whether any output on the specified pipe is of the specified type
443 */
444bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
445{
446 struct drm_device *dev = crtc->dev;
447 struct intel_encoder *encoder;
448
449 for_each_encoder_on_crtc(dev, crtc, encoder)
450 if (encoder->type == type)
451 return true;
452
453 return false;
454}
455
456#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 455#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
457/** 456/**
458 * Returns whether the given set of divisors are valid for a given refclk with 457 * Returns whether the given set of divisors are valid for a given refclk with
@@ -696,29 +695,30 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
696 p = p1 * p2; 695 p = p1 * p2;
697 /* based on hardware requirement, prefer bigger m1,m2 values */ 696 /* based on hardware requirement, prefer bigger m1,m2 values */
698 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { 697 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
699 m2 = (((2*(fastclk * p * n / m1 )) + 698 m2 = DIV_ROUND_CLOSEST(fastclk * p * n, refclk * m1);
700 refclk) / (2*refclk));
701 m = m1 * m2; 699 m = m1 * m2;
702 vco = updrate * m; 700 vco = updrate * m;
703 if (vco >= limit->vco.min && vco < limit->vco.max) { 701
704 ppm = 1000000 * ((vco / p) - fastclk) / fastclk; 702 if (vco < limit->vco.min || vco >= limit->vco.max)
705 absppm = (ppm > 0) ? ppm : (-ppm); 703 continue;
706 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { 704
707 bestppm = 0; 705 ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
708 flag = 1; 706 absppm = (ppm > 0) ? ppm : (-ppm);
709 } 707 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
710 if (absppm < bestppm - 10) { 708 bestppm = 0;
711 bestppm = absppm; 709 flag = 1;
712 flag = 1; 710 }
713 } 711 if (absppm < bestppm - 10) {
714 if (flag) { 712 bestppm = absppm;
715 bestn = n; 713 flag = 1;
716 bestm1 = m1; 714 }
717 bestm2 = m2; 715 if (flag) {
718 bestp1 = p1; 716 bestn = n;
719 bestp2 = p2; 717 bestm1 = m1;
720 flag = 0; 718 bestm2 = m2;
721 } 719 bestp1 = p1;
720 bestp2 = p2;
721 flag = 0;
722 } 722 }
723 } 723 }
724 } 724 }
@@ -740,14 +740,14 @@ bool intel_crtc_active(struct drm_crtc *crtc)
740 /* Be paranoid as we can arrive here with only partial 740 /* Be paranoid as we can arrive here with only partial
741 * state retrieved from the hardware during setup. 741 * state retrieved from the hardware during setup.
742 * 742 *
743 * We can ditch the adjusted_mode.clock check as soon 743 * We can ditch the adjusted_mode.crtc_clock check as soon
744 * as Haswell has gained clock readout/fastboot support. 744 * as Haswell has gained clock readout/fastboot support.
745 * 745 *
746 * We can ditch the crtc->fb check as soon as we can 746 * We can ditch the crtc->fb check as soon as we can
747 * properly reconstruct framebuffers. 747 * properly reconstruct framebuffers.
748 */ 748 */
749 return intel_crtc->active && crtc->fb && 749 return intel_crtc->active && crtc->fb &&
750 intel_crtc->config.adjusted_mode.clock; 750 intel_crtc->config.adjusted_mode.crtc_clock;
751} 751}
752 752
753enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 753enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1360,6 +1360,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1360 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1360 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1361} 1361}
1362 1362
1363static void intel_init_dpio(struct drm_device *dev)
1364{
1365 struct drm_i915_private *dev_priv = dev->dev_private;
1366
1367 if (!IS_VALLEYVIEW(dev))
1368 return;
1369
1370 /*
1371 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
1372 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
1373 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
1374 * b. The other bits such as sfr settings / modesel may all be set
1375 * to 0.
1376 *
1377 * This should only be done on init and resume from S3 with both
1378 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
1379 */
1380 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
1381}
1382
1363static void vlv_enable_pll(struct intel_crtc *crtc) 1383static void vlv_enable_pll(struct intel_crtc *crtc)
1364{ 1384{
1365 struct drm_device *dev = crtc->base.dev; 1385 struct drm_device *dev = crtc->base.dev;
@@ -1466,6 +1486,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1466 POSTING_READ(DPLL(pipe)); 1486 POSTING_READ(DPLL(pipe));
1467} 1487}
1468 1488
1489static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1490{
1491 u32 val = 0;
1492
1493 /* Make sure the pipe isn't still relying on us */
1494 assert_pipe_disabled(dev_priv, pipe);
1495
1496 /* Leave integrated clock source enabled */
1497 if (pipe == PIPE_B)
1498 val = DPLL_INTEGRATED_CRI_CLK_VLV;
1499 I915_WRITE(DPLL(pipe), val);
1500 POSTING_READ(DPLL(pipe));
1501}
1502
1469void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) 1503void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
1470{ 1504{
1471 u32 port_mask; 1505 u32 port_mask;
@@ -2286,11 +2320,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2286 return ret; 2320 return ret;
2287 } 2321 }
2288 2322
2289 /* Update pipe size and adjust fitter if needed */ 2323 /*
2324 * Update pipe size and adjust fitter if needed: the reason for this is
2325 * that in compute_mode_changes we check the native mode (not the pfit
2326 * mode) to see if we can flip rather than do a full mode set. In the
2327 * fastboot case, we'll flip, but if we don't update the pipesrc and
2328 * pfit state, we'll end up with a big fb scanned out into the wrong
2329 * sized surface.
2330 *
2331 * To fix this properly, we need to hoist the checks up into
2332 * compute_mode_changes (or above), check the actual pfit state and
2333 * whether the platform allows pfit disable with pipe active, and only
2334 * then update the pipesrc and pfit state, even on the flip path.
2335 */
2290 if (i915_fastboot) { 2336 if (i915_fastboot) {
2337 const struct drm_display_mode *adjusted_mode =
2338 &intel_crtc->config.adjusted_mode;
2339
2291 I915_WRITE(PIPESRC(intel_crtc->pipe), 2340 I915_WRITE(PIPESRC(intel_crtc->pipe),
2292 ((crtc->mode.hdisplay - 1) << 16) | 2341 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2293 (crtc->mode.vdisplay - 1)); 2342 (adjusted_mode->crtc_vdisplay - 1));
2294 if (!intel_crtc->config.pch_pfit.enabled && 2343 if (!intel_crtc->config.pch_pfit.enabled &&
2295 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || 2344 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2296 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 2345 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
@@ -2914,7 +2963,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2914{ 2963{
2915 struct drm_device *dev = crtc->dev; 2964 struct drm_device *dev = crtc->dev;
2916 struct drm_i915_private *dev_priv = dev->dev_private; 2965 struct drm_i915_private *dev_priv = dev->dev_private;
2917 int clock = to_intel_crtc(crtc)->config.adjusted_mode.clock; 2966 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2918 u32 divsel, phaseinc, auxdiv, phasedir = 0; 2967 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2919 u32 temp; 2968 u32 temp;
2920 2969
@@ -2938,8 +2987,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2938 phaseinc = 0x20; 2987 phaseinc = 0x20;
2939 } else { 2988 } else {
2940 /* The iCLK virtual clock root frequency is in MHz, 2989 /* The iCLK virtual clock root frequency is in MHz,
2941 * but the adjusted_mode->clock is in KHz. To get the divisors, 2990 * but the adjusted_mode->crtc_clock is in KHz. To get the
2942 * it is necessary to divide one by another, so we 2991 * divisors, it is necessary to divide one by another, so we
2943 * convert the virtual clock precision to KHz here for higher 2992 * convert the virtual clock precision to KHz here for higher
2944 * precision. 2993 * precision.
2945 */ 2994 */
@@ -3283,6 +3332,84 @@ static void intel_disable_planes(struct drm_crtc *crtc)
3283 intel_plane_disable(&intel_plane->base); 3332 intel_plane_disable(&intel_plane->base);
3284} 3333}
3285 3334
3335static void hsw_enable_ips(struct intel_crtc *crtc)
3336{
3337 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3338
3339 if (!crtc->config.ips_enabled)
3340 return;
3341
3342 /* We can only enable IPS after we enable a plane and wait for a vblank.
3343 * We guarantee that the plane is enabled by calling intel_enable_ips
3344 * only after intel_enable_plane. And intel_enable_plane already waits
3345 * for a vblank, so all we need to do here is to enable the IPS bit. */
3346 assert_plane_enabled(dev_priv, crtc->plane);
3347 I915_WRITE(IPS_CTL, IPS_ENABLE);
3348}
3349
3350static void hsw_disable_ips(struct intel_crtc *crtc)
3351{
3352 struct drm_device *dev = crtc->base.dev;
3353 struct drm_i915_private *dev_priv = dev->dev_private;
3354
3355 if (!crtc->config.ips_enabled)
3356 return;
3357
3358 assert_plane_enabled(dev_priv, crtc->plane);
3359 I915_WRITE(IPS_CTL, 0);
3360 POSTING_READ(IPS_CTL);
3361
3362 /* We need to wait for a vblank before we can disable the plane. */
3363 intel_wait_for_vblank(dev, crtc->pipe);
3364}
3365
3366/** Loads the palette/gamma unit for the CRTC with the prepared values */
3367static void intel_crtc_load_lut(struct drm_crtc *crtc)
3368{
3369 struct drm_device *dev = crtc->dev;
3370 struct drm_i915_private *dev_priv = dev->dev_private;
3371 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3372 enum pipe pipe = intel_crtc->pipe;
3373 int palreg = PALETTE(pipe);
3374 int i;
3375 bool reenable_ips = false;
3376
3377 /* The clocks have to be on to load the palette. */
3378 if (!crtc->enabled || !intel_crtc->active)
3379 return;
3380
3381 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3382 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3383 assert_dsi_pll_enabled(dev_priv);
3384 else
3385 assert_pll_enabled(dev_priv, pipe);
3386 }
3387
3388 /* use legacy palette for Ironlake */
3389 if (HAS_PCH_SPLIT(dev))
3390 palreg = LGC_PALETTE(pipe);
3391
3392 /* Workaround : Do not read or write the pipe palette/gamma data while
3393 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3394 */
3395 if (intel_crtc->config.ips_enabled &&
3396 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3397 GAMMA_MODE_MODE_SPLIT)) {
3398 hsw_disable_ips(intel_crtc);
3399 reenable_ips = true;
3400 }
3401
3402 for (i = 0; i < 256; i++) {
3403 I915_WRITE(palreg + 4 * i,
3404 (intel_crtc->lut_r[i] << 16) |
3405 (intel_crtc->lut_g[i] << 8) |
3406 intel_crtc->lut_b[i]);
3407 }
3408
3409 if (reenable_ips)
3410 hsw_enable_ips(intel_crtc);
3411}
3412
3286static void ironlake_crtc_enable(struct drm_crtc *crtc) 3413static void ironlake_crtc_enable(struct drm_crtc *crtc)
3287{ 3414{
3288 struct drm_device *dev = crtc->dev; 3415 struct drm_device *dev = crtc->dev;
@@ -3361,35 +3488,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3361 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 3488 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
3362} 3489}
3363 3490
3364static void hsw_enable_ips(struct intel_crtc *crtc) 3491static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3365{ 3492{
3366 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3493 struct drm_device *dev = crtc->dev;
3494 struct drm_i915_private *dev_priv = dev->dev_private;
3495 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3496 int pipe = intel_crtc->pipe;
3497 int plane = intel_crtc->plane;
3367 3498
3368 if (!crtc->config.ips_enabled) 3499 intel_enable_plane(dev_priv, plane, pipe);
3369 return; 3500 intel_enable_planes(crtc);
3501 intel_crtc_update_cursor(crtc, true);
3370 3502
3371 /* We can only enable IPS after we enable a plane and wait for a vblank. 3503 hsw_enable_ips(intel_crtc);
3372 * We guarantee that the plane is enabled by calling intel_enable_ips 3504
3373 * only after intel_enable_plane. And intel_enable_plane already waits 3505 mutex_lock(&dev->struct_mutex);
3374 * for a vblank, so all we need to do here is to enable the IPS bit. */ 3506 intel_update_fbc(dev);
3375 assert_plane_enabled(dev_priv, crtc->plane); 3507 mutex_unlock(&dev->struct_mutex);
3376 I915_WRITE(IPS_CTL, IPS_ENABLE);
3377} 3508}
3378 3509
3379static void hsw_disable_ips(struct intel_crtc *crtc) 3510static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3380{ 3511{
3381 struct drm_device *dev = crtc->base.dev; 3512 struct drm_device *dev = crtc->dev;
3382 struct drm_i915_private *dev_priv = dev->dev_private; 3513 struct drm_i915_private *dev_priv = dev->dev_private;
3514 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3515 int pipe = intel_crtc->pipe;
3516 int plane = intel_crtc->plane;
3383 3517
3384 if (!crtc->config.ips_enabled) 3518 intel_crtc_wait_for_pending_flips(crtc);
3385 return; 3519 drm_vblank_off(dev, pipe);
3386 3520
3387 assert_plane_enabled(dev_priv, crtc->plane); 3521 /* FBC must be disabled before disabling the plane on HSW. */
3388 I915_WRITE(IPS_CTL, 0); 3522 if (dev_priv->fbc.plane == plane)
3389 POSTING_READ(IPS_CTL); 3523 intel_disable_fbc(dev);
3390 3524
3391 /* We need to wait for a vblank before we can disable the plane. */ 3525 hsw_disable_ips(intel_crtc);
3392 intel_wait_for_vblank(dev, crtc->pipe); 3526
3527 intel_crtc_update_cursor(crtc, false);
3528 intel_disable_planes(crtc);
3529 intel_disable_plane(dev_priv, plane, pipe);
3530}
3531
3532/*
3533 * This implements the workaround described in the "notes" section of the mode
3534 * set sequence documentation. When going from no pipes or single pipe to
3535 * multiple pipes, and planes are enabled after the pipe, we need to wait at
3536 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3537 */
3538static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3539{
3540 struct drm_device *dev = crtc->base.dev;
3541 struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3542
3543 /* We want to get the other_active_crtc only if there's only 1 other
3544 * active crtc. */
3545 list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3546 if (!crtc_it->active || crtc_it == crtc)
3547 continue;
3548
3549 if (other_active_crtc)
3550 return;
3551
3552 other_active_crtc = crtc_it;
3553 }
3554 if (!other_active_crtc)
3555 return;
3556
3557 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3558 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3393} 3559}
3394 3560
3395static void haswell_crtc_enable(struct drm_crtc *crtc) 3561static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3399,7 +3565,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3399 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3565 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3400 struct intel_encoder *encoder; 3566 struct intel_encoder *encoder;
3401 int pipe = intel_crtc->pipe; 3567 int pipe = intel_crtc->pipe;
3402 int plane = intel_crtc->plane;
3403 3568
3404 WARN_ON(!crtc->enabled); 3569 WARN_ON(!crtc->enabled);
3405 3570
@@ -3435,24 +3600,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
3435 intel_update_watermarks(crtc); 3600 intel_update_watermarks(crtc);
3436 intel_enable_pipe(dev_priv, pipe, 3601 intel_enable_pipe(dev_priv, pipe,
3437 intel_crtc->config.has_pch_encoder, false); 3602 intel_crtc->config.has_pch_encoder, false);
3438 intel_enable_plane(dev_priv, plane, pipe);
3439 intel_enable_planes(crtc);
3440 intel_crtc_update_cursor(crtc, true);
3441
3442 hsw_enable_ips(intel_crtc);
3443 3603
3444 if (intel_crtc->config.has_pch_encoder) 3604 if (intel_crtc->config.has_pch_encoder)
3445 lpt_pch_enable(crtc); 3605 lpt_pch_enable(crtc);
3446 3606
3447 mutex_lock(&dev->struct_mutex);
3448 intel_update_fbc(dev);
3449 mutex_unlock(&dev->struct_mutex);
3450
3451 for_each_encoder_on_crtc(dev, crtc, encoder) { 3607 for_each_encoder_on_crtc(dev, crtc, encoder) {
3452 encoder->enable(encoder); 3608 encoder->enable(encoder);
3453 intel_opregion_notify_encoder(encoder, true); 3609 intel_opregion_notify_encoder(encoder, true);
3454 } 3610 }
3455 3611
3612 /* If we change the relative order between pipe/planes enabling, we need
3613 * to change the workaround. */
3614 haswell_mode_set_planes_workaround(intel_crtc);
3615 haswell_crtc_enable_planes(crtc);
3616
3456 /* 3617 /*
3457 * There seems to be a race in PCH platform hw (at least on some 3618 * There seems to be a race in PCH platform hw (at least on some
3458 * outputs) where an enabled pipe still completes any pageflip right 3619 * outputs) where an enabled pipe still completes any pageflip right
@@ -3559,30 +3720,18 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
3559 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3720 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3560 struct intel_encoder *encoder; 3721 struct intel_encoder *encoder;
3561 int pipe = intel_crtc->pipe; 3722 int pipe = intel_crtc->pipe;
3562 int plane = intel_crtc->plane;
3563 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 3723 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
3564 3724
3565 if (!intel_crtc->active) 3725 if (!intel_crtc->active)
3566 return; 3726 return;
3567 3727
3728 haswell_crtc_disable_planes(crtc);
3729
3568 for_each_encoder_on_crtc(dev, crtc, encoder) { 3730 for_each_encoder_on_crtc(dev, crtc, encoder) {
3569 intel_opregion_notify_encoder(encoder, false); 3731 intel_opregion_notify_encoder(encoder, false);
3570 encoder->disable(encoder); 3732 encoder->disable(encoder);
3571 } 3733 }
3572 3734
3573 intel_crtc_wait_for_pending_flips(crtc);
3574 drm_vblank_off(dev, pipe);
3575
3576 /* FBC must be disabled before disabling the plane on HSW. */
3577 if (dev_priv->fbc.plane == plane)
3578 intel_disable_fbc(dev);
3579
3580 hsw_disable_ips(intel_crtc);
3581
3582 intel_crtc_update_cursor(crtc, false);
3583 intel_disable_planes(crtc);
3584 intel_disable_plane(dev_priv, plane, pipe);
3585
3586 if (intel_crtc->config.has_pch_encoder) 3735 if (intel_crtc->config.has_pch_encoder)
3587 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 3736 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
3588 intel_disable_pipe(dev_priv, pipe); 3737 intel_disable_pipe(dev_priv, pipe);
@@ -3828,7 +3977,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3828 if (encoder->post_disable) 3977 if (encoder->post_disable)
3829 encoder->post_disable(encoder); 3978 encoder->post_disable(encoder);
3830 3979
3831 if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 3980 if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3981 vlv_disable_pll(dev_priv, pipe);
3982 else if (!IS_VALLEYVIEW(dev))
3832 i9xx_disable_pll(dev_priv, pipe); 3983 i9xx_disable_pll(dev_priv, pipe);
3833 3984
3834 intel_crtc->active = false; 3985 intel_crtc->active = false;
@@ -4102,7 +4253,7 @@ retry:
4102 */ 4253 */
4103 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 4254 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4104 4255
4105 fdi_dotclock = adjusted_mode->clock; 4256 fdi_dotclock = adjusted_mode->crtc_clock;
4106 4257
4107 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 4258 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
4108 pipe_config->pipe_bpp); 4259 pipe_config->pipe_bpp);
@@ -4158,12 +4309,12 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
4158 * otherwise pipe A only. 4309 * otherwise pipe A only.
4159 */ 4310 */
4160 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) && 4311 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
4161 adjusted_mode->clock > clock_limit * 9 / 10) { 4312 adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
4162 clock_limit *= 2; 4313 clock_limit *= 2;
4163 pipe_config->double_wide = true; 4314 pipe_config->double_wide = true;
4164 } 4315 }
4165 4316
4166 if (adjusted_mode->clock > clock_limit * 9 / 10) 4317 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
4167 return -EINVAL; 4318 return -EINVAL;
4168 } 4319 }
4169 4320
@@ -4568,9 +4719,9 @@ static void vlv_update_pll(struct intel_crtc *crtc)
4568 /* Enable DPIO clock input */ 4719 /* Enable DPIO clock input */
4569 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4720 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4570 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4721 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
4571 if (pipe) 4722 /* We should never disable this, set it here for state tracking */
4723 if (pipe == PIPE_B)
4572 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 4724 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
4573
4574 dpll |= DPLL_VCO_ENABLE; 4725 dpll |= DPLL_VCO_ENABLE;
4575 crtc->config.dpll_hw_state.dpll = dpll; 4726 crtc->config.dpll_hw_state.dpll = dpll;
4576 4727
@@ -4823,7 +4974,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4823 4974
4824 crtc->mode.flags = pipe_config->adjusted_mode.flags; 4975 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4825 4976
4826 crtc->mode.clock = pipe_config->adjusted_mode.clock; 4977 crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
4827 crtc->mode.flags |= pipe_config->adjusted_mode.flags; 4978 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4828} 4979}
4829 4980
@@ -4918,9 +5069,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4918 num_connectors++; 5069 num_connectors++;
4919 } 5070 }
4920 5071
4921 refclk = i9xx_get_refclk(crtc, num_connectors); 5072 if (is_dsi)
5073 goto skip_dpll;
5074
5075 if (!intel_crtc->config.clock_set) {
5076 refclk = i9xx_get_refclk(crtc, num_connectors);
4922 5077
4923 if (!is_dsi && !intel_crtc->config.clock_set) {
4924 /* 5078 /*
4925 * Returns a set of divisors for the desired target clock with 5079 * Returns a set of divisors for the desired target clock with
4926 * the given refclk, or FALSE. The returned values represent 5080 * the given refclk, or FALSE. The returned values represent
@@ -4931,28 +5085,25 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4931 ok = dev_priv->display.find_dpll(limit, crtc, 5085 ok = dev_priv->display.find_dpll(limit, crtc,
4932 intel_crtc->config.port_clock, 5086 intel_crtc->config.port_clock,
4933 refclk, NULL, &clock); 5087 refclk, NULL, &clock);
4934 if (!ok && !intel_crtc->config.clock_set) { 5088 if (!ok) {
4935 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 5089 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4936 return -EINVAL; 5090 return -EINVAL;
4937 } 5091 }
4938 }
4939 5092
4940 if (is_lvds && dev_priv->lvds_downclock_avail) { 5093 if (is_lvds && dev_priv->lvds_downclock_avail) {
4941 /* 5094 /*
4942 * Ensure we match the reduced clock's P to the target clock. 5095 * Ensure we match the reduced clock's P to the target
4943 * If the clocks don't match, we can't switch the display clock 5096 * clock. If the clocks don't match, we can't switch
4944 * by using the FP0/FP1. In such case we will disable the LVDS 5097 * the display clock by using the FP0/FP1. In such case
4945 * downclock feature. 5098 * we will disable the LVDS downclock feature.
4946 */ 5099 */
4947 limit = intel_limit(crtc, refclk); 5100 has_reduced_clock =
4948 has_reduced_clock = 5101 dev_priv->display.find_dpll(limit, crtc,
4949 dev_priv->display.find_dpll(limit, crtc, 5102 dev_priv->lvds_downclock,
4950 dev_priv->lvds_downclock, 5103 refclk, &clock,
4951 refclk, &clock, 5104 &reduced_clock);
4952 &reduced_clock); 5105 }
4953 } 5106 /* Compat-code for transition, will disappear. */
4954 /* Compat-code for transition, will disappear. */
4955 if (!intel_crtc->config.clock_set) {
4956 intel_crtc->config.dpll.n = clock.n; 5107 intel_crtc->config.dpll.n = clock.n;
4957 intel_crtc->config.dpll.m1 = clock.m1; 5108 intel_crtc->config.dpll.m1 = clock.m1;
4958 intel_crtc->config.dpll.m2 = clock.m2; 5109 intel_crtc->config.dpll.m2 = clock.m2;
@@ -4965,14 +5116,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4965 has_reduced_clock ? &reduced_clock : NULL, 5116 has_reduced_clock ? &reduced_clock : NULL,
4966 num_connectors); 5117 num_connectors);
4967 } else if (IS_VALLEYVIEW(dev)) { 5118 } else if (IS_VALLEYVIEW(dev)) {
4968 if (!is_dsi) 5119 vlv_update_pll(intel_crtc);
4969 vlv_update_pll(intel_crtc);
4970 } else { 5120 } else {
4971 i9xx_update_pll(intel_crtc, 5121 i9xx_update_pll(intel_crtc,
4972 has_reduced_clock ? &reduced_clock : NULL, 5122 has_reduced_clock ? &reduced_clock : NULL,
4973 num_connectors); 5123 num_connectors);
4974 } 5124 }
4975 5125
5126skip_dpll:
4976 /* Set up the display plane register */ 5127 /* Set up the display plane register */
4977 dspcntr = DISPPLANE_GAMMA_ENABLE; 5128 dspcntr = DISPPLANE_GAMMA_ENABLE;
4978 5129
@@ -5030,6 +5181,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5030 I915_READ(LVDS) & LVDS_BORDER_ENABLE; 5181 I915_READ(LVDS) & LVDS_BORDER_ENABLE;
5031} 5182}
5032 5183
5184static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5185 struct intel_crtc_config *pipe_config)
5186{
5187 struct drm_device *dev = crtc->base.dev;
5188 struct drm_i915_private *dev_priv = dev->dev_private;
5189 int pipe = pipe_config->cpu_transcoder;
5190 intel_clock_t clock;
5191 u32 mdiv;
5192 int refclk = 100000;
5193
5194 mutex_lock(&dev_priv->dpio_lock);
5195 mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
5196 mutex_unlock(&dev_priv->dpio_lock);
5197
5198 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5199 clock.m2 = mdiv & DPIO_M2DIV_MASK;
5200 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5201 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5202 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5203
5204 clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
5205 clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);
5206
5207 pipe_config->port_clock = clock.dot / 10;
5208}
5209
5033static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 5210static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5034 struct intel_crtc_config *pipe_config) 5211 struct intel_crtc_config *pipe_config)
5035{ 5212{
@@ -5095,7 +5272,10 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5095 DPLL_PORTB_READY_MASK); 5272 DPLL_PORTB_READY_MASK);
5096 } 5273 }
5097 5274
5098 i9xx_crtc_clock_get(crtc, pipe_config); 5275 if (IS_VALLEYVIEW(dev))
5276 vlv_crtc_clock_get(crtc, pipe_config);
5277 else
5278 i9xx_crtc_clock_get(crtc, pipe_config);
5099 5279
5100 return true; 5280 return true;
5101} 5281}
@@ -6111,8 +6291,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6111 * register. Callers should take care of disabling all the display engine 6291 * register. Callers should take care of disabling all the display engine
6112 * functions, doing the mode unset, fixing interrupts, etc. 6292 * functions, doing the mode unset, fixing interrupts, etc.
6113 */ 6293 */
6114void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 6294static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6115 bool switch_to_fclk, bool allow_power_down) 6295 bool switch_to_fclk, bool allow_power_down)
6116{ 6296{
6117 uint32_t val; 6297 uint32_t val;
6118 6298
@@ -6162,7 +6342,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6162 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 6342 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6163 * source. 6343 * source.
6164 */ 6344 */
6165void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 6345static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
6166{ 6346{
6167 uint32_t val; 6347 uint32_t val;
6168 6348
@@ -6787,53 +6967,6 @@ void intel_write_eld(struct drm_encoder *encoder,
6787 dev_priv->display.write_eld(connector, crtc); 6967 dev_priv->display.write_eld(connector, crtc);
6788} 6968}
6789 6969
6790/** Loads the palette/gamma unit for the CRTC with the prepared values */
6791void intel_crtc_load_lut(struct drm_crtc *crtc)
6792{
6793 struct drm_device *dev = crtc->dev;
6794 struct drm_i915_private *dev_priv = dev->dev_private;
6795 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6796 enum pipe pipe = intel_crtc->pipe;
6797 int palreg = PALETTE(pipe);
6798 int i;
6799 bool reenable_ips = false;
6800
6801 /* The clocks have to be on to load the palette. */
6802 if (!crtc->enabled || !intel_crtc->active)
6803 return;
6804
6805 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
6806 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
6807 assert_dsi_pll_enabled(dev_priv);
6808 else
6809 assert_pll_enabled(dev_priv, pipe);
6810 }
6811
6812 /* use legacy palette for Ironlake */
6813 if (HAS_PCH_SPLIT(dev))
6814 palreg = LGC_PALETTE(pipe);
6815
6816 /* Workaround : Do not read or write the pipe palette/gamma data while
6817 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
6818 */
6819 if (intel_crtc->config.ips_enabled &&
6820 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
6821 GAMMA_MODE_MODE_SPLIT)) {
6822 hsw_disable_ips(intel_crtc);
6823 reenable_ips = true;
6824 }
6825
6826 for (i = 0; i < 256; i++) {
6827 I915_WRITE(palreg + 4 * i,
6828 (intel_crtc->lut_r[i] << 16) |
6829 (intel_crtc->lut_g[i] << 8) |
6830 intel_crtc->lut_b[i]);
6831 }
6832
6833 if (reenable_ips)
6834 hsw_enable_ips(intel_crtc);
6835}
6836
6837static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 6970static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6838{ 6971{
6839 struct drm_device *dev = crtc->dev; 6972 struct drm_device *dev = crtc->dev;
@@ -7103,27 +7236,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7103 return 0; 7236 return 0;
7104} 7237}
7105 7238
7106/** Sets the color ramps on behalf of RandR */
7107void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
7108 u16 blue, int regno)
7109{
7110 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7111
7112 intel_crtc->lut_r[regno] = red >> 8;
7113 intel_crtc->lut_g[regno] = green >> 8;
7114 intel_crtc->lut_b[regno] = blue >> 8;
7115}
7116
7117void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
7118 u16 *blue, int regno)
7119{
7120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7121
7122 *red = intel_crtc->lut_r[regno] << 8;
7123 *green = intel_crtc->lut_g[regno] << 8;
7124 *blue = intel_crtc->lut_b[regno] << 8;
7125}
7126
7127static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 7239static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7128 u16 *blue, uint32_t start, uint32_t size) 7240 u16 *blue, uint32_t start, uint32_t size)
7129{ 7241{
@@ -7466,7 +7578,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7466 7578
7467 /* 7579 /*
7468 * This value includes pixel_multiplier. We will use 7580 * This value includes pixel_multiplier. We will use
7469 * port_clock to compute adjusted_mode.clock in the 7581 * port_clock to compute adjusted_mode.crtc_clock in the
7470 * encoder's get_config() function. 7582 * encoder's get_config() function.
7471 */ 7583 */
7472 pipe_config->port_clock = clock.dot; 7584 pipe_config->port_clock = clock.dot;
@@ -7501,11 +7613,11 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
7501 7613
7502 /* 7614 /*
7503 * This value does not include pixel_multiplier. 7615 * This value does not include pixel_multiplier.
7504 * We will check that port_clock and adjusted_mode.clock 7616 * We will check that port_clock and adjusted_mode.crtc_clock
7505 * agree once we know their relationship in the encoder's 7617 * agree once we know their relationship in the encoder's
7506 * get_config() function. 7618 * get_config() function.
7507 */ 7619 */
7508 pipe_config->adjusted_mode.clock = 7620 pipe_config->adjusted_mode.crtc_clock =
7509 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000, 7621 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
7510 &pipe_config->fdi_m_n); 7622 &pipe_config->fdi_m_n);
7511} 7623}
@@ -7543,7 +7655,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7543 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe)); 7655 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
7544 i9xx_crtc_clock_get(intel_crtc, &pipe_config); 7656 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7545 7657
7546 mode->clock = pipe_config.adjusted_mode.clock; 7658 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
7547 mode->hdisplay = (htot & 0xffff) + 1; 7659 mode->hdisplay = (htot & 0xffff) + 1;
7548 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 7660 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7549 mode->hsync_start = (hsync & 0xffff) + 1; 7661 mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7649,6 +7761,9 @@ void intel_mark_idle(struct drm_device *dev)
7649 7761
7650 intel_decrease_pllclock(crtc); 7762 intel_decrease_pllclock(crtc);
7651 } 7763 }
7764
7765 if (dev_priv->info->gen >= 6)
7766 gen6_rps_idle(dev->dev_private);
7652} 7767}
7653 7768
7654void intel_mark_fb_busy(struct drm_i915_gem_object *obj, 7769void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -8097,7 +8212,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8097 fb->pitches[0] != crtc->fb->pitches[0])) 8212 fb->pitches[0] != crtc->fb->pitches[0]))
8098 return -EINVAL; 8213 return -EINVAL;
8099 8214
8100 work = kzalloc(sizeof *work, GFP_KERNEL); 8215 work = kzalloc(sizeof(*work), GFP_KERNEL);
8101 if (work == NULL) 8216 if (work == NULL)
8102 return -ENOMEM; 8217 return -ENOMEM;
8103 8218
@@ -8336,7 +8451,7 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
8336{ 8451{
8337 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 8452 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
8338 "type: 0x%x flags: 0x%x\n", 8453 "type: 0x%x flags: 0x%x\n",
8339 mode->clock, 8454 mode->crtc_clock,
8340 mode->crtc_hdisplay, mode->crtc_hsync_start, 8455 mode->crtc_hdisplay, mode->crtc_hsync_start,
8341 mode->crtc_hsync_end, mode->crtc_htotal, 8456 mode->crtc_hsync_end, mode->crtc_htotal,
8342 mode->crtc_vdisplay, mode->crtc_vsync_start, 8457 mode->crtc_vdisplay, mode->crtc_vsync_start,
@@ -8426,9 +8541,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8426 drm_mode_copy(&pipe_config->adjusted_mode, mode); 8541 drm_mode_copy(&pipe_config->adjusted_mode, mode);
8427 drm_mode_copy(&pipe_config->requested_mode, mode); 8542 drm_mode_copy(&pipe_config->requested_mode, mode);
8428 8543
8429 pipe_config->pipe_src_w = mode->hdisplay;
8430 pipe_config->pipe_src_h = mode->vdisplay;
8431
8432 pipe_config->cpu_transcoder = 8544 pipe_config->cpu_transcoder =
8433 (enum transcoder) to_intel_crtc(crtc)->pipe; 8545 (enum transcoder) to_intel_crtc(crtc)->pipe;
8434 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8546 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8455,13 +8567,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
8455 if (plane_bpp < 0) 8567 if (plane_bpp < 0)
8456 goto fail; 8568 goto fail;
8457 8569
8570 /*
8571 * Determine the real pipe dimensions. Note that stereo modes can
8572 * increase the actual pipe size due to the frame doubling and
8573 * insertion of additional space for blanks between the frame. This
8574 * is stored in the crtc timings. We use the requested mode to do this
8575 * computation to clearly distinguish it from the adjusted mode, which
8576 * can be changed by the connectors in the below retry loop.
8577 */
8578 drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
8579 pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
8580 pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
8581
8458encoder_retry: 8582encoder_retry:
8459 /* Ensure the port clock defaults are reset when retrying. */ 8583 /* Ensure the port clock defaults are reset when retrying. */
8460 pipe_config->port_clock = 0; 8584 pipe_config->port_clock = 0;
8461 pipe_config->pixel_multiplier = 1; 8585 pipe_config->pixel_multiplier = 1;
8462 8586
8463 /* Fill in default crtc timings, allow encoders to overwrite them. */ 8587 /* Fill in default crtc timings, allow encoders to overwrite them. */
8464 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0); 8588 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
8465 8589
8466 /* Pass our mode to the connectors and the CRTC to give them a chance to 8590 /* Pass our mode to the connectors and the CRTC to give them a chance to
8467 * adjust it according to limitations or connector properties, and also 8591 * adjust it according to limitations or connector properties, and also
@@ -8482,8 +8606,8 @@ encoder_retry:
8482 /* Set default port clock if not overwritten by the encoder. Needs to be 8606 /* Set default port clock if not overwritten by the encoder. Needs to be
8483 * done afterwards in case the encoder adjusts the mode. */ 8607 * done afterwards in case the encoder adjusts the mode. */
8484 if (!pipe_config->port_clock) 8608 if (!pipe_config->port_clock)
8485 pipe_config->port_clock = pipe_config->adjusted_mode.clock * 8609 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
8486 pipe_config->pixel_multiplier; 8610 * pipe_config->pixel_multiplier;
8487 8611
8488 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 8612 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8489 if (ret < 0) { 8613 if (ret < 0) {
@@ -8813,7 +8937,7 @@ intel_pipe_config_compare(struct drm_device *dev,
8813 PIPE_CONF_CHECK_I(pipe_bpp); 8937 PIPE_CONF_CHECK_I(pipe_bpp);
8814 8938
8815 if (!IS_HASWELL(dev)) { 8939 if (!IS_HASWELL(dev)) {
8816 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.clock); 8940 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
8817 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 8941 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8818 } 8942 }
8819 8943
@@ -9035,9 +9159,9 @@ void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config
9035 * FDI already provided one idea for the dotclock. 9159 * FDI already provided one idea for the dotclock.
9036 * Yell if the encoder disagrees. 9160 * Yell if the encoder disagrees.
9037 */ 9161 */
9038 WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.clock, dotclock), 9162 WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
9039 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 9163 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
9040 pipe_config->adjusted_mode.clock, dotclock); 9164 pipe_config->adjusted_mode.crtc_clock, dotclock);
9041} 9165}
9042 9166
9043static int __intel_set_mode(struct drm_crtc *crtc, 9167static int __intel_set_mode(struct drm_crtc *crtc,
@@ -9052,7 +9176,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9052 unsigned disable_pipes, prepare_pipes, modeset_pipes; 9176 unsigned disable_pipes, prepare_pipes, modeset_pipes;
9053 int ret = 0; 9177 int ret = 0;
9054 9178
9055 saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL); 9179 saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
9056 if (!saved_mode) 9180 if (!saved_mode)
9057 return -ENOMEM; 9181 return -ENOMEM;
9058 saved_hwmode = saved_mode + 1; 9182 saved_hwmode = saved_mode + 1;
@@ -9591,7 +9715,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
9591 struct intel_crtc *intel_crtc; 9715 struct intel_crtc *intel_crtc;
9592 int i; 9716 int i;
9593 9717
9594 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 9718 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
9595 if (intel_crtc == NULL) 9719 if (intel_crtc == NULL)
9596 return; 9720 return;
9597 9721
@@ -10270,10 +10394,19 @@ void i915_disable_vga_mem(struct drm_device *dev)
10270 10394
10271void intel_modeset_init_hw(struct drm_device *dev) 10395void intel_modeset_init_hw(struct drm_device *dev)
10272{ 10396{
10397 struct drm_i915_private *dev_priv = dev->dev_private;
10398
10273 intel_prepare_ddi(dev); 10399 intel_prepare_ddi(dev);
10274 10400
10275 intel_init_clock_gating(dev); 10401 intel_init_clock_gating(dev);
10276 10402
10403 /* Enable the CRI clock source so we can get at the display */
10404 if (IS_VALLEYVIEW(dev))
10405 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
10406 DPLL_INTEGRATED_CRI_CLK_VLV);
10407
10408 intel_init_dpio(dev);
10409
10277 mutex_lock(&dev->struct_mutex); 10410 mutex_lock(&dev->struct_mutex);
10278 intel_enable_gt_powersave(dev); 10411 intel_enable_gt_powersave(dev);
10279 mutex_unlock(&dev->struct_mutex); 10412 mutex_unlock(&dev->struct_mutex);
@@ -10636,7 +10769,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10636{ 10769{
10637 struct drm_i915_private *dev_priv = dev->dev_private; 10770 struct drm_i915_private *dev_priv = dev->dev_private;
10638 enum pipe pipe; 10771 enum pipe pipe;
10639 struct drm_plane *plane;
10640 struct intel_crtc *crtc; 10772 struct intel_crtc *crtc;
10641 struct intel_encoder *encoder; 10773 struct intel_encoder *encoder;
10642 int i; 10774 int i;
@@ -10684,6 +10816,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10684 } 10816 }
10685 10817
10686 if (force_restore) { 10818 if (force_restore) {
10819 i915_redisable_vga(dev);
10820
10687 /* 10821 /*
10688 * We need to use raw interfaces for restoring state to avoid 10822 * We need to use raw interfaces for restoring state to avoid
10689 * checking (bogus) intermediate states. 10823 * checking (bogus) intermediate states.
@@ -10695,10 +10829,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
10695 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 10829 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
10696 crtc->fb); 10830 crtc->fb);
10697 } 10831 }
10698 list_for_each_entry(plane, &dev->mode_config.plane_list, head)
10699 intel_plane_restore(plane);
10700
10701 i915_redisable_vga(dev);
10702 } else { 10832 } else {
10703 intel_modeset_update_staged_output_state(dev); 10833 intel_modeset_update_staged_output_state(dev);
10704 } 10834 }
@@ -10721,6 +10851,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
10721{ 10851{
10722 struct drm_i915_private *dev_priv = dev->dev_private; 10852 struct drm_i915_private *dev_priv = dev->dev_private;
10723 struct drm_crtc *crtc; 10853 struct drm_crtc *crtc;
10854 struct drm_connector *connector;
10724 10855
10725 /* 10856 /*
10726 * Interrupts and polling as the first thing to avoid creating havoc. 10857 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10763,6 +10894,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
10763 /* destroy backlight, if any, before the connectors */ 10894 /* destroy backlight, if any, before the connectors */
10764 intel_panel_destroy_backlight(dev); 10895 intel_panel_destroy_backlight(dev);
10765 10896
10897 /* destroy the sysfs files before encoders/connectors */
10898 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
10899 drm_sysfs_connector_remove(connector);
10900
10766 drm_mode_config_cleanup(dev); 10901 drm_mode_config_cleanup(dev);
10767 10902
10768 intel_cleanup_overlay(dev); 10903 intel_cleanup_overlay(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5f0c783027c9..4f52ec75b39f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,7 @@ static const struct dp_link_dpll pch_dpll[] = {
59 59
60static const struct dp_link_dpll vlv_dpll[] = { 60static const struct dp_link_dpll vlv_dpll[] = {
61 { DP_LINK_BW_1_62, 61 { DP_LINK_BW_1_62,
62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 5, .m2 = 3 } }, 62 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
63 { DP_LINK_BW_2_7, 63 { DP_LINK_BW_2_7,
64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } 64 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
65}; 65};
@@ -654,7 +654,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
654 break; 654 break;
655 } 655 }
656 656
657 for (retry = 0; retry < 5; retry++) { 657 /*
658 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
659 * required to retry at least seven times upon receiving AUX_DEFER
660 * before giving up the AUX transaction.
661 */
662 for (retry = 0; retry < 7; retry++) {
658 ret = intel_dp_aux_ch(intel_dp, 663 ret = intel_dp_aux_ch(intel_dp,
659 msg, msg_bytes, 664 msg, msg_bytes,
660 reply, reply_bytes); 665 reply, reply_bytes);
@@ -811,7 +816,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
811 816
812 DRM_DEBUG_KMS("DP link computation with max lane count %i " 817 DRM_DEBUG_KMS("DP link computation with max lane count %i "
813 "max bw %02x pixel clock %iKHz\n", 818 "max bw %02x pixel clock %iKHz\n",
814 max_lane_count, bws[max_clock], adjusted_mode->clock); 819 max_lane_count, bws[max_clock],
820 adjusted_mode->crtc_clock);
815 821
816 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 822 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
817 * bpc in between. */ 823 * bpc in between. */
@@ -823,7 +829,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
823 } 829 }
824 830
825 for (; bpp >= 6*3; bpp -= 2*3) { 831 for (; bpp >= 6*3; bpp -= 2*3) {
826 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 832 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
833 bpp);
827 834
828 for (clock = 0; clock <= max_clock; clock++) { 835 for (clock = 0; clock <= max_clock; clock++) {
829 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 836 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -868,7 +875,8 @@ found:
868 mode_rate, link_avail); 875 mode_rate, link_avail);
869 876
870 intel_link_compute_m_n(bpp, lane_count, 877 intel_link_compute_m_n(bpp, lane_count,
871 adjusted_mode->clock, pipe_config->port_clock, 878 adjusted_mode->crtc_clock,
879 pipe_config->port_clock,
872 &pipe_config->dp_m_n); 880 &pipe_config->dp_m_n);
873 881
874 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 882 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -1466,23 +1474,24 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1466 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A) 1474 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
1467 ironlake_check_encoder_dotclock(pipe_config, dotclock); 1475 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1468 1476
1469 pipe_config->adjusted_mode.clock = dotclock; 1477 pipe_config->adjusted_mode.crtc_clock = dotclock;
1470} 1478}
1471 1479
1472static bool is_edp_psr(struct intel_dp *intel_dp) 1480static bool is_edp_psr(struct drm_device *dev)
1473{ 1481{
1474 return is_edp(intel_dp) && 1482 struct drm_i915_private *dev_priv = dev->dev_private;
1475 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; 1483
1484 return dev_priv->psr.sink_support;
1476} 1485}
1477 1486
1478static bool intel_edp_is_psr_enabled(struct drm_device *dev) 1487static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1479{ 1488{
1480 struct drm_i915_private *dev_priv = dev->dev_private; 1489 struct drm_i915_private *dev_priv = dev->dev_private;
1481 1490
1482 if (!IS_HASWELL(dev)) 1491 if (!HAS_PSR(dev))
1483 return false; 1492 return false;
1484 1493
1485 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 1494 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1486} 1495}
1487 1496
1488static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, 1497static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1532,7 +1541,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1532 intel_edp_psr_write_vsc(intel_dp, &psr_vsc); 1541 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1533 1542
1534 /* Avoid continuous PSR exit by masking memup and hpd */ 1543 /* Avoid continuous PSR exit by masking memup and hpd */
1535 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | 1544 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
1536 EDP_PSR_DEBUG_MASK_HPD); 1545 EDP_PSR_DEBUG_MASK_HPD);
1537 1546
1538 intel_dp->psr_setup_done = true; 1547 intel_dp->psr_setup_done = true;
@@ -1557,9 +1566,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1557 DP_PSR_MAIN_LINK_ACTIVE); 1566 DP_PSR_MAIN_LINK_ACTIVE);
1558 1567
1559 /* Setup AUX registers */ 1568 /* Setup AUX registers */
1560 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); 1569 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
1561 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); 1570 I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
1562 I915_WRITE(EDP_PSR_AUX_CTL, 1571 I915_WRITE(EDP_PSR_AUX_CTL(dev),
1563 DP_AUX_CH_CTL_TIME_OUT_400us | 1572 DP_AUX_CH_CTL_TIME_OUT_400us |
1564 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 1573 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1565 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 1574 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1582,7 +1591,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1582 } else 1591 } else
1583 val |= EDP_PSR_LINK_DISABLE; 1592 val |= EDP_PSR_LINK_DISABLE;
1584 1593
1585 I915_WRITE(EDP_PSR_CTL, val | 1594 I915_WRITE(EDP_PSR_CTL(dev), val |
1586 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | 1595 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
1587 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1596 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1588 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1597 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -1599,42 +1608,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1599 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; 1608 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1600 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1609 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1601 1610
1602 if (!IS_HASWELL(dev)) { 1611 dev_priv->psr.source_ok = false;
1612
1613 if (!HAS_PSR(dev)) {
1603 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1614 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1604 dev_priv->no_psr_reason = PSR_NO_SOURCE;
1605 return false; 1615 return false;
1606 } 1616 }
1607 1617
1608 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1618 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1609 (dig_port->port != PORT_A)) { 1619 (dig_port->port != PORT_A)) {
1610 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1620 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1611 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
1612 return false;
1613 }
1614
1615 if (!is_edp_psr(intel_dp)) {
1616 DRM_DEBUG_KMS("PSR not supported by this panel\n");
1617 dev_priv->no_psr_reason = PSR_NO_SINK;
1618 return false; 1621 return false;
1619 } 1622 }
1620 1623
1621 if (!i915_enable_psr) { 1624 if (!i915_enable_psr) {
1622 DRM_DEBUG_KMS("PSR disable by flag\n"); 1625 DRM_DEBUG_KMS("PSR disable by flag\n");
1623 dev_priv->no_psr_reason = PSR_MODULE_PARAM;
1624 return false; 1626 return false;
1625 } 1627 }
1626 1628
1627 crtc = dig_port->base.base.crtc; 1629 crtc = dig_port->base.base.crtc;
1628 if (crtc == NULL) { 1630 if (crtc == NULL) {
1629 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1631 DRM_DEBUG_KMS("crtc not active for PSR\n");
1630 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1631 return false; 1632 return false;
1632 } 1633 }
1633 1634
1634 intel_crtc = to_intel_crtc(crtc); 1635 intel_crtc = to_intel_crtc(crtc);
1635 if (!intel_crtc_active(crtc)) { 1636 if (!intel_crtc_active(crtc)) {
1636 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1637 DRM_DEBUG_KMS("crtc not active for PSR\n");
1637 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1638 return false; 1638 return false;
1639 } 1639 }
1640 1640
@@ -1642,29 +1642,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1642 if (obj->tiling_mode != I915_TILING_X || 1642 if (obj->tiling_mode != I915_TILING_X ||
1643 obj->fence_reg == I915_FENCE_REG_NONE) { 1643 obj->fence_reg == I915_FENCE_REG_NONE) {
1644 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1644 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1645 dev_priv->no_psr_reason = PSR_NOT_TILED;
1646 return false; 1645 return false;
1647 } 1646 }
1648 1647
1649 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { 1648 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1650 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); 1649 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1651 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
1652 return false; 1650 return false;
1653 } 1651 }
1654 1652
1655 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1653 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1656 S3D_ENABLE) { 1654 S3D_ENABLE) {
1657 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1655 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1658 dev_priv->no_psr_reason = PSR_S3D_ENABLED;
1659 return false; 1656 return false;
1660 } 1657 }
1661 1658
1662 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 1659 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
1663 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); 1660 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1664 dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
1665 return false; 1661 return false;
1666 } 1662 }
1667 1663
1664 dev_priv->psr.source_ok = true;
1668 return true; 1665 return true;
1669} 1666}
1670 1667
@@ -1703,10 +1700,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
1703 if (!intel_edp_is_psr_enabled(dev)) 1700 if (!intel_edp_is_psr_enabled(dev))
1704 return; 1701 return;
1705 1702
1706 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); 1703 I915_WRITE(EDP_PSR_CTL(dev),
1704 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
1707 1705
1708 /* Wait till PSR is idle */ 1706 /* Wait till PSR is idle */
1709 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 1707 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
1710 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1708 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1711 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 1709 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1712} 1710}
@@ -1720,7 +1718,7 @@ void intel_edp_psr_update(struct drm_device *dev)
1720 if (encoder->type == INTEL_OUTPUT_EDP) { 1718 if (encoder->type == INTEL_OUTPUT_EDP) {
1721 intel_dp = enc_to_intel_dp(&encoder->base); 1719 intel_dp = enc_to_intel_dp(&encoder->base);
1722 1720
1723 if (!is_edp_psr(intel_dp)) 1721 if (!is_edp_psr(dev))
1724 return; 1722 return;
1725 1723
1726 if (!intel_edp_psr_match_conditions(intel_dp)) 1724 if (!intel_edp_psr_match_conditions(intel_dp))
@@ -2292,7 +2290,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2292 2290
2293static bool 2291static bool
2294intel_dp_set_link_train(struct intel_dp *intel_dp, 2292intel_dp_set_link_train(struct intel_dp *intel_dp,
2295 uint32_t dp_reg_value, 2293 uint32_t *DP,
2296 uint8_t dp_train_pat) 2294 uint8_t dp_train_pat)
2297{ 2295{
2298 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2296 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -2328,50 +2326,51 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2328 I915_WRITE(DP_TP_CTL(port), temp); 2326 I915_WRITE(DP_TP_CTL(port), temp);
2329 2327
2330 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { 2328 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2331 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 2329 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2332 2330
2333 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2331 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2334 case DP_TRAINING_PATTERN_DISABLE: 2332 case DP_TRAINING_PATTERN_DISABLE:
2335 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT; 2333 *DP |= DP_LINK_TRAIN_OFF_CPT;
2336 break; 2334 break;
2337 case DP_TRAINING_PATTERN_1: 2335 case DP_TRAINING_PATTERN_1:
2338 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT; 2336 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2339 break; 2337 break;
2340 case DP_TRAINING_PATTERN_2: 2338 case DP_TRAINING_PATTERN_2:
2341 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2339 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2342 break; 2340 break;
2343 case DP_TRAINING_PATTERN_3: 2341 case DP_TRAINING_PATTERN_3:
2344 DRM_ERROR("DP training pattern 3 not supported\n"); 2342 DRM_ERROR("DP training pattern 3 not supported\n");
2345 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT; 2343 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2346 break; 2344 break;
2347 } 2345 }
2348 2346
2349 } else { 2347 } else {
2350 dp_reg_value &= ~DP_LINK_TRAIN_MASK; 2348 *DP &= ~DP_LINK_TRAIN_MASK;
2351 2349
2352 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2350 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2353 case DP_TRAINING_PATTERN_DISABLE: 2351 case DP_TRAINING_PATTERN_DISABLE:
2354 dp_reg_value |= DP_LINK_TRAIN_OFF; 2352 *DP |= DP_LINK_TRAIN_OFF;
2355 break; 2353 break;
2356 case DP_TRAINING_PATTERN_1: 2354 case DP_TRAINING_PATTERN_1:
2357 dp_reg_value |= DP_LINK_TRAIN_PAT_1; 2355 *DP |= DP_LINK_TRAIN_PAT_1;
2358 break; 2356 break;
2359 case DP_TRAINING_PATTERN_2: 2357 case DP_TRAINING_PATTERN_2:
2360 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2358 *DP |= DP_LINK_TRAIN_PAT_2;
2361 break; 2359 break;
2362 case DP_TRAINING_PATTERN_3: 2360 case DP_TRAINING_PATTERN_3:
2363 DRM_ERROR("DP training pattern 3 not supported\n"); 2361 DRM_ERROR("DP training pattern 3 not supported\n");
2364 dp_reg_value |= DP_LINK_TRAIN_PAT_2; 2362 *DP |= DP_LINK_TRAIN_PAT_2;
2365 break; 2363 break;
2366 } 2364 }
2367 } 2365 }
2368 2366
2369 I915_WRITE(intel_dp->output_reg, dp_reg_value); 2367 I915_WRITE(intel_dp->output_reg, *DP);
2370 POSTING_READ(intel_dp->output_reg); 2368 POSTING_READ(intel_dp->output_reg);
2371 2369
2372 intel_dp_aux_native_write_1(intel_dp, 2370 ret = intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET,
2373 DP_TRAINING_PATTERN_SET, 2371 dp_train_pat);
2374 dp_train_pat); 2372 if (ret != 1)
2373 return false;
2375 2374
2376 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != 2375 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
2377 DP_TRAINING_PATTERN_DISABLE) { 2376 DP_TRAINING_PATTERN_DISABLE) {
@@ -2386,6 +2385,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2386 return true; 2385 return true;
2387} 2386}
2388 2387
2388static bool
2389intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2390 uint8_t dp_train_pat)
2391{
2392 memset(intel_dp->train_set, 0, 4);
2393 intel_dp_set_signal_levels(intel_dp, DP);
2394 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2395}
2396
2397static bool
2398intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
2399 uint8_t link_status[DP_LINK_STATUS_SIZE])
2400{
2401 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2402 struct drm_device *dev = intel_dig_port->base.base.dev;
2403 struct drm_i915_private *dev_priv = dev->dev_private;
2404 int ret;
2405
2406 intel_get_adjust_train(intel_dp, link_status);
2407 intel_dp_set_signal_levels(intel_dp, DP);
2408
2409 I915_WRITE(intel_dp->output_reg, *DP);
2410 POSTING_READ(intel_dp->output_reg);
2411
2412 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
2413 intel_dp->train_set,
2414 intel_dp->lane_count);
2415
2416 return ret == intel_dp->lane_count;
2417}
2418
2389static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 2419static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2390{ 2420{
2391 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2421 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -2445,21 +2475,19 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2445 2475
2446 DP |= DP_PORT_EN; 2476 DP |= DP_PORT_EN;
2447 2477
2448 memset(intel_dp->train_set, 0, 4); 2478 /* clock recovery */
2479 if (!intel_dp_reset_link_train(intel_dp, &DP,
2480 DP_TRAINING_PATTERN_1 |
2481 DP_LINK_SCRAMBLING_DISABLE)) {
2482 DRM_ERROR("failed to enable link training\n");
2483 return;
2484 }
2485
2449 voltage = 0xff; 2486 voltage = 0xff;
2450 voltage_tries = 0; 2487 voltage_tries = 0;
2451 loop_tries = 0; 2488 loop_tries = 0;
2452 for (;;) { 2489 for (;;) {
2453 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 2490 uint8_t link_status[DP_LINK_STATUS_SIZE];
2454 uint8_t link_status[DP_LINK_STATUS_SIZE];
2455
2456 intel_dp_set_signal_levels(intel_dp, &DP);
2457
2458 /* Set training pattern 1 */
2459 if (!intel_dp_set_link_train(intel_dp, DP,
2460 DP_TRAINING_PATTERN_1 |
2461 DP_LINK_SCRAMBLING_DISABLE))
2462 break;
2463 2491
2464 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd); 2492 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
2465 if (!intel_dp_get_link_status(intel_dp, link_status)) { 2493 if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2482,7 +2510,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2482 DRM_DEBUG_KMS("too many full retries, give up\n"); 2510 DRM_DEBUG_KMS("too many full retries, give up\n");
2483 break; 2511 break;
2484 } 2512 }
2485 memset(intel_dp->train_set, 0, 4); 2513 intel_dp_reset_link_train(intel_dp, &DP,
2514 DP_TRAINING_PATTERN_1 |
2515 DP_LINK_SCRAMBLING_DISABLE);
2486 voltage_tries = 0; 2516 voltage_tries = 0;
2487 continue; 2517 continue;
2488 } 2518 }
@@ -2498,8 +2528,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
2498 voltage_tries = 0; 2528 voltage_tries = 0;
2499 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 2529 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
2500 2530
2501 /* Compute new intel_dp->train_set as requested by target */ 2531 /* Update training set as requested by target */
2502 intel_get_adjust_train(intel_dp, link_status); 2532 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2533 DRM_ERROR("failed to update link training\n");
2534 break;
2535 }
2503 } 2536 }
2504 2537
2505 intel_dp->DP = DP; 2538 intel_dp->DP = DP;
@@ -2513,11 +2546,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2513 uint32_t DP = intel_dp->DP; 2546 uint32_t DP = intel_dp->DP;
2514 2547
2515 /* channel equalization */ 2548 /* channel equalization */
2549 if (!intel_dp_set_link_train(intel_dp, &DP,
2550 DP_TRAINING_PATTERN_2 |
2551 DP_LINK_SCRAMBLING_DISABLE)) {
2552 DRM_ERROR("failed to start channel equalization\n");
2553 return;
2554 }
2555
2516 tries = 0; 2556 tries = 0;
2517 cr_tries = 0; 2557 cr_tries = 0;
2518 channel_eq = false; 2558 channel_eq = false;
2519 for (;;) { 2559 for (;;) {
2520 uint8_t link_status[DP_LINK_STATUS_SIZE]; 2560 uint8_t link_status[DP_LINK_STATUS_SIZE];
2521 2561
2522 if (cr_tries > 5) { 2562 if (cr_tries > 5) {
2523 DRM_ERROR("failed to train DP, aborting\n"); 2563 DRM_ERROR("failed to train DP, aborting\n");
@@ -2525,21 +2565,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2525 break; 2565 break;
2526 } 2566 }
2527 2567
2528 intel_dp_set_signal_levels(intel_dp, &DP);
2529
2530 /* channel eq pattern */
2531 if (!intel_dp_set_link_train(intel_dp, DP,
2532 DP_TRAINING_PATTERN_2 |
2533 DP_LINK_SCRAMBLING_DISABLE))
2534 break;
2535
2536 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd); 2568 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
2537 if (!intel_dp_get_link_status(intel_dp, link_status)) 2569 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2570 DRM_ERROR("failed to get link status\n");
2538 break; 2571 break;
2572 }
2539 2573
2540 /* Make sure clock is still ok */ 2574 /* Make sure clock is still ok */
2541 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { 2575 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2542 intel_dp_start_link_train(intel_dp); 2576 intel_dp_start_link_train(intel_dp);
2577 intel_dp_set_link_train(intel_dp, &DP,
2578 DP_TRAINING_PATTERN_2 |
2579 DP_LINK_SCRAMBLING_DISABLE);
2543 cr_tries++; 2580 cr_tries++;
2544 continue; 2581 continue;
2545 } 2582 }
@@ -2553,13 +2590,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2553 if (tries > 5) { 2590 if (tries > 5) {
2554 intel_dp_link_down(intel_dp); 2591 intel_dp_link_down(intel_dp);
2555 intel_dp_start_link_train(intel_dp); 2592 intel_dp_start_link_train(intel_dp);
2593 intel_dp_set_link_train(intel_dp, &DP,
2594 DP_TRAINING_PATTERN_2 |
2595 DP_LINK_SCRAMBLING_DISABLE);
2556 tries = 0; 2596 tries = 0;
2557 cr_tries++; 2597 cr_tries++;
2558 continue; 2598 continue;
2559 } 2599 }
2560 2600
2561 /* Compute new intel_dp->train_set as requested by target */ 2601 /* Update training set as requested by target */
2562 intel_get_adjust_train(intel_dp, link_status); 2602 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
2603 DRM_ERROR("failed to update link training\n");
2604 break;
2605 }
2563 ++tries; 2606 ++tries;
2564 } 2607 }
2565 2608
@@ -2574,7 +2617,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
2574 2617
2575void intel_dp_stop_link_train(struct intel_dp *intel_dp) 2618void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2576{ 2619{
2577 intel_dp_set_link_train(intel_dp, intel_dp->DP, 2620 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2578 DP_TRAINING_PATTERN_DISABLE); 2621 DP_TRAINING_PATTERN_DISABLE);
2579} 2622}
2580 2623
@@ -2661,6 +2704,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2661static bool 2704static bool
2662intel_dp_get_dpcd(struct intel_dp *intel_dp) 2705intel_dp_get_dpcd(struct intel_dp *intel_dp)
2663{ 2706{
2707 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2708 struct drm_device *dev = dig_port->base.base.dev;
2709 struct drm_i915_private *dev_priv = dev->dev_private;
2710
2664 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 2711 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2665 2712
2666 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2713 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2676,11 +2723,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
2676 2723
2677 /* Check if the panel supports PSR */ 2724 /* Check if the panel supports PSR */
2678 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); 2725 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2679 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, 2726 if (is_edp(intel_dp)) {
2680 intel_dp->psr_dpcd, 2727 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2681 sizeof(intel_dp->psr_dpcd)); 2728 intel_dp->psr_dpcd,
2682 if (is_edp_psr(intel_dp)) 2729 sizeof(intel_dp->psr_dpcd));
2683 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 2730 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
2731 dev_priv->psr.sink_support = true;
2732 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
2733 }
2734 }
2735
2684 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 2736 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2685 DP_DWN_STRM_PORT_PRESENT)) 2737 DP_DWN_STRM_PORT_PRESENT))
2686 return true; /* native DP sink */ 2738 return true; /* native DP sink */
@@ -3122,7 +3174,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
3122 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3174 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3123 intel_panel_fini(&intel_connector->panel); 3175 intel_panel_fini(&intel_connector->panel);
3124 3176
3125 drm_sysfs_connector_remove(connector);
3126 drm_connector_cleanup(connector); 3177 drm_connector_cleanup(connector);
3127 kfree(connector); 3178 kfree(connector);
3128} 3179}
@@ -3193,7 +3244,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
3193bool intel_dpd_is_edp(struct drm_device *dev) 3244bool intel_dpd_is_edp(struct drm_device *dev)
3194{ 3245{
3195 struct drm_i915_private *dev_priv = dev->dev_private; 3246 struct drm_i915_private *dev_priv = dev->dev_private;
3196 struct child_device_config *p_child; 3247 union child_device_config *p_child;
3197 int i; 3248 int i;
3198 3249
3199 if (!dev_priv->vbt.child_dev_num) 3250 if (!dev_priv->vbt.child_dev_num)
@@ -3202,8 +3253,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
3202 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 3253 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3203 p_child = dev_priv->vbt.child_dev + i; 3254 p_child = dev_priv->vbt.child_dev + i;
3204 3255
3205 if (p_child->dvo_port == PORT_IDPD && 3256 if (p_child->common.dvo_port == PORT_IDPD &&
3206 p_child->device_type == DEVICE_TYPE_eDP) 3257 p_child->common.device_type == DEVICE_TYPE_eDP)
3207 return true; 3258 return true;
3208 } 3259 }
3209 return false; 3260 return false;
@@ -3615,11 +3666,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3615 struct drm_encoder *encoder; 3666 struct drm_encoder *encoder;
3616 struct intel_connector *intel_connector; 3667 struct intel_connector *intel_connector;
3617 3668
3618 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 3669 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
3619 if (!intel_dig_port) 3670 if (!intel_dig_port)
3620 return; 3671 return;
3621 3672
3622 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 3673 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
3623 if (!intel_connector) { 3674 if (!intel_connector) {
3624 kfree(intel_dig_port); 3675 kfree(intel_dig_port);
3625 return; 3676 return;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 287bbef72da9..eaf0003ddfd9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -77,7 +77,6 @@
77/* the i915, i945 have a single sDVO i2c bus - which is different */ 77/* the i915, i945 have a single sDVO i2c bus - which is different */
78#define MAX_OUTPUTS 6 78#define MAX_OUTPUTS 6
79/* maximum connectors per crtcs in the mode set */ 79/* maximum connectors per crtcs in the mode set */
80#define INTELFB_CONN_LIMIT 4
81 80
82#define INTEL_I2C_BUS_DVO 1 81#define INTEL_I2C_BUS_DVO 1
83#define INTEL_I2C_BUS_SDVO 2 82#define INTEL_I2C_BUS_SDVO 2
@@ -218,7 +217,7 @@ struct intel_crtc_config {
218 * preferred input timings. */ 217 * preferred input timings. */
219 struct drm_display_mode requested_mode; 218 struct drm_display_mode requested_mode;
220 /* Actual pipe timings ie. what we program into the pipe timing 219 /* Actual pipe timings ie. what we program into the pipe timing
221 * registers. adjusted_mode.clock is the pipe pixel clock. */ 220 * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
222 struct drm_display_mode adjusted_mode; 221 struct drm_display_mode adjusted_mode;
223 222
224 /* Pipe source size (ie. panel fitter input size) 223 /* Pipe source size (ie. panel fitter input size)
@@ -513,80 +512,6 @@ struct intel_unpin_work {
513 bool enable_stall_check; 512 bool enable_stall_check;
514}; 513};
515 514
516int intel_pch_rawclk(struct drm_device *dev);
517
518int intel_connector_update_modes(struct drm_connector *connector,
519 struct edid *edid);
520int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
521
522extern void intel_attach_force_audio_property(struct drm_connector *connector);
523extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
524
525extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
526extern void intel_crt_init(struct drm_device *dev);
527extern void intel_hdmi_init(struct drm_device *dev,
528 int hdmi_reg, enum port port);
529extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
530 struct intel_connector *intel_connector);
531extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
532extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
533 struct intel_crtc_config *pipe_config);
534extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
535 bool is_sdvob);
536extern void intel_dvo_init(struct drm_device *dev);
537extern void intel_tv_init(struct drm_device *dev);
538extern void intel_mark_busy(struct drm_device *dev);
539extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
540 struct intel_ring_buffer *ring);
541extern void intel_mark_idle(struct drm_device *dev);
542extern void intel_lvds_init(struct drm_device *dev);
543extern bool intel_dsi_init(struct drm_device *dev);
544extern bool intel_is_dual_link_lvds(struct drm_device *dev);
545extern void intel_dp_init(struct drm_device *dev, int output_reg,
546 enum port port);
547extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
548 struct intel_connector *intel_connector);
549extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
550extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
551extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
552extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
553extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
554extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
555extern bool intel_dp_compute_config(struct intel_encoder *encoder,
556 struct intel_crtc_config *pipe_config);
557extern bool intel_dpd_is_edp(struct drm_device *dev);
558extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
559extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
560extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
561extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
562extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
563extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
564extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
565extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
566 enum plane plane);
567
568/* intel_panel.c */
569extern int intel_panel_init(struct intel_panel *panel,
570 struct drm_display_mode *fixed_mode);
571extern void intel_panel_fini(struct intel_panel *panel);
572
573extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
574 struct drm_display_mode *adjusted_mode);
575extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
576 struct intel_crtc_config *pipe_config,
577 int fitting_mode);
578extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
579 struct intel_crtc_config *pipe_config,
580 int fitting_mode);
581extern void intel_panel_set_backlight(struct drm_device *dev,
582 u32 level, u32 max);
583extern int intel_panel_setup_backlight(struct drm_connector *connector);
584extern void intel_panel_enable_backlight(struct drm_device *dev,
585 enum pipe pipe);
586extern void intel_panel_disable_backlight(struct drm_device *dev);
587extern void intel_panel_destroy_backlight(struct drm_device *dev);
588extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
589
590struct intel_set_config { 515struct intel_set_config {
591 struct drm_encoder **save_connector_encoders; 516 struct drm_encoder **save_connector_encoders;
592 struct drm_crtc **save_encoder_crtcs; 517 struct drm_crtc **save_encoder_crtcs;
@@ -595,18 +520,14 @@ struct intel_set_config {
595 bool mode_changed; 520 bool mode_changed;
596}; 521};
597 522
598extern void intel_crtc_restore_mode(struct drm_crtc *crtc); 523struct intel_load_detect_pipe {
599extern void intel_crtc_load_lut(struct drm_crtc *crtc); 524 struct drm_framebuffer *release_fb;
600extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 525 bool load_detect_temp;
601extern void intel_encoder_destroy(struct drm_encoder *encoder); 526 int dpms_mode;
602extern void intel_connector_dpms(struct drm_connector *, int mode); 527};
603extern bool intel_connector_get_hw_state(struct intel_connector *connector);
604extern void intel_modeset_check_state(struct drm_device *dev);
605extern void intel_plane_restore(struct drm_plane *plane);
606extern void intel_plane_disable(struct drm_plane *plane);
607
608 528
609static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) 529static inline struct intel_encoder *
530intel_attached_encoder(struct drm_connector *connector)
610{ 531{
611 return to_intel_connector(connector)->encoder; 532 return to_intel_connector(connector)->encoder;
612} 533}
@@ -634,73 +555,94 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
634 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 555 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
635} 556}
636 557
558
559/* i915_irq.c */
560bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
561 enum pipe pipe, bool enable);
562bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
563 enum transcoder pch_transcoder,
564 bool enable);
565void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
566void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
567void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
568void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
569void hsw_pc8_disable_interrupts(struct drm_device *dev);
570void hsw_pc8_restore_interrupts(struct drm_device *dev);
571
572
573/* intel_crt.c */
574void intel_crt_init(struct drm_device *dev);
575
576
577/* intel_ddi.c */
578void intel_prepare_ddi(struct drm_device *dev);
579void hsw_fdi_link_train(struct drm_crtc *crtc);
580void intel_ddi_init(struct drm_device *dev, enum port port);
581enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
582bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
583int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
584void intel_ddi_pll_init(struct drm_device *dev);
585void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
586void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
587 enum transcoder cpu_transcoder);
588void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
589void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
590void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
591bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
592void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
593void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
594void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
595bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
596void intel_ddi_fdi_disable(struct drm_crtc *crtc);
597void intel_ddi_get_config(struct intel_encoder *encoder,
598 struct intel_crtc_config *pipe_config);
599
600
601/* intel_display.c */
602int intel_pch_rawclk(struct drm_device *dev);
603void intel_mark_busy(struct drm_device *dev);
604void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
605 struct intel_ring_buffer *ring);
606void intel_mark_idle(struct drm_device *dev);
607void intel_crtc_restore_mode(struct drm_crtc *crtc);
608void intel_crtc_update_dpms(struct drm_crtc *crtc);
609void intel_encoder_destroy(struct drm_encoder *encoder);
610void intel_connector_dpms(struct drm_connector *, int mode);
611bool intel_connector_get_hw_state(struct intel_connector *connector);
612void intel_modeset_check_state(struct drm_device *dev);
637bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 613bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
638 struct intel_digital_port *port); 614 struct intel_digital_port *port);
639 615void intel_connector_attach_encoder(struct intel_connector *connector,
640extern void intel_connector_attach_encoder(struct intel_connector *connector, 616 struct intel_encoder *encoder);
641 struct intel_encoder *encoder); 617struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
642extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 618struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
643 619 struct drm_crtc *crtc);
644extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
645 struct drm_crtc *crtc);
646int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 620int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
647 struct drm_file *file_priv); 621 struct drm_file *file_priv);
648extern enum transcoder 622enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
649intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 623 enum pipe pipe);
650 enum pipe pipe); 624void intel_wait_for_vblank(struct drm_device *dev, int pipe);
651extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 625void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
652extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 626int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
653extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 627void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
654extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port); 628bool intel_get_load_detect_pipe(struct drm_connector *connector,
655 629 struct drm_display_mode *mode,
656struct intel_load_detect_pipe { 630 struct intel_load_detect_pipe *old);
657 struct drm_framebuffer *release_fb; 631void intel_release_load_detect_pipe(struct drm_connector *connector,
658 bool load_detect_temp; 632 struct intel_load_detect_pipe *old);
659 int dpms_mode; 633int intel_pin_and_fence_fb_obj(struct drm_device *dev,
660}; 634 struct drm_i915_gem_object *obj,
661extern bool intel_get_load_detect_pipe(struct drm_connector *connector, 635 struct intel_ring_buffer *pipelined);
662 struct drm_display_mode *mode, 636void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
663 struct intel_load_detect_pipe *old); 637int intel_framebuffer_init(struct drm_device *dev,
664extern void intel_release_load_detect_pipe(struct drm_connector *connector, 638 struct intel_framebuffer *ifb,
665 struct intel_load_detect_pipe *old); 639 struct drm_mode_fb_cmd2 *mode_cmd,
666 640 struct drm_i915_gem_object *obj);
667extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, 641void intel_framebuffer_fini(struct intel_framebuffer *fb);
668 u16 blue, int regno); 642void intel_prepare_page_flip(struct drm_device *dev, int plane);
669extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 643void intel_finish_page_flip(struct drm_device *dev, int pipe);
670 u16 *blue, int regno); 644void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
671 645struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
672extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
673 struct drm_i915_gem_object *obj,
674 struct intel_ring_buffer *pipelined);
675extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
676
677extern int intel_framebuffer_init(struct drm_device *dev,
678 struct intel_framebuffer *ifb,
679 struct drm_mode_fb_cmd2 *mode_cmd,
680 struct drm_i915_gem_object *obj);
681extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
682extern int intel_fbdev_init(struct drm_device *dev);
683extern void intel_fbdev_initial_config(struct drm_device *dev);
684extern void intel_fbdev_fini(struct drm_device *dev);
685extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
686extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
687extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
688extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
689
690extern void intel_setup_overlay(struct drm_device *dev);
691extern void intel_cleanup_overlay(struct drm_device *dev);
692extern int intel_overlay_switch_off(struct intel_overlay *overlay);
693extern int intel_overlay_put_image(struct drm_device *dev, void *data,
694 struct drm_file *file_priv);
695extern int intel_overlay_attrs(struct drm_device *dev, void *data,
696 struct drm_file *file_priv);
697
698extern void intel_fb_output_poll_changed(struct drm_device *dev);
699extern void intel_fb_restore_mode(struct drm_device *dev);
700
701struct intel_shared_dpll *
702intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
703
704void assert_shared_dpll(struct drm_i915_private *dev_priv, 646void assert_shared_dpll(struct drm_i915_private *dev_priv,
705 struct intel_shared_dpll *pll, 647 struct intel_shared_dpll *pll,
706 bool state); 648 bool state);
@@ -714,117 +656,173 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
714 enum pipe pipe, bool state); 656 enum pipe pipe, bool state);
715#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) 657#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
716#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) 658#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
717extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 659void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
718 bool state);
719#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 660#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
720#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 661#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
662void intel_write_eld(struct drm_encoder *encoder,
663 struct drm_display_mode *mode);
664unsigned long intel_gen4_compute_page_offset(int *x, int *y,
665 unsigned int tiling_mode,
666 unsigned int bpp,
667 unsigned int pitch);
668void intel_display_handle_reset(struct drm_device *dev);
669void hsw_enable_pc8_work(struct work_struct *__work);
670void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
671void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
672void intel_dp_get_m_n(struct intel_crtc *crtc,
673 struct intel_crtc_config *pipe_config);
674int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
675void
676ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
677 int dotclock);
678bool intel_crtc_active(struct drm_crtc *crtc);
679void i915_disable_vga_mem(struct drm_device *dev);
680
681
682/* intel_dp.c */
683void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
684bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
685 struct intel_connector *intel_connector);
686void intel_dp_start_link_train(struct intel_dp *intel_dp);
687void intel_dp_complete_link_train(struct intel_dp *intel_dp);
688void intel_dp_stop_link_train(struct intel_dp *intel_dp);
689void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
690void intel_dp_encoder_destroy(struct drm_encoder *encoder);
691void intel_dp_check_link_status(struct intel_dp *intel_dp);
692bool intel_dp_compute_config(struct intel_encoder *encoder,
693 struct intel_crtc_config *pipe_config);
694bool intel_dpd_is_edp(struct drm_device *dev);
695void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
696void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
697void ironlake_edp_panel_on(struct intel_dp *intel_dp);
698void ironlake_edp_panel_off(struct intel_dp *intel_dp);
699void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
700void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
701void intel_edp_psr_enable(struct intel_dp *intel_dp);
702void intel_edp_psr_disable(struct intel_dp *intel_dp);
703void intel_edp_psr_update(struct drm_device *dev);
704
705
706/* intel_dsi.c */
707bool intel_dsi_init(struct drm_device *dev);
708
709
710/* intel_dvo.c */
711void intel_dvo_init(struct drm_device *dev);
712
713
714/* intel_fb.c */
715int intel_fbdev_init(struct drm_device *dev);
716void intel_fbdev_initial_config(struct drm_device *dev);
717void intel_fbdev_fini(struct drm_device *dev);
718void intel_fbdev_set_suspend(struct drm_device *dev, int state);
719void intel_fb_output_poll_changed(struct drm_device *dev);
720void intel_fb_restore_mode(struct drm_device *dev);
721 721
722extern void intel_init_clock_gating(struct drm_device *dev); 722
723extern void intel_suspend_hw(struct drm_device *dev); 723/* intel_hdmi.c */
724extern void intel_write_eld(struct drm_encoder *encoder, 724void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
725 struct drm_display_mode *mode); 725void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
726extern void intel_prepare_ddi(struct drm_device *dev); 726 struct intel_connector *intel_connector);
727extern void hsw_fdi_link_train(struct drm_crtc *crtc); 727struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
728extern void intel_ddi_init(struct drm_device *dev, enum port port); 728bool intel_hdmi_compute_config(struct intel_encoder *encoder,
729extern enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); 729 struct intel_crtc_config *pipe_config);
730 730
731/* For use by IVB LP watermark workaround in intel_sprite.c */ 731
732extern void intel_update_watermarks(struct drm_crtc *crtc); 732/* intel_lvds.c */
733extern void intel_update_sprite_watermarks(struct drm_plane *plane, 733void intel_lvds_init(struct drm_device *dev);
734 struct drm_crtc *crtc, 734bool intel_is_dual_link_lvds(struct drm_device *dev);
735 uint32_t sprite_width, int pixel_size, 735
736 bool enabled, bool scaled); 736
737 737/* intel_modes.c */
738extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, 738int intel_connector_update_modes(struct drm_connector *connector,
739 unsigned int tiling_mode, 739 struct edid *edid);
740 unsigned int bpp, 740int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
741 unsigned int pitch); 741void intel_attach_force_audio_property(struct drm_connector *connector);
742 742void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
743extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 743
744 struct drm_file *file_priv); 744
745extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 745/* intel_overlay.c */
746 struct drm_file *file_priv); 746void intel_setup_overlay(struct drm_device *dev);
747 747void intel_cleanup_overlay(struct drm_device *dev);
748/* Power-related functions, located in intel_pm.c */ 748int intel_overlay_switch_off(struct intel_overlay *overlay);
749extern void intel_init_pm(struct drm_device *dev); 749int intel_overlay_put_image(struct drm_device *dev, void *data,
750/* FBC */ 750 struct drm_file *file_priv);
751extern bool intel_fbc_enabled(struct drm_device *dev); 751int intel_overlay_attrs(struct drm_device *dev, void *data,
752extern void intel_update_fbc(struct drm_device *dev); 752 struct drm_file *file_priv);
753/* IPS */ 753
754extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 754
755extern void intel_gpu_ips_teardown(void); 755/* intel_panel.c */
756 756int intel_panel_init(struct intel_panel *panel,
757/* Power well */ 757 struct drm_display_mode *fixed_mode);
758extern int i915_init_power_well(struct drm_device *dev); 758void intel_panel_fini(struct intel_panel *panel);
759extern void i915_remove_power_well(struct drm_device *dev); 759void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
760 760 struct drm_display_mode *adjusted_mode);
761extern bool intel_display_power_enabled(struct drm_device *dev, 761void intel_pch_panel_fitting(struct intel_crtc *crtc,
762 enum intel_display_power_domain domain); 762 struct intel_crtc_config *pipe_config,
763extern void intel_display_power_get(struct drm_device *dev, 763 int fitting_mode);
764 enum intel_display_power_domain domain); 764void intel_gmch_panel_fitting(struct intel_crtc *crtc,
765extern void intel_display_power_put(struct drm_device *dev, 765 struct intel_crtc_config *pipe_config,
766 enum intel_display_power_domain domain); 766 int fitting_mode);
767extern void intel_init_power_well(struct drm_device *dev); 767void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max);
768extern void intel_set_power_well(struct drm_device *dev, bool enable); 768int intel_panel_setup_backlight(struct drm_connector *connector);
769extern void intel_resume_power_well(struct drm_device *dev); 769void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe);
770extern void intel_enable_gt_powersave(struct drm_device *dev); 770void intel_panel_disable_backlight(struct drm_device *dev);
771extern void intel_disable_gt_powersave(struct drm_device *dev); 771void intel_panel_destroy_backlight(struct drm_device *dev);
772extern void ironlake_teardown_rc6(struct drm_device *dev); 772enum drm_connector_status intel_panel_detect(struct drm_device *dev);
773
774
775/* intel_pm.c */
776void intel_init_clock_gating(struct drm_device *dev);
777void intel_suspend_hw(struct drm_device *dev);
778void intel_update_watermarks(struct drm_crtc *crtc);
779void intel_update_sprite_watermarks(struct drm_plane *plane,
780 struct drm_crtc *crtc,
781 uint32_t sprite_width, int pixel_size,
782 bool enabled, bool scaled);
783void intel_init_pm(struct drm_device *dev);
784bool intel_fbc_enabled(struct drm_device *dev);
785void intel_update_fbc(struct drm_device *dev);
786void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
787void intel_gpu_ips_teardown(void);
788int i915_init_power_well(struct drm_device *dev);
789void i915_remove_power_well(struct drm_device *dev);
790bool intel_display_power_enabled(struct drm_device *dev,
791 enum intel_display_power_domain domain);
792void intel_display_power_get(struct drm_device *dev,
793 enum intel_display_power_domain domain);
794void intel_display_power_put(struct drm_device *dev,
795 enum intel_display_power_domain domain);
796void intel_init_power_well(struct drm_device *dev);
797void intel_set_power_well(struct drm_device *dev, bool enable);
798void intel_enable_gt_powersave(struct drm_device *dev);
799void intel_disable_gt_powersave(struct drm_device *dev);
800void ironlake_teardown_rc6(struct drm_device *dev);
773void gen6_update_ring_freq(struct drm_device *dev); 801void gen6_update_ring_freq(struct drm_device *dev);
802void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
803void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
774 804
775extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
776 enum pipe *pipe);
777extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
778extern void intel_ddi_pll_init(struct drm_device *dev);
779extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
780extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
781 enum transcoder cpu_transcoder);
782extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
783extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
784extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
785extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
786extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
787extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
788extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
789extern bool
790intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
791extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
792
793extern void intel_display_handle_reset(struct drm_device *dev);
794extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
795 enum pipe pipe,
796 bool enable);
797extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
798 enum transcoder pch_transcoder,
799 bool enable);
800
801extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
802extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
803extern void intel_edp_psr_update(struct drm_device *dev);
804extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
805 bool switch_to_fclk, bool allow_power_down);
806extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
807extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
808extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
809 uint32_t mask);
810extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
811extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
812 uint32_t mask);
813extern void hsw_enable_pc8_work(struct work_struct *__work);
814extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
815extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
816extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
817extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
818extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
819extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
820extern void intel_dp_get_m_n(struct intel_crtc *crtc,
821 struct intel_crtc_config *pipe_config);
822extern int intel_dotclock_calculate(int link_freq,
823 const struct intel_link_m_n *m_n);
824extern void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
825 int dotclock);
826 805
827extern bool intel_crtc_active(struct drm_crtc *crtc); 806/* intel_sdvo.c */
828extern void i915_disable_vga_mem(struct drm_device *dev); 807bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
808
809
810/* intel_sprite.c */
811int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
812void intel_flush_display_plane(struct drm_i915_private *dev_priv,
813 enum plane plane);
814void intel_plane_restore(struct drm_plane *plane);
815void intel_plane_disable(struct drm_plane *plane);
816int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
817 struct drm_file *file_priv);
818int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
819 struct drm_file *file_priv);
820
821
822/* intel_tv.c */
823void intel_tv_init(struct drm_device *dev);
824
825void gen6_rps_idle(struct drm_i915_private *dev_priv);
826void gen6_rps_boost(struct drm_i915_private *dev_priv);
829 827
830#endif /* __INTEL_DRV_H__ */ 828#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 674fd4989b45..9a2fdd2a7e34 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -504,7 +504,6 @@ static void intel_dsi_destroy(struct drm_connector *connector)
504 504
505 DRM_DEBUG_KMS("\n"); 505 DRM_DEBUG_KMS("\n");
506 intel_panel_fini(&intel_connector->panel); 506 intel_panel_fini(&intel_connector->panel);
507 drm_sysfs_connector_remove(connector);
508 drm_connector_cleanup(connector); 507 drm_connector_cleanup(connector);
509 kfree(connector); 508 kfree(connector);
510} 509}
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ff86c366218c..1b64145c669a 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -154,7 +154,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
154 154
155 pipe_config->adjusted_mode.flags |= flags; 155 pipe_config->adjusted_mode.flags |= flags;
156 156
157 pipe_config->adjusted_mode.clock = pipe_config->port_clock; 157 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
158} 158}
159 159
160static void intel_disable_dvo(struct intel_encoder *encoder) 160static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -367,7 +367,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
367 367
368static void intel_dvo_destroy(struct drm_connector *connector) 368static void intel_dvo_destroy(struct drm_connector *connector)
369{ 369{
370 drm_sysfs_connector_remove(connector);
371 drm_connector_cleanup(connector); 370 drm_connector_cleanup(connector);
372 kfree(connector); 371 kfree(connector);
373} 372}
@@ -448,11 +447,11 @@ void intel_dvo_init(struct drm_device *dev)
448 int i; 447 int i;
449 int encoder_type = DRM_MODE_ENCODER_NONE; 448 int encoder_type = DRM_MODE_ENCODER_NONE;
450 449
451 intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL); 450 intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
452 if (!intel_dvo) 451 if (!intel_dvo)
453 return; 452 return;
454 453
455 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 454 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
456 if (!intel_connector) { 455 if (!intel_connector) {
457 kfree(intel_dvo); 456 kfree(intel_dvo);
458 return; 457 return;
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index bc2100007b21..d883b77b1b78 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -184,6 +184,27 @@ out:
184 return ret; 184 return ret;
185} 185}
186 186
187/** Sets the color ramps on behalf of RandR */
188static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
189 u16 blue, int regno)
190{
191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
192
193 intel_crtc->lut_r[regno] = red >> 8;
194 intel_crtc->lut_g[regno] = green >> 8;
195 intel_crtc->lut_b[regno] = blue >> 8;
196}
197
198static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
199 u16 *blue, int regno)
200{
201 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
202
203 *red = intel_crtc->lut_r[regno] << 8;
204 *green = intel_crtc->lut_g[regno] << 8;
205 *blue = intel_crtc->lut_b[regno] << 8;
206}
207
187static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 208static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
188 .gamma_set = intel_crtc_fb_gamma_set, 209 .gamma_set = intel_crtc_fb_gamma_set,
189 .gamma_get = intel_crtc_fb_gamma_get, 210 .gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
216 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
217 int ret; 238 int ret;
218 239
219 ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); 240 ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
220 if (!ifbdev) 241 if (!ifbdev)
221 return -ENOMEM; 242 return -ENOMEM;
222 243
@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)
225 246
226 ret = drm_fb_helper_init(dev, &ifbdev->helper, 247 ret = drm_fb_helper_init(dev, &ifbdev->helper,
227 INTEL_INFO(dev)->num_pipes, 248 INTEL_INFO(dev)->num_pipes,
228 INTELFB_CONN_LIMIT); 249 4);
229 if (ret) { 250 if (ret) {
230 kfree(ifbdev); 251 kfree(ifbdev);
231 return ret; 252 return ret;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 79582f912414..4f4d346db8f0 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -737,7 +737,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
737 if (HAS_PCH_SPLIT(dev_priv->dev)) 737 if (HAS_PCH_SPLIT(dev_priv->dev))
738 ironlake_check_encoder_dotclock(pipe_config, dotclock); 738 ironlake_check_encoder_dotclock(pipe_config, dotclock);
739 739
740 pipe_config->adjusted_mode.clock = dotclock; 740 pipe_config->adjusted_mode.crtc_clock = dotclock;
741} 741}
742 742
743static void intel_enable_hdmi(struct intel_encoder *encoder) 743static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -873,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
873 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 873 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
874 struct drm_device *dev = encoder->base.dev; 874 struct drm_device *dev = encoder->base.dev;
875 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 875 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
876 int clock_12bpc = pipe_config->adjusted_mode.clock * 3 / 2; 876 int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
877 int portclock_limit = hdmi_portclock_limit(intel_hdmi); 877 int portclock_limit = hdmi_portclock_limit(intel_hdmi);
878 int desired_bpp; 878 int desired_bpp;
879 879
@@ -915,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
915 pipe_config->pipe_bpp = desired_bpp; 915 pipe_config->pipe_bpp = desired_bpp;
916 } 916 }
917 917
918 if (adjusted_mode->clock > portclock_limit) { 918 if (adjusted_mode->crtc_clock > portclock_limit) {
919 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n"); 919 DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
920 return false; 920 return false;
921 } 921 }
@@ -1181,7 +1181,6 @@ static void intel_hdmi_post_disable(struct intel_encoder *encoder)
1181 1181
1182static void intel_hdmi_destroy(struct drm_connector *connector) 1182static void intel_hdmi_destroy(struct drm_connector *connector)
1183{ 1183{
1184 drm_sysfs_connector_remove(connector);
1185 drm_connector_cleanup(connector); 1184 drm_connector_cleanup(connector);
1186 kfree(connector); 1185 kfree(connector);
1187} 1186}
@@ -1228,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1228 1227
1229 connector->interlace_allowed = 1; 1228 connector->interlace_allowed = 1;
1230 connector->doublescan_allowed = 0; 1229 connector->doublescan_allowed = 0;
1230 connector->stereo_allowed = 1;
1231 1231
1232 switch (port) { 1232 switch (port) {
1233 case PORT_B: 1233 case PORT_B:
@@ -1292,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
1292 struct intel_encoder *intel_encoder; 1292 struct intel_encoder *intel_encoder;
1293 struct intel_connector *intel_connector; 1293 struct intel_connector *intel_connector;
1294 1294
1295 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); 1295 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
1296 if (!intel_dig_port) 1296 if (!intel_dig_port)
1297 return; 1297 return;
1298 1298
1299 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1299 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1300 if (!intel_connector) { 1300 if (!intel_connector) {
1301 kfree(intel_dig_port); 1301 kfree(intel_dig_port);
1302 return; 1302 return;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d1c1e0f7f262..2ca17b14b6c1 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,6 +34,11 @@
34#include <drm/i915_drm.h> 34#include <drm/i915_drm.h>
35#include "i915_drv.h" 35#include "i915_drv.h"
36 36
37enum disp_clk {
38 CDCLK,
39 CZCLK
40};
41
37struct gmbus_port { 42struct gmbus_port {
38 const char *name; 43 const char *name;
39 int reg; 44 int reg;
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
58 return container_of(i2c, struct intel_gmbus, adapter); 63 return container_of(i2c, struct intel_gmbus, adapter);
59} 64}
60 65
66static int get_disp_clk_div(struct drm_i915_private *dev_priv,
67 enum disp_clk clk)
68{
69 u32 reg_val;
70 int clk_ratio;
71
72 reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
73
74 if (clk == CDCLK)
75 clk_ratio =
76 ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
77 else
78 clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
79
80 return clk_ratio;
81}
82
83static void gmbus_set_freq(struct drm_i915_private *dev_priv)
84{
85 int vco_freq[] = { 800, 1600, 2000, 2400 };
86 int gmbus_freq = 0, cdclk_div, hpll_freq;
87
88 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
89
90 /* Skip setting the gmbus freq if BIOS has already programmed it */
91 if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
92 return;
93
94 /* Obtain SKU information */
95 mutex_lock(&dev_priv->dpio_lock);
96 hpll_freq =
97 vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
98 mutex_unlock(&dev_priv->dpio_lock);
99
100 /* Get the CDCLK divide ratio */
101 cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
102
103 /*
104 * Program the gmbus_freq based on the cdclk frequency.
105 * BSpec erroneously claims we should aim for 4MHz, but
106 * in fact 1MHz is the correct frequency.
107 */
108 if (cdclk_div)
109 gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
110
111 if (WARN_ON(gmbus_freq == 0))
112 return;
113
114 I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
115}
116
61void 117void
62intel_i2c_reset(struct drm_device *dev) 118intel_i2c_reset(struct drm_device *dev)
63{ 119{
64 struct drm_i915_private *dev_priv = dev->dev_private; 120 struct drm_i915_private *dev_priv = dev->dev_private;
121
122 /*
123 * In BIOS-less system, program the correct gmbus frequency
124 * before reading edid.
125 */
126 if (IS_VALLEYVIEW(dev))
127 gmbus_set_freq(dev_priv);
128
65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 129 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); 130 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
67} 131}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 05e5485a630f..ae0c843dd263 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -123,7 +123,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
123 if (HAS_PCH_SPLIT(dev_priv->dev)) 123 if (HAS_PCH_SPLIT(dev_priv->dev))
124 ironlake_check_encoder_dotclock(pipe_config, dotclock); 124 ironlake_check_encoder_dotclock(pipe_config, dotclock);
125 125
126 pipe_config->adjusted_mode.clock = dotclock; 126 pipe_config->adjusted_mode.crtc_clock = dotclock;
127} 127}
128 128
129/* The LVDS pin pair needs to be on before the DPLLs are enabled. 129/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -474,7 +474,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
474 474
475 intel_panel_fini(&lvds_connector->base.panel); 475 intel_panel_fini(&lvds_connector->base.panel);
476 476
477 drm_sysfs_connector_remove(connector);
478 drm_connector_cleanup(connector); 477 drm_connector_cleanup(connector);
479 kfree(connector); 478 kfree(connector);
480} 479}
@@ -794,7 +793,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
794 return true; 793 return true;
795 794
796 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 795 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
797 struct child_device_config *child = dev_priv->vbt.child_dev + i; 796 union child_device_config *uchild = dev_priv->vbt.child_dev + i;
797 struct old_child_dev_config *child = &uchild->old;
798 798
799 /* If the device type is not LFP, continue. 799 /* If the device type is not LFP, continue.
800 * We have to check both the new identifiers as well as the 800 * We have to check both the new identifiers as well as the
@@ -948,11 +948,11 @@ void intel_lvds_init(struct drm_device *dev)
948 } 948 }
949 } 949 }
950 950
951 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); 951 lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
952 if (!lvds_encoder) 952 if (!lvds_encoder)
953 return; 953 return;
954 954
955 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); 955 lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
956 if (!lvds_connector) { 956 if (!lvds_connector) {
957 kfree(lvds_encoder); 957 kfree(lvds_encoder);
958 return; 958 return;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 8d6d0a1bf5bf..a98a990fbab3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1053,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1053 return ret; 1053 return ret;
1054 } 1054 }
1055 1055
1056 params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL); 1056 params = kmalloc(sizeof(*params), GFP_KERNEL);
1057 if (!params) 1057 if (!params)
1058 return -ENOMEM; 1058 return -ENOMEM;
1059 1059
@@ -1320,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
1320 if (!HAS_OVERLAY(dev)) 1320 if (!HAS_OVERLAY(dev))
1321 return; 1321 return;
1322 1322
1323 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL); 1323 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1324 if (!overlay) 1324 if (!overlay)
1325 return; 1325 return;
1326 1326
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 3bc89a6bc3ee..54684168de1e 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -329,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
329{ 329{
330 struct drm_i915_private *dev_priv = dev->dev_private; 330 struct drm_i915_private *dev_priv = dev->dev_private;
331 331
332 if (INTEL_INFO(dev)->gen >= 4) 332 if (IS_GEN4(dev))
333 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; 333 return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
334 334
335 if (IS_GEN2(dev)) 335 if (IS_GEN2(dev))
@@ -372,6 +372,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
372 I915_WRITE(BLC_PWM_CTL2, 372 I915_WRITE(BLC_PWM_CTL2,
373 dev_priv->regfile.saveBLC_PWM_CTL2); 373 dev_priv->regfile.saveBLC_PWM_CTL2);
374 } 374 }
375
376 if (IS_VALLEYVIEW(dev) && !val)
377 val = 0x0f42ffff;
375 } 378 }
376 379
377 return val; 380 return val;
@@ -629,10 +632,24 @@ set_level:
629 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags); 632 spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
630} 633}
631 634
635/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
636static void intel_panel_init_backlight_regs(struct drm_device *dev)
637{
638 struct drm_i915_private *dev_priv = dev->dev_private;
639
640 if (IS_VALLEYVIEW(dev)) {
641 u32 cur_val = I915_READ(BLC_PWM_CTL) &
642 BACKLIGHT_DUTY_CYCLE_MASK;
643 I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val);
644 }
645}
646
632static void intel_panel_init_backlight(struct drm_device *dev) 647static void intel_panel_init_backlight(struct drm_device *dev)
633{ 648{
634 struct drm_i915_private *dev_priv = dev->dev_private; 649 struct drm_i915_private *dev_priv = dev->dev_private;
635 650
651 intel_panel_init_backlight_regs(dev);
652
636 dev_priv->backlight.level = intel_panel_get_backlight(dev); 653 dev_priv->backlight.level = intel_panel_get_backlight(dev);
637 dev_priv->backlight.enabled = dev_priv->backlight.level != 0; 654 dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
638} 655}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3907c60a72de..008ec0bb017f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -370,7 +370,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
370 370
371 intel_cancel_fbc_work(dev_priv); 371 intel_cancel_fbc_work(dev_priv);
372 372
373 work = kzalloc(sizeof *work, GFP_KERNEL); 373 work = kzalloc(sizeof(*work), GFP_KERNEL);
374 if (work == NULL) { 374 if (work == NULL) {
375 DRM_ERROR("Failed to allocate FBC work structure\n"); 375 DRM_ERROR("Failed to allocate FBC work structure\n");
376 dev_priv->display.enable_fbc(crtc, interval); 376 dev_priv->display.enable_fbc(crtc, interval);
@@ -1100,8 +1100,12 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
1100 1100
1101 crtc = single_enabled_crtc(dev); 1101 crtc = single_enabled_crtc(dev);
1102 if (crtc) { 1102 if (crtc) {
1103 int clock = to_intel_crtc(crtc)->config.adjusted_mode.clock; 1103 const struct drm_display_mode *adjusted_mode;
1104 int pixel_size = crtc->fb->bits_per_pixel / 8; 1104 int pixel_size = crtc->fb->bits_per_pixel / 8;
1105 int clock;
1106
1107 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1108 clock = adjusted_mode->crtc_clock;
1105 1109
1106 /* Display SR */ 1110 /* Display SR */
1107 wm = intel_calculate_wm(clock, &pineview_display_wm, 1111 wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1174,7 +1178,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1174 } 1178 }
1175 1179
1176 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1180 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1177 clock = adjusted_mode->clock; 1181 clock = adjusted_mode->crtc_clock;
1178 htotal = adjusted_mode->htotal; 1182 htotal = adjusted_mode->htotal;
1179 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1183 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1180 pixel_size = crtc->fb->bits_per_pixel / 8; 1184 pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1261,7 +1265,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1261 1265
1262 crtc = intel_get_crtc_for_plane(dev, plane); 1266 crtc = intel_get_crtc_for_plane(dev, plane);
1263 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1267 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1264 clock = adjusted_mode->clock; 1268 clock = adjusted_mode->crtc_clock;
1265 htotal = adjusted_mode->htotal; 1269 htotal = adjusted_mode->htotal;
1266 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1270 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1267 pixel_size = crtc->fb->bits_per_pixel / 8; 1271 pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1302,7 +1306,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1302 if (!intel_crtc_active(crtc)) 1306 if (!intel_crtc_active(crtc))
1303 return false; 1307 return false;
1304 1308
1305 clock = to_intel_crtc(crtc)->config.adjusted_mode.clock; 1309 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1306 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */ 1310 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
1307 1311
1308 entries = (clock / 1000) * pixel_size; 1312 entries = (clock / 1000) * pixel_size;
@@ -1492,7 +1496,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1492 static const int sr_latency_ns = 12000; 1496 static const int sr_latency_ns = 12000;
1493 const struct drm_display_mode *adjusted_mode = 1497 const struct drm_display_mode *adjusted_mode =
1494 &to_intel_crtc(crtc)->config.adjusted_mode; 1498 &to_intel_crtc(crtc)->config.adjusted_mode;
1495 int clock = adjusted_mode->clock; 1499 int clock = adjusted_mode->crtc_clock;
1496 int htotal = adjusted_mode->htotal; 1500 int htotal = adjusted_mode->htotal;
1497 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1501 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1498 int pixel_size = crtc->fb->bits_per_pixel / 8; 1502 int pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -1567,11 +1571,13 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1567 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1571 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1568 crtc = intel_get_crtc_for_plane(dev, 0); 1572 crtc = intel_get_crtc_for_plane(dev, 0);
1569 if (intel_crtc_active(crtc)) { 1573 if (intel_crtc_active(crtc)) {
1574 const struct drm_display_mode *adjusted_mode;
1570 int cpp = crtc->fb->bits_per_pixel / 8; 1575 int cpp = crtc->fb->bits_per_pixel / 8;
1571 if (IS_GEN2(dev)) 1576 if (IS_GEN2(dev))
1572 cpp = 4; 1577 cpp = 4;
1573 1578
1574 planea_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock, 1579 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1580 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1575 wm_info, fifo_size, cpp, 1581 wm_info, fifo_size, cpp,
1576 latency_ns); 1582 latency_ns);
1577 enabled = crtc; 1583 enabled = crtc;
@@ -1581,11 +1587,13 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1581 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1587 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1582 crtc = intel_get_crtc_for_plane(dev, 1); 1588 crtc = intel_get_crtc_for_plane(dev, 1);
1583 if (intel_crtc_active(crtc)) { 1589 if (intel_crtc_active(crtc)) {
1590 const struct drm_display_mode *adjusted_mode;
1584 int cpp = crtc->fb->bits_per_pixel / 8; 1591 int cpp = crtc->fb->bits_per_pixel / 8;
1585 if (IS_GEN2(dev)) 1592 if (IS_GEN2(dev))
1586 cpp = 4; 1593 cpp = 4;
1587 1594
1588 planeb_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock, 1595 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1596 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1589 wm_info, fifo_size, cpp, 1597 wm_info, fifo_size, cpp,
1590 latency_ns); 1598 latency_ns);
1591 if (enabled == NULL) 1599 if (enabled == NULL)
@@ -1614,7 +1622,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1614 static const int sr_latency_ns = 6000; 1622 static const int sr_latency_ns = 6000;
1615 const struct drm_display_mode *adjusted_mode = 1623 const struct drm_display_mode *adjusted_mode =
1616 &to_intel_crtc(enabled)->config.adjusted_mode; 1624 &to_intel_crtc(enabled)->config.adjusted_mode;
1617 int clock = adjusted_mode->clock; 1625 int clock = adjusted_mode->crtc_clock;
1618 int htotal = adjusted_mode->htotal; 1626 int htotal = adjusted_mode->htotal;
1619 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1627 int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1620 int pixel_size = enabled->fb->bits_per_pixel / 8; 1628 int pixel_size = enabled->fb->bits_per_pixel / 8;
@@ -1670,6 +1678,7 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
1670 struct drm_device *dev = unused_crtc->dev; 1678 struct drm_device *dev = unused_crtc->dev;
1671 struct drm_i915_private *dev_priv = dev->dev_private; 1679 struct drm_i915_private *dev_priv = dev->dev_private;
1672 struct drm_crtc *crtc; 1680 struct drm_crtc *crtc;
1681 const struct drm_display_mode *adjusted_mode;
1673 uint32_t fwater_lo; 1682 uint32_t fwater_lo;
1674 int planea_wm; 1683 int planea_wm;
1675 1684
@@ -1677,7 +1686,8 @@ static void i830_update_wm(struct drm_crtc *unused_crtc)
1677 if (crtc == NULL) 1686 if (crtc == NULL)
1678 return; 1687 return;
1679 1688
1680 planea_wm = intel_calculate_wm(to_intel_crtc(crtc)->config.adjusted_mode.clock, 1689 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1690 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1681 &i830_wm_info, 1691 &i830_wm_info,
1682 dev_priv->display.get_fifo_size(dev, 0), 1692 dev_priv->display.get_fifo_size(dev, 0),
1683 4, latency_ns); 1693 4, latency_ns);
@@ -1764,7 +1774,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1764 1774
1765 crtc = intel_get_crtc_for_plane(dev, plane); 1775 crtc = intel_get_crtc_for_plane(dev, plane);
1766 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1776 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1767 clock = adjusted_mode->clock; 1777 clock = adjusted_mode->crtc_clock;
1768 htotal = adjusted_mode->htotal; 1778 htotal = adjusted_mode->htotal;
1769 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w; 1779 hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
1770 pixel_size = crtc->fb->bits_per_pixel / 8; 1780 pixel_size = crtc->fb->bits_per_pixel / 8;
@@ -2112,7 +2122,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
2112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2122 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2113 uint32_t pixel_rate; 2123 uint32_t pixel_rate;
2114 2124
2115 pixel_rate = intel_crtc->config.adjusted_mode.clock; 2125 pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
2116 2126
2117 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to 2127 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2118 * adjust the pixel_rate here. */ 2128 * adjust the pixel_rate here. */
@@ -2913,7 +2923,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2913 return false; 2923 return false;
2914 } 2924 }
2915 2925
2916 clock = to_intel_crtc(crtc)->config.adjusted_mode.clock; 2926 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2917 2927
2918 /* Use the small buffer method to calculate the sprite watermark */ 2928 /* Use the small buffer method to calculate the sprite watermark */
2919 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 2929 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2948,7 +2958,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2948 } 2958 }
2949 2959
2950 crtc = intel_get_crtc_for_plane(dev, plane); 2960 crtc = intel_get_crtc_for_plane(dev, plane);
2951 clock = to_intel_crtc(crtc)->config.adjusted_mode.clock; 2961 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
2952 if (!clock) { 2962 if (!clock) {
2953 *sprite_wm = 0; 2963 *sprite_wm = 0;
2954 return false; 2964 return false;
@@ -3302,6 +3312,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3302 return limits; 3312 return limits;
3303} 3313}
3304 3314
3315static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3316{
3317 int new_power;
3318
3319 new_power = dev_priv->rps.power;
3320 switch (dev_priv->rps.power) {
3321 case LOW_POWER:
3322 if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
3323 new_power = BETWEEN;
3324 break;
3325
3326 case BETWEEN:
3327 if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
3328 new_power = LOW_POWER;
3329 else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
3330 new_power = HIGH_POWER;
3331 break;
3332
3333 case HIGH_POWER:
3334 if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
3335 new_power = BETWEEN;
3336 break;
3337 }
3338 /* Max/min bins are special */
3339 if (val == dev_priv->rps.min_delay)
3340 new_power = LOW_POWER;
3341 if (val == dev_priv->rps.max_delay)
3342 new_power = HIGH_POWER;
3343 if (new_power == dev_priv->rps.power)
3344 return;
3345
3346 /* Note the units here are not exactly 1us, but 1280ns. */
3347 switch (new_power) {
3348 case LOW_POWER:
3349 /* Upclock if more than 95% busy over 16ms */
3350 I915_WRITE(GEN6_RP_UP_EI, 12500);
3351 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3352
3353 /* Downclock if less than 85% busy over 32ms */
3354 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3355 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3356
3357 I915_WRITE(GEN6_RP_CONTROL,
3358 GEN6_RP_MEDIA_TURBO |
3359 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3360 GEN6_RP_MEDIA_IS_GFX |
3361 GEN6_RP_ENABLE |
3362 GEN6_RP_UP_BUSY_AVG |
3363 GEN6_RP_DOWN_IDLE_AVG);
3364 break;
3365
3366 case BETWEEN:
3367 /* Upclock if more than 90% busy over 13ms */
3368 I915_WRITE(GEN6_RP_UP_EI, 10250);
3369 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3370
3371 /* Downclock if less than 75% busy over 32ms */
3372 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3373 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3374
3375 I915_WRITE(GEN6_RP_CONTROL,
3376 GEN6_RP_MEDIA_TURBO |
3377 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3378 GEN6_RP_MEDIA_IS_GFX |
3379 GEN6_RP_ENABLE |
3380 GEN6_RP_UP_BUSY_AVG |
3381 GEN6_RP_DOWN_IDLE_AVG);
3382 break;
3383
3384 case HIGH_POWER:
3385 /* Upclock if more than 85% busy over 10ms */
3386 I915_WRITE(GEN6_RP_UP_EI, 8000);
3387 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3388
3389 /* Downclock if less than 60% busy over 32ms */
3390 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3391 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3392
3393 I915_WRITE(GEN6_RP_CONTROL,
3394 GEN6_RP_MEDIA_TURBO |
3395 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3396 GEN6_RP_MEDIA_IS_GFX |
3397 GEN6_RP_ENABLE |
3398 GEN6_RP_UP_BUSY_AVG |
3399 GEN6_RP_DOWN_IDLE_AVG);
3400 break;
3401 }
3402
3403 dev_priv->rps.power = new_power;
3404 dev_priv->rps.last_adj = 0;
3405}
3406
3305void gen6_set_rps(struct drm_device *dev, u8 val) 3407void gen6_set_rps(struct drm_device *dev, u8 val)
3306{ 3408{
3307 struct drm_i915_private *dev_priv = dev->dev_private; 3409 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3314,6 +3416,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3314 if (val == dev_priv->rps.cur_delay) 3416 if (val == dev_priv->rps.cur_delay)
3315 return; 3417 return;
3316 3418
3419 gen6_set_rps_thresholds(dev_priv, val);
3420
3317 if (IS_HASWELL(dev)) 3421 if (IS_HASWELL(dev))
3318 I915_WRITE(GEN6_RPNSWREQ, 3422 I915_WRITE(GEN6_RPNSWREQ,
3319 HSW_FREQUENCY(val)); 3423 HSW_FREQUENCY(val));
@@ -3335,6 +3439,28 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3335 trace_intel_gpu_freq_change(val * 50); 3439 trace_intel_gpu_freq_change(val * 50);
3336} 3440}
3337 3441
3442void gen6_rps_idle(struct drm_i915_private *dev_priv)
3443{
3444 mutex_lock(&dev_priv->rps.hw_lock);
3445 if (dev_priv->info->is_valleyview)
3446 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3447 else
3448 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3449 dev_priv->rps.last_adj = 0;
3450 mutex_unlock(&dev_priv->rps.hw_lock);
3451}
3452
3453void gen6_rps_boost(struct drm_i915_private *dev_priv)
3454{
3455 mutex_lock(&dev_priv->rps.hw_lock);
3456 if (dev_priv->info->is_valleyview)
3457 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3458 else
3459 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
3460 dev_priv->rps.last_adj = 0;
3461 mutex_unlock(&dev_priv->rps.hw_lock);
3462}
3463
3338/* 3464/*
3339 * Wait until the previous freq change has completed, 3465 * Wait until the previous freq change has completed,
3340 * or the timeout elapsed, and then update our notion 3466 * or the timeout elapsed, and then update our notion
@@ -3516,7 +3642,10 @@ static void gen6_enable_rps(struct drm_device *dev)
3516 3642
3517 /* In units of 50MHz */ 3643 /* In units of 50MHz */
3518 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff; 3644 dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3519 dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16; 3645 dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
3646 dev_priv->rps.rp1_delay = (rp_state_cap >> 8) & 0xff;
3647 dev_priv->rps.rp0_delay = (rp_state_cap >> 0) & 0xff;
3648 dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
3520 dev_priv->rps.cur_delay = 0; 3649 dev_priv->rps.cur_delay = 0;
3521 3650
3522 /* disable the counters and set deterministic thresholds */ 3651 /* disable the counters and set deterministic thresholds */
@@ -3564,38 +3693,9 @@ static void gen6_enable_rps(struct drm_device *dev)
3564 GEN6_RC_CTL_EI_MODE(1) | 3693 GEN6_RC_CTL_EI_MODE(1) |
3565 GEN6_RC_CTL_HW_ENABLE); 3694 GEN6_RC_CTL_HW_ENABLE);
3566 3695
3567 if (IS_HASWELL(dev)) { 3696 /* Power down if completely idle for over 50ms */
3568 I915_WRITE(GEN6_RPNSWREQ, 3697 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3569 HSW_FREQUENCY(10));
3570 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3571 HSW_FREQUENCY(12));
3572 } else {
3573 I915_WRITE(GEN6_RPNSWREQ,
3574 GEN6_FREQUENCY(10) |
3575 GEN6_OFFSET(0) |
3576 GEN6_AGGRESSIVE_TURBO);
3577 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3578 GEN6_FREQUENCY(12));
3579 }
3580
3581 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3582 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3583 dev_priv->rps.max_delay << 24 |
3584 dev_priv->rps.min_delay << 16);
3585
3586 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3587 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3588 I915_WRITE(GEN6_RP_UP_EI, 66000);
3589 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3590
3591 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 3698 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3592 I915_WRITE(GEN6_RP_CONTROL,
3593 GEN6_RP_MEDIA_TURBO |
3594 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3595 GEN6_RP_MEDIA_IS_GFX |
3596 GEN6_RP_ENABLE |
3597 GEN6_RP_UP_BUSY_AVG |
3598 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
3599 3699
3600 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 3700 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3601 if (!ret) { 3701 if (!ret) {
@@ -3611,7 +3711,8 @@ static void gen6_enable_rps(struct drm_device *dev)
3611 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 3711 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3612 } 3712 }
3613 3713
3614 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 3714 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3715 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
3615 3716
3616 gen6_enable_rps_interrupts(dev); 3717 gen6_enable_rps_interrupts(dev);
3617 3718
@@ -3653,9 +3754,9 @@ void gen6_update_ring_freq(struct drm_device *dev)
3653 /* Convert from kHz to MHz */ 3754 /* Convert from kHz to MHz */
3654 max_ia_freq /= 1000; 3755 max_ia_freq /= 1000;
3655 3756
3656 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK); 3757 min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
3657 /* convert DDR frequency from units of 133.3MHz to bandwidth */ 3758 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3658 min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3; 3759 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3659 3760
3660 /* 3761 /*
3661 * For each potential GPU frequency, load a ring frequency we'd like 3762 * For each potential GPU frequency, load a ring frequency we'd like
@@ -3668,7 +3769,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
3668 unsigned int ia_freq = 0, ring_freq = 0; 3769 unsigned int ia_freq = 0, ring_freq = 0;
3669 3770
3670 if (IS_HASWELL(dev)) { 3771 if (IS_HASWELL(dev)) {
3671 ring_freq = (gpu_freq * 5 + 3) / 4; 3772 ring_freq = mult_frac(gpu_freq, 5, 4);
3672 ring_freq = max(min_ring_freq, ring_freq); 3773 ring_freq = max(min_ring_freq, ring_freq);
3673 /* leave ia_freq as the default, chosen by cpufreq */ 3774 /* leave ia_freq as the default, chosen by cpufreq */
3674 } else { 3775 } else {
@@ -3724,24 +3825,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3724 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3825 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3725} 3826}
3726 3827
3727static void vlv_rps_timer_work(struct work_struct *work)
3728{
3729 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3730 rps.vlv_work.work);
3731
3732 /*
3733 * Timer fired, we must be idle. Drop to min voltage state.
3734 * Note: we use RPe here since it should match the
3735 * Vmin we were shooting for. That should give us better
3736 * perf when we come back out of RC6 than if we used the
3737 * min freq available.
3738 */
3739 mutex_lock(&dev_priv->rps.hw_lock);
3740 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3741 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3742 mutex_unlock(&dev_priv->rps.hw_lock);
3743}
3744
3745static void valleyview_setup_pctx(struct drm_device *dev) 3828static void valleyview_setup_pctx(struct drm_device *dev)
3746{ 3829{
3747 struct drm_i915_private *dev_priv = dev->dev_private; 3830 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3794,7 +3877,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
3794 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3877 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3795 3878
3796 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 3879 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3797 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 3880 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3881 gtfifodbg);
3798 I915_WRITE(GTFIFODBG, gtfifodbg); 3882 I915_WRITE(GTFIFODBG, gtfifodbg);
3799 } 3883 }
3800 3884
@@ -3827,7 +3911,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
3827 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350); 3911 I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3828 3912
3829 /* allows RC6 residency counter to work */ 3913 /* allows RC6 residency counter to work */
3830 I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3)); 3914 I915_WRITE(VLV_COUNTER_CONTROL,
3915 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3916 VLV_MEDIA_RC6_COUNT_EN |
3917 VLV_RENDER_RC6_COUNT_EN));
3831 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 3918 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3832 rc6_mode = GEN7_RC_CTL_TO_MODE; 3919 rc6_mode = GEN7_RC_CTL_TO_MODE;
3833 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 3920 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -3880,8 +3967,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
3880 dev_priv->rps.rpe_delay), 3967 dev_priv->rps.rpe_delay),
3881 dev_priv->rps.rpe_delay); 3968 dev_priv->rps.rpe_delay);
3882 3969
3883 INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3884
3885 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); 3970 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3886 3971
3887 gen6_enable_rps_interrupts(dev); 3972 gen6_enable_rps_interrupts(dev);
@@ -4621,8 +4706,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
4621 } else if (INTEL_INFO(dev)->gen >= 6) { 4706 } else if (INTEL_INFO(dev)->gen >= 6) {
4622 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work); 4707 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4623 cancel_work_sync(&dev_priv->rps.work); 4708 cancel_work_sync(&dev_priv->rps.work);
4624 if (IS_VALLEYVIEW(dev))
4625 cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
4626 mutex_lock(&dev_priv->rps.hw_lock); 4709 mutex_lock(&dev_priv->rps.hw_lock);
4627 if (IS_VALLEYVIEW(dev)) 4710 if (IS_VALLEYVIEW(dev))
4628 valleyview_disable_rps(dev); 4711 valleyview_disable_rps(dev);
@@ -5498,7 +5581,7 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
5498 spin_unlock_irq(&power_well->lock); 5581 spin_unlock_irq(&power_well->lock);
5499} 5582}
5500 5583
5501void intel_resume_power_well(struct drm_device *dev) 5584static void intel_resume_power_well(struct drm_device *dev)
5502{ 5585{
5503 struct drm_i915_private *dev_priv = dev->dev_private; 5586 struct drm_i915_private *dev_priv = dev->dev_private;
5504 struct i915_power_well *power_well = &dev_priv->power_well; 5587 struct i915_power_well *power_well = &dev_priv->power_well;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 91aea9e1ab6f..a583e8f718a7 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
539 goto log_fail; 539 goto log_fail;
540 540
541 while ((status == SDVO_CMD_STATUS_PENDING || 541 while ((status == SDVO_CMD_STATUS_PENDING ||
542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) { 542 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
543 if (retry < 10) 543 if (retry < 10)
544 msleep(15); 544 msleep(15);
545 else 545 else
@@ -1369,7 +1369,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1369 if (HAS_PCH_SPLIT(dev)) 1369 if (HAS_PCH_SPLIT(dev))
1370 ironlake_check_encoder_dotclock(pipe_config, dotclock); 1370 ironlake_check_encoder_dotclock(pipe_config, dotclock);
1371 1371
1372 pipe_config->adjusted_mode.clock = dotclock; 1372 pipe_config->adjusted_mode.crtc_clock = dotclock;
1373 1373
1374 /* Cross check the port pixel multiplier with the sdvo encoder state. */ 1374 /* Cross check the port pixel multiplier with the sdvo encoder state. */
1375 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, 1375 if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
@@ -1773,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1773{ 1773{
1774 struct edid *edid; 1774 struct edid *edid;
1775 1775
1776 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1777 connector->base.id, drm_get_connector_name(connector));
1778
1776 /* set the bus switch and get the modes */ 1779 /* set the bus switch and get the modes */
1777 edid = intel_sdvo_get_edid(connector); 1780 edid = intel_sdvo_get_edid(connector);
1778 1781
@@ -1868,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1868 uint32_t reply = 0, format_map = 0; 1871 uint32_t reply = 0, format_map = 0;
1869 int i; 1872 int i;
1870 1873
1874 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1875 connector->base.id, drm_get_connector_name(connector));
1876
1871 /* Read the list of supported input resolutions for the selected TV 1877 /* Read the list of supported input resolutions for the selected TV
1872 * format. 1878 * format.
1873 */ 1879 */
@@ -1902,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1902 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1908 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1903 struct drm_display_mode *newmode; 1909 struct drm_display_mode *newmode;
1904 1910
1911 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1912 connector->base.id, drm_get_connector_name(connector));
1913
1905 /* 1914 /*
1906 * Fetch modes from VBT. For SDVO prefer the VBT mode since some 1915 * Fetch modes from VBT. For SDVO prefer the VBT mode since some
1907 * SDVO->LVDS transcoders can't cope with the EDID mode. 1916 * SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1933,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1933 break; 1942 break;
1934 } 1943 }
1935 } 1944 }
1936
1937} 1945}
1938 1946
1939static int intel_sdvo_get_modes(struct drm_connector *connector) 1947static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -2001,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
2001 intel_sdvo_connector->tv_format); 2009 intel_sdvo_connector->tv_format);
2002 2010
2003 intel_sdvo_destroy_enhance_property(connector); 2011 intel_sdvo_destroy_enhance_property(connector);
2004 drm_sysfs_connector_remove(connector);
2005 drm_connector_cleanup(connector); 2012 drm_connector_cleanup(connector);
2006 kfree(intel_sdvo_connector); 2013 kfree(intel_sdvo_connector);
2007} 2014}
@@ -2397,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2397 struct intel_connector *intel_connector; 2404 struct intel_connector *intel_connector;
2398 struct intel_sdvo_connector *intel_sdvo_connector; 2405 struct intel_sdvo_connector *intel_sdvo_connector;
2399 2406
2400 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2407 DRM_DEBUG_KMS("initialising DVI device %d\n", device);
2408
2409 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2401 if (!intel_sdvo_connector) 2410 if (!intel_sdvo_connector)
2402 return false; 2411 return false;
2403 2412
@@ -2445,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2445 struct intel_connector *intel_connector; 2454 struct intel_connector *intel_connector;
2446 struct intel_sdvo_connector *intel_sdvo_connector; 2455 struct intel_sdvo_connector *intel_sdvo_connector;
2447 2456
2448 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2457 DRM_DEBUG_KMS("initialising TV type %d\n", type);
2458
2459 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2449 if (!intel_sdvo_connector) 2460 if (!intel_sdvo_connector)
2450 return false; 2461 return false;
2451 2462
@@ -2470,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2470 return true; 2481 return true;
2471 2482
2472err: 2483err:
2484 drm_sysfs_connector_remove(connector);
2473 intel_sdvo_destroy(connector); 2485 intel_sdvo_destroy(connector);
2474 return false; 2486 return false;
2475} 2487}
@@ -2482,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2482 struct intel_connector *intel_connector; 2494 struct intel_connector *intel_connector;
2483 struct intel_sdvo_connector *intel_sdvo_connector; 2495 struct intel_sdvo_connector *intel_sdvo_connector;
2484 2496
2485 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2497 DRM_DEBUG_KMS("initialising analog device %d\n", device);
2498
2499 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2486 if (!intel_sdvo_connector) 2500 if (!intel_sdvo_connector)
2487 return false; 2501 return false;
2488 2502
@@ -2513,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2513 struct intel_connector *intel_connector; 2527 struct intel_connector *intel_connector;
2514 struct intel_sdvo_connector *intel_sdvo_connector; 2528 struct intel_sdvo_connector *intel_sdvo_connector;
2515 2529
2516 intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL); 2530 DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
2531
2532 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
2517 if (!intel_sdvo_connector) 2533 if (!intel_sdvo_connector)
2518 return false; 2534 return false;
2519 2535
@@ -2537,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2537 return true; 2553 return true;
2538 2554
2539err: 2555err:
2556 drm_sysfs_connector_remove(connector);
2540 intel_sdvo_destroy(connector); 2557 intel_sdvo_destroy(connector);
2541 return false; 2558 return false;
2542} 2559}
@@ -2608,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2608 2625
2609 list_for_each_entry_safe(connector, tmp, 2626 list_for_each_entry_safe(connector, tmp,
2610 &dev->mode_config.connector_list, head) { 2627 &dev->mode_config.connector_list, head) {
2611 if (intel_attached_encoder(connector) == &intel_sdvo->base) 2628 if (intel_attached_encoder(connector) == &intel_sdvo->base) {
2629 drm_sysfs_connector_remove(connector);
2612 intel_sdvo_destroy(connector); 2630 intel_sdvo_destroy(connector);
2631 }
2613 } 2632 }
2614} 2633}
2615 2634
@@ -2879,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2879 struct intel_encoder *intel_encoder; 2898 struct intel_encoder *intel_encoder;
2880 struct intel_sdvo *intel_sdvo; 2899 struct intel_sdvo *intel_sdvo;
2881 int i; 2900 int i;
2882 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); 2901 intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
2883 if (!intel_sdvo) 2902 if (!intel_sdvo)
2884 return false; 2903 return false;
2885 2904
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 231b289e8e57..cae10bc746d0 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -1034,7 +1034,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1034 if (INTEL_INFO(dev)->gen < 5) 1034 if (INTEL_INFO(dev)->gen < 5)
1035 return -ENODEV; 1035 return -ENODEV;
1036 1036
1037 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); 1037 intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
1038 if (!intel_plane) 1038 if (!intel_plane)
1039 return -ENOMEM; 1039 return -ENOMEM;
1040 1040
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fa602d3b1cb7..d61aec23a523 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -912,7 +912,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
912 if (!tv_mode) 912 if (!tv_mode)
913 return false; 913 return false;
914 914
915 pipe_config->adjusted_mode.clock = tv_mode->clock; 915 pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 916 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
917 pipe_config->pipe_bpp = 8*3; 917 pipe_config->pipe_bpp = 8*3;
918 918
@@ -1433,7 +1433,6 @@ intel_tv_get_modes(struct drm_connector *connector)
1433static void 1433static void
1434intel_tv_destroy(struct drm_connector *connector) 1434intel_tv_destroy(struct drm_connector *connector)
1435{ 1435{
1436 drm_sysfs_connector_remove(connector);
1437 drm_connector_cleanup(connector); 1436 drm_connector_cleanup(connector);
1438 kfree(connector); 1437 kfree(connector);
1439} 1438}
@@ -1518,7 +1517,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1518static int tv_is_present_in_vbt(struct drm_device *dev) 1517static int tv_is_present_in_vbt(struct drm_device *dev)
1519{ 1518{
1520 struct drm_i915_private *dev_priv = dev->dev_private; 1519 struct drm_i915_private *dev_priv = dev->dev_private;
1521 struct child_device_config *p_child; 1520 union child_device_config *p_child;
1522 int i, ret; 1521 int i, ret;
1523 1522
1524 if (!dev_priv->vbt.child_dev_num) 1523 if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
1530 /* 1529 /*
1531 * If the device type is not TV, continue. 1530 * If the device type is not TV, continue.
1532 */ 1531 */
1533 if (p_child->device_type != DEVICE_TYPE_INT_TV && 1532 if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
1534 p_child->device_type != DEVICE_TYPE_TV) 1533 p_child->old.device_type != DEVICE_TYPE_TV)
1535 continue; 1534 continue;
1536 /* Only when the addin_offset is non-zero, it is regarded 1535 /* Only when the addin_offset is non-zero, it is regarded
1537 * as present. 1536 * as present.
1538 */ 1537 */
1539 if (p_child->addin_offset) { 1538 if (p_child->old.addin_offset) {
1540 ret = 1; 1539 ret = 1;
1541 break; 1540 break;
1542 } 1541 }
@@ -1590,12 +1589,12 @@ intel_tv_init(struct drm_device *dev)
1590 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1589 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1591 return; 1590 return;
1592 1591
1593 intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL); 1592 intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
1594 if (!intel_tv) { 1593 if (!intel_tv) {
1595 return; 1594 return;
1596 } 1595 }
1597 1596
1598 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 1597 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
1599 if (!intel_connector) { 1598 if (!intel_connector) {
1600 kfree(intel_tv); 1599 kfree(intel_tv);
1601 return; 1600 return;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 8649f1c36b00..288a3a654f06 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -204,6 +204,18 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
204 gen6_gt_check_fifodbg(dev_priv); 204 gen6_gt_check_fifodbg(dev_priv);
205} 205}
206 206
207static void gen6_force_wake_work(struct work_struct *work)
208{
209 struct drm_i915_private *dev_priv =
210 container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
211 unsigned long irqflags;
212
213 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
214 if (--dev_priv->uncore.forcewake_count == 0)
215 dev_priv->uncore.funcs.force_wake_put(dev_priv);
216 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
217}
218
207void intel_uncore_early_sanitize(struct drm_device *dev) 219void intel_uncore_early_sanitize(struct drm_device *dev)
208{ 220{
209 struct drm_i915_private *dev_priv = dev->dev_private; 221 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -216,6 +228,9 @@ void intel_uncore_init(struct drm_device *dev)
216{ 228{
217 struct drm_i915_private *dev_priv = dev->dev_private; 229 struct drm_i915_private *dev_priv = dev->dev_private;
218 230
231 INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
232 gen6_force_wake_work);
233
219 if (IS_VALLEYVIEW(dev)) { 234 if (IS_VALLEYVIEW(dev)) {
220 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; 235 dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
221 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; 236 dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
@@ -261,6 +276,16 @@ void intel_uncore_init(struct drm_device *dev)
261 } 276 }
262} 277}
263 278
279void intel_uncore_fini(struct drm_device *dev)
280{
281 struct drm_i915_private *dev_priv = dev->dev_private;
282
283 flush_delayed_work(&dev_priv->uncore.force_wake_work);
284
285 /* Paranoia: make sure we have disabled everything before we exit. */
286 intel_uncore_sanitize(dev);
287}
288
264static void intel_uncore_forcewake_reset(struct drm_device *dev) 289static void intel_uncore_forcewake_reset(struct drm_device *dev)
265{ 290{
266 struct drm_i915_private *dev_priv = dev->dev_private; 291 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -276,10 +301,26 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
276 301
277void intel_uncore_sanitize(struct drm_device *dev) 302void intel_uncore_sanitize(struct drm_device *dev)
278{ 303{
304 struct drm_i915_private *dev_priv = dev->dev_private;
305 u32 reg_val;
306
279 intel_uncore_forcewake_reset(dev); 307 intel_uncore_forcewake_reset(dev);
280 308
281 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 309 /* BIOS often leaves RC6 enabled, but disable it for hw init */
282 intel_disable_gt_powersave(dev); 310 intel_disable_gt_powersave(dev);
311
312 /* Turn off power gate, require especially for the BIOS less system */
313 if (IS_VALLEYVIEW(dev)) {
314
315 mutex_lock(&dev_priv->rps.hw_lock);
316 reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
317
318 if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
319 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
320
321 mutex_unlock(&dev_priv->rps.hw_lock);
322
323 }
283} 324}
284 325
285/* 326/*
@@ -306,8 +347,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
306 unsigned long irqflags; 347 unsigned long irqflags;
307 348
308 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 349 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
309 if (--dev_priv->uncore.forcewake_count == 0) 350 if (--dev_priv->uncore.forcewake_count == 0) {
310 dev_priv->uncore.funcs.force_wake_put(dev_priv); 351 dev_priv->uncore.forcewake_count++;
352 mod_delayed_work(dev_priv->wq,
353 &dev_priv->uncore.force_wake_work,
354 1);
355 }
311 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 356 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
312} 357}
313 358
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index d4fbf11360fe..0e3270c3ffd2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
326 regp->MiscOutReg = 0x23; /* +hsync +vsync */ 326 regp->MiscOutReg = 0x23; /* +hsync +vsync */
327 } 327 }
328 328
329 regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
330
331 /* 329 /*
332 * Time Sequencer 330 * Time Sequencer
333 */ 331 */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 9ba6a38f54be..2b954adf5bd4 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -433,6 +433,9 @@ struct drm_file {
433 struct drm_master *master; /* master this node is currently associated with 433 struct drm_master *master; /* master this node is currently associated with
434 N.B. not always minor->master */ 434 N.B. not always minor->master */
435 435
436 /* true when the client has asked us to expose stereo 3D mode flags */
437 bool stereo_allowed;
438
436 /** 439 /**
437 * fbs - List of framebuffers associated with this file. 440 * fbs - List of framebuffers associated with this file.
438 * 441 *
@@ -1294,6 +1297,8 @@ extern int drm_getstats(struct drm_device *dev, void *data,
1294 struct drm_file *file_priv); 1297 struct drm_file *file_priv);
1295extern int drm_getcap(struct drm_device *dev, void *data, 1298extern int drm_getcap(struct drm_device *dev, void *data,
1296 struct drm_file *file_priv); 1299 struct drm_file *file_priv);
1300extern int drm_setclientcap(struct drm_device *dev, void *data,
1301 struct drm_file *file_priv);
1297extern int drm_setversion(struct drm_device *dev, void *data, 1302extern int drm_setversion(struct drm_device *dev, void *data,
1298 struct drm_file *file_priv); 1303 struct drm_file *file_priv);
1299extern int drm_noop(struct drm_device *dev, void *data, 1304extern int drm_noop(struct drm_device *dev, void *data,
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 92e782060199..ba407f6b4f1f 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -108,6 +108,7 @@ enum drm_mode_status {
108 MODE_ONE_HEIGHT, /* only one height is supported */ 108 MODE_ONE_HEIGHT, /* only one height is supported */
109 MODE_ONE_SIZE, /* only one resolution is supported */ 109 MODE_ONE_SIZE, /* only one resolution is supported */
110 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ 110 MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */
111 MODE_NO_STEREO, /* stereo modes not supported */
111 MODE_UNVERIFIED = -3, /* mode needs to reverified */ 112 MODE_UNVERIFIED = -3, /* mode needs to reverified */
112 MODE_BAD = -2, /* unspecified reason */ 113 MODE_BAD = -2, /* unspecified reason */
113 MODE_ERROR = -1 /* error condition */ 114 MODE_ERROR = -1 /* error condition */
@@ -124,7 +125,10 @@ enum drm_mode_status {
124 .vscan = (vs), .flags = (f), \ 125 .vscan = (vs), .flags = (f), \
125 .base.type = DRM_MODE_OBJECT_MODE 126 .base.type = DRM_MODE_OBJECT_MODE
126 127
127#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ 128#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
129#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
130
131#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
128 132
129struct drm_display_mode { 133struct drm_display_mode {
130 /* Header */ 134 /* Header */
@@ -155,8 +159,7 @@ struct drm_display_mode {
155 int height_mm; 159 int height_mm;
156 160
157 /* Actual mode we give to hw */ 161 /* Actual mode we give to hw */
158 int clock_index; 162 int crtc_clock; /* in KHz */
159 int synth_clock;
160 int crtc_hdisplay; 163 int crtc_hdisplay;
161 int crtc_hblank_start; 164 int crtc_hblank_start;
162 int crtc_hblank_end; 165 int crtc_hblank_end;
@@ -180,6 +183,11 @@ struct drm_display_mode {
180 int hsync; /* in kHz */ 183 int hsync; /* in kHz */
181}; 184};
182 185
186static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
187{
188 return mode->flags & DRM_MODE_FLAG_3D_MASK;
189}
190
183enum drm_connector_status { 191enum drm_connector_status {
184 connector_status_connected = 1, 192 connector_status_connected = 1,
185 connector_status_disconnected = 2, 193 connector_status_disconnected = 2,
@@ -597,6 +605,7 @@ struct drm_connector {
597 int connector_type_id; 605 int connector_type_id;
598 bool interlace_allowed; 606 bool interlace_allowed;
599 bool doublescan_allowed; 607 bool doublescan_allowed;
608 bool stereo_allowed;
600 struct list_head modes; /* list of modes on this connector */ 609 struct list_head modes; /* list of modes on this connector */
601 610
602 enum drm_connector_status status; 611 enum drm_connector_status status;
@@ -976,7 +985,7 @@ extern void drm_mode_config_reset(struct drm_device *dev);
976extern void drm_mode_config_cleanup(struct drm_device *dev); 985extern void drm_mode_config_cleanup(struct drm_device *dev);
977extern void drm_mode_set_name(struct drm_display_mode *mode); 986extern void drm_mode_set_name(struct drm_display_mode *mode);
978extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); 987extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
979extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); 988extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
980extern int drm_mode_width(const struct drm_display_mode *mode); 989extern int drm_mode_width(const struct drm_display_mode *mode);
981extern int drm_mode_height(const struct drm_display_mode *mode); 990extern int drm_mode_height(const struct drm_display_mode *mode);
982 991
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index ece867889cc7..9b24d65fed72 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -611,12 +611,37 @@ struct drm_gem_open {
611 __u64 size; 611 __u64 size;
612}; 612};
613 613
614#define DRM_CAP_DUMB_BUFFER 0x1
615#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
616#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
617#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
618#define DRM_CAP_PRIME 0x5
619#define DRM_PRIME_CAP_IMPORT 0x1
620#define DRM_PRIME_CAP_EXPORT 0x2
621#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
622#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
623
614/** DRM_IOCTL_GET_CAP ioctl argument type */ 624/** DRM_IOCTL_GET_CAP ioctl argument type */
615struct drm_get_cap { 625struct drm_get_cap {
616 __u64 capability; 626 __u64 capability;
617 __u64 value; 627 __u64 value;
618}; 628};
619 629
630/**
631 * DRM_CLIENT_CAP_STEREO_3D
632 *
633 * if set to 1, the DRM core will expose the stereo 3D capabilities of the
634 * monitor by advertising the supported 3D layouts in the flags of struct
635 * drm_mode_modeinfo.
636 */
637#define DRM_CLIENT_CAP_STEREO_3D 1
638
639/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
640struct drm_set_client_cap {
641 __u64 capability;
642 __u64 value;
643};
644
620#define DRM_CLOEXEC O_CLOEXEC 645#define DRM_CLOEXEC O_CLOEXEC
621struct drm_prime_handle { 646struct drm_prime_handle {
622 __u32 handle; 647 __u32 handle;
@@ -649,6 +674,7 @@ struct drm_prime_handle {
649#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) 674#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
650#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) 675#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
651#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap) 676#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
677#define DRM_IOCTL_SET_CLIENT_CAP DRM_IOW( 0x0d, struct drm_set_client_cap)
652 678
653#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 679#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
654#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) 680#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
@@ -774,17 +800,6 @@ struct drm_event_vblank {
774 __u32 reserved; 800 __u32 reserved;
775}; 801};
776 802
777#define DRM_CAP_DUMB_BUFFER 0x1
778#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
779#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
780#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
781#define DRM_CAP_PRIME 0x5
782#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
783#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
784
785#define DRM_PRIME_CAP_IMPORT 0x1
786#define DRM_PRIME_CAP_EXPORT 0x2
787
788/* typedef area */ 803/* typedef area */
789#ifndef __KERNEL__ 804#ifndef __KERNEL__
790typedef struct drm_clip_rect drm_clip_rect_t; 805typedef struct drm_clip_rect drm_clip_rect_t;
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 113d32457fa4..c2c4ace3db61 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -44,20 +44,35 @@
44 44
45/* Video mode flags */ 45/* Video mode flags */
46/* bit compatible with the xorg definitions. */ 46/* bit compatible with the xorg definitions. */
47#define DRM_MODE_FLAG_PHSYNC (1<<0) 47#define DRM_MODE_FLAG_PHSYNC (1<<0)
48#define DRM_MODE_FLAG_NHSYNC (1<<1) 48#define DRM_MODE_FLAG_NHSYNC (1<<1)
49#define DRM_MODE_FLAG_PVSYNC (1<<2) 49#define DRM_MODE_FLAG_PVSYNC (1<<2)
50#define DRM_MODE_FLAG_NVSYNC (1<<3) 50#define DRM_MODE_FLAG_NVSYNC (1<<3)
51#define DRM_MODE_FLAG_INTERLACE (1<<4) 51#define DRM_MODE_FLAG_INTERLACE (1<<4)
52#define DRM_MODE_FLAG_DBLSCAN (1<<5) 52#define DRM_MODE_FLAG_DBLSCAN (1<<5)
53#define DRM_MODE_FLAG_CSYNC (1<<6) 53#define DRM_MODE_FLAG_CSYNC (1<<6)
54#define DRM_MODE_FLAG_PCSYNC (1<<7) 54#define DRM_MODE_FLAG_PCSYNC (1<<7)
55#define DRM_MODE_FLAG_NCSYNC (1<<8) 55#define DRM_MODE_FLAG_NCSYNC (1<<8)
56#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ 56#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
57#define DRM_MODE_FLAG_BCAST (1<<10) 57#define DRM_MODE_FLAG_BCAST (1<<10)
58#define DRM_MODE_FLAG_PIXMUX (1<<11) 58#define DRM_MODE_FLAG_PIXMUX (1<<11)
59#define DRM_MODE_FLAG_DBLCLK (1<<12) 59#define DRM_MODE_FLAG_DBLCLK (1<<12)
60#define DRM_MODE_FLAG_CLKDIV2 (1<<13) 60#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
61 /*
62 * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
63 * (define not exposed to user space).
64 */
65#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
66#define DRM_MODE_FLAG_3D_NONE (0<<14)
67#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
68#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
69#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
70#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
71#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
72#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
73#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
74#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
75
61 76
62/* DPMS flags */ 77/* DPMS flags */
63/* bit compatible with the xorg definitions. */ 78/* bit compatible with the xorg definitions. */