author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-14 03:39:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-14 03:39:08 -0400
commit     2d65a9f48fcdf7866aab6457bc707ca233e0c791 (patch)
tree       f93e5838d6ac2e59434367f4ff905f7d9c45fc2b /drivers/gpu/drm/i915
parent     da92da3638a04894afdca8b99e973ddd20268471 (diff)
parent     dfda0df3426483cf5fc7441f23f318edbabecb03 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "This is the main git pull for the drm, I pretty much froze major pulls at
  -rc5/6 time, and haven't had much fallout, so will probably continue doing
  that.

  Lots of changes all over, big internal header cleanup to make it clear drm
  features are legacy things and what are things that modern KMS drivers
  should be using.  Also big move to use the new generic fences in all the
  TTM drivers.

  core:
      atomic prep work, vblank rework changes, allows immediate vblank disables
      major header reworking and cleanups to better delinate legacy interfaces
      from what KMS drivers should be using
      cursor planes locking fixes

  ttm:
      move to generic fences (affects all TTM drivers)
      ppc64 caching fixes

  radeon:
      userptr support, uvd for old asics, reset rework for fence changes
      better buffer placement changes, dpm feature enablement
      hdmi audio support fixes

  intel:
      Cherryview work, 180 degree rotation, skylake prep work, execlist
      command submission, full ppgtt prep work
      cursor improvements
      edid caching, vdd handling improvements

  nouveau:
      fence reworking
      kepler memory clock work
      gt21x clock work
      fan control improvements
      hdmi infoframe fixes
      DP audio

  ast:
      ppc64 fixes
      caching fix

  rcar:
      rcar-du DT support

  ipuv3:
      prep work for capture support

  msm:
      LVDS support for mdp4, new panel, gpu refactoring

  exynos:
      exynos3250 SoC support, drop bad mmap interface, mipi dsi changes, and
      component match support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (640 commits)
  drm/mst: rework payload table allocation to conform better.
  drm/ast: Fix HW cursor image
  drm/radeon/kv: add uvd/vce info to dpm debugfs output
  drm/radeon/ci: add uvd/vce info to dpm debugfs output
  drm/radeon: export reservation_object from dmabuf to ttm
  drm/radeon: cope with foreign fences inside the reservation object
  drm/radeon: cope with foreign fences inside display
  drm/core: use helper to check driver features
  drm/radeon/cik: write gfx ucode version to ucode addr reg
  drm/radeon/si: print full CS when we hit a packet 0
  drm/radeon: remove unecessary includes
  drm/radeon/combios: declare legacy_connector_convert as static
  drm/radeon/atombios: declare connector convert tables as static
  drm/radeon: drop btc_get_max_clock_from_voltage_dependency_table
  drm/radeon/dpm: drop clk/voltage dependency filters for BTC
  drm/radeon/dpm: drop clk/voltage dependency filters for CI
  drm/radeon/dpm: drop clk/voltage dependency filters for SI
  drm/radeon/dpm: drop clk/voltage dependency filters for NI
  drm/radeon: disable audio when we disable hdmi (v2)
  drm/radeon: split audio enable between eg and r600 (v2)
  ...
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile                     1
-rw-r--r--  drivers/gpu/drm/i915/dvo_ns2501.c                 560
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c            4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c               370
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c                   31
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c                   194
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                   222
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c                   370
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c           216
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c             4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c        227
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c               291
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h               32
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c      40
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h      47
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c            13
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c            11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c           31
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c             196
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c                   241
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c                6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h                   283
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c                 2
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c                 29
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h                 3
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c                  344
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c              1312
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c                   1255
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h                  38
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c                  40
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h                  2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_cmd.c              2
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c        38
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c              17
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c                  11
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c                83
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c                 168
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c                  1766
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h                  114
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c                 3
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c                29
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c                   818
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate.h          8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c           256
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h           46
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c               109
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c               6
47 files changed, 7172 insertions, 2717 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91bd167e1cb7..c1dd485aeb6c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
31 i915_gpu_error.o \ 31 i915_gpu_error.o \
32 i915_irq.o \ 32 i915_irq.o \
33 i915_trace_points.o \ 33 i915_trace_points.o \
34 intel_lrc.o \
34 intel_ringbuffer.o \ 35 intel_ringbuffer.o \
35 intel_uncore.o 36 intel_uncore.o
36 37
diff --git a/drivers/gpu/drm/i915/dvo_ns2501.c b/drivers/gpu/drm/i915/dvo_ns2501.c
index 74f2af7c2d3e..441630434d34 100644
--- a/drivers/gpu/drm/i915/dvo_ns2501.c
+++ b/drivers/gpu/drm/i915/dvo_ns2501.c
@@ -60,16 +60,297 @@
60 60
61#define NS2501_REGC 0x0c 61#define NS2501_REGC 0x0c
62 62
63enum {
64 MODE_640x480,
65 MODE_800x600,
66 MODE_1024x768,
67};
68
69struct ns2501_reg {
70 uint8_t offset;
71 uint8_t value;
72};
73
74/*
75 * Magic values based on what the BIOS on
76 * Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
77 */
78static const struct ns2501_reg regs_1024x768[][86] = {
79 [MODE_640x480] = {
80 [0] = { .offset = 0x0a, .value = 0x81, },
81 [1] = { .offset = 0x18, .value = 0x07, },
82 [2] = { .offset = 0x19, .value = 0x00, },
83 [3] = { .offset = 0x1a, .value = 0x00, },
84 [4] = { .offset = 0x1b, .value = 0x11, },
85 [5] = { .offset = 0x1c, .value = 0x54, },
86 [6] = { .offset = 0x1d, .value = 0x03, },
87 [7] = { .offset = 0x1e, .value = 0x02, },
88 [8] = { .offset = 0xf3, .value = 0x90, },
89 [9] = { .offset = 0xf9, .value = 0x00, },
90 [10] = { .offset = 0xc1, .value = 0x90, },
91 [11] = { .offset = 0xc2, .value = 0x00, },
92 [12] = { .offset = 0xc3, .value = 0x0f, },
93 [13] = { .offset = 0xc4, .value = 0x03, },
94 [14] = { .offset = 0xc5, .value = 0x16, },
95 [15] = { .offset = 0xc6, .value = 0x00, },
96 [16] = { .offset = 0xc7, .value = 0x02, },
97 [17] = { .offset = 0xc8, .value = 0x02, },
98 [18] = { .offset = 0xf4, .value = 0x00, },
99 [19] = { .offset = 0x80, .value = 0xff, },
100 [20] = { .offset = 0x81, .value = 0x07, },
101 [21] = { .offset = 0x82, .value = 0x3d, },
102 [22] = { .offset = 0x83, .value = 0x05, },
103 [23] = { .offset = 0x94, .value = 0x00, },
104 [24] = { .offset = 0x95, .value = 0x00, },
105 [25] = { .offset = 0x96, .value = 0x05, },
106 [26] = { .offset = 0x97, .value = 0x00, },
107 [27] = { .offset = 0x9a, .value = 0x88, },
108 [28] = { .offset = 0x9b, .value = 0x00, },
109 [29] = { .offset = 0x98, .value = 0x00, },
110 [30] = { .offset = 0x99, .value = 0x00, },
111 [31] = { .offset = 0xf7, .value = 0x88, },
112 [32] = { .offset = 0xf8, .value = 0x0a, },
113 [33] = { .offset = 0x9c, .value = 0x24, },
114 [34] = { .offset = 0x9d, .value = 0x00, },
115 [35] = { .offset = 0x9e, .value = 0x25, },
116 [36] = { .offset = 0x9f, .value = 0x03, },
117 [37] = { .offset = 0xa0, .value = 0x28, },
118 [38] = { .offset = 0xa1, .value = 0x01, },
119 [39] = { .offset = 0xa2, .value = 0x28, },
120 [40] = { .offset = 0xa3, .value = 0x05, },
121 [41] = { .offset = 0xb6, .value = 0x09, },
122 [42] = { .offset = 0xb8, .value = 0x00, },
123 [43] = { .offset = 0xb9, .value = 0xa0, },
124 [44] = { .offset = 0xba, .value = 0x00, },
125 [45] = { .offset = 0xbb, .value = 0x20, },
126 [46] = { .offset = 0x10, .value = 0x00, },
127 [47] = { .offset = 0x11, .value = 0xa0, },
128 [48] = { .offset = 0x12, .value = 0x02, },
129 [49] = { .offset = 0x20, .value = 0x00, },
130 [50] = { .offset = 0x22, .value = 0x00, },
131 [51] = { .offset = 0x23, .value = 0x00, },
132 [52] = { .offset = 0x24, .value = 0x00, },
133 [53] = { .offset = 0x25, .value = 0x00, },
134 [54] = { .offset = 0x8c, .value = 0x10, },
135 [55] = { .offset = 0x8d, .value = 0x02, },
136 [56] = { .offset = 0x8e, .value = 0x10, },
137 [57] = { .offset = 0x8f, .value = 0x00, },
138 [58] = { .offset = 0x90, .value = 0xff, },
139 [59] = { .offset = 0x91, .value = 0x07, },
140 [60] = { .offset = 0x92, .value = 0xa0, },
141 [61] = { .offset = 0x93, .value = 0x02, },
142 [62] = { .offset = 0xa5, .value = 0x00, },
143 [63] = { .offset = 0xa6, .value = 0x00, },
144 [64] = { .offset = 0xa7, .value = 0x00, },
145 [65] = { .offset = 0xa8, .value = 0x00, },
146 [66] = { .offset = 0xa9, .value = 0x04, },
147 [67] = { .offset = 0xaa, .value = 0x70, },
148 [68] = { .offset = 0xab, .value = 0x4f, },
149 [69] = { .offset = 0xac, .value = 0x00, },
150 [70] = { .offset = 0xa4, .value = 0x84, },
151 [71] = { .offset = 0x7e, .value = 0x18, },
152 [72] = { .offset = 0x84, .value = 0x00, },
153 [73] = { .offset = 0x85, .value = 0x00, },
154 [74] = { .offset = 0x86, .value = 0x00, },
155 [75] = { .offset = 0x87, .value = 0x00, },
156 [76] = { .offset = 0x88, .value = 0x00, },
157 [77] = { .offset = 0x89, .value = 0x00, },
158 [78] = { .offset = 0x8a, .value = 0x00, },
159 [79] = { .offset = 0x8b, .value = 0x00, },
160 [80] = { .offset = 0x26, .value = 0x00, },
161 [81] = { .offset = 0x27, .value = 0x00, },
162 [82] = { .offset = 0xad, .value = 0x00, },
163 [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
164 [84] = { .offset = 0x41, .value = 0x00, },
165 [85] = { .offset = 0xc0, .value = 0x05, },
166 },
167 [MODE_800x600] = {
168 [0] = { .offset = 0x0a, .value = 0x81, },
169 [1] = { .offset = 0x18, .value = 0x07, },
170 [2] = { .offset = 0x19, .value = 0x00, },
171 [3] = { .offset = 0x1a, .value = 0x00, },
172 [4] = { .offset = 0x1b, .value = 0x19, },
173 [5] = { .offset = 0x1c, .value = 0x64, },
174 [6] = { .offset = 0x1d, .value = 0x02, },
175 [7] = { .offset = 0x1e, .value = 0x02, },
176 [8] = { .offset = 0xf3, .value = 0x90, },
177 [9] = { .offset = 0xf9, .value = 0x00, },
178 [10] = { .offset = 0xc1, .value = 0xd7, },
179 [11] = { .offset = 0xc2, .value = 0x00, },
180 [12] = { .offset = 0xc3, .value = 0xf8, },
181 [13] = { .offset = 0xc4, .value = 0x03, },
182 [14] = { .offset = 0xc5, .value = 0x1a, },
183 [15] = { .offset = 0xc6, .value = 0x00, },
184 [16] = { .offset = 0xc7, .value = 0x73, },
185 [17] = { .offset = 0xc8, .value = 0x02, },
186 [18] = { .offset = 0xf4, .value = 0x00, },
187 [19] = { .offset = 0x80, .value = 0x27, },
188 [20] = { .offset = 0x81, .value = 0x03, },
189 [21] = { .offset = 0x82, .value = 0x41, },
190 [22] = { .offset = 0x83, .value = 0x05, },
191 [23] = { .offset = 0x94, .value = 0x00, },
192 [24] = { .offset = 0x95, .value = 0x00, },
193 [25] = { .offset = 0x96, .value = 0x05, },
194 [26] = { .offset = 0x97, .value = 0x00, },
195 [27] = { .offset = 0x9a, .value = 0x88, },
196 [28] = { .offset = 0x9b, .value = 0x00, },
197 [29] = { .offset = 0x98, .value = 0x00, },
198 [30] = { .offset = 0x99, .value = 0x00, },
199 [31] = { .offset = 0xf7, .value = 0x88, },
200 [32] = { .offset = 0xf8, .value = 0x06, },
201 [33] = { .offset = 0x9c, .value = 0x23, },
202 [34] = { .offset = 0x9d, .value = 0x00, },
203 [35] = { .offset = 0x9e, .value = 0x25, },
204 [36] = { .offset = 0x9f, .value = 0x03, },
205 [37] = { .offset = 0xa0, .value = 0x28, },
206 [38] = { .offset = 0xa1, .value = 0x01, },
207 [39] = { .offset = 0xa2, .value = 0x28, },
208 [40] = { .offset = 0xa3, .value = 0x05, },
209 [41] = { .offset = 0xb6, .value = 0x09, },
210 [42] = { .offset = 0xb8, .value = 0x30, },
211 [43] = { .offset = 0xb9, .value = 0xc8, },
212 [44] = { .offset = 0xba, .value = 0x00, },
213 [45] = { .offset = 0xbb, .value = 0x20, },
214 [46] = { .offset = 0x10, .value = 0x20, },
215 [47] = { .offset = 0x11, .value = 0xc8, },
216 [48] = { .offset = 0x12, .value = 0x02, },
217 [49] = { .offset = 0x20, .value = 0x00, },
218 [50] = { .offset = 0x22, .value = 0x00, },
219 [51] = { .offset = 0x23, .value = 0x00, },
220 [52] = { .offset = 0x24, .value = 0x00, },
221 [53] = { .offset = 0x25, .value = 0x00, },
222 [54] = { .offset = 0x8c, .value = 0x10, },
223 [55] = { .offset = 0x8d, .value = 0x02, },
224 [56] = { .offset = 0x8e, .value = 0x04, },
225 [57] = { .offset = 0x8f, .value = 0x00, },
226 [58] = { .offset = 0x90, .value = 0xff, },
227 [59] = { .offset = 0x91, .value = 0x07, },
228 [60] = { .offset = 0x92, .value = 0xa0, },
229 [61] = { .offset = 0x93, .value = 0x02, },
230 [62] = { .offset = 0xa5, .value = 0x00, },
231 [63] = { .offset = 0xa6, .value = 0x00, },
232 [64] = { .offset = 0xa7, .value = 0x00, },
233 [65] = { .offset = 0xa8, .value = 0x00, },
234 [66] = { .offset = 0xa9, .value = 0x83, },
235 [67] = { .offset = 0xaa, .value = 0x40, },
236 [68] = { .offset = 0xab, .value = 0x32, },
237 [69] = { .offset = 0xac, .value = 0x00, },
238 [70] = { .offset = 0xa4, .value = 0x80, },
239 [71] = { .offset = 0x7e, .value = 0x18, },
240 [72] = { .offset = 0x84, .value = 0x00, },
241 [73] = { .offset = 0x85, .value = 0x00, },
242 [74] = { .offset = 0x86, .value = 0x00, },
243 [75] = { .offset = 0x87, .value = 0x00, },
244 [76] = { .offset = 0x88, .value = 0x00, },
245 [77] = { .offset = 0x89, .value = 0x00, },
246 [78] = { .offset = 0x8a, .value = 0x00, },
247 [79] = { .offset = 0x8b, .value = 0x00, },
248 [80] = { .offset = 0x26, .value = 0x00, },
249 [81] = { .offset = 0x27, .value = 0x00, },
250 [82] = { .offset = 0xad, .value = 0x00, },
251 [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
252 [84] = { .offset = 0x41, .value = 0x00, },
253 [85] = { .offset = 0xc0, .value = 0x07, },
254 },
255 [MODE_1024x768] = {
256 [0] = { .offset = 0x0a, .value = 0x81, },
257 [1] = { .offset = 0x18, .value = 0x07, },
258 [2] = { .offset = 0x19, .value = 0x00, },
259 [3] = { .offset = 0x1a, .value = 0x00, },
260 [4] = { .offset = 0x1b, .value = 0x11, },
261 [5] = { .offset = 0x1c, .value = 0x54, },
262 [6] = { .offset = 0x1d, .value = 0x03, },
263 [7] = { .offset = 0x1e, .value = 0x02, },
264 [8] = { .offset = 0xf3, .value = 0x90, },
265 [9] = { .offset = 0xf9, .value = 0x00, },
266 [10] = { .offset = 0xc1, .value = 0x90, },
267 [11] = { .offset = 0xc2, .value = 0x00, },
268 [12] = { .offset = 0xc3, .value = 0x0f, },
269 [13] = { .offset = 0xc4, .value = 0x03, },
270 [14] = { .offset = 0xc5, .value = 0x16, },
271 [15] = { .offset = 0xc6, .value = 0x00, },
272 [16] = { .offset = 0xc7, .value = 0x02, },
273 [17] = { .offset = 0xc8, .value = 0x02, },
274 [18] = { .offset = 0xf4, .value = 0x00, },
275 [19] = { .offset = 0x80, .value = 0xff, },
276 [20] = { .offset = 0x81, .value = 0x07, },
277 [21] = { .offset = 0x82, .value = 0x3d, },
278 [22] = { .offset = 0x83, .value = 0x05, },
279 [23] = { .offset = 0x94, .value = 0x00, },
280 [24] = { .offset = 0x95, .value = 0x00, },
281 [25] = { .offset = 0x96, .value = 0x05, },
282 [26] = { .offset = 0x97, .value = 0x00, },
283 [27] = { .offset = 0x9a, .value = 0x88, },
284 [28] = { .offset = 0x9b, .value = 0x00, },
285 [29] = { .offset = 0x98, .value = 0x00, },
286 [30] = { .offset = 0x99, .value = 0x00, },
287 [31] = { .offset = 0xf7, .value = 0x88, },
288 [32] = { .offset = 0xf8, .value = 0x0a, },
289 [33] = { .offset = 0x9c, .value = 0x24, },
290 [34] = { .offset = 0x9d, .value = 0x00, },
291 [35] = { .offset = 0x9e, .value = 0x25, },
292 [36] = { .offset = 0x9f, .value = 0x03, },
293 [37] = { .offset = 0xa0, .value = 0x28, },
294 [38] = { .offset = 0xa1, .value = 0x01, },
295 [39] = { .offset = 0xa2, .value = 0x28, },
296 [40] = { .offset = 0xa3, .value = 0x05, },
297 [41] = { .offset = 0xb6, .value = 0x09, },
298 [42] = { .offset = 0xb8, .value = 0x00, },
299 [43] = { .offset = 0xb9, .value = 0xa0, },
300 [44] = { .offset = 0xba, .value = 0x00, },
301 [45] = { .offset = 0xbb, .value = 0x20, },
302 [46] = { .offset = 0x10, .value = 0x00, },
303 [47] = { .offset = 0x11, .value = 0xa0, },
304 [48] = { .offset = 0x12, .value = 0x02, },
305 [49] = { .offset = 0x20, .value = 0x00, },
306 [50] = { .offset = 0x22, .value = 0x00, },
307 [51] = { .offset = 0x23, .value = 0x00, },
308 [52] = { .offset = 0x24, .value = 0x00, },
309 [53] = { .offset = 0x25, .value = 0x00, },
310 [54] = { .offset = 0x8c, .value = 0x10, },
311 [55] = { .offset = 0x8d, .value = 0x02, },
312 [56] = { .offset = 0x8e, .value = 0x10, },
313 [57] = { .offset = 0x8f, .value = 0x00, },
314 [58] = { .offset = 0x90, .value = 0xff, },
315 [59] = { .offset = 0x91, .value = 0x07, },
316 [60] = { .offset = 0x92, .value = 0xa0, },
317 [61] = { .offset = 0x93, .value = 0x02, },
318 [62] = { .offset = 0xa5, .value = 0x00, },
319 [63] = { .offset = 0xa6, .value = 0x00, },
320 [64] = { .offset = 0xa7, .value = 0x00, },
321 [65] = { .offset = 0xa8, .value = 0x00, },
322 [66] = { .offset = 0xa9, .value = 0x04, },
323 [67] = { .offset = 0xaa, .value = 0x70, },
324 [68] = { .offset = 0xab, .value = 0x4f, },
325 [69] = { .offset = 0xac, .value = 0x00, },
326 [70] = { .offset = 0xa4, .value = 0x84, },
327 [71] = { .offset = 0x7e, .value = 0x18, },
328 [72] = { .offset = 0x84, .value = 0x00, },
329 [73] = { .offset = 0x85, .value = 0x00, },
330 [74] = { .offset = 0x86, .value = 0x00, },
331 [75] = { .offset = 0x87, .value = 0x00, },
332 [76] = { .offset = 0x88, .value = 0x00, },
333 [77] = { .offset = 0x89, .value = 0x00, },
334 [78] = { .offset = 0x8a, .value = 0x00, },
335 [79] = { .offset = 0x8b, .value = 0x00, },
336 [80] = { .offset = 0x26, .value = 0x00, },
337 [81] = { .offset = 0x27, .value = 0x00, },
338 [82] = { .offset = 0xad, .value = 0x00, },
339 [83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
340 [84] = { .offset = 0x41, .value = 0x00, },
341 [85] = { .offset = 0xc0, .value = 0x01, },
342 },
343};
344
345static const struct ns2501_reg regs_init[] = {
346 [0] = { .offset = 0x35, .value = 0xff, },
347 [1] = { .offset = 0x34, .value = 0x00, },
348 [2] = { .offset = 0x08, .value = 0x30, },
349};
350
63struct ns2501_priv { 351struct ns2501_priv {
64 //I2CDevRec d;
65 bool quiet; 352 bool quiet;
66 int reg_8_shadow; 353 const struct ns2501_reg *regs;
67 int reg_8_set;
68 // Shadow registers for i915
69 int dvoc;
70 int pll_a;
71 int srcdim;
72 int fw_blc;
73}; 354};
74 355
75#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr)) 356#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
@@ -205,11 +486,9 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
205 goto out; 486 goto out;
206 } 487 }
207 ns->quiet = false; 488 ns->quiet = false;
208 ns->reg_8_set = 0;
209 ns->reg_8_shadow =
210 NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
211 489
212 DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n"); 490 DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
491
213 return true; 492 return true;
214 493
215out: 494out:
@@ -242,9 +521,9 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
242 * of the panel in here so we could always accept it 521 * of the panel in here so we could always accept it
243 * by disabling the scaler. 522 * by disabling the scaler.
244 */ 523 */
245 if ((mode->hdisplay == 800 && mode->vdisplay == 600) || 524 if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
246 (mode->hdisplay == 640 && mode->vdisplay == 480) || 525 (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
247 (mode->hdisplay == 1024 && mode->vdisplay == 768)) { 526 (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
248 return MODE_OK; 527 return MODE_OK;
249 } else { 528 } else {
250 return MODE_ONE_SIZE; /* Is this a reasonable error? */ 529 return MODE_ONE_SIZE; /* Is this a reasonable error? */
@@ -255,180 +534,30 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
255 struct drm_display_mode *mode, 534 struct drm_display_mode *mode,
256 struct drm_display_mode *adjusted_mode) 535 struct drm_display_mode *adjusted_mode)
257{ 536{
258 bool ok;
259 int retries = 10;
260 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); 537 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
538 int mode_idx, i;
261 539
262 DRM_DEBUG_KMS 540 DRM_DEBUG_KMS
263 ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n", 541 ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
264 mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal); 542 mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
265 543
266 /* 544 if (mode->hdisplay == 640 && mode->vdisplay == 480)
267 * Where do I find the native resolution for which scaling is not required??? 545 mode_idx = MODE_640x480;
268 * 546 else if (mode->hdisplay == 800 && mode->vdisplay == 600)
269 * First trigger the DVO on as otherwise the chip does not appear on the i2c 547 mode_idx = MODE_800x600;
270 * bus. 548 else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
271 */ 549 mode_idx = MODE_1024x768;
272 do { 550 else
273 ok = true; 551 return;
274
275 if (mode->hdisplay == 800 && mode->vdisplay == 600) {
276 /* mode 277 */
277 ns->reg_8_shadow &= ~NS2501_8_BPAS;
278 DRM_DEBUG_KMS("switching to 800x600\n");
279
280 /*
281 * No, I do not know where this data comes from.
282 * It is just what the video bios left in the DVO, so
283 * I'm just copying it here over.
284 * This also means that I cannot support any other modes
285 * except the ones supported by the bios.
286 */
287 ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
288 ok &= ns2501_writeb(dvo, 0x1b, 0x19);
289 ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
290 ok &= ns2501_writeb(dvo, 0x1d, 0x02);
291
292 ok &= ns2501_writeb(dvo, 0x34, 0x03);
293 ok &= ns2501_writeb(dvo, 0x35, 0xff);
294 552
295 ok &= ns2501_writeb(dvo, 0x80, 0x27); 553 /* Hopefully doing it every time won't hurt... */
296 ok &= ns2501_writeb(dvo, 0x81, 0x03); 554 for (i = 0; i < ARRAY_SIZE(regs_init); i++)
297 ok &= ns2501_writeb(dvo, 0x82, 0x41); 555 ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
298 ok &= ns2501_writeb(dvo, 0x83, 0x05);
299 556
300 ok &= ns2501_writeb(dvo, 0x8d, 0x02); 557 ns->regs = regs_1024x768[mode_idx];
301 ok &= ns2501_writeb(dvo, 0x8e, 0x04);
302 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
303 558
304 ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */ 559 for (i = 0; i < 84; i++)
305 ok &= ns2501_writeb(dvo, 0x91, 0x07); 560 ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
306 ok &= ns2501_writeb(dvo, 0x94, 0x00);
307 ok &= ns2501_writeb(dvo, 0x95, 0x00);
308
309 ok &= ns2501_writeb(dvo, 0x96, 0x00);
310
311 ok &= ns2501_writeb(dvo, 0x99, 0x00);
312 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
313
314 ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
315 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
316 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
317 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
318
319 ok &= ns2501_writeb(dvo, 0xa4, 0x80);
320
321 ok &= ns2501_writeb(dvo, 0xb6, 0x00);
322
323 ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
324 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
325
326 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
327 ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
328
329 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
330 ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
331
332 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
333 ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
334
335 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
336 ok &= ns2501_writeb(dvo, 0xc7, 0x73);
337 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
338
339 } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
340 /* mode 274 */
341 DRM_DEBUG_KMS("switching to 640x480\n");
342 /*
343 * No, I do not know where this data comes from.
344 * It is just what the video bios left in the DVO, so
345 * I'm just copying it here over.
346 * This also means that I cannot support any other modes
347 * except the ones supported by the bios.
348 */
349 ns->reg_8_shadow &= ~NS2501_8_BPAS;
350
351 ok &= ns2501_writeb(dvo, 0x11, 0xa0);
352 ok &= ns2501_writeb(dvo, 0x1b, 0x11);
353 ok &= ns2501_writeb(dvo, 0x1c, 0x54);
354 ok &= ns2501_writeb(dvo, 0x1d, 0x03);
355
356 ok &= ns2501_writeb(dvo, 0x34, 0x03);
357 ok &= ns2501_writeb(dvo, 0x35, 0xff);
358
359 ok &= ns2501_writeb(dvo, 0x80, 0xff);
360 ok &= ns2501_writeb(dvo, 0x81, 0x07);
361 ok &= ns2501_writeb(dvo, 0x82, 0x3d);
362 ok &= ns2501_writeb(dvo, 0x83, 0x05);
363
364 ok &= ns2501_writeb(dvo, 0x8d, 0x02);
365 ok &= ns2501_writeb(dvo, 0x8e, 0x10);
366 ok &= ns2501_writeb(dvo, 0x8f, 0x00);
367
368 ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
369 ok &= ns2501_writeb(dvo, 0x91, 0x07);
370 ok &= ns2501_writeb(dvo, 0x94, 0x00);
371 ok &= ns2501_writeb(dvo, 0x95, 0x00);
372
373 ok &= ns2501_writeb(dvo, 0x96, 0x05);
374
375 ok &= ns2501_writeb(dvo, 0x99, 0x00);
376 ok &= ns2501_writeb(dvo, 0x9a, 0x88);
377
378 ok &= ns2501_writeb(dvo, 0x9c, 0x24);
379 ok &= ns2501_writeb(dvo, 0x9d, 0x00);
380 ok &= ns2501_writeb(dvo, 0x9e, 0x25);
381 ok &= ns2501_writeb(dvo, 0x9f, 0x03);
382
383 ok &= ns2501_writeb(dvo, 0xa4, 0x84);
384
385 ok &= ns2501_writeb(dvo, 0xb6, 0x09);
386
387 ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
388 ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
389
390 ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
391 ok &= ns2501_writeb(dvo, 0xc1, 0x90);
392
393 ok &= ns2501_writeb(dvo, 0xc2, 0x00);
394 ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
395
396 ok &= ns2501_writeb(dvo, 0xc4, 0x03);
397 ok &= ns2501_writeb(dvo, 0xc5, 0x16);
398
399 ok &= ns2501_writeb(dvo, 0xc6, 0x00);
400 ok &= ns2501_writeb(dvo, 0xc7, 0x02);
401 ok &= ns2501_writeb(dvo, 0xc8, 0x02);
402
403 } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
404 /* mode 280 */
405 DRM_DEBUG_KMS("switching to 1024x768\n");
406 /*
407 * This might or might not work, actually. I'm silently
408 * assuming here that the native panel resolution is
409 * 1024x768. If not, then this leaves the scaler disabled
410 * generating a picture that is likely not the expected.
411 *
412 * Problem is that I do not know where to take the panel
413 * dimensions from.
414 *
415 * Enable the bypass, scaling not required.
416 *
417 * The scaler registers are irrelevant here....
418 *
419 */
420 ns->reg_8_shadow |= NS2501_8_BPAS;
421 ok &= ns2501_writeb(dvo, 0x37, 0x44);
422 } else {
423 /*
424 * Data not known. Bummer!
425 * Hopefully, the code should not go here
426 * as mode_OK delivered no other modes.
427 */
428 ns->reg_8_shadow |= NS2501_8_BPAS;
429 }
430 ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
431 } while (!ok && retries--);
432} 561}
433 562
434/* set the NS2501 power state */ 563/* set the NS2501 power state */
@@ -439,60 +568,46 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
439 if (!ns2501_readb(dvo, NS2501_REG8, &ch)) 568 if (!ns2501_readb(dvo, NS2501_REG8, &ch))
440 return false; 569 return false;
441 570
442 if (ch & NS2501_8_PD) 571 return ch & NS2501_8_PD;
443 return true;
444 else
445 return false;
446} 572}
447 573
448/* set the NS2501 power state */ 574/* set the NS2501 power state */
449static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable) 575static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
450{ 576{
451 bool ok;
452 int retries = 10;
453 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv); 577 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
454 unsigned char ch;
455 578
456 DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable); 579 DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
457 580
458 ch = ns->reg_8_shadow; 581 if (enable) {
582 if (WARN_ON(ns->regs[83].offset != 0x08 ||
583 ns->regs[84].offset != 0x41 ||
584 ns->regs[85].offset != 0xc0))
585 return;
459 586
460 if (enable) 587 ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
461 ch |= NS2501_8_PD;
462 else
463 ch &= ~NS2501_8_PD;
464
465 if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
466 ns->reg_8_set = 1;
467 ns->reg_8_shadow = ch;
468
469 do {
470 ok = true;
471 ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
472 ok &=
473 ns2501_writeb(dvo, 0x34,
474 enable ? 0x03 : 0x00);
475 ok &=
476 ns2501_writeb(dvo, 0x35,
477 enable ? 0xff : 0x00);
478 } while (!ok && retries--);
479 }
480}
481 588
482static void ns2501_dump_regs(struct intel_dvo_device *dvo) 589 ns2501_writeb(dvo, 0x41, ns->regs[84].value);
483{ 590
484 uint8_t val; 591 ns2501_writeb(dvo, 0x34, 0x01);
485 592 msleep(15);
486 ns2501_readb(dvo, NS2501_FREQ_LO, &val); 593
487 DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val); 594 ns2501_writeb(dvo, 0x08, 0x35);
488 ns2501_readb(dvo, NS2501_FREQ_HI, &val); 595 if (!(ns->regs[83].value & NS2501_8_BPAS))
489 DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val); 596 ns2501_writeb(dvo, 0x08, 0x31);
490 ns2501_readb(dvo, NS2501_REG8, &val); 597 msleep(200);
491 DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val); 598
492 ns2501_readb(dvo, NS2501_REG9, &val); 599 ns2501_writeb(dvo, 0x34, 0x03);
493 DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val); 600
494 ns2501_readb(dvo, NS2501_REGC, &val); 601 ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
495 DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val); 602 } else {
603 ns2501_writeb(dvo, 0x34, 0x01);
604 msleep(200);
605
606 ns2501_writeb(dvo, 0x08, 0x34);
607 msleep(15);
608
609 ns2501_writeb(dvo, 0x34, 0x00);
610 }
496} 611}
497 612
498static void ns2501_destroy(struct intel_dvo_device *dvo) 613static void ns2501_destroy(struct intel_dvo_device *dvo)
@@ -512,6 +627,5 @@ struct intel_dvo_dev_ops ns2501_ops = {
512 .mode_set = ns2501_mode_set, 627 .mode_set = ns2501_mode_set,
513 .dpms = ns2501_dpms, 628 .dpms = ns2501_dpms,
514 .get_hw_state = ns2501_get_hw_state, 629 .get_hw_state = ns2501_get_hw_state,
515 .dump_regs = ns2501_dump_regs,
516 .destroy = ns2501_destroy, 630 .destroy = ns2501_destroy,
517}; 631};
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 4b7ed5289217..593b657d3e59 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -844,8 +844,6 @@ finish:
844 */ 844 */
845bool i915_needs_cmd_parser(struct intel_engine_cs *ring) 845bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
846{ 846{
847 struct drm_i915_private *dev_priv = ring->dev->dev_private;
848
849 if (!ring->needs_cmd_parser) 847 if (!ring->needs_cmd_parser)
850 return false; 848 return false;
851 849
@@ -854,7 +852,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
854 * disabled. That will cause all of the parser's PPGTT checks to 852 * disabled. That will cause all of the parser's PPGTT checks to
855 * fail. For now, disable parsing when PPGTT is off. 853 * fail. For now, disable parsing when PPGTT is off.
856 */ 854 */
857 if (!dev_priv->mm.aliasing_ppgtt) 855 if (USES_PPGTT(ring->dev))
858 return false; 856 return false;
859 857
860 return (i915.enable_cmd_parser == 1); 858 return (i915.enable_cmd_parser == 1);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9e737b771c40..063b44817e08 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -136,7 +136,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 obj->last_read_seqno, 136 obj->last_read_seqno,
137 obj->last_write_seqno, 137 obj->last_write_seqno,
138 obj->last_fenced_seqno, 138 obj->last_fenced_seqno,
139 i915_cache_level_str(obj->cache_level), 139 i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
140 obj->dirty ? " dirty" : "", 140 obj->dirty ? " dirty" : "",
141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 141 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
142 if (obj->base.name) 142 if (obj->base.name)
@@ -333,7 +333,7 @@ static int per_file_stats(int id, void *ptr, void *data)
333 } 333 }
334 334
335 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base); 335 ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
336 if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv) 336 if (ppgtt->file_priv != stats->file_priv)
337 continue; 337 continue;
338 338
339 if (obj->ring) /* XXX per-vma statistic */ 339 if (obj->ring) /* XXX per-vma statistic */
@@ -515,6 +515,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
515{ 515{
516 struct drm_info_node *node = m->private; 516 struct drm_info_node *node = m->private;
517 struct drm_device *dev = node->minor->dev; 517 struct drm_device *dev = node->minor->dev;
518 struct drm_i915_private *dev_priv = dev->dev_private;
518 unsigned long flags; 519 unsigned long flags;
519 struct intel_crtc *crtc; 520 struct intel_crtc *crtc;
520 int ret; 521 int ret;
@@ -534,6 +535,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
534 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 535 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
535 pipe, plane); 536 pipe, plane);
536 } else { 537 } else {
538 u32 addr;
539
537 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 540 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
538 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 541 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
539 pipe, plane); 542 pipe, plane);
@@ -541,23 +544,35 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
541 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 544 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
542 pipe, plane); 545 pipe, plane);
543 } 546 }
547 if (work->flip_queued_ring) {
548 seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
549 work->flip_queued_ring->name,
550 work->flip_queued_seqno,
551 dev_priv->next_seqno,
552 work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
553 i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
554 work->flip_queued_seqno));
555 } else
556 seq_printf(m, "Flip not associated with any ring\n");
557 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
558 work->flip_queued_vblank,
559 work->flip_ready_vblank,
560 drm_vblank_count(dev, crtc->pipe));
544 if (work->enable_stall_check) 561 if (work->enable_stall_check)
545 seq_puts(m, "Stall check enabled, "); 562 seq_puts(m, "Stall check enabled, ");
546 else 563 else
547 seq_puts(m, "Stall check waiting for page flip ioctl, "); 564 seq_puts(m, "Stall check waiting for page flip ioctl, ");
548 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 565 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
549 566
550 if (work->old_fb_obj) { 567 if (INTEL_INFO(dev)->gen >= 4)
551 struct drm_i915_gem_object *obj = work->old_fb_obj; 568 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
552 if (obj) 569 else
553 seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", 570 addr = I915_READ(DSPADDR(crtc->plane));
554 i915_gem_obj_ggtt_offset(obj)); 571 seq_printf(m, "Current scanout address 0x%08x\n", addr);
555 } 572
556 if (work->pending_flip_obj) { 573 if (work->pending_flip_obj) {
557 struct drm_i915_gem_object *obj = work->pending_flip_obj; 574 seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
558 if (obj) 575 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
559 seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
560 i915_gem_obj_ggtt_offset(obj));
561 } 576 }
562 } 577 }
563 spin_unlock_irqrestore(&dev->event_lock, flags); 578 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -650,7 +665,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
650 intel_runtime_pm_get(dev_priv); 665 intel_runtime_pm_get(dev_priv);
651 666
652 if (IS_CHERRYVIEW(dev)) { 667 if (IS_CHERRYVIEW(dev)) {
653 int i;
654 seq_printf(m, "Master Interrupt Control:\t%08x\n", 668 seq_printf(m, "Master Interrupt Control:\t%08x\n",
655 I915_READ(GEN8_MASTER_IRQ)); 669 I915_READ(GEN8_MASTER_IRQ));
656 670
@@ -662,7 +676,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
662 I915_READ(VLV_IIR_RW)); 676 I915_READ(VLV_IIR_RW));
663 seq_printf(m, "Display IMR:\t%08x\n", 677 seq_printf(m, "Display IMR:\t%08x\n",
664 I915_READ(VLV_IMR)); 678 I915_READ(VLV_IMR));
665 for_each_pipe(pipe) 679 for_each_pipe(dev_priv, pipe)
666 seq_printf(m, "Pipe %c stat:\t%08x\n", 680 seq_printf(m, "Pipe %c stat:\t%08x\n",
667 pipe_name(pipe), 681 pipe_name(pipe),
668 I915_READ(PIPESTAT(pipe))); 682 I915_READ(PIPESTAT(pipe)));
@@ -702,7 +716,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
702 i, I915_READ(GEN8_GT_IER(i))); 716 i, I915_READ(GEN8_GT_IER(i)));
703 } 717 }
704 718
705 for_each_pipe(pipe) { 719 for_each_pipe(dev_priv, pipe) {
720 if (!intel_display_power_enabled(dev_priv,
721 POWER_DOMAIN_PIPE(pipe))) {
722 seq_printf(m, "Pipe %c power disabled\n",
723 pipe_name(pipe));
724 continue;
725 }
706 seq_printf(m, "Pipe %c IMR:\t%08x\n", 726 seq_printf(m, "Pipe %c IMR:\t%08x\n",
707 pipe_name(pipe), 727 pipe_name(pipe),
708 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 728 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
@@ -743,7 +763,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
743 I915_READ(VLV_IIR_RW)); 763 I915_READ(VLV_IIR_RW));
744 seq_printf(m, "Display IMR:\t%08x\n", 764 seq_printf(m, "Display IMR:\t%08x\n",
745 I915_READ(VLV_IMR)); 765 I915_READ(VLV_IMR));
746 for_each_pipe(pipe) 766 for_each_pipe(dev_priv, pipe)
747 seq_printf(m, "Pipe %c stat:\t%08x\n", 767 seq_printf(m, "Pipe %c stat:\t%08x\n",
748 pipe_name(pipe), 768 pipe_name(pipe),
749 I915_READ(PIPESTAT(pipe))); 769 I915_READ(PIPESTAT(pipe)));
@@ -779,7 +799,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
779 I915_READ(IIR)); 799 I915_READ(IIR));
780 seq_printf(m, "Interrupt mask: %08x\n", 800 seq_printf(m, "Interrupt mask: %08x\n",
781 I915_READ(IMR)); 801 I915_READ(IMR));
782 for_each_pipe(pipe) 802 for_each_pipe(dev_priv, pipe)
783 seq_printf(m, "Pipe %c stat: %08x\n", 803 seq_printf(m, "Pipe %c stat: %08x\n",
784 pipe_name(pipe), 804 pipe_name(pipe),
785 I915_READ(PIPESTAT(pipe))); 805 I915_READ(PIPESTAT(pipe)));
@@ -927,7 +947,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
927 ssize_t ret_count = 0; 947 ssize_t ret_count = 0;
928 int ret; 948 int ret;
929 949
930 ret = i915_error_state_buf_init(&error_str, count, *pos); 950 ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
931 if (ret) 951 if (ret)
932 return ret; 952 return ret;
933 953
@@ -1024,6 +1044,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1024 u32 rpstat, cagf, reqf; 1044 u32 rpstat, cagf, reqf;
1025 u32 rpupei, rpcurup, rpprevup; 1045 u32 rpupei, rpcurup, rpprevup;
1026 u32 rpdownei, rpcurdown, rpprevdown; 1046 u32 rpdownei, rpcurdown, rpprevdown;
1047 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1027 int max_freq; 1048 int max_freq;
1028 1049
1029 /* RPSTAT1 is in the GT power well */ 1050 /* RPSTAT1 is in the GT power well */
@@ -1061,12 +1082,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1061 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 1082 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
1062 mutex_unlock(&dev->struct_mutex); 1083 mutex_unlock(&dev->struct_mutex);
1063 1084
1085 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1086 pm_ier = I915_READ(GEN6_PMIER);
1087 pm_imr = I915_READ(GEN6_PMIMR);
1088 pm_isr = I915_READ(GEN6_PMISR);
1089 pm_iir = I915_READ(GEN6_PMIIR);
1090 pm_mask = I915_READ(GEN6_PMINTRMSK);
1091 } else {
1092 pm_ier = I915_READ(GEN8_GT_IER(2));
1093 pm_imr = I915_READ(GEN8_GT_IMR(2));
1094 pm_isr = I915_READ(GEN8_GT_ISR(2));
1095 pm_iir = I915_READ(GEN8_GT_IIR(2));
1096 pm_mask = I915_READ(GEN6_PMINTRMSK);
1097 }
1064 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1098 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1065 I915_READ(GEN6_PMIER), 1099 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1066 I915_READ(GEN6_PMIMR),
1067 I915_READ(GEN6_PMISR),
1068 I915_READ(GEN6_PMIIR),
1069 I915_READ(GEN6_PMINTRMSK));
1070 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1100 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1071 seq_printf(m, "Render p-state ratio: %d\n", 1101 seq_printf(m, "Render p-state ratio: %d\n",
1072 (gt_perf_status & 0xff00) >> 8); 1102 (gt_perf_status & 0xff00) >> 8);
@@ -1365,7 +1395,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
1365 1395
1366 if (IS_VALLEYVIEW(dev)) 1396 if (IS_VALLEYVIEW(dev))
1367 return vlv_drpc_info(m); 1397 return vlv_drpc_info(m);
1368 else if (IS_GEN6(dev) || IS_GEN7(dev)) 1398 else if (INTEL_INFO(dev)->gen >= 6)
1369 return gen6_drpc_info(m); 1399 return gen6_drpc_info(m);
1370 else 1400 else
1371 return ironlake_drpc_info(m); 1401 return ironlake_drpc_info(m);
@@ -1433,6 +1463,47 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1433 return 0; 1463 return 0;
1434} 1464}
1435 1465
1466static int i915_fbc_fc_get(void *data, u64 *val)
1467{
1468 struct drm_device *dev = data;
1469 struct drm_i915_private *dev_priv = dev->dev_private;
1470
1471 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1472 return -ENODEV;
1473
1474 drm_modeset_lock_all(dev);
1475 *val = dev_priv->fbc.false_color;
1476 drm_modeset_unlock_all(dev);
1477
1478 return 0;
1479}
1480
1481static int i915_fbc_fc_set(void *data, u64 val)
1482{
1483 struct drm_device *dev = data;
1484 struct drm_i915_private *dev_priv = dev->dev_private;
1485 u32 reg;
1486
1487 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1488 return -ENODEV;
1489
1490 drm_modeset_lock_all(dev);
1491
1492 reg = I915_READ(ILK_DPFC_CONTROL);
1493 dev_priv->fbc.false_color = val;
1494
1495 I915_WRITE(ILK_DPFC_CONTROL, val ?
1496 (reg | FBC_CTL_FALSE_COLOR) :
1497 (reg & ~FBC_CTL_FALSE_COLOR));
1498
1499 drm_modeset_unlock_all(dev);
1500 return 0;
1501}
1502
1503DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1504 i915_fbc_fc_get, i915_fbc_fc_set,
1505 "%llu\n");
1506
1436static int i915_ips_status(struct seq_file *m, void *unused) 1507static int i915_ips_status(struct seq_file *m, void *unused)
1437{ 1508{
1438 struct drm_info_node *node = m->private; 1509 struct drm_info_node *node = m->private;
@@ -1630,6 +1701,14 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1630 return 0; 1701 return 0;
1631} 1702}
1632 1703
1704static void describe_ctx_ringbuf(struct seq_file *m,
1705 struct intel_ringbuffer *ringbuf)
1706{
1707 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1708 ringbuf->space, ringbuf->head, ringbuf->tail,
1709 ringbuf->last_retired_head);
1710}
1711
1633static int i915_context_status(struct seq_file *m, void *unused) 1712static int i915_context_status(struct seq_file *m, void *unused)
1634{ 1713{
1635 struct drm_info_node *node = m->private; 1714 struct drm_info_node *node = m->private;
@@ -1656,16 +1735,168 @@ static int i915_context_status(struct seq_file *m, void *unused)
1656 } 1735 }
1657 1736
1658 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1737 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1659 if (ctx->legacy_hw_ctx.rcs_state == NULL) 1738 if (!i915.enable_execlists &&
1739 ctx->legacy_hw_ctx.rcs_state == NULL)
1660 continue; 1740 continue;
1661 1741
1662 seq_puts(m, "HW context "); 1742 seq_puts(m, "HW context ");
1663 describe_ctx(m, ctx); 1743 describe_ctx(m, ctx);
1664 for_each_ring(ring, dev_priv, i) 1744 for_each_ring(ring, dev_priv, i) {
1745 if (ring->default_context == ctx)
1746 seq_printf(m, "(default context %s) ",
1747 ring->name);
1748 }
1749
1750 if (i915.enable_execlists) {
1751 seq_putc(m, '\n');
1752 for_each_ring(ring, dev_priv, i) {
1753 struct drm_i915_gem_object *ctx_obj =
1754 ctx->engine[i].state;
1755 struct intel_ringbuffer *ringbuf =
1756 ctx->engine[i].ringbuf;
1757
1758 seq_printf(m, "%s: ", ring->name);
1759 if (ctx_obj)
1760 describe_obj(m, ctx_obj);
1761 if (ringbuf)
1762 describe_ctx_ringbuf(m, ringbuf);
1763 seq_putc(m, '\n');
1764 }
1765 } else {
1766 describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1767 }
1768
1769 seq_putc(m, '\n');
1770 }
1771
1772 mutex_unlock(&dev->struct_mutex);
1773
1774 return 0;
1775}
1776
1777static int i915_dump_lrc(struct seq_file *m, void *unused)
1778{
1779 struct drm_info_node *node = (struct drm_info_node *) m->private;
1780 struct drm_device *dev = node->minor->dev;
1781 struct drm_i915_private *dev_priv = dev->dev_private;
1782 struct intel_engine_cs *ring;
1783 struct intel_context *ctx;
1784 int ret, i;
1785
1786 if (!i915.enable_execlists) {
1787 seq_printf(m, "Logical Ring Contexts are disabled\n");
1788 return 0;
1789 }
1790
1791 ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 if (ret)
1793 return ret;
1794
1795 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1796 for_each_ring(ring, dev_priv, i) {
1797 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1798
1665 if (ring->default_context == ctx) 1799 if (ring->default_context == ctx)
1666 seq_printf(m, "(default context %s) ", ring->name); 1800 continue;
1801
1802 if (ctx_obj) {
1803 struct page *page = i915_gem_object_get_page(ctx_obj, 1);
1804 uint32_t *reg_state = kmap_atomic(page);
1805 int j;
1806
1807 seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1808 intel_execlists_ctx_id(ctx_obj));
1809
1810 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1811 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1812 i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
1813 reg_state[j], reg_state[j + 1],
1814 reg_state[j + 2], reg_state[j + 3]);
1815 }
1816 kunmap_atomic(reg_state);
1817
1818 seq_putc(m, '\n');
1819 }
1820 }
1821 }
1822
1823 mutex_unlock(&dev->struct_mutex);
1824
1825 return 0;
1826}
1827
1828static int i915_execlists(struct seq_file *m, void *data)
1829{
1830 struct drm_info_node *node = (struct drm_info_node *)m->private;
1831 struct drm_device *dev = node->minor->dev;
1832 struct drm_i915_private *dev_priv = dev->dev_private;
1833 struct intel_engine_cs *ring;
1834 u32 status_pointer;
1835 u8 read_pointer;
1836 u8 write_pointer;
1837 u32 status;
1838 u32 ctx_id;
1839 struct list_head *cursor;
1840 int ring_id, i;
1841 int ret;
1842
1843 if (!i915.enable_execlists) {
1844 seq_puts(m, "Logical Ring Contexts are disabled\n");
1845 return 0;
1846 }
1847
1848 ret = mutex_lock_interruptible(&dev->struct_mutex);
1849 if (ret)
1850 return ret;
1851
1852 for_each_ring(ring, dev_priv, ring_id) {
1853 struct intel_ctx_submit_request *head_req = NULL;
1854 int count = 0;
1855 unsigned long flags;
1856
1857 seq_printf(m, "%s\n", ring->name);
1858
1859 status = I915_READ(RING_EXECLIST_STATUS(ring));
1860 ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
1861 seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
1862 status, ctx_id);
1863
1864 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
1865 seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
1866
1867 read_pointer = ring->next_context_status_buffer;
1868 write_pointer = status_pointer & 0x07;
1869 if (read_pointer > write_pointer)
1870 write_pointer += 6;
1871 seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
1872 read_pointer, write_pointer);
1873
1874 for (i = 0; i < 6; i++) {
1875 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
1876 ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
1877
1878 seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
1879 i, status, ctx_id);
1880 }
1881
1882 spin_lock_irqsave(&ring->execlist_lock, flags);
1883 list_for_each(cursor, &ring->execlist_queue)
1884 count++;
1885 head_req = list_first_entry_or_null(&ring->execlist_queue,
1886 struct intel_ctx_submit_request, execlist_link);
1887 spin_unlock_irqrestore(&ring->execlist_lock, flags);
1888
1889 seq_printf(m, "\t%d requests in queue\n", count);
1890 if (head_req) {
1891 struct drm_i915_gem_object *ctx_obj;
1892
1893 ctx_obj = head_req->ctx->engine[ring_id].state;
1894 seq_printf(m, "\tHead request id: %u\n",
1895 intel_execlists_ctx_id(ctx_obj));
1896 seq_printf(m, "\tHead request tail: %u\n",
1897 head_req->tail);
1898 }
1667 1899
1668 describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1669 seq_putc(m, '\n'); 1900 seq_putc(m, '\n');
1670 } 1901 }
1671 1902
@@ -1774,7 +2005,13 @@ static int per_file_ctx(int id, void *ptr, void *data)
1774{ 2005{
1775 struct intel_context *ctx = ptr; 2006 struct intel_context *ctx = ptr;
1776 struct seq_file *m = data; 2007 struct seq_file *m = data;
1777 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx); 2008 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2009
2010 if (!ppgtt) {
2011 seq_printf(m, " no ppgtt for context %d\n",
2012 ctx->user_handle);
2013 return 0;
2014 }
1778 2015
1779 if (i915_gem_context_is_default(ctx)) 2016 if (i915_gem_context_is_default(ctx))
1780 seq_puts(m, " default context:\n"); 2017 seq_puts(m, " default context:\n");
@@ -1834,8 +2071,7 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1834 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); 2071 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1835 2072
1836 ppgtt->debug_dump(ppgtt, m); 2073 ppgtt->debug_dump(ppgtt, m);
1837 } else 2074 }
1838 return;
1839 2075
1840 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2076 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
1841 struct drm_i915_file_private *file_priv = file->driver_priv; 2077 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2406,6 +2642,40 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2406 return 0; 2642 return 0;
2407} 2643}
2408 2644
2645static int i915_wa_registers(struct seq_file *m, void *unused)
2646{
2647 int i;
2648 int ret;
2649 struct drm_info_node *node = (struct drm_info_node *) m->private;
2650 struct drm_device *dev = node->minor->dev;
2651 struct drm_i915_private *dev_priv = dev->dev_private;
2652
2653 ret = mutex_lock_interruptible(&dev->struct_mutex);
2654 if (ret)
2655 return ret;
2656
2657 intel_runtime_pm_get(dev_priv);
2658
2659 seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
2660 for (i = 0; i < dev_priv->num_wa_regs; ++i) {
2661 u32 addr, mask;
2662
2663 addr = dev_priv->intel_wa_regs[i].addr;
2664 mask = dev_priv->intel_wa_regs[i].mask;
2665 dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
2666 if (dev_priv->intel_wa_regs[i].addr)
2667 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2668 dev_priv->intel_wa_regs[i].addr,
2669 dev_priv->intel_wa_regs[i].value,
2670 dev_priv->intel_wa_regs[i].mask);
2671 }
2672
2673 intel_runtime_pm_put(dev_priv);
2674 mutex_unlock(&dev->struct_mutex);
2675
2676 return 0;
2677}
2678
2409struct pipe_crc_info { 2679struct pipe_crc_info {
2410 const char *name; 2680 const char *name;
2411 struct drm_device *dev; 2681 struct drm_device *dev;
@@ -2667,8 +2937,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2667 *source = INTEL_PIPE_CRC_SOURCE_PIPE; 2937 *source = INTEL_PIPE_CRC_SOURCE_PIPE;
2668 2938
2669 drm_modeset_lock_all(dev); 2939 drm_modeset_lock_all(dev);
2670 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 2940 for_each_intel_encoder(dev, encoder) {
2671 base.head) {
2672 if (!encoder->base.crtc) 2941 if (!encoder->base.crtc)
2673 continue; 2942 continue;
2674 2943
@@ -3557,9 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
3557{ 3826{
3558 struct drm_device *dev = data; 3827 struct drm_device *dev = data;
3559 struct drm_i915_private *dev_priv = dev->dev_private; 3828 struct drm_i915_private *dev_priv = dev->dev_private;
3560 struct drm_i915_gem_object *obj, *next;
3561 struct i915_address_space *vm;
3562 struct i915_vma *vma, *x;
3563 int ret; 3829 int ret;
3564 3830
3565 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 3831 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3579,29 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
3579 if (val & (DROP_RETIRE | DROP_ACTIVE)) 3845 if (val & (DROP_RETIRE | DROP_ACTIVE))
3580 i915_gem_retire_requests(dev); 3846 i915_gem_retire_requests(dev);
3581 3847
3582 if (val & DROP_BOUND) { 3848 if (val & DROP_BOUND)
3583 list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 3849 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
3584 list_for_each_entry_safe(vma, x, &vm->inactive_list,
3585 mm_list) {
3586 if (vma->pin_count)
3587 continue;
3588 3850
3589 ret = i915_vma_unbind(vma); 3851 if (val & DROP_UNBOUND)
3590 if (ret) 3852 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
3591 goto unlock;
3592 }
3593 }
3594 }
3595
3596 if (val & DROP_UNBOUND) {
3597 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
3598 global_list)
3599 if (obj->pages_pin_count == 0) {
3600 ret = i915_gem_object_put_pages(obj);
3601 if (ret)
3602 goto unlock;
3603 }
3604 }
3605 3853
3606unlock: 3854unlock:
3607 mutex_unlock(&dev->struct_mutex); 3855 mutex_unlock(&dev->struct_mutex);
@@ -3923,6 +4171,8 @@ static const struct drm_info_list i915_debugfs_list[] = {
3923 {"i915_opregion", i915_opregion, 0}, 4171 {"i915_opregion", i915_opregion, 0},
3924 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4172 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
3925 {"i915_context_status", i915_context_status, 0}, 4173 {"i915_context_status", i915_context_status, 0},
4174 {"i915_dump_lrc", i915_dump_lrc, 0},
4175 {"i915_execlists", i915_execlists, 0},
3926 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0}, 4176 {"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
3927 {"i915_swizzle_info", i915_swizzle_info, 0}, 4177 {"i915_swizzle_info", i915_swizzle_info, 0},
3928 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4178 {"i915_ppgtt_info", i915_ppgtt_info, 0},
@@ -3936,6 +4186,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
3936 {"i915_semaphore_status", i915_semaphore_status, 0}, 4186 {"i915_semaphore_status", i915_semaphore_status, 0},
3937 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4187 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
3938 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4188 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4189 {"i915_wa_registers", i915_wa_registers, 0},
3939}; 4190};
3940#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4191#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
3941 4192
@@ -3957,6 +4208,7 @@ static const struct i915_debugfs_files {
3957 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4208 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
3958 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4209 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
3959 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4210 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4211 {"i915_fbc_false_color", &i915_fbc_fc_fops},
3960}; 4212};
3961 4213
3962void intel_display_crc_init(struct drm_device *dev) 4214void intel_display_crc_init(struct drm_device *dev)
@@ -3964,7 +4216,7 @@ void intel_display_crc_init(struct drm_device *dev)
3964 struct drm_i915_private *dev_priv = dev->dev_private; 4216 struct drm_i915_private *dev_priv = dev->dev_private;
3965 enum pipe pipe; 4217 enum pipe pipe;
3966 4218
3967 for_each_pipe(pipe) { 4219 for_each_pipe(dev_priv, pipe) {
3968 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 4220 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3969 4221
3970 pipe_crc->opened = false; 4222 pipe_crc->opened = false;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9933c26017ed..1403b01e8216 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -28,9 +28,11 @@
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 30
31#include <linux/async.h>
31#include <drm/drmP.h> 32#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h> 33#include <drm/drm_crtc_helper.h>
33#include <drm/drm_fb_helper.h> 34#include <drm/drm_fb_helper.h>
35#include <drm/drm_legacy.h>
34#include "intel_drv.h" 36#include "intel_drv.h"
35#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
36#include "i915_drv.h" 38#include "i915_drv.h"
@@ -196,7 +198,7 @@ static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
196 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 198 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
197 int ret; 199 int ret;
198 200
199 master_priv->sarea = drm_getsarea(dev); 201 master_priv->sarea = drm_legacy_getsarea(dev);
200 if (master_priv->sarea) { 202 if (master_priv->sarea) {
201 master_priv->sarea_priv = (drm_i915_sarea_t *) 203 master_priv->sarea_priv = (drm_i915_sarea_t *)
202 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset); 204 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
@@ -999,7 +1001,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
999 value = HAS_WT(dev); 1001 value = HAS_WT(dev);
1000 break; 1002 break;
1001 case I915_PARAM_HAS_ALIASING_PPGTT: 1003 case I915_PARAM_HAS_ALIASING_PPGTT:
1002 value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev); 1004 value = USES_PPGTT(dev);
1003 break; 1005 break;
1004 case I915_PARAM_HAS_WAIT_TIMEOUT: 1006 case I915_PARAM_HAS_WAIT_TIMEOUT:
1005 value = 1; 1007 value = 1;
@@ -1355,8 +1357,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
1355 if (ret) 1357 if (ret)
1356 goto cleanup_irq; 1358 goto cleanup_irq;
1357 1359
1358 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1359
1360 intel_modeset_gem_init(dev); 1360 intel_modeset_gem_init(dev);
1361 1361
1362 /* Always safe in the mode setting case. */ 1362 /* Always safe in the mode setting case. */
@@ -1382,7 +1382,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1382 * scanning against hotplug events. Hence do this first and ignore the 1382 * scanning against hotplug events. Hence do this first and ignore the
1383 * tiny window where we will lose hotplug notifications. 1383 * tiny window where we will lose hotplug notifications.
1384 */ 1384 */
1385 intel_fbdev_initial_config(dev); 1385 async_schedule(intel_fbdev_initial_config, dev_priv);
1386 1386
1387 drm_kms_helper_poll_init(dev); 1387 drm_kms_helper_poll_init(dev);
1388 1388
@@ -1393,7 +1393,6 @@ cleanup_gem:
1393 i915_gem_cleanup_ringbuffer(dev); 1393 i915_gem_cleanup_ringbuffer(dev);
1394 i915_gem_context_fini(dev); 1394 i915_gem_context_fini(dev);
1395 mutex_unlock(&dev->struct_mutex); 1395 mutex_unlock(&dev->struct_mutex);
1396 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1397cleanup_irq: 1396cleanup_irq:
1398 drm_irq_uninstall(dev); 1397 drm_irq_uninstall(dev);
1399cleanup_gem_stolen: 1398cleanup_gem_stolen:
@@ -1536,10 +1535,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
1536 info = (struct intel_device_info *)&dev_priv->info; 1535 info = (struct intel_device_info *)&dev_priv->info;
1537 1536
1538 if (IS_VALLEYVIEW(dev)) 1537 if (IS_VALLEYVIEW(dev))
1539 for_each_pipe(pipe) 1538 for_each_pipe(dev_priv, pipe)
1540 info->num_sprites[pipe] = 2; 1539 info->num_sprites[pipe] = 2;
1541 else 1540 else
1542 for_each_pipe(pipe) 1541 for_each_pipe(dev_priv, pipe)
1543 info->num_sprites[pipe] = 1; 1542 info->num_sprites[pipe] = 1;
1544 1543
1545 if (i915.disable_display) { 1544 if (i915.disable_display) {
@@ -1608,9 +1607,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1608 dev->dev_private = dev_priv; 1607 dev->dev_private = dev_priv;
1609 dev_priv->dev = dev; 1608 dev_priv->dev = dev;
1610 1609
1611 /* copy initial configuration to dev_priv->info */ 1610 /* Setup the write-once "constant" device info */
1612 device_info = (struct intel_device_info *)&dev_priv->info; 1611 device_info = (struct intel_device_info *)&dev_priv->info;
1613 *device_info = *info; 1612 memcpy(device_info, info, sizeof(dev_priv->info));
1613 device_info->device_id = dev->pdev->device;
1614 1614
1615 spin_lock_init(&dev_priv->irq_lock); 1615 spin_lock_init(&dev_priv->irq_lock);
1616 spin_lock_init(&dev_priv->gpu_error.lock); 1616 spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1822,7 +1822,7 @@ out_mtrrfree:
1822 arch_phys_wc_del(dev_priv->gtt.mtrr); 1822 arch_phys_wc_del(dev_priv->gtt.mtrr);
1823 io_mapping_free(dev_priv->gtt.mappable); 1823 io_mapping_free(dev_priv->gtt.mappable);
1824out_gtt: 1824out_gtt:
1825 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1825 i915_global_gtt_cleanup(dev);
1826out_regs: 1826out_regs:
1827 intel_uncore_fini(dev); 1827 intel_uncore_fini(dev);
1828 pci_iounmap(dev->pdev, dev_priv->regs); 1828 pci_iounmap(dev->pdev, dev_priv->regs);
@@ -1869,7 +1869,6 @@ int i915_driver_unload(struct drm_device *dev)
1869 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1869 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1870 intel_fbdev_fini(dev); 1870 intel_fbdev_fini(dev);
1871 intel_modeset_cleanup(dev); 1871 intel_modeset_cleanup(dev);
1872 cancel_work_sync(&dev_priv->console_resume_work);
1873 1872
1874 /* 1873 /*
1875 * free the memory space allocated for the child device 1874 * free the memory space allocated for the child device
@@ -1902,7 +1901,6 @@ int i915_driver_unload(struct drm_device *dev)
1902 mutex_lock(&dev->struct_mutex); 1901 mutex_lock(&dev->struct_mutex);
1903 i915_gem_cleanup_ringbuffer(dev); 1902 i915_gem_cleanup_ringbuffer(dev);
1904 i915_gem_context_fini(dev); 1903 i915_gem_context_fini(dev);
1905 WARN_ON(dev_priv->mm.aliasing_ppgtt);
1906 mutex_unlock(&dev->struct_mutex); 1904 mutex_unlock(&dev->struct_mutex);
1907 i915_gem_cleanup_stolen(dev); 1905 i915_gem_cleanup_stolen(dev);
1908 1906
@@ -1910,8 +1908,6 @@ int i915_driver_unload(struct drm_device *dev)
1910 i915_free_hws(dev); 1908 i915_free_hws(dev);
1911 } 1909 }
1912 1910
1913 WARN_ON(!list_empty(&dev_priv->vm_list));
1914
1915 drm_vblank_cleanup(dev); 1911 drm_vblank_cleanup(dev);
1916 1912
1917 intel_teardown_gmbus(dev); 1913 intel_teardown_gmbus(dev);
@@ -1921,7 +1917,7 @@ int i915_driver_unload(struct drm_device *dev)
1921 destroy_workqueue(dev_priv->wq); 1917 destroy_workqueue(dev_priv->wq);
1922 pm_qos_remove_request(&dev_priv->pm_qos); 1918 pm_qos_remove_request(&dev_priv->pm_qos);
1923 1919
1924 dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); 1920 i915_global_gtt_cleanup(dev);
1925 1921
1926 intel_uncore_fini(dev); 1922 intel_uncore_fini(dev);
1927 if (dev_priv->regs != NULL) 1923 if (dev_priv->regs != NULL)
@@ -1986,6 +1982,9 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1986 i915_gem_context_close(dev, file); 1982 i915_gem_context_close(dev, file);
1987 i915_gem_release(dev, file); 1983 i915_gem_release(dev, file);
1988 mutex_unlock(&dev->struct_mutex); 1984 mutex_unlock(&dev->struct_mutex);
1985
1986 if (drm_core_check_feature(dev, DRIVER_MODESET))
1987 intel_modeset_preclose(dev, file);
1989} 1988}
1990 1989
1991void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1990void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e27cdbe9d524..055d5e7fbf12 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -481,6 +481,10 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
481 if (i915.semaphores >= 0) 481 if (i915.semaphores >= 0)
482 return i915.semaphores; 482 return i915.semaphores;
483 483
484 /* TODO: make semaphores and Execlists play nicely together */
485 if (i915.enable_execlists)
486 return false;
487
484 /* Until we get further testing... */ 488 /* Until we get further testing... */
485 if (IS_GEN8(dev)) 489 if (IS_GEN8(dev))
486 return false; 490 return false;
@@ -524,6 +528,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
524 drm_modeset_unlock_all(dev); 528 drm_modeset_unlock_all(dev);
525} 529}
526 530
531static int intel_suspend_complete(struct drm_i915_private *dev_priv);
532static int intel_resume_prepare(struct drm_i915_private *dev_priv,
533 bool rpm_resume);
534
527static int i915_drm_freeze(struct drm_device *dev) 535static int i915_drm_freeze(struct drm_device *dev)
528{ 536{
529 struct drm_i915_private *dev_priv = dev->dev_private; 537 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -591,9 +599,7 @@ static int i915_drm_freeze(struct drm_device *dev)
591 intel_uncore_forcewake_reset(dev, false); 599 intel_uncore_forcewake_reset(dev, false);
592 intel_opregion_fini(dev); 600 intel_opregion_fini(dev);
593 601
594 console_lock(); 602 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
595 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
596 console_unlock();
597 603
598 dev_priv->suspend_count++; 604 dev_priv->suspend_count++;
599 605
@@ -632,30 +638,20 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
632 return 0; 638 return 0;
633} 639}
634 640
635void intel_console_resume(struct work_struct *work)
636{
637 struct drm_i915_private *dev_priv =
638 container_of(work, struct drm_i915_private,
639 console_resume_work);
640 struct drm_device *dev = dev_priv->dev;
641
642 console_lock();
643 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
644 console_unlock();
645}
646
647static int i915_drm_thaw_early(struct drm_device *dev) 641static int i915_drm_thaw_early(struct drm_device *dev)
648{ 642{
649 struct drm_i915_private *dev_priv = dev->dev_private; 643 struct drm_i915_private *dev_priv = dev->dev_private;
644 int ret;
650 645
651 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 646 ret = intel_resume_prepare(dev_priv, false);
652 hsw_disable_pc8(dev_priv); 647 if (ret)
648 DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
653 649
654 intel_uncore_early_sanitize(dev, true); 650 intel_uncore_early_sanitize(dev, true);
655 intel_uncore_sanitize(dev); 651 intel_uncore_sanitize(dev);
656 intel_power_domains_init_hw(dev_priv); 652 intel_power_domains_init_hw(dev_priv);
657 653
658 return 0; 654 return ret;
659} 655}
660 656
661static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) 657static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
@@ -714,17 +710,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
714 710
715 intel_opregion_init(dev); 711 intel_opregion_init(dev);
716 712
717 /* 713 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
718 * The console lock can be pretty contented on resume due
719 * to all the printk activity. Try to keep it out of the hot
720 * path of resume if possible.
721 */
722 if (console_trylock()) {
723 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
724 console_unlock();
725 } else {
726 schedule_work(&dev_priv->console_resume_work);
727 }
728 714
729 mutex_lock(&dev_priv->modeset_restore_lock); 715 mutex_lock(&dev_priv->modeset_restore_lock);
730 dev_priv->modeset_restore = MODESET_DONE; 716 dev_priv->modeset_restore = MODESET_DONE;
@@ -858,7 +844,13 @@ int i915_reset(struct drm_device *dev)
858 !dev_priv->ums.mm_suspended) { 844 !dev_priv->ums.mm_suspended) {
859 dev_priv->ums.mm_suspended = 0; 845 dev_priv->ums.mm_suspended = 0;
860 846
847 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
848 dev_priv->gpu_error.reload_in_reset = true;
849
861 ret = i915_gem_init_hw(dev); 850 ret = i915_gem_init_hw(dev);
851
852 dev_priv->gpu_error.reload_in_reset = false;
853
862 mutex_unlock(&dev->struct_mutex); 854 mutex_unlock(&dev->struct_mutex);
863 if (ret) { 855 if (ret) {
864 DRM_ERROR("Failed hw init on reset %d\n", ret); 856 DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -879,8 +871,6 @@ int i915_reset(struct drm_device *dev)
879 */ 871 */
880 if (INTEL_INFO(dev)->gen > 5) 872 if (INTEL_INFO(dev)->gen > 5)
881 intel_reset_gt_powersave(dev); 873 intel_reset_gt_powersave(dev);
882
883 intel_hpd_init(dev);
884 } else { 874 } else {
885 mutex_unlock(&dev->struct_mutex); 875 mutex_unlock(&dev->struct_mutex);
886 } 876 }
@@ -941,6 +931,7 @@ static int i915_pm_suspend_late(struct device *dev)
941 struct pci_dev *pdev = to_pci_dev(dev); 931 struct pci_dev *pdev = to_pci_dev(dev);
942 struct drm_device *drm_dev = pci_get_drvdata(pdev); 932 struct drm_device *drm_dev = pci_get_drvdata(pdev);
943 struct drm_i915_private *dev_priv = drm_dev->dev_private; 933 struct drm_i915_private *dev_priv = drm_dev->dev_private;
934 int ret;
944 935
945 /* 936 /*
946 * We have a suspend ordering issue with the snd-hda driver also 937 * We have a suspend ordering issue with the snd-hda driver also
@@ -954,13 +945,16 @@ static int i915_pm_suspend_late(struct device *dev)
954 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 945 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
955 return 0; 946 return 0;
956 947
957 if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev)) 948 ret = intel_suspend_complete(dev_priv);
958 hsw_enable_pc8(dev_priv);
959 949
960 pci_disable_device(pdev); 950 if (ret)
961 pci_set_power_state(pdev, PCI_D3hot); 951 DRM_ERROR("Suspend complete failed: %d\n", ret);
952 else {
953 pci_disable_device(pdev);
954 pci_set_power_state(pdev, PCI_D3hot);
955 }
962 956
963 return 0; 957 return ret;
964} 958}
965 959
966static int i915_pm_resume_early(struct device *dev) 960static int i915_pm_resume_early(struct device *dev)
@@ -1016,23 +1010,26 @@ static int i915_pm_poweroff(struct device *dev)
1016 return i915_drm_freeze(drm_dev); 1010 return i915_drm_freeze(drm_dev);
1017} 1011}
1018 1012
1019static int hsw_runtime_suspend(struct drm_i915_private *dev_priv) 1013static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1020{ 1014{
1021 hsw_enable_pc8(dev_priv); 1015 hsw_enable_pc8(dev_priv);
1022 1016
1023 return 0; 1017 return 0;
1024} 1018}
1025 1019
1026static int snb_runtime_resume(struct drm_i915_private *dev_priv) 1020static int snb_resume_prepare(struct drm_i915_private *dev_priv,
1021 bool rpm_resume)
1027{ 1022{
1028 struct drm_device *dev = dev_priv->dev; 1023 struct drm_device *dev = dev_priv->dev;
1029 1024
1030 intel_init_pch_refclk(dev); 1025 if (rpm_resume)
1026 intel_init_pch_refclk(dev);
1031 1027
1032 return 0; 1028 return 0;
1033} 1029}
1034 1030
1035static int hsw_runtime_resume(struct drm_i915_private *dev_priv) 1031static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
1032 bool rpm_resume)
1036{ 1033{
1037 hsw_disable_pc8(dev_priv); 1034 hsw_disable_pc8(dev_priv);
1038 1035
@@ -1328,7 +1325,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1328 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); 1325 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1329} 1326}
1330 1327
1331static int vlv_runtime_suspend(struct drm_i915_private *dev_priv) 1328static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
1332{ 1329{
1333 u32 mask; 1330 u32 mask;
1334 int err; 1331 int err;
@@ -1368,7 +1365,8 @@ err1:
1368 return err; 1365 return err;
1369} 1366}
1370 1367
1371static int vlv_runtime_resume(struct drm_i915_private *dev_priv) 1368static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1369 bool rpm_resume)
1372{ 1370{
1373 struct drm_device *dev = dev_priv->dev; 1371 struct drm_device *dev = dev_priv->dev;
1374 int err; 1372 int err;
@@ -1393,8 +1391,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
1393 1391
1394 vlv_check_no_gt_access(dev_priv); 1392 vlv_check_no_gt_access(dev_priv);
1395 1393
1396 intel_init_clock_gating(dev); 1394 if (rpm_resume) {
1397 i915_gem_restore_fences(dev); 1395 intel_init_clock_gating(dev);
1396 i915_gem_restore_fences(dev);
1397 }
1398 1398
1399 return ret; 1399 return ret;
1400} 1400}
@@ -1409,7 +1409,9 @@ static int intel_runtime_suspend(struct device *device)
1409 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) 1409 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1410 return -ENODEV; 1410 return -ENODEV;
1411 1411
1412 WARN_ON(!HAS_RUNTIME_PM(dev)); 1412 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1413 return -ENODEV;
1414
1413 assert_force_wake_inactive(dev_priv); 1415 assert_force_wake_inactive(dev_priv);
1414 1416
1415 DRM_DEBUG_KMS("Suspending device\n"); 1417 DRM_DEBUG_KMS("Suspending device\n");
@@ -1446,17 +1448,7 @@ static int intel_runtime_suspend(struct device *device)
1446 cancel_work_sync(&dev_priv->rps.work); 1448 cancel_work_sync(&dev_priv->rps.work);
1447 intel_runtime_pm_disable_interrupts(dev); 1449 intel_runtime_pm_disable_interrupts(dev);
1448 1450
1449 if (IS_GEN6(dev)) { 1451 ret = intel_suspend_complete(dev_priv);
1450 ret = 0;
1451 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1452 ret = hsw_runtime_suspend(dev_priv);
1453 } else if (IS_VALLEYVIEW(dev)) {
1454 ret = vlv_runtime_suspend(dev_priv);
1455 } else {
1456 ret = -ENODEV;
1457 WARN_ON(1);
1458 }
1459
1460 if (ret) { 1452 if (ret) {
1461 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 1453 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1462 intel_runtime_pm_restore_interrupts(dev); 1454 intel_runtime_pm_restore_interrupts(dev);
@@ -1468,13 +1460,29 @@ static int intel_runtime_suspend(struct device *device)
1468 dev_priv->pm.suspended = true; 1460 dev_priv->pm.suspended = true;
1469 1461
1470 /* 1462 /*
1471 * current versions of firmware which depend on this opregion 1463 * FIXME: We really should find a document that references the arguments
1472 * notification have repurposed the D1 definition to mean 1464 * used below!
1473 * "runtime suspended" vs. what you would normally expect (D3)
1474 * to distinguish it from notifications that might be sent
1475 * via the suspend path.
1476 */ 1465 */
1477 intel_opregion_notify_adapter(dev, PCI_D1); 1466 if (IS_HASWELL(dev)) {
1467 /*
1468 * current versions of firmware which depend on this opregion
1469 * notification have repurposed the D1 definition to mean
1470 * "runtime suspended" vs. what you would normally expect (D3)
1471 * to distinguish it from notifications that might be sent via
1472 * the suspend path.
1473 */
1474 intel_opregion_notify_adapter(dev, PCI_D1);
1475 } else {
1476 /*
1477 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1478 * being detected, and the call we do at intel_runtime_resume()
1479 * won't be able to restore them. Since PCI_D3hot matches the
1480 * actual specification and appears to be working, use it. Let's
1481 * assume the other non-Haswell platforms will stay the same as
1482 * Broadwell.
1483 */
1484 intel_opregion_notify_adapter(dev, PCI_D3hot);
1485 }
1478 1486
1479 DRM_DEBUG_KMS("Device suspended\n"); 1487 DRM_DEBUG_KMS("Device suspended\n");
1480 return 0; 1488 return 0;
@@ -1487,24 +1495,15 @@ static int intel_runtime_resume(struct device *device)
1487 struct drm_i915_private *dev_priv = dev->dev_private; 1495 struct drm_i915_private *dev_priv = dev->dev_private;
1488 int ret; 1496 int ret;
1489 1497
1490 WARN_ON(!HAS_RUNTIME_PM(dev)); 1498 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1499 return -ENODEV;
1491 1500
1492 DRM_DEBUG_KMS("Resuming device\n"); 1501 DRM_DEBUG_KMS("Resuming device\n");
1493 1502
1494 intel_opregion_notify_adapter(dev, PCI_D0); 1503 intel_opregion_notify_adapter(dev, PCI_D0);
1495 dev_priv->pm.suspended = false; 1504 dev_priv->pm.suspended = false;
1496 1505
1497 if (IS_GEN6(dev)) { 1506 ret = intel_resume_prepare(dev_priv, true);
1498 ret = snb_runtime_resume(dev_priv);
1499 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1500 ret = hsw_runtime_resume(dev_priv);
1501 } else if (IS_VALLEYVIEW(dev)) {
1502 ret = vlv_runtime_resume(dev_priv);
1503 } else {
1504 WARN_ON(1);
1505 ret = -ENODEV;
1506 }
1507
1508 /* 1507 /*
1509 * No point of rolling back things in case of an error, as the best 1508 * No point of rolling back things in case of an error, as the best
1510 * we can do is to hope that things will still work (and disable RPM). 1509 * we can do is to hope that things will still work (and disable RPM).
@@ -1523,6 +1522,48 @@ static int intel_runtime_resume(struct device *device)
1523 return ret; 1522 return ret;
1524} 1523}
1525 1524
1525/*
1526 * This function implements common functionality of runtime and system
1527 * suspend sequence.
1528 */
1529static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1530{
1531 struct drm_device *dev = dev_priv->dev;
1532 int ret;
1533
1534 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1535 ret = hsw_suspend_complete(dev_priv);
1536 else if (IS_VALLEYVIEW(dev))
1537 ret = vlv_suspend_complete(dev_priv);
1538 else
1539 ret = 0;
1540
1541 return ret;
1542}
1543
1544/*
1545 * This function implements common functionality of runtime and system
1546 * resume sequence. Variable rpm_resume used for implementing different
1547 * code paths.
1548 */
1549static int intel_resume_prepare(struct drm_i915_private *dev_priv,
1550 bool rpm_resume)
1551{
1552 struct drm_device *dev = dev_priv->dev;
1553 int ret;
1554
1555 if (IS_GEN6(dev))
1556 ret = snb_resume_prepare(dev_priv, rpm_resume);
1557 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1558 ret = hsw_resume_prepare(dev_priv, rpm_resume);
1559 else if (IS_VALLEYVIEW(dev))
1560 ret = vlv_resume_prepare(dev_priv, rpm_resume);
1561 else
1562 ret = 0;
1563
1564 return ret;
1565}
1566
1526static const struct dev_pm_ops i915_pm_ops = { 1567static const struct dev_pm_ops i915_pm_ops = {
1527 .suspend = i915_pm_suspend, 1568 .suspend = i915_pm_suspend,
1528 .suspend_late = i915_pm_suspend_late, 1569 .suspend_late = i915_pm_suspend_late,
@@ -1572,6 +1613,7 @@ static struct drm_driver driver = {
1572 .lastclose = i915_driver_lastclose, 1613 .lastclose = i915_driver_lastclose,
1573 .preclose = i915_driver_preclose, 1614 .preclose = i915_driver_preclose,
1574 .postclose = i915_driver_postclose, 1615 .postclose = i915_driver_postclose,
1616 .set_busid = drm_pci_set_busid,
1575 1617
1576 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ 1618 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
1577 .suspend = i915_suspend, 1619 .suspend = i915_suspend,
@@ -1663,6 +1705,8 @@ static void __exit i915_exit(void)
1663module_init(i915_init); 1705module_init(i915_init);
1664module_exit(i915_exit); 1706module_exit(i915_exit);
1665 1707
1666MODULE_AUTHOR(DRIVER_AUTHOR); 1708MODULE_AUTHOR("Tungsten Graphics, Inc.");
1709MODULE_AUTHOR("Intel Corporation");
1710
1667MODULE_DESCRIPTION(DRIVER_DESC); 1711MODULE_DESCRIPTION(DRIVER_DESC);
1668MODULE_LICENSE("GPL and additional rights"); 1712MODULE_LICENSE("GPL and additional rights");
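
The i915_drv.c hunks above fold the per-platform runtime-PM hooks (hsw_*, vlv_*, snb_*) into two shared entry points, intel_suspend_complete() and intel_resume_prepare(), selected by platform checks and called from both the runtime and the system suspend/resume paths. The following stand-alone C sketch only illustrates that dispatch pattern; the struct layout and platform flags are invented stand-ins, not the driver's real types.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the platform checks the driver does via IS_HASWELL() etc. */
struct dev_priv { bool is_hsw_or_bdw; bool is_vlv; };

static int hsw_suspend_complete(struct dev_priv *p) { puts("HSW/BDW: enable PC8"); return 0; }
static int vlv_suspend_complete(struct dev_priv *p) { puts("VLV: gate GT, allow wake"); return 0; }

/* One entry point shared by the runtime and system suspend paths. */
static int intel_suspend_complete(struct dev_priv *p)
{
	if (p->is_hsw_or_bdw)
		return hsw_suspend_complete(p);
	if (p->is_vlv)
		return vlv_suspend_complete(p);
	return 0;	/* other platforms need no extra late-suspend work */
}

int main(void)
{
	struct dev_priv hsw = { .is_hsw_or_bdw = true };
	return intel_suspend_complete(&hsw);
}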
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3524306d8cfb..16a6f6d187a1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -35,11 +35,15 @@
35#include "i915_reg.h" 35#include "i915_reg.h"
36#include "intel_bios.h" 36#include "intel_bios.h"
37#include "intel_ringbuffer.h" 37#include "intel_ringbuffer.h"
38#include "intel_lrc.h"
38#include "i915_gem_gtt.h" 39#include "i915_gem_gtt.h"
40#include "i915_gem_render_state.h"
39#include <linux/io-mapping.h> 41#include <linux/io-mapping.h>
40#include <linux/i2c.h> 42#include <linux/i2c.h>
41#include <linux/i2c-algo-bit.h> 43#include <linux/i2c-algo-bit.h>
42#include <drm/intel-gtt.h> 44#include <drm/intel-gtt.h>
45#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
46#include <drm/drm_gem.h>
43#include <linux/backlight.h> 47#include <linux/backlight.h>
44#include <linux/hashtable.h> 48#include <linux/hashtable.h>
45#include <linux/intel-iommu.h> 49#include <linux/intel-iommu.h>
@@ -49,11 +53,9 @@
49/* General customization: 53/* General customization:
50 */ 54 */
51 55
52#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
53
54#define DRIVER_NAME "i915" 56#define DRIVER_NAME "i915"
55#define DRIVER_DESC "Intel Graphics" 57#define DRIVER_DESC "Intel Graphics"
56#define DRIVER_DATE "20140725" 58#define DRIVER_DATE "20140905"
57 59
58enum pipe { 60enum pipe {
59 INVALID_PIPE = -1, 61 INVALID_PIPE = -1,
@@ -162,7 +164,10 @@ enum hpd_pin {
162 I915_GEM_DOMAIN_INSTRUCTION | \ 164 I915_GEM_DOMAIN_INSTRUCTION | \
163 I915_GEM_DOMAIN_VERTEX) 165 I915_GEM_DOMAIN_VERTEX)
164 166
165#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) 167#define for_each_pipe(__dev_priv, __p) \
168 for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
169#define for_each_plane(pipe, p) \
170 for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
166#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) 171#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
167 172
168#define for_each_crtc(dev, crtc) \ 173#define for_each_crtc(dev, crtc) \
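
The reworked for_each_pipe() above takes the drm_i915_private pointer explicitly instead of silently picking up a local dev variable, so it can be used from functions that only have dev_priv in scope. A minimal compile-and-run sketch of the same macro pattern, using a stand-in device-info struct rather than the driver's real types:

#include <stdio.h>

struct fake_info { int num_pipes; };
struct fake_i915 { struct fake_info info; };

/* Iterate the pipes of an explicitly named device, as the reworked macro does. */
#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < (__dev_priv)->info.num_pipes; (__p)++)

int main(void)
{
	struct fake_i915 dev_priv = { .info = { .num_pipes = 3 } };
	int pipe;

	for_each_pipe(&dev_priv, pipe)
		printf("pipe %c\n", 'A' + pipe);
	return 0;
}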
@@ -171,6 +176,11 @@ enum hpd_pin {
171#define for_each_intel_crtc(dev, intel_crtc) \ 176#define for_each_intel_crtc(dev, intel_crtc) \
172 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 177 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
173 178
179#define for_each_intel_encoder(dev, intel_encoder) \
180 list_for_each_entry(intel_encoder, \
181 &(dev)->mode_config.encoder_list, \
182 base.head)
183
174#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 184#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
175 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 185 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
176 if ((intel_encoder)->base.crtc == (__crtc)) 186 if ((intel_encoder)->base.crtc == (__crtc))
@@ -198,10 +208,13 @@ enum intel_dpll_id {
198#define I915_NUM_PLLS 2 208#define I915_NUM_PLLS 2
199 209
200struct intel_dpll_hw_state { 210struct intel_dpll_hw_state {
211 /* i9xx, pch plls */
201 uint32_t dpll; 212 uint32_t dpll;
202 uint32_t dpll_md; 213 uint32_t dpll_md;
203 uint32_t fp0; 214 uint32_t fp0;
204 uint32_t fp1; 215 uint32_t fp1;
216
217 /* hsw, bdw */
205 uint32_t wrpll; 218 uint32_t wrpll;
206}; 219};
207 220
@@ -277,8 +290,10 @@ struct intel_opregion {
277struct intel_overlay; 290struct intel_overlay;
278struct intel_overlay_error_state; 291struct intel_overlay_error_state;
279 292
293struct drm_local_map;
294
280struct drm_i915_master_private { 295struct drm_i915_master_private {
281 drm_local_map_t *sarea; 296 struct drm_local_map *sarea;
282 struct _drm_i915_sarea *sarea_priv; 297 struct _drm_i915_sarea *sarea_priv;
283}; 298};
284#define I915_FENCE_REG_NONE -1 299#define I915_FENCE_REG_NONE -1
@@ -388,6 +403,7 @@ struct drm_i915_error_state {
388 pid_t pid; 403 pid_t pid;
389 char comm[TASK_COMM_LEN]; 404 char comm[TASK_COMM_LEN];
390 } ring[I915_NUM_RINGS]; 405 } ring[I915_NUM_RINGS];
406
391 struct drm_i915_error_buffer { 407 struct drm_i915_error_buffer {
392 u32 size; 408 u32 size;
393 u32 name; 409 u32 name;
@@ -406,6 +422,7 @@ struct drm_i915_error_state {
406 } **active_bo, **pinned_bo; 422 } **active_bo, **pinned_bo;
407 423
408 u32 *active_bo_count, *pinned_bo_count; 424 u32 *active_bo_count, *pinned_bo_count;
425 u32 vm_count;
409}; 426};
410 427
411struct intel_connector; 428struct intel_connector;
@@ -551,6 +568,7 @@ struct intel_uncore {
551 568
552struct intel_device_info { 569struct intel_device_info {
553 u32 display_mmio_offset; 570 u32 display_mmio_offset;
571 u16 device_id;
554 u8 num_pipes:3; 572 u8 num_pipes:3;
555 u8 num_sprites[I915_MAX_PIPES]; 573 u8 num_sprites[I915_MAX_PIPES];
556 u8 gen; 574 u8 gen;
@@ -615,13 +633,21 @@ struct intel_context {
615 uint8_t remap_slice; 633 uint8_t remap_slice;
616 struct drm_i915_file_private *file_priv; 634 struct drm_i915_file_private *file_priv;
617 struct i915_ctx_hang_stats hang_stats; 635 struct i915_ctx_hang_stats hang_stats;
618 struct i915_address_space *vm; 636 struct i915_hw_ppgtt *ppgtt;
619 637
638 /* Legacy ring buffer submission */
620 struct { 639 struct {
621 struct drm_i915_gem_object *rcs_state; 640 struct drm_i915_gem_object *rcs_state;
622 bool initialized; 641 bool initialized;
623 } legacy_hw_ctx; 642 } legacy_hw_ctx;
624 643
644 /* Execlists */
645 bool rcs_initialized;
646 struct {
647 struct drm_i915_gem_object *state;
648 struct intel_ringbuffer *ringbuf;
649 } engine[I915_NUM_RINGS];
650
625 struct list_head link; 651 struct list_head link;
626}; 652};
627 653
@@ -635,6 +661,8 @@ struct i915_fbc {
635 struct drm_mm_node compressed_fb; 661 struct drm_mm_node compressed_fb;
636 struct drm_mm_node *compressed_llb; 662 struct drm_mm_node *compressed_llb;
637 663
664 bool false_color;
665
638 struct intel_fbc_work { 666 struct intel_fbc_work {
639 struct delayed_work work; 667 struct delayed_work work;
640 struct drm_crtc *crtc; 668 struct drm_crtc *crtc;
@@ -688,6 +716,7 @@ enum intel_sbi_destination {
688#define QUIRK_LVDS_SSC_DISABLE (1<<1) 716#define QUIRK_LVDS_SSC_DISABLE (1<<1)
689#define QUIRK_INVERT_BRIGHTNESS (1<<2) 717#define QUIRK_INVERT_BRIGHTNESS (1<<2)
690#define QUIRK_BACKLIGHT_PRESENT (1<<3) 718#define QUIRK_BACKLIGHT_PRESENT (1<<3)
719#define QUIRK_PIPEB_FORCE (1<<4)
691 720
692struct intel_fbdev; 721struct intel_fbdev;
693struct intel_fbc_work; 722struct intel_fbc_work;
@@ -1147,6 +1176,7 @@ struct i915_gem_mm {
1147}; 1176};
1148 1177
1149struct drm_i915_error_state_buf { 1178struct drm_i915_error_state_buf {
1179 struct drm_i915_private *i915;
1150 unsigned bytes; 1180 unsigned bytes;
1151 unsigned size; 1181 unsigned size;
1152 int err; 1182 int err;
@@ -1219,6 +1249,9 @@ struct i915_gpu_error {
1219 1249
1220 /* For missed irq/seqno simulation. */ 1250 /* For missed irq/seqno simulation. */
1221 unsigned int test_irq_rings; 1251 unsigned int test_irq_rings;
1252
1253 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
1254 bool reload_in_reset;
1222}; 1255};
1223 1256
1224enum modeset_restore { 1257enum modeset_restore {
@@ -1228,6 +1261,12 @@ enum modeset_restore {
1228}; 1261};
1229 1262
1230struct ddi_vbt_port_info { 1263struct ddi_vbt_port_info {
1264 /*
1265 * This is an index in the HDMI/DVI DDI buffer translation table.
1266 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
1267 * populate this field.
1268 */
1269#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
1231 uint8_t hdmi_level_shift; 1270 uint8_t hdmi_level_shift;
1232 1271
1233 uint8_t supports_dvi:1; 1272 uint8_t supports_dvi:1;
@@ -1421,7 +1460,7 @@ struct drm_i915_private {
1421 struct drm_i915_gem_object *semaphore_obj; 1460 struct drm_i915_gem_object *semaphore_obj;
1422 uint32_t last_seqno, next_seqno; 1461 uint32_t last_seqno, next_seqno;
1423 1462
1424 drm_dma_handle_t *status_page_dmah; 1463 struct drm_dma_handle *status_page_dmah;
1425 struct resource mch_res; 1464 struct resource mch_res;
1426 1465
1427 /* protects the irq masks */ 1466 /* protects the irq masks */
@@ -1475,6 +1514,9 @@ struct drm_i915_private {
1475 /* LVDS info */ 1514 /* LVDS info */
1476 bool no_aux_handshake; 1515 bool no_aux_handshake;
1477 1516
1517 /* protects panel power sequencer state */
1518 struct mutex pps_mutex;
1519
1478 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1520 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1479 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 1521 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1480 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1522 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -1526,6 +1568,20 @@ struct drm_i915_private {
1526 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1568 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1527 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1569 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1528 1570
1571 /*
1572 * workarounds are currently applied at different places and
1573 * changes are being done to consolidate them so exact count is
1574 * not clear at this point, use a max value for now.
1575 */
1576#define I915_MAX_WA_REGS 16
1577 struct {
1578 u32 addr;
1579 u32 value;
1580 /* bitmask representing WA bits */
1581 u32 mask;
1582 } intel_wa_regs[I915_MAX_WA_REGS];
1583 u32 num_wa_regs;
1584
1529 /* Reclocking support */ 1585 /* Reclocking support */
1530 bool render_reclock_avail; 1586 bool render_reclock_avail;
1531 bool lvds_downclock_avail; 1587 bool lvds_downclock_avail;
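
The new intel_wa_regs[] array records, for each applied workaround, the register offset, the value written and the mask of bits the workaround owns, capped at I915_MAX_WA_REGS until the workaround code is consolidated; it feeds the i915_wa_registers debugfs entry added earlier. A small hedged sketch of such a fixed-size record table, with made-up register values:

#include <stdint.h>
#include <stdio.h>

#define MAX_WA_REGS 16

struct wa_reg { uint32_t addr, value, mask; };

static struct wa_reg wa_regs[MAX_WA_REGS];
static unsigned num_wa_regs;

/* Remember a workaround write so it can be dumped or verified later. */
static void record_wa(uint32_t addr, uint32_t mask, uint32_t value)
{
	if (num_wa_regs >= MAX_WA_REGS)
		return;	/* table full; the real driver sizes this generously */
	wa_regs[num_wa_regs++] = (struct wa_reg){ addr, value, mask };
}

int main(void)
{
	record_wa(0x7014, 0x000f, 0x0005);	/* illustrative offset and bits only */

	for (unsigned i = 0; i < num_wa_regs; i++)
		printf("0x%05X: mask=0x%08X value=0x%08X\n",
		       wa_regs[i].addr, wa_regs[i].mask, wa_regs[i].value);
	return 0;
}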
@@ -1561,14 +1617,9 @@ struct drm_i915_private {
1561#ifdef CONFIG_DRM_I915_FBDEV 1617#ifdef CONFIG_DRM_I915_FBDEV
1562 /* list of fbdev register on this device */ 1618 /* list of fbdev register on this device */
1563 struct intel_fbdev *fbdev; 1619 struct intel_fbdev *fbdev;
1620 struct work_struct fbdev_suspend_work;
1564#endif 1621#endif
1565 1622
1566 /*
1567 * The console may be contended at resume, but we don't
1568 * want it to block on it.
1569 */
1570 struct work_struct console_resume_work;
1571
1572 struct drm_property *broadcast_rgb_property; 1623 struct drm_property *broadcast_rgb_property;
1573 struct drm_property *force_audio_property; 1624 struct drm_property *force_audio_property;
1574 1625
@@ -1614,12 +1665,28 @@ struct drm_i915_private {
1614 */ 1665 */
1615 struct workqueue_struct *dp_wq; 1666 struct workqueue_struct *dp_wq;
1616 1667
1668 uint32_t bios_vgacntr;
1669
1617 /* Old dri1 support infrastructure, beware the dragons ya fools entering 1670 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1618 * here! */ 1671 * here! */
1619 struct i915_dri1_state dri1; 1672 struct i915_dri1_state dri1;
1620 /* Old ums support infrastructure, same warning applies. */ 1673 /* Old ums support infrastructure, same warning applies. */
1621 struct i915_ums_state ums; 1674 struct i915_ums_state ums;
1622 1675
1676 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1677 struct {
1678 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
1679 struct intel_engine_cs *ring,
1680 struct intel_context *ctx,
1681 struct drm_i915_gem_execbuffer2 *args,
1682 struct list_head *vmas,
1683 struct drm_i915_gem_object *batch_obj,
1684 u64 exec_start, u32 flags);
1685 int (*init_rings)(struct drm_device *dev);
1686 void (*cleanup_ring)(struct intel_engine_cs *ring);
1687 void (*stop_ring)(struct intel_engine_cs *ring);
1688 } gt;
1689
1623 /* 1690 /*
1624 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch 1691 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
1625 * will be rejected. Instead look for a better place. 1692 * will be rejected. Instead look for a better place.
@@ -1761,13 +1828,6 @@ struct drm_i915_gem_object {
1761 * Only honoured if hardware has relevant pte bit 1828 * Only honoured if hardware has relevant pte bit
1762 */ 1829 */
1763 unsigned long gt_ro:1; 1830 unsigned long gt_ro:1;
1764
1765 /*
1766 * Is the GPU currently using a fence to access this buffer,
1767 */
1768 unsigned int pending_fenced_gpu_access:1;
1769 unsigned int fenced_gpu_access:1;
1770
1771 unsigned int cache_level:3; 1831 unsigned int cache_level:3;
1772 1832
1773 unsigned int has_aliasing_ppgtt_mapping:1; 1833 unsigned int has_aliasing_ppgtt_mapping:1;
@@ -1805,7 +1865,7 @@ struct drm_i915_gem_object {
1805 struct drm_file *pin_filp; 1865 struct drm_file *pin_filp;
1806 1866
1807 /** for phy allocated objects */ 1867 /** for phy allocated objects */
1808 drm_dma_handle_t *phys_handle; 1868 struct drm_dma_handle *phys_handle;
1809 1869
1810 union { 1870 union {
1811 struct i915_gem_userptr { 1871 struct i915_gem_userptr {
@@ -1971,51 +2031,63 @@ struct drm_i915_cmd_table {
1971 int count; 2031 int count;
1972}; 2032};
1973 2033
1974#define INTEL_INFO(dev) (&to_i915(dev)->info) 2034/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
1975 2035#define __I915__(p) ({ \
1976#define IS_I830(dev) ((dev)->pdev->device == 0x3577) 2036 struct drm_i915_private *__p; \
1977#define IS_845G(dev) ((dev)->pdev->device == 0x2562) 2037 if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
2038 __p = (struct drm_i915_private *)p; \
2039 else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
2040 __p = to_i915((struct drm_device *)p); \
2041 else \
2042 BUILD_BUG(); \
2043 __p; \
2044})
2045#define INTEL_INFO(p) (&__I915__(p)->info)
2046#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2047
2048#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
2049#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
1978#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 2050#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1979#define IS_I865G(dev) ((dev)->pdev->device == 0x2572) 2051#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
1980#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 2052#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1981#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592) 2053#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
1982#define IS_I945G(dev) ((dev)->pdev->device == 0x2772) 2054#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
1983#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 2055#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1984#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 2056#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1985#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 2057#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1986#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42) 2058#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
1987#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 2059#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1988#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001) 2060#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
1989#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011) 2061#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
1990#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 2062#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1991#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 2063#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1992#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046) 2064#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
1993#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 2065#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1994#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \ 2066#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
1995 (dev)->pdev->device == 0x0152 || \ 2067 INTEL_DEVID(dev) == 0x0152 || \
1996 (dev)->pdev->device == 0x015a) 2068 INTEL_DEVID(dev) == 0x015a)
1997#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \ 2069#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
1998 (dev)->pdev->device == 0x0106 || \ 2070 INTEL_DEVID(dev) == 0x0106 || \
1999 (dev)->pdev->device == 0x010A) 2071 INTEL_DEVID(dev) == 0x010A)
2000#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2072#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
2001#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2073#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2002#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2074#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2003#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2075#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2004#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2076#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
2005#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2077#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
2006 ((dev)->pdev->device & 0xFF00) == 0x0C00) 2078 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
2007#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ 2079#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
2008 (((dev)->pdev->device & 0xf) == 0x2 || \ 2080 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
2009 ((dev)->pdev->device & 0xf) == 0x6 || \ 2081 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
2010 ((dev)->pdev->device & 0xf) == 0xe)) 2082 (INTEL_DEVID(dev) & 0xf) == 0xe))
2011#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2083#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
2012 ((dev)->pdev->device & 0xFF00) == 0x0A00) 2084 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
2013#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) 2085#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
2014#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2086#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
2015 ((dev)->pdev->device & 0x00F0) == 0x0020) 2087 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2016/* ULX machines are also considered ULT. */ 2088/* ULX machines are also considered ULT. */
2017#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ 2089#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
2018 (dev)->pdev->device == 0x0A1E) 2090 INTEL_DEVID(dev) == 0x0A1E)
2019#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) 2091#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
2020 2092
2021/* 2093/*
@@ -2047,10 +2119,11 @@ struct drm_i915_cmd_table {
2047#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2119#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
2048 2120
2049#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2121#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
2122#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
2050#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) 2123#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
2051#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) 2124#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
2052#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) 2125#define USES_PPGTT(dev) (i915.enable_ppgtt)
2053#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) 2126#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
2054 2127
2055#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 2128#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
2056#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 2129#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -2134,6 +2207,7 @@ struct i915_params {
2134 int enable_rc6; 2207 int enable_rc6;
2135 int enable_fbc; 2208 int enable_fbc;
2136 int enable_ppgtt; 2209 int enable_ppgtt;
2210 int enable_execlists;
2137 int enable_psr; 2211 int enable_psr;
2138 unsigned int preliminary_hw_support; 2212 unsigned int preliminary_hw_support;
2139 int disable_power_well; 2213 int disable_power_well;
@@ -2180,8 +2254,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2180int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2254int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2181void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2255void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2182 2256
2183extern void intel_console_resume(struct work_struct *work);
2184
2185/* i915_irq.c */ 2257/* i915_irq.c */
2186void i915_queue_hangcheck(struct drm_device *dev); 2258void i915_queue_hangcheck(struct drm_device *dev);
2187__printf(3, 4) 2259__printf(3, 4)
@@ -2229,6 +2301,20 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
2229 struct drm_file *file_priv); 2301 struct drm_file *file_priv);
2230int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 2302int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
2231 struct drm_file *file_priv); 2303 struct drm_file *file_priv);
2304void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
2305 struct intel_engine_cs *ring);
2306void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
2307 struct drm_file *file,
2308 struct intel_engine_cs *ring,
2309 struct drm_i915_gem_object *obj);
2310int i915_gem_ringbuffer_submission(struct drm_device *dev,
2311 struct drm_file *file,
2312 struct intel_engine_cs *ring,
2313 struct intel_context *ctx,
2314 struct drm_i915_gem_execbuffer2 *args,
2315 struct list_head *vmas,
2316 struct drm_i915_gem_object *batch_obj,
2317 u64 exec_start, u32 flags);
2232int i915_gem_execbuffer(struct drm_device *dev, void *data, 2318int i915_gem_execbuffer(struct drm_device *dev, void *data,
2233 struct drm_file *file_priv); 2319 struct drm_file *file_priv);
2234int i915_gem_execbuffer2(struct drm_device *dev, void *data, 2320int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2263,6 +2349,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
2263int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 2349int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2264 struct drm_file *file_priv); 2350 struct drm_file *file_priv);
2265void i915_gem_load(struct drm_device *dev); 2351void i915_gem_load(struct drm_device *dev);
2352unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
2353 long target,
2354 unsigned flags);
2355#define I915_SHRINK_PURGEABLE 0x1
2356#define I915_SHRINK_UNBOUND 0x2
2357#define I915_SHRINK_BOUND 0x4
2266void *i915_gem_object_alloc(struct drm_device *dev); 2358void *i915_gem_object_alloc(struct drm_device *dev);
2267void i915_gem_object_free(struct drm_i915_gem_object *obj); 2359void i915_gem_object_free(struct drm_i915_gem_object *obj);
2268void i915_gem_object_init(struct drm_i915_gem_object *obj, 2360void i915_gem_object_init(struct drm_i915_gem_object *obj,
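
i915_gem_shrink() now takes a target page count plus a flag mask (I915_SHRINK_PURGEABLE, I915_SHRINK_UNBOUND, I915_SHRINK_BOUND), replacing the separate purge and shrink helpers, so each caller picks how aggressive reclaim should be. A toy userspace sketch of that flags-driven interface; the object list and accounting below are invented stand-ins for the driver's bound/unbound lists:

#include <stdio.h>

#define SHRINK_PURGEABLE 0x1	/* only objects marked purgeable */
#define SHRINK_UNBOUND	 0x2	/* objects without a GPU binding */
#define SHRINK_BOUND	 0x4	/* objects with a GPU binding (unbind first) */

struct obj { long pages; int purgeable; int bound; };

/* Reclaim up to 'target' pages from the object set selected by 'flags'. */
static long gem_shrink(struct obj *objs, int n, long target, unsigned flags)
{
	long freed = 0;

	for (int i = 0; i < n && freed < target; i++) {
		if ((flags & SHRINK_PURGEABLE) && !objs[i].purgeable)
			continue;
		if (objs[i].bound && !(flags & SHRINK_BOUND))
			continue;
		if (!objs[i].bound && !(flags & SHRINK_UNBOUND))
			continue;
		freed += objs[i].pages;
		objs[i].pages = 0;
	}
	return freed;
}

int main(void)
{
	struct obj objs[] = {
		{ .pages = 16, .purgeable = 1, .bound = 0 },
		{ .pages = 32, .purgeable = 0, .bound = 1 },
	};

	printf("freed %ld pages\n",
	       gem_shrink(objs, 2, 64, SHRINK_UNBOUND | SHRINK_PURGEABLE));
	return 0;
}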
@@ -2381,6 +2473,7 @@ void i915_gem_reset(struct drm_device *dev);
2381bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 2473bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
2382int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 2474int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
2383int __must_check i915_gem_init(struct drm_device *dev); 2475int __must_check i915_gem_init(struct drm_device *dev);
2476int i915_gem_init_rings(struct drm_device *dev);
2384int __must_check i915_gem_init_hw(struct drm_device *dev); 2477int __must_check i915_gem_init_hw(struct drm_device *dev);
2385int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice); 2478int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
2386void i915_gem_init_swizzling(struct drm_device *dev); 2479void i915_gem_init_swizzling(struct drm_device *dev);
@@ -2451,7 +2544,7 @@ static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
2451} 2544}
2452 2545
2453/* Some GGTT VM helpers */ 2546/* Some GGTT VM helpers */
2454#define obj_to_ggtt(obj) \ 2547#define i915_obj_to_ggtt(obj) \
2455 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) 2548 (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
2456static inline bool i915_is_ggtt(struct i915_address_space *vm) 2549static inline bool i915_is_ggtt(struct i915_address_space *vm)
2457{ 2550{
@@ -2460,21 +2553,30 @@ static inline bool i915_is_ggtt(struct i915_address_space *vm)
2460 return vm == ggtt; 2553 return vm == ggtt;
2461} 2554}
2462 2555
2556static inline struct i915_hw_ppgtt *
2557i915_vm_to_ppgtt(struct i915_address_space *vm)
2558{
2559 WARN_ON(i915_is_ggtt(vm));
2560
2561 return container_of(vm, struct i915_hw_ppgtt, base);
2562}
2563
2564
2463static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) 2565static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
2464{ 2566{
2465 return i915_gem_obj_bound(obj, obj_to_ggtt(obj)); 2567 return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
2466} 2568}
2467 2569
2468static inline unsigned long 2570static inline unsigned long
2469i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) 2571i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
2470{ 2572{
2471 return i915_gem_obj_offset(obj, obj_to_ggtt(obj)); 2573 return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
2472} 2574}
2473 2575
2474static inline unsigned long 2576static inline unsigned long
2475i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 2577i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
2476{ 2578{
2477 return i915_gem_obj_size(obj, obj_to_ggtt(obj)); 2579 return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
2478} 2580}
2479 2581
2480static inline int __must_check 2582static inline int __must_check
@@ -2482,7 +2584,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
2482 uint32_t alignment, 2584 uint32_t alignment,
2483 unsigned flags) 2585 unsigned flags)
2484{ 2586{
2485 return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL); 2587 return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
2588 alignment, flags | PIN_GLOBAL);
2486} 2589}
2487 2590
2488static inline int 2591static inline int
@@ -2494,7 +2597,6 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2494void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); 2597void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
2495 2598
2496/* i915_gem_context.c */ 2599/* i915_gem_context.c */
2497#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
2498int __must_check i915_gem_context_init(struct drm_device *dev); 2600int __must_check i915_gem_context_init(struct drm_device *dev);
2499void i915_gem_context_fini(struct drm_device *dev); 2601void i915_gem_context_fini(struct drm_device *dev);
2500void i915_gem_context_reset(struct drm_device *dev); 2602void i915_gem_context_reset(struct drm_device *dev);
@@ -2506,6 +2608,8 @@ int i915_switch_context(struct intel_engine_cs *ring,
2506struct intel_context * 2608struct intel_context *
2507i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); 2609i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
2508void i915_gem_context_free(struct kref *ctx_ref); 2610void i915_gem_context_free(struct kref *ctx_ref);
2611struct drm_i915_gem_object *
2612i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
2509static inline void i915_gem_context_reference(struct intel_context *ctx) 2613static inline void i915_gem_context_reference(struct intel_context *ctx)
2510{ 2614{
2511 kref_get(&ctx->ref); 2615 kref_get(&ctx->ref);
@@ -2526,8 +2630,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2526int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 2630int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2527 struct drm_file *file); 2631 struct drm_file *file);
2528 2632
2529/* i915_gem_render_state.c */
2530int i915_gem_render_state_init(struct intel_engine_cs *ring);
2531/* i915_gem_evict.c */ 2633/* i915_gem_evict.c */
2532int __must_check i915_gem_evict_something(struct drm_device *dev, 2634int __must_check i915_gem_evict_something(struct drm_device *dev,
2533 struct i915_address_space *vm, 2635 struct i915_address_space *vm,
@@ -2595,6 +2697,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
2595int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, 2697int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
2596 const struct i915_error_state_file_priv *error); 2698 const struct i915_error_state_file_priv *error);
2597int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, 2699int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
2700 struct drm_i915_private *i915,
2598 size_t count, loff_t pos); 2701 size_t count, loff_t pos);
2599static inline void i915_error_state_buf_release( 2702static inline void i915_error_state_buf_release(
2600 struct drm_i915_error_state_buf *eb) 2703 struct drm_i915_error_state_buf *eb)
@@ -2609,7 +2712,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
2609void i915_destroy_error_state(struct drm_device *dev); 2712void i915_destroy_error_state(struct drm_device *dev);
2610 2713
2611void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 2714void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
2612const char *i915_cache_level_str(int type); 2715const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
2613 2716
2614/* i915_cmd_parser.c */ 2717/* i915_cmd_parser.c */
2615int i915_cmd_parser_get_version(void); 2718int i915_cmd_parser_get_version(void);
@@ -2701,6 +2804,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2701extern void i915_redisable_vga(struct drm_device *dev); 2804extern void i915_redisable_vga(struct drm_device *dev);
2702extern void i915_redisable_vga_power_on(struct drm_device *dev); 2805extern void i915_redisable_vga_power_on(struct drm_device *dev);
2703extern bool intel_fbc_enabled(struct drm_device *dev); 2806extern bool intel_fbc_enabled(struct drm_device *dev);
2807extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
2704extern void intel_disable_fbc(struct drm_device *dev); 2808extern void intel_disable_fbc(struct drm_device *dev);
2705extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 2809extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2706extern void intel_init_pch_refclk(struct drm_device *dev); 2810extern void intel_init_pch_refclk(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ad55b06a3cb1..28f91df2604d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
60static int i915_gem_shrinker_oom(struct notifier_block *nb, 60static int i915_gem_shrinker_oom(struct notifier_block *nb,
61 unsigned long event, 61 unsigned long event,
62 void *ptr); 62 void *ptr);
63static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
64static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 63static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
65 64
66static bool cpu_cache_is_coherent(struct drm_device *dev, 65static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1085,7 +1084,13 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
1085 if (i915_terminally_wedged(error)) 1084 if (i915_terminally_wedged(error))
1086 return -EIO; 1085 return -EIO;
1087 1086
1088 return -EAGAIN; 1087 /*
1088 * Check if GPU Reset is in progress - we need intel_ring_begin
1089 * to work properly to reinit the hw state while the gpu is
1090 * still marked as reset-in-progress. Handle this with a flag.
1091 */
1092 if (!error->reload_in_reset)
1093 return -EAGAIN;
1089 } 1094 }
1090 1095
1091 return 0; 1096 return 0;
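
i915_gem_check_wedge() normally returns -EAGAIN while a reset is pending; the new reload_in_reset flag, set by i915_reset() around i915_gem_init_hw(), lets the re-initialisation path use the ring code without tripping over that check. A reduced sketch of the idea, with illustrative names only:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct gpu_error {
	bool reset_in_progress;
	bool reload_in_reset;	/* the reset path is re-initialising the HW itself */
};

static int check_wedge(struct gpu_error *e)
{
	if (e->reset_in_progress && !e->reload_in_reset)
		return -EAGAIN;	/* callers outside the reset path back off and retry */
	return 0;
}

static int init_hw(struct gpu_error *e)
{
	/* Ring initialisation may call check_wedge() internally. */
	return check_wedge(e);
}

int main(void)
{
	struct gpu_error err = { .reset_in_progress = true };
	int ret;

	err.reload_in_reset = true;	/* as i915_reset() does around init_hw() */
	ret = init_hw(&err);
	err.reload_in_reset = false;

	printf("init_hw during reset -> %d\n", ret);
	return 0;
}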
@@ -1735,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1735 * offsets on purgeable objects by truncating it and marking it purged, 1740 * offsets on purgeable objects by truncating it and marking it purged,
1736 * which prevents userspace from ever using that object again. 1741 * which prevents userspace from ever using that object again.
1737 */ 1742 */
1738 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); 1743 i915_gem_shrink(dev_priv,
1744 obj->base.size >> PAGE_SHIFT,
1745 I915_SHRINK_BOUND |
1746 I915_SHRINK_UNBOUND |
1747 I915_SHRINK_PURGEABLE);
1739 ret = drm_gem_create_mmap_offset(&obj->base); 1748 ret = drm_gem_create_mmap_offset(&obj->base);
1740 if (ret != -ENOSPC) 1749 if (ret != -ENOSPC)
1741 goto out; 1750 goto out;
@@ -1932,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1932 return 0; 1941 return 0;
1933} 1942}
1934 1943
1935static unsigned long 1944unsigned long
1936__i915_gem_shrink(struct drm_i915_private *dev_priv, long target, 1945i915_gem_shrink(struct drm_i915_private *dev_priv,
1937 bool purgeable_only) 1946 long target, unsigned flags)
1938{ 1947{
1939 struct list_head still_in_list; 1948 const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
1940 struct drm_i915_gem_object *obj;
1941 unsigned long count = 0; 1949 unsigned long count = 0;
1942 1950
1943 /* 1951 /*
@@ -1959,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 	 * dev->struct_mutex and so we won't ever be able to observe an
 	 * object on the bound_list with a reference count equals 0.
 	 */
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-		obj = list_first_entry(&dev_priv->mm.unbound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+	if (flags & I915_SHRINK_UNBOUND) {
+		struct list_head still_in_list;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+			struct drm_i915_gem_object *obj;
 
-		drm_gem_object_reference(&obj->base);
+			obj = list_first_entry(&dev_priv->mm.unbound_list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
 
-		drm_gem_object_unreference(&obj->base);
+			drm_gem_object_reference(&obj->base);
+
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
+
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-	INIT_LIST_HEAD(&still_in_list);
-	while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-		struct i915_vma *vma, *v;
+	if (flags & I915_SHRINK_BOUND) {
+		struct list_head still_in_list;
 
-		obj = list_first_entry(&dev_priv->mm.bound_list,
-				       typeof(*obj), global_list);
-		list_move_tail(&obj->global_list, &still_in_list);
+		INIT_LIST_HEAD(&still_in_list);
+		while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+			struct drm_i915_gem_object *obj;
+			struct i915_vma *vma, *v;
 
-		if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-			continue;
+			obj = list_first_entry(&dev_priv->mm.bound_list,
+					       typeof(*obj), global_list);
+			list_move_tail(&obj->global_list, &still_in_list);
 
-		drm_gem_object_reference(&obj->base);
+			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+				continue;
 
-		list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-			if (i915_vma_unbind(vma))
-				break;
+			drm_gem_object_reference(&obj->base);
 
-		if (i915_gem_object_put_pages(obj) == 0)
-			count += obj->base.size >> PAGE_SHIFT;
+			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+				if (i915_vma_unbind(vma))
+					break;
 
-		drm_gem_object_unreference(&obj->base);
+			if (i915_gem_object_put_pages(obj) == 0)
+				count += obj->base.size >> PAGE_SHIFT;
+
+			drm_gem_object_unreference(&obj->base);
+		}
+		list_splice(&still_in_list, &dev_priv->mm.bound_list);
 	}
-	list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
 	return count;
 }
 
 static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-	return __i915_gem_shrink(dev_priv, target, true);
-}
-
-static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	i915_gem_evict_everything(dev_priv->dev);
-	return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+	return i915_gem_shrink(dev_priv, LONG_MAX,
+			       I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
 
 static int
@@ -2061,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	for (i = 0; i < page_count; i++) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
-			i915_gem_purge(dev_priv, page_count);
+			i915_gem_shrink(dev_priv,
+					page_count,
+					I915_SHRINK_BOUND |
+					I915_SHRINK_UNBOUND |
+					I915_SHRINK_PURGEABLE);
 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		}
 		if (IS_ERR(page)) {
@@ -2163,8 +2181,6 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
@@ -2183,19 +2199,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_read_seqno = seqno;
-
-	if (obj->fenced_gpu_access) {
-		obj->last_fenced_seqno = seqno;
-
-		/* Bump MRU to take account of the delayed flush */
-		if (obj->fence_reg != I915_FENCE_REG_NONE) {
-			struct drm_i915_fence_reg *reg;
-
-			reg = &dev_priv->fence_regs[obj->fence_reg];
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-		}
-	}
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2231,7 +2234,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	obj->base.write_domain = 0;
 
 	obj->last_fenced_seqno = 0;
-	obj->fenced_gpu_access = false;
 
 	obj->active = 0;
 	drm_gem_object_unreference(&obj->base);
@@ -2329,10 +2331,21 @@ int __i915_add_request(struct intel_engine_cs *ring,
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf;
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request_start = intel_ring_get_tail(ring->buffer);
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
+		return -ENOMEM;
+
+	if (i915.enable_execlists) {
+		struct intel_context *ctx = request->ctx;
+		ringbuf = ctx->engine[ring->id].ringbuf;
+	} else
+		ringbuf = ring->buffer;
+
+	request_start = intel_ring_get_tail(ringbuf);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2340,24 +2353,32 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	ret = intel_ring_flush_all_caches(ring);
-	if (ret)
-		return ret;
-
-	request = ring->preallocated_lazy_request;
-	if (WARN_ON(request == NULL))
-		return -ENOMEM;
+	if (i915.enable_execlists) {
+		ret = logical_ring_flush_all_caches(ringbuf);
+		if (ret)
+			return ret;
+	} else {
+		ret = intel_ring_flush_all_caches(ring);
+		if (ret)
+			return ret;
+	}
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request_ring_position = intel_ring_get_tail(ring->buffer);
+	request_ring_position = intel_ring_get_tail(ringbuf);
 
-	ret = ring->add_request(ring);
-	if (ret)
-		return ret;
+	if (i915.enable_execlists) {
+		ret = ring->emit_request(ringbuf);
+		if (ret)
+			return ret;
+	} else {
+		ret = ring->add_request(ring);
+		if (ret)
+			return ret;
+	}
 
 	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
@@ -2372,12 +2393,14 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 */
 	request->batch_obj = obj;
 
-	/* Hold a reference to the current context so that we can inspect
-	 * it later in case a hangcheck error event fires.
-	 */
-	request->ctx = ring->last_context;
-	if (request->ctx)
-		i915_gem_context_reference(request->ctx);
+	if (!i915.enable_execlists) {
+		/* Hold a reference to the current context so that we can inspect
+		 * it later in case a hangcheck error event fires.
+		 */
+		request->ctx = ring->last_context;
+		if (request->ctx)
+			i915_gem_context_reference(request->ctx);
+	}
 
 	request->emitted_jiffies = jiffies;
 	list_add_tail(&request->list, &ring->request_list);
@@ -2548,6 +2571,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 		i915_gem_free_request(request);
 	}
 
+	while (!list_empty(&ring->execlist_queue)) {
+		struct intel_ctx_submit_request *submit_req;
+
+		submit_req = list_first_entry(&ring->execlist_queue,
+					      struct intel_ctx_submit_request,
+					      execlist_link);
+		list_del(&submit_req->execlist_link);
+		intel_runtime_pm_put(dev_priv);
+		i915_gem_context_unreference(submit_req->ctx);
+		kfree(submit_req);
+	}
+
 	/* These may not have been flush before the reset, do so now */
 	kfree(ring->preallocated_lazy_request);
 	ring->preallocated_lazy_request = NULL;
@@ -2632,6 +2667,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
+		struct intel_ringbuffer *ringbuf;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
@@ -2641,12 +2677,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 			break;
 
 		trace_i915_gem_request_retire(ring, request->seqno);
+
+		/* This is one of the few common intersection points
+		 * between legacy ringbuffer submission and execlists:
+		 * we need to tell them apart in order to find the correct
+		 * ringbuffer to which the request belongs to.
+		 */
+		if (i915.enable_execlists) {
+			struct intel_context *ctx = request->ctx;
+			ringbuf = ctx->engine[ring->id].ringbuf;
+		} else
+			ringbuf = ring->buffer;
+
 		/* We know the GPU must have read the request to have
 		 * sent us the seqno + interrupt, so use the position
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
 		 */
-		ring->buffer->last_retired_head = request->tail;
+		ringbuf->last_retired_head = request->tail;
 
 		i915_gem_free_request(request);
 	}
@@ -2908,6 +2956,9 @@ int i915_vma_unbind(struct i915_vma *vma)
 	 * cause memory corruption through use-after-free.
 	 */
 
+	/* Throw away the active reference before moving to the unbound list */
+	i915_gem_object_retire(obj);
+
 	if (i915_is_ggtt(vma->vm)) {
 		i915_gem_object_finish_gtt(obj);
 
@@ -2922,9 +2973,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	vma->unbind_vma(vma);
 
 	list_del_init(&vma->mm_list);
-	/* Avoid an unnecessary call to unbind on rebind. */
 	if (i915_is_ggtt(vma->vm))
-		obj->map_and_fenceable = true;
+		obj->map_and_fenceable = false;
 
 	drm_mm_remove_node(&vma->node);
 	i915_gem_vma_destroy(vma);
@@ -2953,9 +3003,11 @@ int i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
-		ret = i915_switch_context(ring, ring->default_context);
-		if (ret)
-			return ret;
+		if (!i915.enable_execlists) {
+			ret = i915_switch_context(ring, ring->default_context);
+			if (ret)
+				return ret;
+		}
 
 		ret = intel_ring_idle(ring);
 		if (ret)
@@ -3169,7 +3221,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 		obj->last_fenced_seqno = 0;
 	}
 
-	obj->fenced_gpu_access = false;
 	return 0;
 }
 
@@ -3276,6 +3327,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 			return 0;
 		}
 	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
 		reg = i915_find_fence_reg(dev);
 		if (IS_ERR(reg))
 			return PTR_ERR(reg);
@@ -3297,17 +3351,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
-				     struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 				     unsigned long cache_level)
 {
+	struct drm_mm_node *gtt_space = &vma->node;
 	struct drm_mm_node *other;
 
-	/* On non-LLC machines we have to be careful when putting differing
-	 * types of snoopable memory together to avoid the prefetcher
-	 * crossing memory domains and dying.
+	/*
+	 * On some machines we have to be careful when putting differing types
+	 * of snoopable memory together to avoid the prefetcher crossing memory
+	 * domains and dying. During vm initialisation, we decide whether or not
+	 * these constraints apply and set the drm_mm.color_adjust
+	 * appropriately.
 	 */
-	if (HAS_LLC(dev))
+	if (vma->vm->mm.color_adjust == NULL)
 		return true;
 
 	if (!drm_mm_node_allocated(gtt_space))
@@ -3445,8 +3502,7 @@ search_free:
 
 		goto err_free_vma;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
 		ret = -EINVAL;
 		goto err_remove_node;
 	}
@@ -3586,11 +3642,12 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (!i915_gem_obj_bound_any(obj))
+	if (vma == NULL)
 		return -EINVAL;
 
 	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3632,13 +3689,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 					    old_write_domain);
 
 	/* And bump the LRU for this access */
-	if (i915_gem_object_is_inactive(obj)) {
-		struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
-		if (vma)
-			list_move_tail(&vma->mm_list,
-				       &dev_priv->gtt.base.inactive_list);
-
-	}
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&vma->mm_list,
+			       &dev_priv->gtt.base.inactive_list);
 
 	return 0;
 }
@@ -3659,7 +3712,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	}
 
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
 			ret = i915_vma_unbind(vma);
 			if (ret)
 				return ret;
@@ -3802,9 +3855,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	if (list_empty(&obj->vma_list))
-		return false;
-
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (!vma)
 		return false;
@@ -4331,8 +4381,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
 
 	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
@@ -4493,12 +4541,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
+	struct i915_address_space *vm = NULL;
 	WARN_ON(vma->node.allocated);
 
 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
 	if (!list_empty(&vma->exec_list))
 		return;
 
+	vm = vma->vm;
+
+	if (!i915_is_ggtt(vm))
+		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+
 	list_del(&vma->vma_link);
 
 	kfree(vma);
@@ -4512,7 +4566,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
 	int i;
 
 	for_each_ring(ring, dev_priv, i)
-		intel_stop_ring_buffer(ring);
+		dev_priv->gt.stop_ring(ring);
 }
 
 int
@@ -4629,11 +4683,46 @@ intel_enable_blt(struct drm_device *dev)
 	return true;
 }
 
-static int i915_gem_init_rings(struct drm_device *dev)
+static void init_unused_ring(struct drm_device *dev, u32 base)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RING_CTL(base), 0);
+	I915_WRITE(RING_HEAD(base), 0);
+	I915_WRITE(RING_TAIL(base), 0);
+	I915_WRITE(RING_START(base), 0);
+}
+
+static void init_unused_rings(struct drm_device *dev)
+{
+	if (IS_I830(dev)) {
+		init_unused_ring(dev, PRB1_BASE);
+		init_unused_ring(dev, SRB0_BASE);
+		init_unused_ring(dev, SRB1_BASE);
+		init_unused_ring(dev, SRB2_BASE);
+		init_unused_ring(dev, SRB3_BASE);
+	} else if (IS_GEN2(dev)) {
+		init_unused_ring(dev, SRB0_BASE);
+		init_unused_ring(dev, SRB1_BASE);
+	} else if (IS_GEN3(dev)) {
+		init_unused_ring(dev, PRB1_BASE);
+		init_unused_ring(dev, PRB2_BASE);
+	}
+}
+
+int i915_gem_init_rings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	/*
+	 * At least 830 can leave some of the unused rings
+	 * "active" (ie. head != tail) after resume which
+	 * will prevent c3 entry. Makes sure all unused rings
+	 * are totally idle.
+	 */
+	init_unused_rings(dev);
+
 	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
 		return ret;
@@ -4712,7 +4801,7 @@ i915_gem_init_hw(struct drm_device *dev)
 
 	i915_gem_init_swizzling(dev);
 
-	ret = i915_gem_init_rings(dev);
+	ret = dev_priv->gt.init_rings(dev);
 	if (ret)
 		return ret;
 
@@ -4730,6 +4819,14 @@ i915_gem_init_hw(struct drm_device *dev)
 	if (ret && ret != -EIO) {
 		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
+
+		return ret;
+	}
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret && ret != -EIO) {
+		DRM_ERROR("PPGTT enable failed %d\n", ret);
+		i915_gem_cleanup_ringbuffer(dev);
 	}
 
 	return ret;
@@ -4740,6 +4837,9 @@ int i915_gem_init(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+			i915.enable_execlists);
+
 	mutex_lock(&dev->struct_mutex);
 
 	if (IS_VALLEYVIEW(dev)) {
@@ -4750,7 +4850,24 @@ int i915_gem_init(struct drm_device *dev)
 		DRM_DEBUG_DRIVER("allow wake ack timed out\n");
 	}
 
-	i915_gem_init_userptr(dev);
+	if (!i915.enable_execlists) {
+		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+		dev_priv->gt.init_rings = i915_gem_init_rings;
+		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+	} else {
+		dev_priv->gt.do_execbuf = intel_execlists_submission;
+		dev_priv->gt.init_rings = intel_logical_rings_init;
+		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+		dev_priv->gt.stop_ring = intel_logical_ring_stop;
+	}
+
+	ret = i915_gem_init_userptr(dev);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
 	i915_gem_init_global_gtt(dev);
 
 	ret = i915_gem_context_init(dev);
@@ -4785,7 +4902,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	int i;
 
 	for_each_ring(ring, dev_priv, i)
-		intel_cleanup_ring_buffer(ring);
+		dev_priv->gt.cleanup_ring(ring);
 }
 
 int
@@ -5097,9 +5214,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	list_for_each_entry(vma, &o->vma_list, vma_link) {
 		if (vma->vm == vm)
@@ -5140,9 +5255,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	BUG_ON(list_empty(&o->vma_list));
 
@@ -5165,11 +5278,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!i915_gem_shrinker_lock(dev, &unlock))
 		return SHRINK_STOP;
 
-	freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+	freed = i915_gem_shrink(dev_priv,
+				sc->nr_to_scan,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_PURGEABLE);
 	if (freed < sc->nr_to_scan)
-		freed += __i915_gem_shrink(dev_priv,
-					   sc->nr_to_scan - freed,
-					   false);
+		freed += i915_gem_shrink(dev_priv,
+					 sc->nr_to_scan - freed,
+					 I915_SHRINK_BOUND |
+					 I915_SHRINK_UNBOUND);
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
 
@@ -5247,14 +5365,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	/* This WARN has probably outlived its usefulness (callers already
-	 * WARN if they don't find the GGTT vma they expect). When removing,
-	 * remember to remove the pre-check in is_pin_display() as well */
-	if (WARN_ON(list_empty(&obj->vma_list)))
-		return NULL;
-
 	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-	if (vma->vm != obj_to_ggtt(obj))
+	if (vma->vm != i915_obj_to_ggtt(obj))
 		return NULL;
 
 	return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3b99390e467a..a5221d8f1580 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -96,50 +96,6 @@
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
-{
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &ppgtt->base;
-
-	if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
-	    (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
-		ppgtt->base.cleanup(&ppgtt->base);
-		return;
-	}
-
-	/*
-	 * Make sure vmas are unbound before we take down the drm_mm
-	 *
-	 * FIXME: Proper refcounting should take care of this, this shouldn't be
-	 * needed at all.
-	 */
-	if (!list_empty(&vm->active_list)) {
-		struct i915_vma *vma;
-
-		list_for_each_entry(vma, &vm->active_list, mm_list)
-			if (WARN_ON(list_empty(&vma->vma_link) ||
-				    list_is_singular(&vma->vma_link)))
-				break;
-
-		i915_gem_evict_vm(&ppgtt->base, true);
-	} else {
-		i915_gem_retire_requests(dev);
-		i915_gem_evict_vm(&ppgtt->base, false);
-	}
-
-	ppgtt->base.cleanup(&ppgtt->base);
-}
-
-static void ppgtt_release(struct kref *kref)
-{
-	struct i915_hw_ppgtt *ppgtt =
-		container_of(kref, struct i915_hw_ppgtt, ref);
-
-	do_ppgtt_cleanup(ppgtt);
-	kfree(ppgtt);
-}
-
 static size_t get_context_alignment(struct drm_device *dev)
 {
 	if (IS_GEN6(dev))
@@ -179,24 +135,20 @@ static int get_context_size(struct drm_device *dev)
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct intel_context *ctx = container_of(ctx_ref,
 						 typeof(*ctx), ref);
-	struct i915_hw_ppgtt *ppgtt = NULL;
 
-	if (ctx->legacy_hw_ctx.rcs_state) {
-		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
-			ppgtt = ctx_to_ppgtt(ctx);
-	}
+	if (i915.enable_execlists)
+		intel_lr_context_free(ctx);
+
+	i915_ppgtt_put(ctx->ppgtt);
 
-	if (ppgtt)
-		kref_put(&ppgtt->ref, ppgtt_release);
 	if (ctx->legacy_hw_ctx.rcs_state)
 		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
 	list_del(&ctx->link);
 	kfree(ctx);
 }
 
-static struct drm_i915_gem_object *
+struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 {
 	struct drm_i915_gem_object *obj;
@@ -226,29 +178,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	return obj;
 }
 
-static struct i915_hw_ppgtt *
-create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
-{
-	struct i915_hw_ppgtt *ppgtt;
-	int ret;
-
-	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-	if (!ppgtt)
-		return ERR_PTR(-ENOMEM);
-
-	ret = i915_gem_init_ppgtt(dev, ppgtt);
-	if (ret) {
-		kfree(ppgtt);
-		return ERR_PTR(ret);
-	}
-
-	ppgtt->ctx = ctx;
-	return ppgtt;
-}
-
 static struct intel_context *
 __create_hw_context(struct drm_device *dev,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
@@ -301,11 +233,9 @@ err_out:
  */
 static struct intel_context *
 i915_gem_create_context(struct drm_device *dev,
-			struct drm_i915_file_private *file_priv,
-			bool create_vm)
+			struct drm_i915_file_private *file_priv)
 {
 	const bool is_global_default_ctx = file_priv == NULL;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
 	int ret = 0;
 
@@ -331,34 +261,18 @@ i915_gem_create_context(struct drm_device *dev,
 		}
 	}
 
-	if (create_vm) {
-		struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+	if (USES_FULL_PPGTT(dev)) {
+		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
 		if (IS_ERR_OR_NULL(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
 			ret = PTR_ERR(ppgtt);
 			goto err_unpin;
-		} else
-			ctx->vm = &ppgtt->base;
-
-		/* This case is reserved for the global default context and
-		 * should only happen once. */
-		if (is_global_default_ctx) {
-			if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
-				ret = -EEXIST;
-				goto err_unpin;
-			}
-
-			dev_priv->mm.aliasing_ppgtt = ppgtt;
-		}
-	} else if (USES_PPGTT(dev)) {
-		/* For platforms which only have aliasing PPGTT, we fake the
-		 * address space and refcounting. */
-		ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
-		kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
-	} else
-		ctx->vm = &dev_priv->gtt.base;
+		}
+
+		ctx->ppgtt = ppgtt;
+	}
 
 	return ctx;
 
@@ -375,34 +289,23 @@ void i915_gem_context_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	/* Prevent the hardware from restoring the last context (which hung) on
-	 * the next switch */
+	/* In execlists mode we will unreference the context when the execlist
+	 * queue is cleared and the requests destroyed.
+	 */
+	if (i915.enable_execlists)
+		return;
+
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
-		struct intel_context *dctx = ring->default_context;
 		struct intel_context *lctx = ring->last_context;
 
-		/* Do a fake switch to the default context */
-		if (lctx == dctx)
-			continue;
-
-		if (!lctx)
-			continue;
+		if (lctx) {
+			if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+				i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
-		if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
-						      get_context_alignment(dev), 0));
-			/* Fake a finish/inactive */
-			dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
-			dctx->legacy_hw_ctx.rcs_state->active = 0;
+			i915_gem_context_unreference(lctx);
+			ring->last_context = NULL;
 		}
-
-		if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-			i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
-
-		i915_gem_context_unreference(lctx);
-		i915_gem_context_reference(dctx);
-		ring->last_context = dctx;
 	}
 }
 
408 311
@@ -417,7 +320,11 @@ int i915_gem_context_init(struct drm_device *dev)
417 if (WARN_ON(dev_priv->ring[RCS].default_context)) 320 if (WARN_ON(dev_priv->ring[RCS].default_context))
418 return 0; 321 return 0;
419 322
420 if (HAS_HW_CONTEXTS(dev)) { 323 if (i915.enable_execlists) {
324 /* NB: intentionally left blank. We will allocate our own
325 * backing objects as we need them, thank you very much */
326 dev_priv->hw_context_size = 0;
327 } else if (HAS_HW_CONTEXTS(dev)) {
421 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 328 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
422 if (dev_priv->hw_context_size > (1<<20)) { 329 if (dev_priv->hw_context_size > (1<<20)) {
423 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", 330 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
@@ -426,18 +333,23 @@ int i915_gem_context_init(struct drm_device *dev)
 		}
 	}
 
-	ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, NULL);
 	if (IS_ERR(ctx)) {
 		DRM_ERROR("Failed to create default global context (error %ld)\n",
 			  PTR_ERR(ctx));
 		return PTR_ERR(ctx);
 	}
 
-	/* NB: RCS will hold a ref for all rings */
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		dev_priv->ring[i].default_context = ctx;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-	DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
+		/* NB: RCS will hold a ref for all rings */
+		ring->default_context = ctx;
+	}
+
+	DRM_DEBUG_DRIVER("%s context support initialized\n",
+			 i915.enable_execlists ? "LR" :
+			 dev_priv->hw_context_size ? "HW" : "fake");
 	return 0;
 }
 
@@ -489,19 +401,11 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *ring;
 	int ret, i;
 
-	/* This is the only place the aliasing PPGTT gets enabled, which means
-	 * it has to happen before we bail on reset */
-	if (dev_priv->mm.aliasing_ppgtt) {
-		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-		ppgtt->enable(ppgtt);
-	}
+	BUG_ON(!dev_priv->ring[RCS].default_context);
 
-	/* FIXME: We should make this work, even in reset */
-	if (i915_reset_in_progress(&dev_priv->gpu_error))
+	if (i915.enable_execlists)
 		return 0;
 
-	BUG_ON(!dev_priv->ring[RCS].default_context);
-
 	for_each_ring(ring, dev_priv, i) {
 		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
@@ -527,7 +431,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
 	idr_init(&file_priv->context_idr);
 
 	mutex_lock(&dev->struct_mutex);
-	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (IS_ERR(ctx)) {
@@ -563,6 +467,7 @@ mi_set_context(struct intel_engine_cs *ring,
 	       struct intel_context *new_context,
 	       u32 hw_flags)
 {
+	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	int ret;
 
 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
@@ -576,6 +481,10 @@ mi_set_context(struct intel_engine_cs *ring,
 			return ret;
 	}
 
+	/* These flags are for resource streamer on HSW+ */
+	if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
+
 	ret = intel_ring_begin(ring, 6);
 	if (ret)
 		return ret;
@@ -589,10 +498,7 @@ mi_set_context(struct intel_engine_cs *ring,
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
 	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
-			MI_MM_SPACE_GTT |
-			MI_SAVE_EXT_STATE_EN |
-			MI_RESTORE_EXT_STATE_EN |
-			hw_flags);
+			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -614,7 +520,6 @@ static int do_switch(struct intel_engine_cs *ring,
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_context *from = ring->last_context;
-	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
 	u32 hw_flags = 0;
 	bool uninitialized = false;
 	int ret, i;
@@ -642,8 +547,8 @@ static int do_switch(struct intel_engine_cs *ring,
 	 */
 	from = ring->last_context;
 
-	if (USES_FULL_PPGTT(ring->dev)) {
-		ret = ppgtt->switch_mm(ppgtt, ring, false);
+	if (to->ppgtt) {
+		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
 		if (ret)
 			goto unpin_out;
 	}
@@ -723,6 +628,12 @@ done:
 	ring->last_context = to;
 
 	if (uninitialized) {
+		if (ring->init_context) {
+			ret = ring->init_context(ring);
+			if (ret)
+				DRM_ERROR("ring init context: %d\n", ret);
+		}
+
 		ret = i915_gem_render_state_init(ring);
 		if (ret)
 			DRM_ERROR("init render state: %d\n", ret);
@@ -743,14 +654,19 @@ unpin_out:
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
- * it will have a refoucnt > 1. This allows us to destroy the context abstract
+ * it will have a refcount > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
+ *
+ * This function should not be used in execlists mode. Instead the context is
+ * switched by writing to the ELSP and requests keep a reference to their
+ * context.
  */
 int i915_switch_context(struct intel_engine_cs *ring,
 			struct intel_context *to)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
+	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
@@ -766,9 +682,9 @@ int i915_switch_context(struct intel_engine_cs *ring,
 	return do_switch(ring, to);
 }
 
-static bool hw_context_enabled(struct drm_device *dev)
+static bool contexts_enabled(struct drm_device *dev)
 {
-	return to_i915(dev)->hw_context_size;
+	return i915.enable_execlists || to_i915(dev)->hw_context_size;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -779,14 +695,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	struct intel_context *ctx;
 	int ret;
 
-	if (!hw_context_enabled(dev))
+	if (!contexts_enabled(dev))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+	ctx = i915_gem_create_context(dev, file_priv);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bbf4b12d842e..886ff2ee7a28 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -243,7 +243,7 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm;
+	struct i915_address_space *vm, *v;
 	bool lists_empty = true;
 	int ret;
 
@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
 		WARN_ON(i915_gem_evict_vm(vm, false));
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 60998fc4e5b2..1a0611bb576b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -35,6 +35,7 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
 
 #define BATCH_OFFSET_BIAS (256*1024)
@@ -94,7 +95,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	       struct i915_address_space *vm,
 	       struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
 	int i, ret;
@@ -129,20 +129,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	i = 0;
 	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
-		struct i915_address_space *bind_vm = vm;
-
-		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
-		    USES_FULL_PPGTT(vm->dev)) {
-			ret = -EINVAL;
-			goto err;
-		}
-
-		/* If we have secure dispatch, or the userspace assures us that
-		 * they know what they're doing, use the GGTT VM.
-		 */
-		if (((args->flags & I915_EXEC_SECURE) &&
-		    (i == (args->buffer_count - 1))))
-			bind_vm = &dev_priv->gtt.base;
 
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
@@ -156,7 +142,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		 * from the (obj, vm) we don't run the risk of creating
 		 * duplicated vmas for the same vm.
 		 */
-		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
+		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
@@ -307,7 +293,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint64_t delta = reloc->delta + target_offset;
-	uint32_t __iomem *reloc_entry;
+	uint64_t offset;
 	void __iomem *reloc_page;
 	int ret;
 
@@ -320,25 +306,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		return ret;
 
 	/* Map the page containing the relocation we're going to perform. */
-	reloc->offset += i915_gem_obj_ggtt_offset(obj);
+	offset = i915_gem_obj_ggtt_offset(obj);
+	offset += reloc->offset;
 	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-					      reloc->offset & PAGE_MASK);
-	reloc_entry = (uint32_t __iomem *)
-		(reloc_page + offset_in_page(reloc->offset));
-	iowrite32(lower_32_bits(delta), reloc_entry);
+					      offset & PAGE_MASK);
+	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
 	if (INTEL_INFO(dev)->gen >= 8) {
-		reloc_entry += 1;
+		offset += sizeof(uint32_t);
 
-		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+		if (offset_in_page(offset) == 0) {
 			io_mapping_unmap_atomic(reloc_page);
-			reloc_page = io_mapping_map_atomic_wc(
-					dev_priv->gtt.mappable,
-					reloc->offset + sizeof(uint32_t));
-			reloc_entry = reloc_page;
+			reloc_page =
+				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+							 offset);
 		}
 
-		iowrite32(upper_32_bits(delta), reloc_entry);
+		iowrite32(upper_32_bits(delta),
+			  reloc_page + offset_in_page(offset));
 	}
 
 	io_mapping_unmap_atomic(reloc_page);
@@ -535,34 +520,18 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
 }
 
 static int
-need_reloc_mappable(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
-		i915_is_ggtt(vma->vm);
-}
-
-static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 				struct intel_engine_cs *ring,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence;
 	uint64_t flags;
 	int ret;
 
 	flags = 0;
-
-	need_fence =
-		has_fenced_gpu_access &&
-		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		obj->tiling_mode != I915_TILING_NONE;
-	if (need_fence || need_reloc_mappable(vma))
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
 		flags |= PIN_MAPPABLE;
-
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -574,17 +543,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
-	if (has_fenced_gpu_access) {
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			ret = i915_gem_object_get_fence(obj);
-			if (ret)
-				return ret;
-
-			if (i915_gem_object_pin_fence(obj))
-				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+		ret = i915_gem_object_get_fence(obj);
+		if (ret)
+			return ret;
 
-			obj->pending_fenced_gpu_access = true;
-		}
+		if (i915_gem_object_pin_fence(obj))
+			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 	}
 
 	if (entry->offset != vma->node.start) {
@@ -601,26 +566,40 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 }
 
 static bool
-eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+need_reloc_mappable(struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-	bool need_fence, need_mappable;
 
-	need_fence =
-		has_fenced_gpu_access &&
-		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(vma);
+	if (entry->relocation_count == 0)
+		return false;
+
+	if (!i915_is_ggtt(vma->vm))
+		return false;
+
+	/* See also use_cpu_reloc() */
+	if (HAS_LLC(vma->obj->base.dev))
+		return false;
 
-	WARN_ON((need_mappable || need_fence) &&
+	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+		return false;
+
+	return true;
+}
+
+static bool
+eb_vma_misplaced(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
 	       !i915_is_ggtt(vma->vm));
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
 		return true;
 
-	if (need_mappable && !obj->map_and_fenceable)
+	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
 		return true;
 
 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
@@ -642,9 +621,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	int retry;
 
-	if (list_empty(vmas))
-		return 0;
-
 	i915_gem_retire_requests_ring(ring);
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -658,20 +634,21 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		obj = vma->obj;
 		entry = vma->exec_entry;
 
+		if (!has_fenced_gpu_access)
+			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
 		need_fence =
-			has_fenced_gpu_access &&
 			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 			obj->tiling_mode != I915_TILING_NONE;
 		need_mappable = need_fence || need_reloc_mappable(vma);
 
-		if (need_mappable)
+		if (need_mappable) {
+			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
 			list_move(&vma->exec_list, &ordered_vmas);
-		else
+		} else
 			list_move_tail(&vma->exec_list, &ordered_vmas);
 
 		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
 		obj->base.pending_write_domain = 0;
-		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_vmas, vmas);
 
@@ -696,7 +673,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		if (eb_vma_misplaced(vma, has_fenced_gpu_access))
+		if (eb_vma_misplaced(vma))
 			ret = i915_vma_unbind(vma);
 		else
 			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -744,9 +721,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	int i, total, ret;
 	unsigned count = args->buffer_count;
 
-	if (WARN_ON(list_empty(&eb->vmas)))
-		return 0;
-
 	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
 	/* We may process another execbuffer during the unlock... */
@@ -890,18 +864,24 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
890} 864}
891 865
892static int 866static int
893validate_exec_list(struct drm_i915_gem_exec_object2 *exec, 867validate_exec_list(struct drm_device *dev,
868 struct drm_i915_gem_exec_object2 *exec,
894 int count) 869 int count)
895{ 870{
896 int i;
897 unsigned relocs_total = 0; 871 unsigned relocs_total = 0;
898 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry); 872 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
873 unsigned invalid_flags;
874 int i;
875
876 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
877 if (USES_FULL_PPGTT(dev))
878 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
899 879
900 for (i = 0; i < count; i++) { 880 for (i = 0; i < count; i++) {
901 char __user *ptr = to_user_ptr(exec[i].relocs_ptr); 881 char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
902 int length; /* limited by fault_in_pages_readable() */ 882 int length; /* limited by fault_in_pages_readable() */
903 883
904 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) 884 if (exec[i].flags & invalid_flags)
905 return -EINVAL; 885 return -EINVAL;
906 886
907 /* First check for malicious input causing overflow in 887 /* First check for malicious input causing overflow in
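
validate_exec_list() now needs the device because the set of acceptable flags depends on the addressing mode: with full PPGTT every context gets its own address space, so a request for EXEC_OBJECT_NEEDS_GTT is rejected up front with -EINVAL. A minimal sketch of the mask built above:

        unsigned invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;

        if (USES_FULL_PPGTT(dev))
                invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

        for (i = 0; i < count; i++)
                if (exec[i].flags & invalid_flags)
                        return -EINVAL;
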
@@ -951,16 +931,26 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
951 return ERR_PTR(-EIO); 931 return ERR_PTR(-EIO);
952 } 932 }
953 933
934 if (i915.enable_execlists && !ctx->engine[ring->id].state) {
935 int ret = intel_lr_context_deferred_create(ctx, ring);
936 if (ret) {
937 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
938 return ERR_PTR(ret);
939 }
940 }
941
954 return ctx; 942 return ctx;
955} 943}
956 944
957static void 945void
958i915_gem_execbuffer_move_to_active(struct list_head *vmas, 946i915_gem_execbuffer_move_to_active(struct list_head *vmas,
959 struct intel_engine_cs *ring) 947 struct intel_engine_cs *ring)
960{ 948{
949 u32 seqno = intel_ring_get_seqno(ring);
961 struct i915_vma *vma; 950 struct i915_vma *vma;
962 951
963 list_for_each_entry(vma, vmas, exec_list) { 952 list_for_each_entry(vma, vmas, exec_list) {
953 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
964 struct drm_i915_gem_object *obj = vma->obj; 954 struct drm_i915_gem_object *obj = vma->obj;
965 u32 old_read = obj->base.read_domains; 955 u32 old_read = obj->base.read_domains;
966 u32 old_write = obj->base.write_domain; 956 u32 old_write = obj->base.write_domain;
@@ -969,24 +959,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
969 if (obj->base.write_domain == 0) 959 if (obj->base.write_domain == 0)
970 obj->base.pending_read_domains |= obj->base.read_domains; 960 obj->base.pending_read_domains |= obj->base.read_domains;
971 obj->base.read_domains = obj->base.pending_read_domains; 961 obj->base.read_domains = obj->base.pending_read_domains;
972 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
973 962
974 i915_vma_move_to_active(vma, ring); 963 i915_vma_move_to_active(vma, ring);
975 if (obj->base.write_domain) { 964 if (obj->base.write_domain) {
976 obj->dirty = 1; 965 obj->dirty = 1;
977 obj->last_write_seqno = intel_ring_get_seqno(ring); 966 obj->last_write_seqno = seqno;
978 967
979 intel_fb_obj_invalidate(obj, ring); 968 intel_fb_obj_invalidate(obj, ring);
980 969
981 /* update for the implicit flush after a batch */ 970 /* update for the implicit flush after a batch */
982 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; 971 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
983 } 972 }
973 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
974 obj->last_fenced_seqno = seqno;
975 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
976 struct drm_i915_private *dev_priv = to_i915(ring->dev);
977 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
978 &dev_priv->mm.fence_list);
979 }
980 }
984 981
985 trace_i915_gem_object_change_domain(obj, old_read, old_write); 982 trace_i915_gem_object_change_domain(obj, old_read, old_write);
986 } 983 }
987} 984}
988 985
989static void 986void
990i915_gem_execbuffer_retire_commands(struct drm_device *dev, 987i915_gem_execbuffer_retire_commands(struct drm_device *dev,
991 struct drm_file *file, 988 struct drm_file *file,
992 struct intel_engine_cs *ring, 989 struct intel_engine_cs *ring,
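
i915_gem_execbuffer_move_to_active() now samples the request seqno once and derives fence state from the execbuffer entry rather than the removed obj->pending_fenced_gpu_access: objects whose entry asked for a fence get last_fenced_seqno updated, and if a fence register was actually assigned (__EXEC_OBJECT_HAS_FENCE) that register is moved to the tail of the LRU. Both helpers also lose their static qualifiers so the execlists submission path can reuse them. Condensed sketch of the fence bookkeeping added above:

        u32 seqno = intel_ring_get_seqno(ring);

        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                obj->last_fenced_seqno = seqno;
                if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
                        struct drm_i915_private *dev_priv = to_i915(ring->dev);

                        list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
                                       &dev_priv->mm.fence_list);
                }
        }
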
@@ -1026,14 +1023,14 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1026 return 0; 1023 return 0;
1027} 1024}
1028 1025
1029static int 1026int
1030legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, 1027i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
1031 struct intel_engine_cs *ring, 1028 struct intel_engine_cs *ring,
1032 struct intel_context *ctx, 1029 struct intel_context *ctx,
1033 struct drm_i915_gem_execbuffer2 *args, 1030 struct drm_i915_gem_execbuffer2 *args,
1034 struct list_head *vmas, 1031 struct list_head *vmas,
1035 struct drm_i915_gem_object *batch_obj, 1032 struct drm_i915_gem_object *batch_obj,
1036 u64 exec_start, u32 flags) 1033 u64 exec_start, u32 flags)
1037{ 1034{
1038 struct drm_clip_rect *cliprects = NULL; 1035 struct drm_clip_rect *cliprects = NULL;
1039 struct drm_i915_private *dev_priv = dev->dev_private; 1036 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1254,7 +1251,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1254 if (!i915_gem_check_execbuffer(args)) 1251 if (!i915_gem_check_execbuffer(args))
1255 return -EINVAL; 1252 return -EINVAL;
1256 1253
1257 ret = validate_exec_list(exec, args->buffer_count); 1254 ret = validate_exec_list(dev, exec, args->buffer_count);
1258 if (ret) 1255 if (ret)
1259 return ret; 1256 return ret;
1260 1257
@@ -1318,8 +1315,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1318 1315
1319 i915_gem_context_reference(ctx); 1316 i915_gem_context_reference(ctx);
1320 1317
1321 vm = ctx->vm; 1318 if (ctx->ppgtt)
1322 if (!USES_FULL_PPGTT(dev)) 1319 vm = &ctx->ppgtt->base;
1320 else
1323 vm = &dev_priv->gtt.base; 1321 vm = &dev_priv->gtt.base;
1324 1322
1325 eb = eb_create(args); 1323 eb = eb_create(args);
@@ -1386,25 +1384,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1386 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure 1384 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1387 * batch" bit. Hence we need to pin secure batches into the global gtt. 1385 * batch" bit. Hence we need to pin secure batches into the global gtt.
1388 * hsw should have this fixed, but bdw mucks it up again. */ 1386 * hsw should have this fixed, but bdw mucks it up again. */
1389 if (flags & I915_DISPATCH_SECURE && 1387 if (flags & I915_DISPATCH_SECURE) {
1390 !batch_obj->has_global_gtt_mapping) { 1388 /*
1391 /* When we have multiple VMs, we'll need to make sure that we 1389 * So at first glance it looks freaky that we pin the batch here
1392 * allocate space first */ 1390 * outside of the reservation loop. But:
1393 struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj); 1391 * - The batch is already pinned into the relevant ppgtt, so we
1394 BUG_ON(!vma); 1392 * already have the backing storage fully allocated.
1395 vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND); 1393 * - No other BO uses the global gtt (well contexts, but meh),
1396 } 1394 * so we don't really have issues with multiple objects not
1395 * fitting due to fragmentation.
1396 * So this is actually safe.
1397 */
1398 ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1399 if (ret)
1400 goto err;
1397 1401
1398 if (flags & I915_DISPATCH_SECURE)
1399 exec_start += i915_gem_obj_ggtt_offset(batch_obj); 1402 exec_start += i915_gem_obj_ggtt_offset(batch_obj);
1400 else 1403 } else
1401 exec_start += i915_gem_obj_offset(batch_obj, vm); 1404 exec_start += i915_gem_obj_offset(batch_obj, vm);
1402 1405
1403 ret = legacy_ringbuffer_submission(dev, file, ring, ctx, 1406 ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
1404 args, &eb->vmas, batch_obj, exec_start, flags); 1407 &eb->vmas, batch_obj, exec_start, flags);
1405 if (ret)
1406 goto err;
1407 1408
1409 /*
1410 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1411 * batch vma for correctness. For less ugly and less fragility this
1412 * needs to be adjusted to also track the ggtt batch vma properly as
1413 * active.
1414 */
1415 if (flags & I915_DISPATCH_SECURE)
1416 i915_gem_object_ggtt_unpin(batch_obj);
1408err: 1417err:
1409 /* the request owns the ref now */ 1418 /* the request owns the ref now */
1410 i915_gem_context_unreference(ctx); 1419 i915_gem_context_unreference(ctx);
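
The secure-batch path stops calling vma->bind_vma() by hand and instead pins the batch into the global GTT around the submission, and the submission itself goes through the dev_priv->gt.do_execbuf hook (legacy ring submission or execlists, wired up elsewhere in this series). The FIXME notes that the GGTT batch VMA is not tracked as active, which is why the unpin straight after dispatch is tolerable but fragile. Sketch of the resulting flow:

        if (flags & I915_DISPATCH_SECURE) {
                ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
                if (ret)
                        goto err;
                exec_start += i915_gem_obj_ggtt_offset(batch_obj);
        } else {
                exec_start += i915_gem_obj_offset(batch_obj, vm);
        }

        ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
                                      &eb->vmas, batch_obj, exec_start, flags);

        if (flags & I915_DISPATCH_SECURE)
                i915_gem_object_ggtt_unpin(batch_obj);
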
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e42925f76b4b..b672b843fd5e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -33,17 +33,6 @@
33static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); 33static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
34static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); 34static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
35 35
36bool intel_enable_ppgtt(struct drm_device *dev, bool full)
37{
38 if (i915.enable_ppgtt == 0)
39 return false;
40
41 if (i915.enable_ppgtt == 1 && full)
42 return false;
43
44 return true;
45}
46
47static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 36static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
48{ 37{
49 if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) 38 if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
@@ -78,7 +67,6 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
78 enum i915_cache_level cache_level, 67 enum i915_cache_level cache_level,
79 u32 flags); 68 u32 flags);
80static void ppgtt_unbind_vma(struct i915_vma *vma); 69static void ppgtt_unbind_vma(struct i915_vma *vma);
81static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
82 70
83static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, 71static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
84 enum i915_cache_level level, 72 enum i915_cache_level level,
@@ -216,19 +204,12 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
216 204
217/* Broadwell Page Directory Pointer Descriptors */ 205/* Broadwell Page Directory Pointer Descriptors */
218static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry, 206static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
219 uint64_t val, bool synchronous) 207 uint64_t val)
220{ 208{
221 struct drm_i915_private *dev_priv = ring->dev->dev_private;
222 int ret; 209 int ret;
223 210
224 BUG_ON(entry >= 4); 211 BUG_ON(entry >= 4);
225 212
226 if (synchronous) {
227 I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
228 I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
229 return 0;
230 }
231
232 ret = intel_ring_begin(ring, 6); 213 ret = intel_ring_begin(ring, 6);
233 if (ret) 214 if (ret)
234 return ret; 215 return ret;
@@ -245,8 +226,7 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
245} 226}
246 227
247static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, 228static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
248 struct intel_engine_cs *ring, 229 struct intel_engine_cs *ring)
249 bool synchronous)
250{ 230{
251 int i, ret; 231 int i, ret;
252 232
@@ -255,7 +235,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
255 235
256 for (i = used_pd - 1; i >= 0; i--) { 236 for (i = used_pd - 1; i >= 0; i--) {
257 dma_addr_t addr = ppgtt->pd_dma_addr[i]; 237 dma_addr_t addr = ppgtt->pd_dma_addr[i];
258 ret = gen8_write_pdp(ring, i, addr, synchronous); 238 ret = gen8_write_pdp(ring, i, addr);
259 if (ret) 239 if (ret)
260 return ret; 240 return ret;
261 } 241 }
@@ -403,9 +383,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
403 struct i915_hw_ppgtt *ppgtt = 383 struct i915_hw_ppgtt *ppgtt =
404 container_of(vm, struct i915_hw_ppgtt, base); 384 container_of(vm, struct i915_hw_ppgtt, base);
405 385
406 list_del(&vm->global_link);
407 drm_mm_takedown(&vm->mm);
408
409 gen8_ppgtt_unmap_pages(ppgtt); 386 gen8_ppgtt_unmap_pages(ppgtt);
410 gen8_ppgtt_free(ppgtt); 387 gen8_ppgtt_free(ppgtt);
411} 388}
@@ -615,7 +592,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
615 kunmap_atomic(pd_vaddr); 592 kunmap_atomic(pd_vaddr);
616 } 593 }
617 594
618 ppgtt->enable = gen8_ppgtt_enable;
619 ppgtt->switch_mm = gen8_mm_switch; 595 ppgtt->switch_mm = gen8_mm_switch;
620 ppgtt->base.clear_range = gen8_ppgtt_clear_range; 596 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
621 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; 597 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
@@ -724,29 +700,10 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
724} 700}
725 701
726static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, 702static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
727 struct intel_engine_cs *ring, 703 struct intel_engine_cs *ring)
728 bool synchronous)
729{ 704{
730 struct drm_device *dev = ppgtt->base.dev;
731 struct drm_i915_private *dev_priv = dev->dev_private;
732 int ret; 705 int ret;
733 706
734 /* If we're in reset, we can assume the GPU is sufficiently idle to
735 * manually frob these bits. Ideally we could use the ring functions,
736 * except our error handling makes it quite difficult (can't use
737 * intel_ring_begin, ring->flush, or intel_ring_advance)
738 *
739 * FIXME: We should try not to special case reset
740 */
741 if (synchronous ||
742 i915_reset_in_progress(&dev_priv->gpu_error)) {
743 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
744 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
745 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
746 POSTING_READ(RING_PP_DIR_BASE(ring));
747 return 0;
748 }
749
750 /* NB: TLBs must be flushed and invalidated before a switch */ 707 /* NB: TLBs must be flushed and invalidated before a switch */
751 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 708 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
752 if (ret) 709 if (ret)
@@ -768,29 +725,10 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
768} 725}
769 726
770static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 727static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
771 struct intel_engine_cs *ring, 728 struct intel_engine_cs *ring)
772 bool synchronous)
773{ 729{
774 struct drm_device *dev = ppgtt->base.dev;
775 struct drm_i915_private *dev_priv = dev->dev_private;
776 int ret; 730 int ret;
777 731
778 /* If we're in reset, we can assume the GPU is sufficiently idle to
779 * manually frob these bits. Ideally we could use the ring functions,
780 * except our error handling makes it quite difficult (can't use
781 * intel_ring_begin, ring->flush, or intel_ring_advance)
782 *
783 * FIXME: We should try not to special case reset
784 */
785 if (synchronous ||
786 i915_reset_in_progress(&dev_priv->gpu_error)) {
787 WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
788 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
789 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
790 POSTING_READ(RING_PP_DIR_BASE(ring));
791 return 0;
792 }
793
794 /* NB: TLBs must be flushed and invalidated before a switch */ 732 /* NB: TLBs must be flushed and invalidated before a switch */
795 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 733 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
796 if (ret) 734 if (ret)
@@ -819,14 +757,11 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
819} 757}
820 758
821static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, 759static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
822 struct intel_engine_cs *ring, 760 struct intel_engine_cs *ring)
823 bool synchronous)
824{ 761{
825 struct drm_device *dev = ppgtt->base.dev; 762 struct drm_device *dev = ppgtt->base.dev;
826 struct drm_i915_private *dev_priv = dev->dev_private; 763 struct drm_i915_private *dev_priv = dev->dev_private;
827 764
828 if (!synchronous)
829 return 0;
830 765
831 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); 766 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
832 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); 767 I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
@@ -836,39 +771,20 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
836 return 0; 771 return 0;
837} 772}
838 773
839static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 774static void gen8_ppgtt_enable(struct drm_device *dev)
840{ 775{
841 struct drm_device *dev = ppgtt->base.dev;
842 struct drm_i915_private *dev_priv = dev->dev_private; 776 struct drm_i915_private *dev_priv = dev->dev_private;
843 struct intel_engine_cs *ring; 777 struct intel_engine_cs *ring;
844 int j, ret; 778 int j;
845 779
846 for_each_ring(ring, dev_priv, j) { 780 for_each_ring(ring, dev_priv, j) {
847 I915_WRITE(RING_MODE_GEN7(ring), 781 I915_WRITE(RING_MODE_GEN7(ring),
848 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 782 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
849
850 /* We promise to do a switch later with FULL PPGTT. If this is
851 * aliasing, this is the one and only switch we'll do */
852 if (USES_FULL_PPGTT(dev))
853 continue;
854
855 ret = ppgtt->switch_mm(ppgtt, ring, true);
856 if (ret)
857 goto err_out;
858 } 783 }
859
860 return 0;
861
862err_out:
863 for_each_ring(ring, dev_priv, j)
864 I915_WRITE(RING_MODE_GEN7(ring),
865 _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
866 return ret;
867} 784}
868 785
869static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 786static void gen7_ppgtt_enable(struct drm_device *dev)
870{ 787{
871 struct drm_device *dev = ppgtt->base.dev;
872 struct drm_i915_private *dev_priv = dev->dev_private; 788 struct drm_i915_private *dev_priv = dev->dev_private;
873 struct intel_engine_cs *ring; 789 struct intel_engine_cs *ring;
874 uint32_t ecochk, ecobits; 790 uint32_t ecochk, ecobits;
@@ -887,31 +803,16 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
887 I915_WRITE(GAM_ECOCHK, ecochk); 803 I915_WRITE(GAM_ECOCHK, ecochk);
888 804
889 for_each_ring(ring, dev_priv, i) { 805 for_each_ring(ring, dev_priv, i) {
890 int ret;
891 /* GFX_MODE is per-ring on gen7+ */ 806 /* GFX_MODE is per-ring on gen7+ */
892 I915_WRITE(RING_MODE_GEN7(ring), 807 I915_WRITE(RING_MODE_GEN7(ring),
893 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 808 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
894
895 /* We promise to do a switch later with FULL PPGTT. If this is
896 * aliasing, this is the one and only switch we'll do */
897 if (USES_FULL_PPGTT(dev))
898 continue;
899
900 ret = ppgtt->switch_mm(ppgtt, ring, true);
901 if (ret)
902 return ret;
903 } 809 }
904
905 return 0;
906} 810}
907 811
908static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 812static void gen6_ppgtt_enable(struct drm_device *dev)
909{ 813{
910 struct drm_device *dev = ppgtt->base.dev;
911 struct drm_i915_private *dev_priv = dev->dev_private; 814 struct drm_i915_private *dev_priv = dev->dev_private;
912 struct intel_engine_cs *ring;
913 uint32_t ecochk, gab_ctl, ecobits; 815 uint32_t ecochk, gab_ctl, ecobits;
914 int i;
915 816
916 ecobits = I915_READ(GAC_ECO_BITS); 817 ecobits = I915_READ(GAC_ECO_BITS);
917 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | 818 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
@@ -924,14 +825,6 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
924 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); 825 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
925 826
926 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); 827 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
927
928 for_each_ring(ring, dev_priv, i) {
929 int ret = ppgtt->switch_mm(ppgtt, ring, true);
930 if (ret)
931 return ret;
932 }
933
934 return 0;
935} 828}
936 829
937/* PPGTT support for Sandybridge/Gen6 and later */ 830
@@ -1029,8 +922,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1029 struct i915_hw_ppgtt *ppgtt = 922 struct i915_hw_ppgtt *ppgtt =
1030 container_of(vm, struct i915_hw_ppgtt, base); 923 container_of(vm, struct i915_hw_ppgtt, base);
1031 924
1032 list_del(&vm->global_link);
1033 drm_mm_takedown(&ppgtt->base.mm);
1034 drm_mm_remove_node(&ppgtt->node); 925 drm_mm_remove_node(&ppgtt->node);
1035 926
1036 gen6_ppgtt_unmap_pages(ppgtt); 927 gen6_ppgtt_unmap_pages(ppgtt);
@@ -1151,13 +1042,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1151 1042
1152 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; 1043 ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
1153 if (IS_GEN6(dev)) { 1044 if (IS_GEN6(dev)) {
1154 ppgtt->enable = gen6_ppgtt_enable;
1155 ppgtt->switch_mm = gen6_mm_switch; 1045 ppgtt->switch_mm = gen6_mm_switch;
1156 } else if (IS_HASWELL(dev)) { 1046 } else if (IS_HASWELL(dev)) {
1157 ppgtt->enable = gen7_ppgtt_enable;
1158 ppgtt->switch_mm = hsw_mm_switch; 1047 ppgtt->switch_mm = hsw_mm_switch;
1159 } else if (IS_GEN7(dev)) { 1048 } else if (IS_GEN7(dev)) {
1160 ppgtt->enable = gen7_ppgtt_enable;
1161 ppgtt->switch_mm = gen7_mm_switch; 1049 ppgtt->switch_mm = gen7_mm_switch;
1162 } else 1050 } else
1163 BUG(); 1051 BUG();
@@ -1188,39 +1076,114 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1188 ppgtt->node.size >> 20, 1076 ppgtt->node.size >> 20,
1189 ppgtt->node.start / PAGE_SIZE); 1077 ppgtt->node.start / PAGE_SIZE);
1190 1078
1079 gen6_write_pdes(ppgtt);
1080 DRM_DEBUG("Adding PPGTT at offset %x\n",
1081 ppgtt->pd_offset << 10);
1082
1191 return 0; 1083 return 0;
1192} 1084}
1193 1085
1194int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 1086static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1195{ 1087{
1196 struct drm_i915_private *dev_priv = dev->dev_private; 1088 struct drm_i915_private *dev_priv = dev->dev_private;
1197 int ret = 0;
1198 1089
1199 ppgtt->base.dev = dev; 1090 ppgtt->base.dev = dev;
1200 ppgtt->base.scratch = dev_priv->gtt.base.scratch; 1091 ppgtt->base.scratch = dev_priv->gtt.base.scratch;
1201 1092
1202 if (INTEL_INFO(dev)->gen < 8) 1093 if (INTEL_INFO(dev)->gen < 8)
1203 ret = gen6_ppgtt_init(ppgtt); 1094 return gen6_ppgtt_init(ppgtt);
1204 else if (IS_GEN8(dev)) 1095 else if (IS_GEN8(dev))
1205 ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); 1096 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1206 else 1097 else
1207 BUG(); 1098 BUG();
1099}
1100int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1101{
1102 struct drm_i915_private *dev_priv = dev->dev_private;
1103 int ret = 0;
1208 1104
1209 if (!ret) { 1105 ret = __hw_ppgtt_init(dev, ppgtt);
1210 struct drm_i915_private *dev_priv = dev->dev_private; 1106 if (ret == 0) {
1211 kref_init(&ppgtt->ref); 1107 kref_init(&ppgtt->ref);
1212 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, 1108 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
1213 ppgtt->base.total); 1109 ppgtt->base.total);
1214 i915_init_vm(dev_priv, &ppgtt->base); 1110 i915_init_vm(dev_priv, &ppgtt->base);
1215 if (INTEL_INFO(dev)->gen < 8) { 1111 }
1216 gen6_write_pdes(ppgtt); 1112
1217 DRM_DEBUG("Adding PPGTT at offset %x\n", 1113 return ret;
1218 ppgtt->pd_offset << 10); 1114}
1115
1116int i915_ppgtt_init_hw(struct drm_device *dev)
1117{
1118 struct drm_i915_private *dev_priv = dev->dev_private;
1119 struct intel_engine_cs *ring;
1120 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1121 int i, ret = 0;
1122
1123 /* In the case of execlists, PPGTT is enabled by the context descriptor
1124 * and the PDPs are contained within the context itself. We don't
1125 * need to do anything here. */
1126 if (i915.enable_execlists)
1127 return 0;
1128
1129 if (!USES_PPGTT(dev))
1130 return 0;
1131
1132 if (IS_GEN6(dev))
1133 gen6_ppgtt_enable(dev);
1134 else if (IS_GEN7(dev))
1135 gen7_ppgtt_enable(dev);
1136 else if (INTEL_INFO(dev)->gen >= 8)
1137 gen8_ppgtt_enable(dev);
1138 else
1139 WARN_ON(1);
1140
1141 if (ppgtt) {
1142 for_each_ring(ring, dev_priv, i) {
1143 ret = ppgtt->switch_mm(ppgtt, ring);
1144 if (ret != 0)
1145 return ret;
1219 } 1146 }
1220 } 1147 }
1221 1148
1222 return ret; 1149 return ret;
1223} 1150}
1151struct i915_hw_ppgtt *
1152i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
1153{
1154 struct i915_hw_ppgtt *ppgtt;
1155 int ret;
1156
1157 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1158 if (!ppgtt)
1159 return ERR_PTR(-ENOMEM);
1160
1161 ret = i915_ppgtt_init(dev, ppgtt);
1162 if (ret) {
1163 kfree(ppgtt);
1164 return ERR_PTR(ret);
1165 }
1166
1167 ppgtt->file_priv = fpriv;
1168
1169 return ppgtt;
1170}
1171
1172void i915_ppgtt_release(struct kref *kref)
1173{
1174 struct i915_hw_ppgtt *ppgtt =
1175 container_of(kref, struct i915_hw_ppgtt, ref);
1176
1177 /* vmas should already be unbound */
1178 WARN_ON(!list_empty(&ppgtt->base.active_list));
1179 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
1180
1181 list_del(&ppgtt->base.global_link);
1182 drm_mm_takedown(&ppgtt->base.mm);
1183
1184 ppgtt->base.cleanup(&ppgtt->base);
1185 kfree(ppgtt);
1186}
1224 1187
1225static void 1188static void
1226ppgtt_bind_vma(struct i915_vma *vma, 1189ppgtt_bind_vma(struct i915_vma *vma,
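
The old i915_gem_init_ppgtt() is split into an allocation step (__hw_ppgtt_init()/i915_ppgtt_init()), a one-time hardware enable (i915_ppgtt_init_hw(), which programs the per-ring PPGTT bits and performs the initial switch_mm() on each ring unless execlists handle it via the context descriptor), and a kref-managed constructor/destructor pair. A sketch of how a per-file PPGTT would be created and dropped with the new helpers; the caller shown here is an assumption about how the rest of the series uses them:

        struct i915_hw_ppgtt *ppgtt;

        ppgtt = i915_ppgtt_create(dev, file_priv);  /* kzalloc + i915_ppgtt_init() */
        if (IS_ERR(ppgtt))
                return PTR_ERR(ppgtt);

        /* ... hand &ppgtt->base out as the context's address space ... */

        i915_ppgtt_put(ppgtt);  /* last reference ends up in i915_ppgtt_release() */
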
@@ -1687,10 +1650,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
1687 } 1650 }
1688} 1651}
1689 1652
1690void i915_gem_setup_global_gtt(struct drm_device *dev, 1653int i915_gem_setup_global_gtt(struct drm_device *dev,
1691 unsigned long start, 1654 unsigned long start,
1692 unsigned long mappable_end, 1655 unsigned long mappable_end,
1693 unsigned long end) 1656 unsigned long end)
1694{ 1657{
1695 /* Let GEM Manage all of the aperture. 1658 /* Let GEM Manage all of the aperture.
1696 * 1659 *
@@ -1706,6 +1669,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1706 struct drm_mm_node *entry; 1669 struct drm_mm_node *entry;
1707 struct drm_i915_gem_object *obj; 1670 struct drm_i915_gem_object *obj;
1708 unsigned long hole_start, hole_end; 1671 unsigned long hole_start, hole_end;
1672 int ret;
1709 1673
1710 BUG_ON(mappable_end > end); 1674 BUG_ON(mappable_end > end);
1711 1675
@@ -1717,14 +1681,16 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1717 /* Mark any preallocated objects as occupied */ 1681 /* Mark any preallocated objects as occupied */
1718 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1682 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1719 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); 1683 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1720 int ret; 1684
1721 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", 1685 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
1722 i915_gem_obj_ggtt_offset(obj), obj->base.size); 1686 i915_gem_obj_ggtt_offset(obj), obj->base.size);
1723 1687
1724 WARN_ON(i915_gem_obj_ggtt_bound(obj)); 1688 WARN_ON(i915_gem_obj_ggtt_bound(obj));
1725 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); 1689 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
1726 if (ret) 1690 if (ret) {
1727 DRM_DEBUG_KMS("Reservation failed\n"); 1691 DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
1692 return ret;
1693 }
1728 obj->has_global_gtt_mapping = 1; 1694 obj->has_global_gtt_mapping = 1;
1729 } 1695 }
1730 1696
@@ -1741,6 +1707,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
1741 1707
1742 /* And finally clear the reserved guard page */ 1708 /* And finally clear the reserved guard page */
1743 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); 1709 ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
1710
1711 if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
1712 struct i915_hw_ppgtt *ppgtt;
1713
1714 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1715 if (!ppgtt)
1716 return -ENOMEM;
1717
1718 ret = __hw_ppgtt_init(dev, ppgtt);
1719 if (ret != 0)
1720 return ret;
1721
1722 dev_priv->mm.aliasing_ppgtt = ppgtt;
1723 }
1724
1725 return 0;
1744} 1726}
1745 1727
1746void i915_gem_init_global_gtt(struct drm_device *dev) 1728void i915_gem_init_global_gtt(struct drm_device *dev)
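
i915_gem_setup_global_gtt() now returns an int so a failed drm_mm_reserve_node() for a preallocated (stolen) object propagates instead of only being logged, and when aliasing PPGTT is in use the aliasing page tables are allocated right here via __hw_ppgtt_init() rather than later through the context code. Condensed sketch of the added tail of the function:

        if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
                struct i915_hw_ppgtt *ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);

                if (!ppgtt)
                        return -ENOMEM;

                ret = __hw_ppgtt_init(dev, ppgtt);
                if (ret)
                        return ret;

                dev_priv->mm.aliasing_ppgtt = ppgtt;
        }

        return 0;
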
@@ -1754,6 +1736,25 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
1754 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); 1736 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1755} 1737}
1756 1738
1739void i915_global_gtt_cleanup(struct drm_device *dev)
1740{
1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 struct i915_address_space *vm = &dev_priv->gtt.base;
1743
1744 if (dev_priv->mm.aliasing_ppgtt) {
1745 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1746
1747 ppgtt->base.cleanup(&ppgtt->base);
1748 }
1749
1750 if (drm_mm_initialized(&vm->mm)) {
1751 drm_mm_takedown(&vm->mm);
1752 list_del(&vm->global_link);
1753 }
1754
1755 vm->cleanup(vm);
1756}
1757
1757static int setup_scratch_page(struct drm_device *dev) 1758static int setup_scratch_page(struct drm_device *dev)
1758{ 1759{
1759 struct drm_i915_private *dev_priv = dev->dev_private; 1760 struct drm_i915_private *dev_priv = dev->dev_private;
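
Teardown is centralised in the new i915_global_gtt_cleanup(): the aliasing PPGTT is cleaned up first, then the GGTT drm_mm and its vm_list link are taken down, and only then does the backend-specific vm->cleanup() run. That is why the drm_mm_takedown()/list_del() pairs disappear from gen6_gmch_remove() and i915_gmch_remove() further down. Sketch of the ordering, assuming the function is called from the driver unload path:

        if (dev_priv->mm.aliasing_ppgtt) {
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

                ppgtt->base.cleanup(&ppgtt->base);
        }

        if (drm_mm_initialized(&vm->mm)) {
                drm_mm_takedown(&vm->mm);
                list_del(&vm->global_link);
        }

        vm->cleanup(vm);        /* gen6_gmch_remove() or i915_gmch_remove() */
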
@@ -2022,10 +2023,6 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
2022 2023
2023 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); 2024 struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
2024 2025
2025 if (drm_mm_initialized(&vm->mm)) {
2026 drm_mm_takedown(&vm->mm);
2027 list_del(&vm->global_link);
2028 }
2029 iounmap(gtt->gsm); 2026 iounmap(gtt->gsm);
2030 teardown_scratch_page(vm->dev); 2027 teardown_scratch_page(vm->dev);
2031} 2028}
@@ -2058,10 +2055,6 @@ static int i915_gmch_probe(struct drm_device *dev,
2058 2055
2059static void i915_gmch_remove(struct i915_address_space *vm) 2056static void i915_gmch_remove(struct i915_address_space *vm)
2060{ 2057{
2061 if (drm_mm_initialized(&vm->mm)) {
2062 drm_mm_takedown(&vm->mm);
2063 list_del(&vm->global_link);
2064 }
2065 intel_gmch_remove(); 2058 intel_gmch_remove();
2066} 2059}
2067 2060
@@ -2160,8 +2153,10 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2160 /* Keep GGTT vmas first to make debug easier */ 2153 /* Keep GGTT vmas first to make debug easier */
2161 if (i915_is_ggtt(vm)) 2154 if (i915_is_ggtt(vm))
2162 list_add(&vma->vma_link, &obj->vma_list); 2155 list_add(&vma->vma_link, &obj->vma_list);
2163 else 2156 else {
2164 list_add_tail(&vma->vma_link, &obj->vma_list); 2157 list_add_tail(&vma->vma_link, &obj->vma_list);
2158 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
2159 }
2165 2160
2166 return vma; 2161 return vma;
2167} 2162}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8d6f7c18c404..d5c14af51e99 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
34#ifndef __I915_GEM_GTT_H__ 34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__ 35#define __I915_GEM_GTT_H__
36 36
37struct drm_i915_file_private;
38
37typedef uint32_t gen6_gtt_pte_t; 39typedef uint32_t gen6_gtt_pte_t;
38typedef uint64_t gen8_gtt_pte_t; 40typedef uint64_t gen8_gtt_pte_t;
39typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; 41typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
@@ -258,22 +260,36 @@ struct i915_hw_ppgtt {
258 dma_addr_t *gen8_pt_dma_addr[4]; 260 dma_addr_t *gen8_pt_dma_addr[4];
259 }; 261 };
260 262
261 struct intel_context *ctx; 263 struct drm_i915_file_private *file_priv;
262 264
263 int (*enable)(struct i915_hw_ppgtt *ppgtt); 265 int (*enable)(struct i915_hw_ppgtt *ppgtt);
264 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, 266 int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
265 struct intel_engine_cs *ring, 267 struct intel_engine_cs *ring);
266 bool synchronous);
267 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); 268 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
268}; 269};
269 270
270int i915_gem_gtt_init(struct drm_device *dev); 271int i915_gem_gtt_init(struct drm_device *dev);
271void i915_gem_init_global_gtt(struct drm_device *dev); 272void i915_gem_init_global_gtt(struct drm_device *dev);
272void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, 273int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
273 unsigned long mappable_end, unsigned long end); 274 unsigned long mappable_end, unsigned long end);
274 275void i915_global_gtt_cleanup(struct drm_device *dev);
275bool intel_enable_ppgtt(struct drm_device *dev, bool full); 276
276int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); 277
278int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
279int i915_ppgtt_init_hw(struct drm_device *dev);
280void i915_ppgtt_release(struct kref *kref);
281struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
282 struct drm_i915_file_private *fpriv);
283static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
284{
285 if (ppgtt)
286 kref_get(&ppgtt->ref);
287}
288static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
289{
290 if (ppgtt)
291 kref_put(&ppgtt->ref, i915_ppgtt_release);
292}
277 293
278void i915_check_and_clear_faults(struct drm_device *dev); 294void i915_check_and_clear_faults(struct drm_device *dev);
279void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 295void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
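
The header now exports the PPGTT lifecycle, including NULL-tolerant kref wrappers. The i915_gem_gtt.c hunk above uses i915_ppgtt_get() to make every non-GGTT VMA hold a reference on its address space; the matching i915_ppgtt_put() on VMA destruction is implied by the kref scheme but is not part of the hunks shown here. Usage sketch:

        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

        i915_ppgtt_get(ppgtt);  /* taken when the VMA is created against a PPGTT VM */
        /* ... VMA is bound, used, unbound ... */
        i915_ppgtt_put(ppgtt);  /* dropping the last reference calls i915_ppgtt_release() */
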
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index e60be3f552a6..a9a62d75aa57 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,13 +28,6 @@
28#include "i915_drv.h" 28#include "i915_drv.h"
29#include "intel_renderstate.h" 29#include "intel_renderstate.h"
30 30
31struct render_state {
32 const struct intel_renderstate_rodata *rodata;
33 struct drm_i915_gem_object *obj;
34 u64 ggtt_offset;
35 int gen;
36};
37
38static const struct intel_renderstate_rodata * 31static const struct intel_renderstate_rodata *
39render_state_get_rodata(struct drm_device *dev, const int gen) 32render_state_get_rodata(struct drm_device *dev, const int gen)
40{ 33{
@@ -127,30 +120,47 @@ static int render_state_setup(struct render_state *so)
127 return 0; 120 return 0;
128} 121}
129 122
130static void render_state_fini(struct render_state *so) 123void i915_gem_render_state_fini(struct render_state *so)
131{ 124{
132 i915_gem_object_ggtt_unpin(so->obj); 125 i915_gem_object_ggtt_unpin(so->obj);
133 drm_gem_object_unreference(&so->obj->base); 126 drm_gem_object_unreference(&so->obj->base);
134} 127}
135 128
136int i915_gem_render_state_init(struct intel_engine_cs *ring) 129int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
130 struct render_state *so)
137{ 131{
138 struct render_state so;
139 int ret; 132 int ret;
140 133
141 if (WARN_ON(ring->id != RCS)) 134 if (WARN_ON(ring->id != RCS))
142 return -ENOENT; 135 return -ENOENT;
143 136
144 ret = render_state_init(&so, ring->dev); 137 ret = render_state_init(so, ring->dev);
145 if (ret) 138 if (ret)
146 return ret; 139 return ret;
147 140
148 if (so.rodata == NULL) 141 if (so->rodata == NULL)
149 return 0; 142 return 0;
150 143
151 ret = render_state_setup(&so); 144 ret = render_state_setup(so);
145 if (ret) {
146 i915_gem_render_state_fini(so);
147 return ret;
148 }
149
150 return 0;
151}
152
153int i915_gem_render_state_init(struct intel_engine_cs *ring)
154{
155 struct render_state so;
156 int ret;
157
158 ret = i915_gem_render_state_prepare(ring, &so);
152 if (ret) 159 if (ret)
153 goto out; 160 return ret;
161
162 if (so.rodata == NULL)
163 return 0;
154 164
155 ret = ring->dispatch_execbuffer(ring, 165 ret = ring->dispatch_execbuffer(ring,
156 so.ggtt_offset, 166 so.ggtt_offset,
@@ -164,6 +174,6 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
164 ret = __i915_add_request(ring, NULL, so.obj, NULL); 174 ret = __i915_add_request(ring, NULL, so.obj, NULL);
165 /* __i915_add_request moves object to inactive if it fails */ 175 /* __i915_add_request moves object to inactive if it fails */
166out: 176out:
167 render_state_fini(&so); 177 i915_gem_render_state_fini(&so);
168 return ret; 178 return ret;
169} 179}
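
Splitting the golden render state setup into i915_gem_render_state_prepare()/_fini() lets the execlists path initialise the render state with its own submission mechanism while the legacy ringbuffer path keeps the old sequence. Sketch of the legacy sequence after the split (the dispatch and request arguments are elided; see the function above):

        struct render_state so;
        int ret;

        ret = i915_gem_render_state_prepare(ring, &so); /* alloc, pin, apply relocs */
        if (ret)
                return ret;
        if (so.rodata == NULL)
                return 0;                       /* no golden state for this gen */

        /* submit so.obj with ring->dispatch_execbuffer() and add a request */

        i915_gem_render_state_fini(&so);        /* unpin and drop the reference */
        return ret;
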
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
new file mode 100644
index 000000000000..c44961ed3fad
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef _I915_GEM_RENDER_STATE_H_
25#define _I915_GEM_RENDER_STATE_H_
26
27#include <linux/types.h>
28
29struct intel_renderstate_rodata {
30 const u32 *reloc;
31 const u32 *batch;
32 const u32 batch_items;
33};
34
35struct render_state {
36 const struct intel_renderstate_rodata *rodata;
37 struct drm_i915_gem_object *obj;
38 u64 ggtt_offset;
39 int gen;
40};
41
42int i915_gem_render_state_init(struct intel_engine_cs *ring);
43void i915_gem_render_state_fini(struct render_state *so);
44int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
45 struct render_state *so);
46
47#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 21c025a209c0..85fda6b803e4 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -289,6 +289,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
289int i915_gem_init_stolen(struct drm_device *dev) 289int i915_gem_init_stolen(struct drm_device *dev)
290{ 290{
291 struct drm_i915_private *dev_priv = dev->dev_private; 291 struct drm_i915_private *dev_priv = dev->dev_private;
292 u32 tmp;
292 int bios_reserved = 0; 293 int bios_reserved = 0;
293 294
294#ifdef CONFIG_INTEL_IOMMU 295#ifdef CONFIG_INTEL_IOMMU
@@ -308,8 +309,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
308 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", 309 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
309 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); 310 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
310 311
311 if (IS_VALLEYVIEW(dev)) 312 if (INTEL_INFO(dev)->gen >= 8) {
312 bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ 313 tmp = I915_READ(GEN7_BIOS_RESERVED);
314 tmp >>= GEN8_BIOS_RESERVED_SHIFT;
315 tmp &= GEN8_BIOS_RESERVED_MASK;
316 bios_reserved = (1024*1024) << tmp;
317 } else if (IS_GEN7(dev)) {
318 tmp = I915_READ(GEN7_BIOS_RESERVED);
319 bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
320 256*1024 : 1024*1024;
321 }
313 322
314 if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size)) 323 if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
315 return 0; 324 return 0;
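
Instead of hard-coding the top 1M on VLV/BYT, the stolen-memory setup now reads how much the BIOS reserved from the GEN7_BIOS_RESERVED register: gen7 encodes 256K vs 1M in a single bit, gen8+ encodes a power-of-two multiple of 1M in a small field. Worked example of the decode shown above (the shift/mask macros are expected to come from i915_reg.h in this series):

        u32 tmp = I915_READ(GEN7_BIOS_RESERVED);

        if (INTEL_INFO(dev)->gen >= 8) {
                tmp = (tmp >> GEN8_BIOS_RESERVED_SHIFT) & GEN8_BIOS_RESERVED_MASK;
                bios_reserved = (1024 * 1024) << tmp;   /* 0 -> 1M, 1 -> 2M, 2 -> 4M, ... */
        } else if (IS_GEN7(dev)) {
                bios_reserved = (tmp & GEN7_BIOS_RESERVED_256K) ?
                                256 * 1024 : 1024 * 1024;
        }
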
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cb150e8b4336..2cefb597df6d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -91,7 +91,14 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
93 93
94 if (IS_VALLEYVIEW(dev)) { 94 if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
95 /*
96 * On BDW+, swizzling is not used. We leave the CPU memory
97 * controller in charge of optimizing memory accesses without
98 * the extra address manipulation GPU side.
99 *
100 * VLV and CHV don't have GPU swizzling.
101 */
95 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 102 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
96 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 103 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
97 } else if (INTEL_INFO(dev)->gen >= 6) { 104 } else if (INTEL_INFO(dev)->gen >= 6) {
@@ -376,7 +383,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
376 383
377 if (ret == 0) { 384 if (ret == 0) {
378 obj->fence_dirty = 385 obj->fence_dirty =
379 obj->fenced_gpu_access || 386 obj->last_fenced_seqno ||
380 obj->fence_reg != I915_FENCE_REG_NONE; 387 obj->fence_reg != I915_FENCE_REG_NONE;
381 388
382 obj->tiling_mode = args->tiling_mode; 389 obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d38413997379..d182058383a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
293static struct i915_mmu_notifier * 293static struct i915_mmu_notifier *
294i915_mmu_notifier_find(struct i915_mm_struct *mm) 294i915_mmu_notifier_find(struct i915_mm_struct *mm)
295{ 295{
296 if (mm->mn == NULL) { 296 struct i915_mmu_notifier *mn = mm->mn;
297 down_write(&mm->mm->mmap_sem); 297
298 mutex_lock(&to_i915(mm->dev)->mm_lock); 298 mn = mm->mn;
299 if (mm->mn == NULL) 299 if (mn)
300 mm->mn = i915_mmu_notifier_create(mm->mm); 300 return mn;
301 mutex_unlock(&to_i915(mm->dev)->mm_lock); 301
302 up_write(&mm->mm->mmap_sem); 302 down_write(&mm->mm->mmap_sem);
303 mutex_lock(&to_i915(mm->dev)->mm_lock);
304 if ((mn = mm->mn) == NULL) {
305 mn = i915_mmu_notifier_create(mm->mm);
306 if (!IS_ERR(mn))
307 mm->mn = mn;
303 } 308 }
304 return mm->mn; 309 mutex_unlock(&to_i915(mm->dev)->mm_lock);
310 up_write(&mm->mm->mmap_sem);
311
312 return mn;
305} 313}
306 314
307static int 315static int
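
The notifier lookup is reworked into the usual check / lock / re-check / create pattern: an unlocked read serves the common case, the allocation happens only once under mmap_sem and mm_lock, and an ERR_PTR from i915_mmu_notifier_create() is returned to the caller but never cached in mm->mn. A generic sketch of the pattern with hypothetical names (struct thing, thing_create() and cache->lock are illustrations, not i915 symbols):

        struct thing *t = cache->thing;         /* unlocked fast path */

        if (t)
                return t;

        mutex_lock(&cache->lock);
        t = cache->thing;                       /* re-check under the lock */
        if (!t) {
                t = thing_create();
                if (!IS_ERR(t))
                        cache->thing = t;       /* publish only on success */
        }
        mutex_unlock(&cache->lock);

        return t;                               /* may be an ERR_PTR */
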
@@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
681static void 689static void
682i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) 690i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
683{ 691{
684 struct scatterlist *sg; 692 struct sg_page_iter sg_iter;
685 int i;
686 693
687 BUG_ON(obj->userptr.work != NULL); 694 BUG_ON(obj->userptr.work != NULL);
688 695
689 if (obj->madv != I915_MADV_WILLNEED) 696 if (obj->madv != I915_MADV_WILLNEED)
690 obj->dirty = 0; 697 obj->dirty = 0;
691 698
692 for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) { 699 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
693 struct page *page = sg_page(sg); 700 struct page *page = sg_page_iter_page(&sg_iter);
694 701
695 if (obj->dirty) 702 if (obj->dirty)
696 set_page_dirty(page); 703 set_page_dirty(page);
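
The put_pages path switches from for_each_sg() to for_each_sg_page(), which iterates individual pages even when scatterlist entries were coalesced into multi-page segments, so every user page is dirtied and released exactly once. Usage sketch of the iterator (standard lib/scatterlist API; the release step mirrors the original loop and is outside the hunk above):

        struct sg_page_iter sg_iter;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (obj->dirty)
                        set_page_dirty(page);
                /* ... then drop the page reference as the original loop did ... */
        }
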
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index eab41f9390f8..2c87a797213f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -192,10 +192,10 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
192 struct drm_i915_error_buffer *err, 192 struct drm_i915_error_buffer *err,
193 int count) 193 int count)
194{ 194{
195 err_printf(m, "%s [%d]:\n", name, count); 195 err_printf(m, " %s [%d]:\n", name, count);
196 196
197 while (count--) { 197 while (count--) {
198 err_printf(m, " %08x %8u %02x %02x %x %x", 198 err_printf(m, " %08x %8u %02x %02x %x %x",
199 err->gtt_offset, 199 err->gtt_offset,
200 err->size, 200 err->size,
201 err->read_domains, 201 err->read_domains,
@@ -208,7 +208,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
208 err_puts(m, err->userptr ? " userptr" : ""); 208 err_puts(m, err->userptr ? " userptr" : "");
209 err_puts(m, err->ring != -1 ? " " : ""); 209 err_puts(m, err->ring != -1 ? " " : "");
210 err_puts(m, ring_str(err->ring)); 210 err_puts(m, ring_str(err->ring));
211 err_puts(m, i915_cache_level_str(err->cache_level)); 211 err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
212 212
213 if (err->name) 213 if (err->name)
214 err_printf(m, " (name: %d)", err->name); 214 err_printf(m, " (name: %d)", err->name);
@@ -393,15 +393,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
393 i915_ring_error_state(m, dev, &error->ring[i]); 393 i915_ring_error_state(m, dev, &error->ring[i]);
394 } 394 }
395 395
396 if (error->active_bo) 396 for (i = 0; i < error->vm_count; i++) {
397 err_printf(m, "vm[%d]\n", i);
398
397 print_error_buffers(m, "Active", 399 print_error_buffers(m, "Active",
398 error->active_bo[0], 400 error->active_bo[i],
399 error->active_bo_count[0]); 401 error->active_bo_count[i]);
400 402
401 if (error->pinned_bo)
402 print_error_buffers(m, "Pinned", 403 print_error_buffers(m, "Pinned",
403 error->pinned_bo[0], 404 error->pinned_bo[i],
404 error->pinned_bo_count[0]); 405 error->pinned_bo_count[i]);
406 }
405 407
406 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 408 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
407 obj = error->ring[i].batchbuffer; 409 obj = error->ring[i].batchbuffer;
@@ -492,9 +494,11 @@ out:
492} 494}
493 495
494int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, 496int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
497 struct drm_i915_private *i915,
495 size_t count, loff_t pos) 498 size_t count, loff_t pos)
496{ 499{
497 memset(ebuf, 0, sizeof(*ebuf)); 500 memset(ebuf, 0, sizeof(*ebuf));
501 ebuf->i915 = i915;
498 502
499 /* We need to have enough room to store any i915_error_state printf 503 /* We need to have enough room to store any i915_error_state printf
500 * so that we can move it to start position. 504 * so that we can move it to start position.
@@ -556,24 +560,54 @@ static void i915_error_state_free(struct kref *error_ref)
556} 560}
557 561
558static struct drm_i915_error_object * 562static struct drm_i915_error_object *
559i915_error_object_create_sized(struct drm_i915_private *dev_priv, 563i915_error_object_create(struct drm_i915_private *dev_priv,
560 struct drm_i915_gem_object *src, 564 struct drm_i915_gem_object *src,
561 struct i915_address_space *vm, 565 struct i915_address_space *vm)
562 const int num_pages)
563{ 566{
564 struct drm_i915_error_object *dst; 567 struct drm_i915_error_object *dst;
565 int i; 568 int num_pages;
569 bool use_ggtt;
570 int i = 0;
566 u32 reloc_offset; 571 u32 reloc_offset;
567 572
568 if (src == NULL || src->pages == NULL) 573 if (src == NULL || src->pages == NULL)
569 return NULL; 574 return NULL;
570 575
576 num_pages = src->base.size >> PAGE_SHIFT;
577
571 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); 578 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
572 if (dst == NULL) 579 if (dst == NULL)
573 return NULL; 580 return NULL;
574 581
575 reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm); 582 if (i915_gem_obj_bound(src, vm))
576 for (i = 0; i < num_pages; i++) { 583 dst->gtt_offset = i915_gem_obj_offset(src, vm);
584 else
585 dst->gtt_offset = -1;
586
587 reloc_offset = dst->gtt_offset;
588 use_ggtt = (src->cache_level == I915_CACHE_NONE &&
589 i915_is_ggtt(vm) &&
590 src->has_global_gtt_mapping &&
591 reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
592
593 /* Cannot access stolen address directly, try to use the aperture */
594 if (src->stolen) {
595 use_ggtt = true;
596
597 if (!src->has_global_gtt_mapping)
598 goto unwind;
599
600 reloc_offset = i915_gem_obj_ggtt_offset(src);
601 if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
602 goto unwind;
603 }
604
605 /* Cannot access snooped pages through the aperture */
606 if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
607 goto unwind;
608
609 dst->page_count = num_pages;
610 while (num_pages--) {
577 unsigned long flags; 611 unsigned long flags;
578 void *d; 612 void *d;
579 613
@@ -582,10 +616,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
582 goto unwind; 616 goto unwind;
583 617
584 local_irq_save(flags); 618 local_irq_save(flags);
585 if (src->cache_level == I915_CACHE_NONE && 619 if (use_ggtt) {
586 reloc_offset < dev_priv->gtt.mappable_end &&
587 src->has_global_gtt_mapping &&
588 i915_is_ggtt(vm)) {
589 void __iomem *s; 620 void __iomem *s;
590 621
591 /* Simply ignore tiling or any overlapping fence. 622 /* Simply ignore tiling or any overlapping fence.
@@ -597,14 +628,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
597 reloc_offset); 628 reloc_offset);
598 memcpy_fromio(d, s, PAGE_SIZE); 629 memcpy_fromio(d, s, PAGE_SIZE);
599 io_mapping_unmap_atomic(s); 630 io_mapping_unmap_atomic(s);
600 } else if (src->stolen) {
601 unsigned long offset;
602
603 offset = dev_priv->mm.stolen_base;
604 offset += src->stolen->start;
605 offset += i << PAGE_SHIFT;
606
607 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
608 } else { 631 } else {
609 struct page *page; 632 struct page *page;
610 void *s; 633 void *s;
@@ -621,11 +644,9 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
621 } 644 }
622 local_irq_restore(flags); 645 local_irq_restore(flags);
623 646
624 dst->pages[i] = d; 647 dst->pages[i++] = d;
625
626 reloc_offset += PAGE_SIZE; 648 reloc_offset += PAGE_SIZE;
627 } 649 }
628 dst->page_count = num_pages;
629 650
630 return dst; 651 return dst;
631 652
@@ -635,22 +656,19 @@ unwind:
635 kfree(dst); 656 kfree(dst);
636 return NULL; 657 return NULL;
637} 658}
638#define i915_error_object_create(dev_priv, src, vm) \
639 i915_error_object_create_sized((dev_priv), (src), (vm), \
640 (src)->base.size>>PAGE_SHIFT)
641
642#define i915_error_ggtt_object_create(dev_priv, src) \ 659#define i915_error_ggtt_object_create(dev_priv, src) \
643 i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \ 660 i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
644 (src)->base.size>>PAGE_SHIFT)
645 661
646static void capture_bo(struct drm_i915_error_buffer *err, 662static void capture_bo(struct drm_i915_error_buffer *err,
647 struct drm_i915_gem_object *obj) 663 struct i915_vma *vma)
648{ 664{
665 struct drm_i915_gem_object *obj = vma->obj;
666
649 err->size = obj->base.size; 667 err->size = obj->base.size;
650 err->name = obj->base.name; 668 err->name = obj->base.name;
651 err->rseqno = obj->last_read_seqno; 669 err->rseqno = obj->last_read_seqno;
652 err->wseqno = obj->last_write_seqno; 670 err->wseqno = obj->last_write_seqno;
653 err->gtt_offset = i915_gem_obj_ggtt_offset(obj); 671 err->gtt_offset = vma->node.start;
654 err->read_domains = obj->base.read_domains; 672 err->read_domains = obj->base.read_domains;
655 err->write_domain = obj->base.write_domain; 673 err->write_domain = obj->base.write_domain;
656 err->fence_reg = obj->fence_reg; 674 err->fence_reg = obj->fence_reg;
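
Error-object capture becomes VM-aware: the helper derives the page count from the object itself, records the object's offset in the VM being captured (or -1 if it is not bound there), and decides once up front whether the pages can be read through the GGTT aperture; stolen memory can only be read that way, while snooped pages cannot. Condensed sketch of that decision (as in the hunk above):

        use_ggtt = src->cache_level == I915_CACHE_NONE &&
                   i915_is_ggtt(vm) &&
                   src->has_global_gtt_mapping &&
                   reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end;

        if (src->stolen) {                      /* only reachable via the aperture */
                use_ggtt = true;
                if (!src->has_global_gtt_mapping)
                        goto unwind;
                reloc_offset = i915_gem_obj_ggtt_offset(src);
        }

        if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
                goto unwind;                    /* snooped pages: aperture reads are wrong */
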
@@ -674,7 +692,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
674 int i = 0; 692 int i = 0;
675 693
676 list_for_each_entry(vma, head, mm_list) { 694 list_for_each_entry(vma, head, mm_list) {
677 capture_bo(err++, vma->obj); 695 capture_bo(err++, vma);
678 if (++i == count) 696 if (++i == count)
679 break; 697 break;
680 } 698 }
@@ -683,21 +701,27 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
683} 701}
684 702
685static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 703static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
686 int count, struct list_head *head) 704 int count, struct list_head *head,
705 struct i915_address_space *vm)
687{ 706{
688 struct drm_i915_gem_object *obj; 707 struct drm_i915_gem_object *obj;
689 int i = 0; 708 struct drm_i915_error_buffer * const first = err;
709 struct drm_i915_error_buffer * const last = err + count;
690 710
691 list_for_each_entry(obj, head, global_list) { 711 list_for_each_entry(obj, head, global_list) {
692 if (!i915_gem_obj_is_pinned(obj)) 712 struct i915_vma *vma;
693 continue;
694 713
695 capture_bo(err++, obj); 714 if (err == last)
696 if (++i == count)
697 break; 715 break;
716
717 list_for_each_entry(vma, &obj->vma_list, vma_link)
718 if (vma->vm == vm && vma->pin_count > 0) {
719 capture_bo(err++, vma);
720 break;
721 }
698 } 722 }
699 723
700 return i; 724 return err - first;
701} 725}
702 726
703/* Generate a semi-unique error code. The code is not meant to have meaning. The 727
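
Pinned-buffer capture is now per-VM: rather than counting every object with any pinned VMA, the walk looks for a VMA that belongs to the VM being captured with pin_count > 0, and the helper bounds its output with first/last pointers, returning err - first as the number of entries written. Sketch of the per-VM filter (mirrors the loop above):

        list_for_each_entry(obj, head, global_list) {
                struct i915_vma *vma;

                if (err == last)
                        break;

                list_for_each_entry(vma, &obj->vma_list, vma_link)
                        if (vma->vm == vm && vma->pin_count > 0) {
                                capture_bo(err++, vma);
                                break;          /* at most one entry per object */
                        }
        }

        return err - first;
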
@@ -890,9 +914,6 @@ static void i915_record_ring_state(struct drm_device *dev,
890 ering->hws = I915_READ(mmio); 914 ering->hws = I915_READ(mmio);
891 } 915 }
892 916
893 ering->cpu_ring_head = ring->buffer->head;
894 ering->cpu_ring_tail = ring->buffer->tail;
895
896 ering->hangcheck_score = ring->hangcheck.score; 917 ering->hangcheck_score = ring->hangcheck.score;
897 ering->hangcheck_action = ring->hangcheck.action; 918 ering->hangcheck_action = ring->hangcheck.action;
898 919
@@ -955,6 +976,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
955 976
956 for (i = 0; i < I915_NUM_RINGS; i++) { 977 for (i = 0; i < I915_NUM_RINGS; i++) {
957 struct intel_engine_cs *ring = &dev_priv->ring[i]; 978 struct intel_engine_cs *ring = &dev_priv->ring[i];
979 struct intel_ringbuffer *rbuf;
958 980
959 error->ring[i].pid = -1; 981 error->ring[i].pid = -1;
960 982
@@ -967,6 +989,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
967 989
968 request = i915_gem_find_active_request(ring); 990 request = i915_gem_find_active_request(ring);
969 if (request) { 991 if (request) {
992 struct i915_address_space *vm;
993
994 vm = request->ctx && request->ctx->ppgtt ?
995 &request->ctx->ppgtt->base :
996 &dev_priv->gtt.base;
997
970 /* We need to copy these to an anonymous buffer 998 /* We need to copy these to an anonymous buffer
971 * as the simplest method to avoid being overwritten 999 * as the simplest method to avoid being overwritten
972 * by userspace. 1000 * by userspace.
@@ -974,12 +1002,9 @@ static void i915_gem_record_rings(struct drm_device *dev,
974 error->ring[i].batchbuffer = 1002 error->ring[i].batchbuffer =
975 i915_error_object_create(dev_priv, 1003 i915_error_object_create(dev_priv,
976 request->batch_obj, 1004 request->batch_obj,
977 request->ctx ? 1005 vm);
978 request->ctx->vm :
979 &dev_priv->gtt.base);
980 1006
981 if (HAS_BROKEN_CS_TLB(dev_priv->dev) && 1007 if (HAS_BROKEN_CS_TLB(dev_priv->dev))
982 ring->scratch.obj)
983 error->ring[i].wa_batchbuffer = 1008 error->ring[i].wa_batchbuffer =
984 i915_error_ggtt_object_create(dev_priv, 1009 i915_error_ggtt_object_create(dev_priv,
985 ring->scratch.obj); 1010 ring->scratch.obj);
@@ -998,12 +1023,27 @@ static void i915_gem_record_rings(struct drm_device *dev,
998 } 1023 }
999 } 1024 }
1000 1025
1026 if (i915.enable_execlists) {
1027 /* TODO: This is only a small fix to keep basic error
1028 * capture working, but we need to add more information
1029 * for it to be useful (e.g. dump the context being
1030 * executed).
1031 */
1032 if (request)
1033 rbuf = request->ctx->engine[ring->id].ringbuf;
1034 else
1035 rbuf = ring->default_context->engine[ring->id].ringbuf;
1036 } else
1037 rbuf = ring->buffer;
1038
1039 error->ring[i].cpu_ring_head = rbuf->head;
1040 error->ring[i].cpu_ring_tail = rbuf->tail;
1041
1001 error->ring[i].ringbuffer = 1042 error->ring[i].ringbuffer =
1002 i915_error_ggtt_object_create(dev_priv, ring->buffer->obj); 1043 i915_error_ggtt_object_create(dev_priv, rbuf->obj);
1003 1044
1004 if (ring->status_page.obj) 1045 error->ring[i].hws_page =
1005 error->ring[i].hws_page = 1046 i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
1006 i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
1007 1047
1008 i915_gem_record_active_context(ring, error, &error->ring[i]); 1048 i915_gem_record_active_context(ring, error, &error->ring[i]);
1009 1049
@@ -1049,9 +1089,14 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
1049 list_for_each_entry(vma, &vm->active_list, mm_list) 1089 list_for_each_entry(vma, &vm->active_list, mm_list)
1050 i++; 1090 i++;
1051 error->active_bo_count[ndx] = i; 1091 error->active_bo_count[ndx] = i;
1052 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 1092
1053 if (i915_gem_obj_is_pinned(obj)) 1093 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1054 i++; 1094 list_for_each_entry(vma, &obj->vma_list, vma_link)
1095 if (vma->vm == vm && vma->pin_count > 0) {
1096 i++;
1097 break;
1098 }
1099 }
1055 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; 1100 error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
1056 1101
1057 if (i) { 1102 if (i) {
@@ -1070,7 +1115,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
1070 error->pinned_bo_count[ndx] = 1115 error->pinned_bo_count[ndx] =
1071 capture_pinned_bo(pinned_bo, 1116 capture_pinned_bo(pinned_bo,
1072 error->pinned_bo_count[ndx], 1117 error->pinned_bo_count[ndx],
1073 &dev_priv->mm.bound_list); 1118 &dev_priv->mm.bound_list, vm);
1074 error->active_bo[ndx] = active_bo; 1119 error->active_bo[ndx] = active_bo;
1075 error->pinned_bo[ndx] = pinned_bo; 1120 error->pinned_bo[ndx] = pinned_bo;
1076} 1121}
@@ -1091,8 +1136,25 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
1091 error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), 1136 error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
1092 GFP_ATOMIC); 1137 GFP_ATOMIC);
1093 1138
1094 list_for_each_entry(vm, &dev_priv->vm_list, global_link) 1139 if (error->active_bo == NULL ||
1095 i915_gem_capture_vm(dev_priv, error, vm, i++); 1140 error->pinned_bo == NULL ||
1141 error->active_bo_count == NULL ||
1142 error->pinned_bo_count == NULL) {
1143 kfree(error->active_bo);
1144 kfree(error->active_bo_count);
1145 kfree(error->pinned_bo);
1146 kfree(error->pinned_bo_count);
1147
1148 error->active_bo = NULL;
1149 error->active_bo_count = NULL;
1150 error->pinned_bo = NULL;
1151 error->pinned_bo_count = NULL;
1152 } else {
1153 list_for_each_entry(vm, &dev_priv->vm_list, global_link)
1154 i915_gem_capture_vm(dev_priv, error, vm, i++);
1155
1156 error->vm_count = cnt;
1157 }
1096} 1158}
1097 1159
1098/* Capture all registers which don't fit into another category. */ 1160/* Capture all registers which don't fit into another category. */
@@ -1295,11 +1357,11 @@ void i915_destroy_error_state(struct drm_device *dev)
1295 kref_put(&error->ref, i915_error_state_free); 1357 kref_put(&error->ref, i915_error_state_free);
1296} 1358}
1297 1359
1298const char *i915_cache_level_str(int type) 1360const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1299{ 1361{
1300 switch (type) { 1362 switch (type) {
1301 case I915_CACHE_NONE: return " uncached"; 1363 case I915_CACHE_NONE: return " uncached";
1302 case I915_CACHE_LLC: return " snooped or LLC"; 1364 case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
1303 case I915_CACHE_L3_LLC: return " L3+LLC"; 1365 case I915_CACHE_L3_LLC: return " L3+LLC";
1304 case I915_CACHE_WT: return " WT"; 1366 case I915_CACHE_WT: return " WT";
1305 default: return ""; 1367 default: return "";
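
Note: the i915_gem_capture_buffers() hunk above now treats its four GFP_ATOMIC allocations as all-or-nothing: if any kcalloc() fails, everything already allocated is freed and the pointers are reset, so the rest of the capture sees a consistent, empty state. A minimal sketch of that pattern (illustrative names, two arrays standing in for the four, assumes <linux/slab.h>; kfree(NULL) is a no-op, so the error path can free unconditionally):

	/* Sketch only, not driver code: allocate all arrays or none. */
	struct capture_counts {
		u32 *active;
		u32 *pinned;
	};

	static void capture_counts_alloc(struct capture_counts *c, int cnt)
	{
		c->active = kcalloc(cnt, sizeof(*c->active), GFP_ATOMIC);
		c->pinned = kcalloc(cnt, sizeof(*c->pinned), GFP_ATOMIC);

		if (!c->active || !c->pinned) {
			kfree(c->active);
			kfree(c->pinned);
			c->active = NULL;
			c->pinned = NULL;
			return;	/* leave the capture empty but consistent */
		}

		/* ...walk each VM and fill the arrays, as the real code does... */
	}
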
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0050ee9470f1..3201986bf25e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
151{ 151{
152 assert_spin_locked(&dev_priv->irq_lock); 152 assert_spin_locked(&dev_priv->irq_lock);
153 153
154 if (!intel_irqs_enabled(dev_priv)) 154 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
155 return; 155 return;
156 156
157 if ((dev_priv->irq_mask & mask) != mask) { 157 if ((dev_priv->irq_mask & mask) != mask) {
@@ -238,7 +238,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
238 238
239 assert_spin_locked(&dev_priv->irq_lock); 239 assert_spin_locked(&dev_priv->irq_lock);
240 240
241 for_each_pipe(pipe) { 241 for_each_pipe(dev_priv, pipe) {
242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
243 243
244 if (crtc->cpu_fifo_underrun_disabled) 244 if (crtc->cpu_fifo_underrun_disabled)
@@ -296,7 +296,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
296 296
297 assert_spin_locked(&dev_priv->irq_lock); 297 assert_spin_locked(&dev_priv->irq_lock);
298 298
299 for_each_pipe(pipe) { 299 for_each_pipe(dev_priv, pipe) {
300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
301 301
302 if (crtc->pch_fifo_underrun_disabled) 302 if (crtc->pch_fifo_underrun_disabled)
@@ -497,7 +497,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
497 old = !intel_crtc->cpu_fifo_underrun_disabled; 497 old = !intel_crtc->cpu_fifo_underrun_disabled;
498 intel_crtc->cpu_fifo_underrun_disabled = !enable; 498 intel_crtc->cpu_fifo_underrun_disabled = !enable;
499 499
500 if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) 500 if (HAS_GMCH_DISPLAY(dev))
501 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); 501 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
502 else if (IS_GEN5(dev) || IS_GEN6(dev)) 502 else if (IS_GEN5(dev) || IS_GEN6(dev))
503 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 503 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -1020,7 +1020,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
1020 1020
1021 /* In vblank? */ 1021 /* In vblank? */
1022 if (in_vbl) 1022 if (in_vbl)
1023 ret |= DRM_SCANOUTPOS_INVBL; 1023 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1024 1024
1025 return ret; 1025 return ret;
1026} 1026}
@@ -1322,10 +1322,10 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1322 * @dev_priv: DRM device private 1322 * @dev_priv: DRM device private
1323 * 1323 *
1324 */ 1324 */
1325static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) 1325static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1326{ 1326{
1327 u32 residency_C0_up = 0, residency_C0_down = 0; 1327 u32 residency_C0_up = 0, residency_C0_down = 0;
1328 u8 new_delay, adj; 1328 int new_delay, adj;
1329 1329
1330 dev_priv->rps.ei_interrupt_count++; 1330 dev_priv->rps.ei_interrupt_count++;
1331 1331
@@ -1627,6 +1627,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1627 struct drm_i915_private *dev_priv, 1627 struct drm_i915_private *dev_priv,
1628 u32 master_ctl) 1628 u32 master_ctl)
1629{ 1629{
1630 struct intel_engine_cs *ring;
1630 u32 rcs, bcs, vcs; 1631 u32 rcs, bcs, vcs;
1631 uint32_t tmp = 0; 1632 uint32_t tmp = 0;
1632 irqreturn_t ret = IRQ_NONE; 1633 irqreturn_t ret = IRQ_NONE;
@@ -1636,12 +1637,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1636 if (tmp) { 1637 if (tmp) {
1637 I915_WRITE(GEN8_GT_IIR(0), tmp); 1638 I915_WRITE(GEN8_GT_IIR(0), tmp);
1638 ret = IRQ_HANDLED; 1639 ret = IRQ_HANDLED;
1640
1639 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1641 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1640 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1642 ring = &dev_priv->ring[RCS];
1641 if (rcs & GT_RENDER_USER_INTERRUPT) 1643 if (rcs & GT_RENDER_USER_INTERRUPT)
1642 notify_ring(dev, &dev_priv->ring[RCS]); 1644 notify_ring(dev, ring);
1645 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1646 intel_execlists_handle_ctx_events(ring);
1647
1648 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1649 ring = &dev_priv->ring[BCS];
1643 if (bcs & GT_RENDER_USER_INTERRUPT) 1650 if (bcs & GT_RENDER_USER_INTERRUPT)
1644 notify_ring(dev, &dev_priv->ring[BCS]); 1651 notify_ring(dev, ring);
1652 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1653 intel_execlists_handle_ctx_events(ring);
1645 } else 1654 } else
1646 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1655 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1647 } 1656 }
@@ -1651,12 +1660,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1651 if (tmp) { 1660 if (tmp) {
1652 I915_WRITE(GEN8_GT_IIR(1), tmp); 1661 I915_WRITE(GEN8_GT_IIR(1), tmp);
1653 ret = IRQ_HANDLED; 1662 ret = IRQ_HANDLED;
1663
1654 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1664 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1665 ring = &dev_priv->ring[VCS];
1655 if (vcs & GT_RENDER_USER_INTERRUPT) 1666 if (vcs & GT_RENDER_USER_INTERRUPT)
1656 notify_ring(dev, &dev_priv->ring[VCS]); 1667 notify_ring(dev, ring);
1668 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1669 intel_execlists_handle_ctx_events(ring);
1670
1657 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; 1671 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1672 ring = &dev_priv->ring[VCS2];
1658 if (vcs & GT_RENDER_USER_INTERRUPT) 1673 if (vcs & GT_RENDER_USER_INTERRUPT)
1659 notify_ring(dev, &dev_priv->ring[VCS2]); 1674 notify_ring(dev, ring);
1675 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1676 intel_execlists_handle_ctx_events(ring);
1660 } else 1677 } else
1661 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1678 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1662 } 1679 }
@@ -1677,9 +1694,13 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1677 if (tmp) { 1694 if (tmp) {
1678 I915_WRITE(GEN8_GT_IIR(3), tmp); 1695 I915_WRITE(GEN8_GT_IIR(3), tmp);
1679 ret = IRQ_HANDLED; 1696 ret = IRQ_HANDLED;
1697
1680 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1698 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1699 ring = &dev_priv->ring[VECS];
1681 if (vcs & GT_RENDER_USER_INTERRUPT) 1700 if (vcs & GT_RENDER_USER_INTERRUPT)
1682 notify_ring(dev, &dev_priv->ring[VECS]); 1701 notify_ring(dev, ring);
1702 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1703 intel_execlists_handle_ctx_events(ring);
1683 } else 1704 } else
1684 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1705 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1685 } 1706 }
@@ -1772,7 +1793,9 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1772 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1793 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1773 } 1794 }
1774 1795
1775 DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); 1796 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1797 port_name(port),
1798 long_hpd ? "long" : "short");
1776 /* for long HPD pulses we want to have the digital queue happen, 1799 /* for long HPD pulses we want to have the digital queue happen,
1777 but we still want HPD storm detection to function. */ 1800 but we still want HPD storm detection to function. */
1778 if (long_hpd) { 1801 if (long_hpd) {
@@ -1984,14 +2007,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1984 2007
1985static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 2008static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1986{ 2009{
1987 struct intel_crtc *crtc;
1988
1989 if (!drm_handle_vblank(dev, pipe)) 2010 if (!drm_handle_vblank(dev, pipe))
1990 return false; 2011 return false;
1991 2012
1992 crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
1993 wake_up(&crtc->vbl_wait);
1994
1995 return true; 2013 return true;
1996} 2014}
1997 2015
@@ -2002,7 +2020,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2002 int pipe; 2020 int pipe;
2003 2021
2004 spin_lock(&dev_priv->irq_lock); 2022 spin_lock(&dev_priv->irq_lock);
2005 for_each_pipe(pipe) { 2023 for_each_pipe(dev_priv, pipe) {
2006 int reg; 2024 int reg;
2007 u32 mask, iir_bit = 0; 2025 u32 mask, iir_bit = 0;
2008 2026
@@ -2047,9 +2065,10 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2047 } 2065 }
2048 spin_unlock(&dev_priv->irq_lock); 2066 spin_unlock(&dev_priv->irq_lock);
2049 2067
2050 for_each_pipe(pipe) { 2068 for_each_pipe(dev_priv, pipe) {
2051 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2069 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2052 intel_pipe_handle_vblank(dev, pipe); 2070 intel_pipe_handle_vblank(dev, pipe))
2071 intel_check_page_flip(dev, pipe);
2053 2072
2054 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 2073 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2055 intel_prepare_page_flip(dev, pipe); 2074 intel_prepare_page_flip(dev, pipe);
@@ -2216,7 +2235,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2216 DRM_ERROR("PCH poison interrupt\n"); 2235 DRM_ERROR("PCH poison interrupt\n");
2217 2236
2218 if (pch_iir & SDE_FDI_MASK) 2237 if (pch_iir & SDE_FDI_MASK)
2219 for_each_pipe(pipe) 2238 for_each_pipe(dev_priv, pipe)
2220 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2239 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2221 pipe_name(pipe), 2240 pipe_name(pipe),
2222 I915_READ(FDI_RX_IIR(pipe))); 2241 I915_READ(FDI_RX_IIR(pipe)));
@@ -2247,7 +2266,7 @@ static void ivb_err_int_handler(struct drm_device *dev)
2247 if (err_int & ERR_INT_POISON) 2266 if (err_int & ERR_INT_POISON)
2248 DRM_ERROR("Poison interrupt\n"); 2267 DRM_ERROR("Poison interrupt\n");
2249 2268
2250 for_each_pipe(pipe) { 2269 for_each_pipe(dev_priv, pipe) {
2251 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 2270 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2252 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2271 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2253 false)) 2272 false))
@@ -2324,7 +2343,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2324 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2343 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2325 2344
2326 if (pch_iir & SDE_FDI_MASK_CPT) 2345 if (pch_iir & SDE_FDI_MASK_CPT)
2327 for_each_pipe(pipe) 2346 for_each_pipe(dev_priv, pipe)
2328 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2347 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2329 pipe_name(pipe), 2348 pipe_name(pipe),
2330 I915_READ(FDI_RX_IIR(pipe))); 2349 I915_READ(FDI_RX_IIR(pipe)));
@@ -2347,9 +2366,10 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2347 if (de_iir & DE_POISON) 2366 if (de_iir & DE_POISON)
2348 DRM_ERROR("Poison interrupt\n"); 2367 DRM_ERROR("Poison interrupt\n");
2349 2368
2350 for_each_pipe(pipe) { 2369 for_each_pipe(dev_priv, pipe) {
2351 if (de_iir & DE_PIPE_VBLANK(pipe)) 2370 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2352 intel_pipe_handle_vblank(dev, pipe); 2371 intel_pipe_handle_vblank(dev, pipe))
2372 intel_check_page_flip(dev, pipe);
2353 2373
2354 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2374 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2355 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2375 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
@@ -2397,9 +2417,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2397 if (de_iir & DE_GSE_IVB) 2417 if (de_iir & DE_GSE_IVB)
2398 intel_opregion_asle_intr(dev); 2418 intel_opregion_asle_intr(dev);
2399 2419
2400 for_each_pipe(pipe) { 2420 for_each_pipe(dev_priv, pipe) {
2401 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2421 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2402 intel_pipe_handle_vblank(dev, pipe); 2422 intel_pipe_handle_vblank(dev, pipe))
2423 intel_check_page_flip(dev, pipe);
2403 2424
2404 /* plane/pipes map 1:1 on ilk+ */ 2425 /* plane/pipes map 1:1 on ilk+ */
2405 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2426 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
@@ -2544,7 +2565,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2544 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2565 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2545 } 2566 }
2546 2567
2547 for_each_pipe(pipe) { 2568 for_each_pipe(dev_priv, pipe) {
2548 uint32_t pipe_iir; 2569 uint32_t pipe_iir;
2549 2570
2550 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2571 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
@@ -2554,8 +2575,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2554 if (pipe_iir) { 2575 if (pipe_iir) {
2555 ret = IRQ_HANDLED; 2576 ret = IRQ_HANDLED;
2556 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2577 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2557 if (pipe_iir & GEN8_PIPE_VBLANK) 2578 if (pipe_iir & GEN8_PIPE_VBLANK &&
2558 intel_pipe_handle_vblank(dev, pipe); 2579 intel_pipe_handle_vblank(dev, pipe))
2580 intel_check_page_flip(dev, pipe);
2559 2581
2560 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2582 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2561 intel_prepare_page_flip(dev, pipe); 2583 intel_prepare_page_flip(dev, pipe);
@@ -2763,7 +2785,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2763 2785
2764 if (eir & I915_ERROR_MEMORY_REFRESH) { 2786 if (eir & I915_ERROR_MEMORY_REFRESH) {
2765 pr_err("memory refresh error:\n"); 2787 pr_err("memory refresh error:\n");
2766 for_each_pipe(pipe) 2788 for_each_pipe(dev_priv, pipe)
2767 pr_err("pipe %c stat: 0x%08x\n", 2789 pr_err("pipe %c stat: 0x%08x\n",
2768 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2790 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2769 /* pipestat has already been acked */ 2791 /* pipestat has already been acked */
@@ -2860,52 +2882,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
2860 schedule_work(&dev_priv->gpu_error.work); 2882 schedule_work(&dev_priv->gpu_error.work);
2861} 2883}
2862 2884
2863static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2864{
2865 struct drm_i915_private *dev_priv = dev->dev_private;
2866 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2867 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2868 struct drm_i915_gem_object *obj;
2869 struct intel_unpin_work *work;
2870 unsigned long flags;
2871 bool stall_detected;
2872
2873 /* Ignore early vblank irqs */
2874 if (intel_crtc == NULL)
2875 return;
2876
2877 spin_lock_irqsave(&dev->event_lock, flags);
2878 work = intel_crtc->unpin_work;
2879
2880 if (work == NULL ||
2881 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2882 !work->enable_stall_check) {
2883 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2884 spin_unlock_irqrestore(&dev->event_lock, flags);
2885 return;
2886 }
2887
2888 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2889 obj = work->pending_flip_obj;
2890 if (INTEL_INFO(dev)->gen >= 4) {
2891 int dspsurf = DSPSURF(intel_crtc->plane);
2892 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2893 i915_gem_obj_ggtt_offset(obj);
2894 } else {
2895 int dspaddr = DSPADDR(intel_crtc->plane);
2896 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2897 crtc->y * crtc->primary->fb->pitches[0] +
2898 crtc->x * crtc->primary->fb->bits_per_pixel/8);
2899 }
2900
2901 spin_unlock_irqrestore(&dev->event_lock, flags);
2902
2903 if (stall_detected) {
2904 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2905 intel_prepare_page_flip(dev, intel_crtc->plane);
2906 }
2907}
2908
2909/* Called from drm generic code, passed 'crtc' which 2885/* Called from drm generic code, passed 'crtc' which
2910 * we use as a pipe index 2886 * we use as a pipe index
2911 */ 2887 */
@@ -3441,7 +3417,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
3441 3417
3442 I915_WRITE(PORT_HOTPLUG_EN, 0); 3418 I915_WRITE(PORT_HOTPLUG_EN, 0);
3443 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3419 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3444 for_each_pipe(pipe) 3420 for_each_pipe(dev_priv, pipe)
3445 I915_WRITE(PIPESTAT(pipe), 0xffff); 3421 I915_WRITE(PIPESTAT(pipe), 0xffff);
3446 I915_WRITE(VLV_IIR, 0xffffffff); 3422 I915_WRITE(VLV_IIR, 0xffffffff);
3447 I915_WRITE(VLV_IMR, 0xffffffff); 3423 I915_WRITE(VLV_IMR, 0xffffffff);
@@ -3467,7 +3443,7 @@ static void gen8_irq_reset(struct drm_device *dev)
3467 3443
3468 gen8_gt_irq_reset(dev_priv); 3444 gen8_gt_irq_reset(dev_priv);
3469 3445
3470 for_each_pipe(pipe) 3446 for_each_pipe(dev_priv, pipe)
3471 if (intel_display_power_enabled(dev_priv, 3447 if (intel_display_power_enabled(dev_priv,
3472 POWER_DOMAIN_PIPE(pipe))) 3448 POWER_DOMAIN_PIPE(pipe)))
3473 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3449 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
@@ -3510,7 +3486,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3510 I915_WRITE(PORT_HOTPLUG_EN, 0); 3486 I915_WRITE(PORT_HOTPLUG_EN, 0);
3511 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3487 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3512 3488
3513 for_each_pipe(pipe) 3489 for_each_pipe(dev_priv, pipe)
3514 I915_WRITE(PIPESTAT(pipe), 0xffff); 3490 I915_WRITE(PIPESTAT(pipe), 0xffff);
3515 3491
3516 I915_WRITE(VLV_IMR, 0xffffffff); 3492 I915_WRITE(VLV_IMR, 0xffffffff);
@@ -3522,18 +3498,17 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3522static void ibx_hpd_irq_setup(struct drm_device *dev) 3498static void ibx_hpd_irq_setup(struct drm_device *dev)
3523{ 3499{
3524 struct drm_i915_private *dev_priv = dev->dev_private; 3500 struct drm_i915_private *dev_priv = dev->dev_private;
3525 struct drm_mode_config *mode_config = &dev->mode_config;
3526 struct intel_encoder *intel_encoder; 3501 struct intel_encoder *intel_encoder;
3527 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3502 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3528 3503
3529 if (HAS_PCH_IBX(dev)) { 3504 if (HAS_PCH_IBX(dev)) {
3530 hotplug_irqs = SDE_HOTPLUG_MASK; 3505 hotplug_irqs = SDE_HOTPLUG_MASK;
3531 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3506 for_each_intel_encoder(dev, intel_encoder)
3532 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3507 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3533 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3508 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3534 } else { 3509 } else {
3535 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3510 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3536 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3511 for_each_intel_encoder(dev, intel_encoder)
3537 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3512 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3538 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3513 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3539 } 3514 }
@@ -3782,28 +3757,31 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
3782 3757
3783static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3758static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3784{ 3759{
3785 int i;
3786
3787 /* These are interrupts we'll toggle with the ring mask register */ 3760 /* These are interrupts we'll toggle with the ring mask register */
3788 uint32_t gt_interrupts[] = { 3761 uint32_t gt_interrupts[] = {
3789 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3762 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3763 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3790 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3764 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3791 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3765 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3766 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3792 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3767 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3793 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3768 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3769 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3770 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3794 0, 3771 0,
3795 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3772 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3773 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3796 }; 3774 };
3797 3775
3798 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3799 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3800
3801 dev_priv->pm_irq_mask = 0xffffffff; 3776 dev_priv->pm_irq_mask = 0xffffffff;
3777 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3778 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3779 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3780 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3802} 3781}
3803 3782
3804static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3783static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3805{ 3784{
3806 struct drm_device *dev = dev_priv->dev;
3807 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3785 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3808 GEN8_PIPE_CDCLK_CRC_DONE | 3786 GEN8_PIPE_CDCLK_CRC_DONE |
3809 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3787 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
@@ -3814,7 +3792,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3814 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3792 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3815 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3793 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3816 3794
3817 for_each_pipe(pipe) 3795 for_each_pipe(dev_priv, pipe)
3818 if (intel_display_power_enabled(dev_priv, 3796 if (intel_display_power_enabled(dev_priv,
3819 POWER_DOMAIN_PIPE(pipe))) 3797 POWER_DOMAIN_PIPE(pipe)))
3820 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3798 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
@@ -3859,12 +3837,12 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
3859 */ 3837 */
3860 dev_priv->irq_mask = ~enable_mask; 3838 dev_priv->irq_mask = ~enable_mask;
3861 3839
3862 for_each_pipe(pipe) 3840 for_each_pipe(dev_priv, pipe)
3863 I915_WRITE(PIPESTAT(pipe), 0xffff); 3841 I915_WRITE(PIPESTAT(pipe), 0xffff);
3864 3842
3865 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3843 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3866 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3844 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3867 for_each_pipe(pipe) 3845 for_each_pipe(dev_priv, pipe)
3868 i915_enable_pipestat(dev_priv, pipe, pipestat_enable); 3846 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3869 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3847 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3870 3848
@@ -3901,7 +3879,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3901 3879
3902 I915_WRITE(VLV_MASTER_IER, 0); 3880 I915_WRITE(VLV_MASTER_IER, 0);
3903 3881
3904 for_each_pipe(pipe) 3882 for_each_pipe(dev_priv, pipe)
3905 I915_WRITE(PIPESTAT(pipe), 0xffff); 3883 I915_WRITE(PIPESTAT(pipe), 0xffff);
3906 3884
3907 I915_WRITE(HWSTAM, 0xffffffff); 3885 I915_WRITE(HWSTAM, 0xffffffff);
@@ -3963,7 +3941,7 @@ do { \
3963 I915_WRITE(PORT_HOTPLUG_EN, 0); 3941 I915_WRITE(PORT_HOTPLUG_EN, 0);
3964 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3942 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3965 3943
3966 for_each_pipe(pipe) 3944 for_each_pipe(dev_priv, pipe)
3967 I915_WRITE(PIPESTAT(pipe), 0xffff); 3945 I915_WRITE(PIPESTAT(pipe), 0xffff);
3968 3946
3969 I915_WRITE(VLV_IMR, 0xffffffff); 3947 I915_WRITE(VLV_IMR, 0xffffffff);
@@ -3987,7 +3965,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3987 struct drm_i915_private *dev_priv = dev->dev_private; 3965 struct drm_i915_private *dev_priv = dev->dev_private;
3988 int pipe; 3966 int pipe;
3989 3967
3990 for_each_pipe(pipe) 3968 for_each_pipe(dev_priv, pipe)
3991 I915_WRITE(PIPESTAT(pipe), 0); 3969 I915_WRITE(PIPESTAT(pipe), 0);
3992 I915_WRITE16(IMR, 0xffff); 3970 I915_WRITE16(IMR, 0xffff);
3993 I915_WRITE16(IER, 0x0); 3971 I915_WRITE16(IER, 0x0);
@@ -4041,7 +4019,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
4041 return false; 4019 return false;
4042 4020
4043 if ((iir & flip_pending) == 0) 4021 if ((iir & flip_pending) == 0)
4044 return false; 4022 goto check_page_flip;
4045 4023
4046 intel_prepare_page_flip(dev, plane); 4024 intel_prepare_page_flip(dev, plane);
4047 4025
@@ -4052,11 +4030,14 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
4052 * an interrupt per se, we watch for the change at vblank. 4030 * an interrupt per se, we watch for the change at vblank.
4053 */ 4031 */
4054 if (I915_READ16(ISR) & flip_pending) 4032 if (I915_READ16(ISR) & flip_pending)
4055 return false; 4033 goto check_page_flip;
4056 4034
4057 intel_finish_page_flip(dev, pipe); 4035 intel_finish_page_flip(dev, pipe);
4058
4059 return true; 4036 return true;
4037
4038check_page_flip:
4039 intel_check_page_flip(dev, pipe);
4040 return false;
4060} 4041}
4061 4042
4062static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4043static irqreturn_t i8xx_irq_handler(int irq, void *arg)
@@ -4087,7 +4068,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4087 "Command parser error, iir 0x%08x", 4068 "Command parser error, iir 0x%08x",
4088 iir); 4069 iir);
4089 4070
4090 for_each_pipe(pipe) { 4071 for_each_pipe(dev_priv, pipe) {
4091 int reg = PIPESTAT(pipe); 4072 int reg = PIPESTAT(pipe);
4092 pipe_stats[pipe] = I915_READ(reg); 4073 pipe_stats[pipe] = I915_READ(reg);
4093 4074
@@ -4107,7 +4088,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4107 if (iir & I915_USER_INTERRUPT) 4088 if (iir & I915_USER_INTERRUPT)
4108 notify_ring(dev, &dev_priv->ring[RCS]); 4089 notify_ring(dev, &dev_priv->ring[RCS]);
4109 4090
4110 for_each_pipe(pipe) { 4091 for_each_pipe(dev_priv, pipe) {
4111 int plane = pipe; 4092 int plane = pipe;
4112 if (HAS_FBC(dev)) 4093 if (HAS_FBC(dev))
4113 plane = !plane; 4094 plane = !plane;
@@ -4135,7 +4116,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
4135 struct drm_i915_private *dev_priv = dev->dev_private; 4116 struct drm_i915_private *dev_priv = dev->dev_private;
4136 int pipe; 4117 int pipe;
4137 4118
4138 for_each_pipe(pipe) { 4119 for_each_pipe(dev_priv, pipe) {
4139 /* Clear enable bits; then clear status bits */ 4120 /* Clear enable bits; then clear status bits */
4140 I915_WRITE(PIPESTAT(pipe), 0); 4121 I915_WRITE(PIPESTAT(pipe), 0);
4141 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4122 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
@@ -4156,7 +4137,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
4156 } 4137 }
4157 4138
4158 I915_WRITE16(HWSTAM, 0xeffe); 4139 I915_WRITE16(HWSTAM, 0xeffe);
4159 for_each_pipe(pipe) 4140 for_each_pipe(dev_priv, pipe)
4160 I915_WRITE(PIPESTAT(pipe), 0); 4141 I915_WRITE(PIPESTAT(pipe), 0);
4161 I915_WRITE(IMR, 0xffffffff); 4142 I915_WRITE(IMR, 0xffffffff);
4162 I915_WRITE(IER, 0x0); 4143 I915_WRITE(IER, 0x0);
@@ -4226,7 +4207,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
4226 return false; 4207 return false;
4227 4208
4228 if ((iir & flip_pending) == 0) 4209 if ((iir & flip_pending) == 0)
4229 return false; 4210 goto check_page_flip;
4230 4211
4231 intel_prepare_page_flip(dev, plane); 4212 intel_prepare_page_flip(dev, plane);
4232 4213
@@ -4237,11 +4218,14 @@ static bool i915_handle_vblank(struct drm_device *dev,
4237 * an interrupt per se, we watch for the change at vblank. 4218 * an interrupt per se, we watch for the change at vblank.
4238 */ 4219 */
4239 if (I915_READ(ISR) & flip_pending) 4220 if (I915_READ(ISR) & flip_pending)
4240 return false; 4221 goto check_page_flip;
4241 4222
4242 intel_finish_page_flip(dev, pipe); 4223 intel_finish_page_flip(dev, pipe);
4243
4244 return true; 4224 return true;
4225
4226check_page_flip:
4227 intel_check_page_flip(dev, pipe);
4228 return false;
4245} 4229}
4246 4230
4247static irqreturn_t i915_irq_handler(int irq, void *arg) 4231static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -4271,7 +4255,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4271 "Command parser error, iir 0x%08x", 4255 "Command parser error, iir 0x%08x",
4272 iir); 4256 iir);
4273 4257
4274 for_each_pipe(pipe) { 4258 for_each_pipe(dev_priv, pipe) {
4275 int reg = PIPESTAT(pipe); 4259 int reg = PIPESTAT(pipe);
4276 pipe_stats[pipe] = I915_READ(reg); 4260 pipe_stats[pipe] = I915_READ(reg);
4277 4261
@@ -4297,7 +4281,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4297 if (iir & I915_USER_INTERRUPT) 4281 if (iir & I915_USER_INTERRUPT)
4298 notify_ring(dev, &dev_priv->ring[RCS]); 4282 notify_ring(dev, &dev_priv->ring[RCS]);
4299 4283
4300 for_each_pipe(pipe) { 4284 for_each_pipe(dev_priv, pipe) {
4301 int plane = pipe; 4285 int plane = pipe;
4302 if (HAS_FBC(dev)) 4286 if (HAS_FBC(dev))
4303 plane = !plane; 4287 plane = !plane;
@@ -4355,7 +4339,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
4355 } 4339 }
4356 4340
4357 I915_WRITE16(HWSTAM, 0xffff); 4341 I915_WRITE16(HWSTAM, 0xffff);
4358 for_each_pipe(pipe) { 4342 for_each_pipe(dev_priv, pipe) {
4359 /* Clear enable bits; then clear status bits */ 4343 /* Clear enable bits; then clear status bits */
4360 I915_WRITE(PIPESTAT(pipe), 0); 4344 I915_WRITE(PIPESTAT(pipe), 0);
4361 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4345 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
@@ -4375,7 +4359,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
4375 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4359 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4376 4360
4377 I915_WRITE(HWSTAM, 0xeffe); 4361 I915_WRITE(HWSTAM, 0xeffe);
4378 for_each_pipe(pipe) 4362 for_each_pipe(dev_priv, pipe)
4379 I915_WRITE(PIPESTAT(pipe), 0); 4363 I915_WRITE(PIPESTAT(pipe), 0);
4380 I915_WRITE(IMR, 0xffffffff); 4364 I915_WRITE(IMR, 0xffffffff);
4381 I915_WRITE(IER, 0x0); 4365 I915_WRITE(IER, 0x0);
@@ -4444,7 +4428,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
4444static void i915_hpd_irq_setup(struct drm_device *dev) 4428static void i915_hpd_irq_setup(struct drm_device *dev)
4445{ 4429{
4446 struct drm_i915_private *dev_priv = dev->dev_private; 4430 struct drm_i915_private *dev_priv = dev->dev_private;
4447 struct drm_mode_config *mode_config = &dev->mode_config;
4448 struct intel_encoder *intel_encoder; 4431 struct intel_encoder *intel_encoder;
4449 u32 hotplug_en; 4432 u32 hotplug_en;
4450 4433
@@ -4455,7 +4438,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
4455 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4438 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4456 /* Note HDMI and DP share hotplug bits */ 4439 /* Note HDMI and DP share hotplug bits */
4457 /* enable bits are the same for all generations */ 4440 /* enable bits are the same for all generations */
4458 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 4441 for_each_intel_encoder(dev, intel_encoder)
4459 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4442 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4460 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4443 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4461 /* Programming the CRT detection parameters tends 4444 /* Programming the CRT detection parameters tends
@@ -4501,7 +4484,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4501 "Command parser error, iir 0x%08x", 4484 "Command parser error, iir 0x%08x",
4502 iir); 4485 iir);
4503 4486
4504 for_each_pipe(pipe) { 4487 for_each_pipe(dev_priv, pipe) {
4505 int reg = PIPESTAT(pipe); 4488 int reg = PIPESTAT(pipe);
4506 pipe_stats[pipe] = I915_READ(reg); 4489 pipe_stats[pipe] = I915_READ(reg);
4507 4490
@@ -4532,7 +4515,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4532 if (iir & I915_BSD_USER_INTERRUPT) 4515 if (iir & I915_BSD_USER_INTERRUPT)
4533 notify_ring(dev, &dev_priv->ring[VCS]); 4516 notify_ring(dev, &dev_priv->ring[VCS]);
4534 4517
4535 for_each_pipe(pipe) { 4518 for_each_pipe(dev_priv, pipe) {
4536 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4519 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4537 i915_handle_vblank(dev, pipe, pipe, iir)) 4520 i915_handle_vblank(dev, pipe, pipe, iir))
4538 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4521 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
@@ -4589,12 +4572,12 @@ static void i965_irq_uninstall(struct drm_device * dev)
4589 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4572 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4590 4573
4591 I915_WRITE(HWSTAM, 0xffffffff); 4574 I915_WRITE(HWSTAM, 0xffffffff);
4592 for_each_pipe(pipe) 4575 for_each_pipe(dev_priv, pipe)
4593 I915_WRITE(PIPESTAT(pipe), 0); 4576 I915_WRITE(PIPESTAT(pipe), 0);
4594 I915_WRITE(IMR, 0xffffffff); 4577 I915_WRITE(IMR, 0xffffffff);
4595 I915_WRITE(IER, 0x0); 4578 I915_WRITE(IER, 0x0);
4596 4579
4597 for_each_pipe(pipe) 4580 for_each_pipe(dev_priv, pipe)
4598 I915_WRITE(PIPESTAT(pipe), 4581 I915_WRITE(PIPESTAT(pipe),
4599 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4582 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4600 I915_WRITE(IIR, I915_READ(IIR)); 4583 I915_WRITE(IIR, I915_READ(IIR));
@@ -4652,8 +4635,8 @@ void intel_irq_init(struct drm_device *dev)
4652 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4635 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4653 4636
4654 /* Let's track the enabled rps events */ 4637 /* Let's track the enabled rps events */
4655 if (IS_VALLEYVIEW(dev)) 4638 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
4656 /* WaGsvRC0ResidenncyMethod:VLV */ 4639 /* WaGsvRC0ResidencyMethod:vlv */
4657 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4640 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4658 else 4641 else
4659 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4642 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
@@ -4680,6 +4663,14 @@ void intel_irq_init(struct drm_device *dev)
4680 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4663 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4681 } 4664 }
4682 4665
4666 /*
4667 * Opt out of the vblank disable timer on everything except gen2.
4668 * Gen2 doesn't have a hardware frame counter and so depends on
 4669 * vblank interrupts to produce sane vblank sequence numbers.
4670 */
4671 if (!IS_GEN2(dev))
4672 dev->vblank_disable_immediate = true;
4673
4683 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4674 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4684 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4675 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4685 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4676 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
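
Note: nearly every loop in i915_irq.c now uses for_each_pipe(dev_priv, pipe) instead of the old single-argument form, so the iteration bound comes from the device's pipe count rather than a fixed constant. The macro itself is defined in i915_drv.h, not in this diff; a sketch of what it presumably expands to:

	/* Sketch, assuming the usual INTEL_INFO(dev_priv)->num_pipes field;
	 * see i915_drv.h for the real definition. */
	#define for_each_pipe(__dev_priv, __p) \
		for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)

With the device-aware form, loops such as the PIPESTAT clears in the preinstall/uninstall paths only touch pipes the platform actually exposes.
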
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 9842fd2e742a..c91cb2033cc5 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,6 +35,7 @@ struct i915_params i915 __read_mostly = {
35 .vbt_sdvo_panel_type = -1, 35 .vbt_sdvo_panel_type = -1,
36 .enable_rc6 = -1, 36 .enable_rc6 = -1,
37 .enable_fbc = -1, 37 .enable_fbc = -1,
38 .enable_execlists = 0,
38 .enable_hangcheck = true, 39 .enable_hangcheck = true,
39 .enable_ppgtt = -1, 40 .enable_ppgtt = -1,
40 .enable_psr = 0, 41 .enable_psr = 0,
@@ -118,6 +119,11 @@ MODULE_PARM_DESC(enable_ppgtt,
118 "Override PPGTT usage. " 119 "Override PPGTT usage. "
119 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); 120 "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
120 121
122module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
123MODULE_PARM_DESC(enable_execlists,
124 "Override execlists usage. "
125 "(-1=auto, 0=disabled [default], 1=enabled)");
126
121module_param_named(enable_psr, i915.enable_psr, int, 0600); 127module_param_named(enable_psr, i915.enable_psr, int, 0600);
122MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); 128MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
123 129
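
Note: the new i915.enable_execlists parameter is registered with permissions 0400, so it can only be set at load time (for example i915.enable_execlists=1 on the kernel command line) and defaults to off. The error-capture hunk in i915_gpu_error.c earlier in this diff shows the typical consumer pattern, selecting the per-context execlists ring buffer when the parameter is enabled and the legacy ring buffer otherwise:

	/* Paraphrased from the i915_gpu_error.c hunk above. */
	if (i915.enable_execlists)
		rbuf = request->ctx->engine[ring->id].ringbuf;
	else
		rbuf = ring->buffer;
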
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f29b44c86a2f..c01e5f31430e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -143,6 +143,14 @@
143#define GAB_CTL 0x24000 143#define GAB_CTL 0x24000
144#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8) 144#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
145 145
146#define GEN7_BIOS_RESERVED 0x1082C0
147#define GEN7_BIOS_RESERVED_1M (0 << 5)
148#define GEN7_BIOS_RESERVED_256K (1 << 5)
149#define GEN8_BIOS_RESERVED_SHIFT 7
150#define GEN7_BIOS_RESERVED_MASK 0x1
151#define GEN8_BIOS_RESERVED_MASK 0x3
152
153
146/* VGA stuff */ 154/* VGA stuff */
147 155
148#define VGA_ST01_MDA 0x3ba 156#define VGA_ST01_MDA 0x3ba
@@ -272,6 +280,7 @@
272#define MI_SEMAPHORE_POLL (1<<15) 280#define MI_SEMAPHORE_POLL (1<<15)
273#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) 281#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
274#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 282#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
283#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
275#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 284#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
276#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 285#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
277#define MI_STORE_DWORD_INDEX_SHIFT 2 286#define MI_STORE_DWORD_INDEX_SHIFT 2
@@ -282,6 +291,7 @@
282 * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! 291 * address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
283 */ 292 */
284#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) 293#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
294#define MI_LRI_FORCE_POSTED (1<<12)
285#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1) 295#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
286#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1) 296#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
287#define MI_SRM_LRM_GLOBAL_GTT (1<<22) 297#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
@@ -501,10 +511,26 @@
501#define BUNIT_REG_BISOC 0x11 511#define BUNIT_REG_BISOC 0x11
502 512
503#define PUNIT_REG_DSPFREQ 0x36 513#define PUNIT_REG_DSPFREQ 0x36
514#define DSPFREQSTAT_SHIFT_CHV 24
515#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
516#define DSPFREQGUAR_SHIFT_CHV 8
517#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV)
504#define DSPFREQSTAT_SHIFT 30 518#define DSPFREQSTAT_SHIFT 30
505#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) 519#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
506#define DSPFREQGUAR_SHIFT 14 520#define DSPFREQGUAR_SHIFT 14
507#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) 521#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
522#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
523#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
524#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
525#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe))
526#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe))
527#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe))
528#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16))
529#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe))
530#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe))
531#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe))
532#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
533#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
508 534
509/* See the PUNIT HAS v0.8 for the below bits */ 535/* See the PUNIT HAS v0.8 for the below bits */
510enum punit_power_well { 536enum punit_power_well {
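
Note: the _DP_SSC()/_DP_SSS() helpers added under PUNIT_REG_DSPFREQ above pack a two-bit field per pipe, with _DP_SSC() at bits (2*pipe + 1):(2*pipe) and _DP_SSS() placing the matching field 16 bits higher. A worked expansion, assuming the driver's usual enum pipe values (PIPE_A = 0, PIPE_B = 1):

	/*
	 * Illustration only, not part of the patch:
	 *   DP_SSC_PWR_GATE(PIPE_A) = 0x3 << 0  = 0x00000003
	 *   DP_SSC_PWR_GATE(PIPE_B) = 0x3 << 2  = 0x0000000c
	 *   DP_SSS_PWR_GATE(PIPE_A) = 0x3 << 16 = 0x00030000
	 *   DP_SSS_PWR_GATE(PIPE_B) = 0x3 << 18 = 0x000c0000
	 */
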
@@ -518,6 +544,11 @@ enum punit_power_well {
518 PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9, 544 PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
519 PUNIT_POWER_WELL_DPIO_RX0 = 10, 545 PUNIT_POWER_WELL_DPIO_RX0 = 10,
520 PUNIT_POWER_WELL_DPIO_RX1 = 11, 546 PUNIT_POWER_WELL_DPIO_RX1 = 11,
547 PUNIT_POWER_WELL_DPIO_CMN_D = 12,
548 /* FIXME: guesswork below */
549 PUNIT_POWER_WELL_DPIO_TX_D_LANES_01 = 13,
550 PUNIT_POWER_WELL_DPIO_TX_D_LANES_23 = 14,
551 PUNIT_POWER_WELL_DPIO_RX2 = 15,
521 552
522 PUNIT_POWER_WELL_NUM, 553 PUNIT_POWER_WELL_NUM,
523}; 554};
@@ -838,8 +869,8 @@ enum punit_power_well {
838 869
839#define _VLV_TX_DW2_CH0 0x8288 870#define _VLV_TX_DW2_CH0 0x8288
840#define _VLV_TX_DW2_CH1 0x8488 871#define _VLV_TX_DW2_CH1 0x8488
841#define DPIO_SWING_MARGIN_SHIFT 16 872#define DPIO_SWING_MARGIN000_SHIFT 16
842#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT) 873#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT)
843#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8 874#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
844#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1) 875#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
845 876
@@ -847,12 +878,16 @@ enum punit_power_well {
847#define _VLV_TX_DW3_CH1 0x848c 878#define _VLV_TX_DW3_CH1 0x848c
848/* The following bit for CHV phy */ 879/* The following bit for CHV phy */
849#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27) 880#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
881#define DPIO_SWING_MARGIN101_SHIFT 16
882#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
850#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1) 883#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
851 884
852#define _VLV_TX_DW4_CH0 0x8290 885#define _VLV_TX_DW4_CH0 0x8290
853#define _VLV_TX_DW4_CH1 0x8490 886#define _VLV_TX_DW4_CH1 0x8490
854#define DPIO_SWING_DEEMPH9P5_SHIFT 24 887#define DPIO_SWING_DEEMPH9P5_SHIFT 24
855#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT) 888#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
889#define DPIO_SWING_DEEMPH6P0_SHIFT 16
890#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
856#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1) 891#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
857 892
858#define _VLV_TX3_DW4_CH0 0x690 893#define _VLV_TX3_DW4_CH0 0x690
@@ -1003,6 +1038,13 @@ enum punit_power_well {
1003#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ 1038#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
1004#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ 1039#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
1005#define PGTBL_ER 0x02024 1040#define PGTBL_ER 0x02024
1041#define PRB0_BASE (0x2030-0x30)
1042#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
1043#define PRB2_BASE (0x2050-0x30) /* gen3 */
1044#define SRB0_BASE (0x2100-0x30) /* gen2 */
1045#define SRB1_BASE (0x2110-0x30) /* gen2 */
1046#define SRB2_BASE (0x2120-0x30) /* 830 */
1047#define SRB3_BASE (0x2130-0x30) /* 830 */
1006#define RENDER_RING_BASE 0x02000 1048#define RENDER_RING_BASE 0x02000
1007#define BSD_RING_BASE 0x04000 1049#define BSD_RING_BASE 0x04000
1008#define GEN6_BSD_RING_BASE 0x12000 1050#define GEN6_BSD_RING_BASE 0x12000
@@ -1064,6 +1106,7 @@ enum punit_power_well {
1064#define RING_ACTHD_UDW(base) ((base)+0x5c) 1106#define RING_ACTHD_UDW(base) ((base)+0x5c)
1065#define RING_NOPID(base) ((base)+0x94) 1107#define RING_NOPID(base) ((base)+0x94)
1066#define RING_IMR(base) ((base)+0xa8) 1108#define RING_IMR(base) ((base)+0xa8)
1109#define RING_HWSTAM(base) ((base)+0x98)
1067#define RING_TIMESTAMP(base) ((base)+0x358) 1110#define RING_TIMESTAMP(base) ((base)+0x358)
1068#define TAIL_ADDR 0x001FFFF8 1111#define TAIL_ADDR 0x001FFFF8
1069#define HEAD_WRAP_COUNT 0xFFE00000 1112#define HEAD_WRAP_COUNT 0xFFE00000
@@ -1248,6 +1291,10 @@ enum punit_power_well {
1248#define INSTPM_TLB_INVALIDATE (1<<9) 1291#define INSTPM_TLB_INVALIDATE (1<<9)
1249#define INSTPM_SYNC_FLUSH (1<<5) 1292#define INSTPM_SYNC_FLUSH (1<<5)
1250#define ACTHD 0x020c8 1293#define ACTHD 0x020c8
1294#define MEM_MODE 0x020cc
1295#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
1296#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
1297#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
1251#define FW_BLC 0x020d8 1298#define FW_BLC 0x020d8
1252#define FW_BLC2 0x020dc 1299#define FW_BLC2 0x020dc
1253#define FW_BLC_SELF 0x020e0 /* 915+ only */ 1300#define FW_BLC_SELF 0x020e0 /* 915+ only */
@@ -1380,6 +1427,7 @@ enum punit_power_well {
1380#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) 1427#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
1381#define GT_BSD_USER_INTERRUPT (1 << 12) 1428#define GT_BSD_USER_INTERRUPT (1 << 12)
1382#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ 1429#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
1430#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
1383#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ 1431#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
1384#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) 1432#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
1385#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) 1433#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
@@ -1519,6 +1567,7 @@ enum punit_power_well {
1519/* Framebuffer compression for Ironlake */ 1567/* Framebuffer compression for Ironlake */
1520#define ILK_DPFC_CB_BASE 0x43200 1568#define ILK_DPFC_CB_BASE 0x43200
1521#define ILK_DPFC_CONTROL 0x43208 1569#define ILK_DPFC_CONTROL 0x43208
1570#define FBC_CTL_FALSE_COLOR (1<<10)
1522/* The bit 28-8 is reserved */ 1571/* The bit 28-8 is reserved */
1523#define DPFC_RESERVED (0x1FFFFF00) 1572#define DPFC_RESERVED (0x1FFFFF00)
1524#define ILK_DPFC_RECOMP_CTL 0x4320c 1573#define ILK_DPFC_RECOMP_CTL 0x4320c
@@ -1675,12 +1724,9 @@ enum punit_power_well {
1675#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240) 1724#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
1676#define DPLL_PORTD_READY_MASK (0xf) 1725#define DPLL_PORTD_READY_MASK (0xf)
1677#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100) 1726#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
1678#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \ 1727#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
1679 ((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
1680#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
1681 ((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
1682#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104) 1728#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
1683#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30)) 1729#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
1684 1730
1685/* 1731/*
1686 * The i830 generation, in LVDS mode, defines P1 as the bit number set within 1732 * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -2397,6 +2443,7 @@ enum punit_power_well {
2397#define _PIPEASRC 0x6001c 2443#define _PIPEASRC 0x6001c
2398#define _BCLRPAT_A 0x60020 2444#define _BCLRPAT_A 0x60020
2399#define _VSYNCSHIFT_A 0x60028 2445#define _VSYNCSHIFT_A 0x60028
2446#define _PIPE_MULT_A 0x6002c
2400 2447
2401/* Pipe B timing regs */ 2448/* Pipe B timing regs */
2402#define _HTOTAL_B 0x61000 2449#define _HTOTAL_B 0x61000
@@ -2408,6 +2455,7 @@ enum punit_power_well {
2408#define _PIPEBSRC 0x6101c 2455#define _PIPEBSRC 0x6101c
2409#define _BCLRPAT_B 0x61020 2456#define _BCLRPAT_B 0x61020
2410#define _VSYNCSHIFT_B 0x61028 2457#define _VSYNCSHIFT_B 0x61028
2458#define _PIPE_MULT_B 0x6102c
2411 2459
2412#define TRANSCODER_A_OFFSET 0x60000 2460#define TRANSCODER_A_OFFSET 0x60000
2413#define TRANSCODER_B_OFFSET 0x61000 2461#define TRANSCODER_B_OFFSET 0x61000
@@ -2428,6 +2476,7 @@ enum punit_power_well {
2428#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A) 2476#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
2429#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A) 2477#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
2430#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) 2478#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
2479#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
2431 2480
2432/* HSW+ eDP PSR registers */ 2481/* HSW+ eDP PSR registers */
2433#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) 2482#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
@@ -3476,6 +3525,8 @@ enum punit_power_well {
3476#define DP_LINK_TRAIN_OFF (3 << 28) 3525#define DP_LINK_TRAIN_OFF (3 << 28)
3477#define DP_LINK_TRAIN_MASK (3 << 28) 3526#define DP_LINK_TRAIN_MASK (3 << 28)
3478#define DP_LINK_TRAIN_SHIFT 28 3527#define DP_LINK_TRAIN_SHIFT 28
3528#define DP_LINK_TRAIN_PAT_3_CHV (1 << 14)
3529#define DP_LINK_TRAIN_MASK_CHV ((3 << 28)|(1<<14))
3479 3530
3480/* CPT Link training mode */ 3531/* CPT Link training mode */
3481#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8) 3532#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
@@ -3732,7 +3783,6 @@ enum punit_power_well {
3732#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 3783#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
3733#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 3784#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
3734#define PIPE_DPST_EVENT_STATUS (1UL<<7) 3785#define PIPE_DPST_EVENT_STATUS (1UL<<7)
3735#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
3736#define PIPE_A_PSR_STATUS_VLV (1UL<<6) 3786#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
3737#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) 3787#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
3738#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 3788#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
@@ -3842,73 +3892,151 @@ enum punit_power_well {
3842#define DSPARB_BEND_SHIFT 9 /* on 855 */ 3892#define DSPARB_BEND_SHIFT 9 /* on 855 */
3843#define DSPARB_AEND_SHIFT 0 3893#define DSPARB_AEND_SHIFT 0
3844 3894
3895/* pnv/gen4/g4x/vlv/chv */
3845#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) 3896#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
3846#define DSPFW_SR_SHIFT 23 3897#define DSPFW_SR_SHIFT 23
3847#define DSPFW_SR_MASK (0x1ff<<23) 3898#define DSPFW_SR_MASK (0x1ff<<23)
3848#define DSPFW_CURSORB_SHIFT 16 3899#define DSPFW_CURSORB_SHIFT 16
3849#define DSPFW_CURSORB_MASK (0x3f<<16) 3900#define DSPFW_CURSORB_MASK (0x3f<<16)
3850#define DSPFW_PLANEB_SHIFT 8 3901#define DSPFW_PLANEB_SHIFT 8
3851#define DSPFW_PLANEB_MASK (0x7f<<8) 3902#define DSPFW_PLANEB_MASK (0x7f<<8)
3852#define DSPFW_PLANEA_MASK (0x7f) 3903#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
3904#define DSPFW_PLANEA_SHIFT 0
3905#define DSPFW_PLANEA_MASK (0x7f<<0)
3906#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
3853#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038) 3907#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
3854#define DSPFW_CURSORA_MASK 0x00003f00 3908#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
3855#define DSPFW_CURSORA_SHIFT 8 3909#define DSPFW_FBC_SR_SHIFT 28
3856#define DSPFW_PLANEC_MASK (0x7f) 3910#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
3911#define DSPFW_FBC_HPLL_SR_SHIFT 24
3912#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
3913#define DSPFW_SPRITEB_SHIFT (16)
3914#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
3915#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
3916#define DSPFW_CURSORA_SHIFT 8
3917#define DSPFW_CURSORA_MASK (0x3f<<8)
3918#define DSPFW_PLANEC_SHIFT_OLD 0
3919#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
3920#define DSPFW_SPRITEA_SHIFT 0
3921#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
3922#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
3857#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c) 3923#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
3858#define DSPFW_HPLL_SR_EN (1<<31) 3924#define DSPFW_HPLL_SR_EN (1<<31)
3859#define DSPFW_CURSOR_SR_SHIFT 24
3860#define PINEVIEW_SELF_REFRESH_EN (1<<30) 3925#define PINEVIEW_SELF_REFRESH_EN (1<<30)
3926#define DSPFW_CURSOR_SR_SHIFT 24
3861#define DSPFW_CURSOR_SR_MASK (0x3f<<24) 3927#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
3862#define DSPFW_HPLL_CURSOR_SHIFT 16 3928#define DSPFW_HPLL_CURSOR_SHIFT 16
3863#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16) 3929#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
3864#define DSPFW_HPLL_SR_MASK (0x1ff) 3930#define DSPFW_HPLL_SR_SHIFT 0
3865#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070) 3931#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
3866#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c) 3932
3933/* vlv/chv */
3934#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070)
3935#define DSPFW_SPRITEB_WM1_SHIFT 16
3936#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
3937#define DSPFW_CURSORA_WM1_SHIFT 8
3938#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
3939#define DSPFW_SPRITEA_WM1_SHIFT 0
3940#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
3941#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074)
3942#define DSPFW_PLANEB_WM1_SHIFT 24
3943#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
3944#define DSPFW_PLANEA_WM1_SHIFT 16
3945#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
3946#define DSPFW_CURSORB_WM1_SHIFT 8
3947#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
3948#define DSPFW_CURSOR_SR_WM1_SHIFT 0
3949#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
3950#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078)
3951#define DSPFW_SR_WM1_SHIFT 0
3952#define DSPFW_SR_WM1_MASK (0x1ff<<0)
3953#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c)
3954#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
3955#define DSPFW_SPRITED_WM1_SHIFT 24
3956#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
3957#define DSPFW_SPRITED_SHIFT 16
3958#define DSPFW_SPRITED_MASK (0xff<<16)
3959#define DSPFW_SPRITEC_WM1_SHIFT 8
3960#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
3961#define DSPFW_SPRITEC_SHIFT 0
3962#define DSPFW_SPRITEC_MASK (0xff<<0)
3963#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
3964#define DSPFW_SPRITEF_WM1_SHIFT 24
3965#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
3966#define DSPFW_SPRITEF_SHIFT 16
3967#define DSPFW_SPRITEF_MASK (0xff<<16)
3968#define DSPFW_SPRITEE_WM1_SHIFT 8
3969#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
3970#define DSPFW_SPRITEE_SHIFT 0
3971#define DSPFW_SPRITEE_MASK (0xff<<0)
3972#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
3973#define DSPFW_PLANEC_WM1_SHIFT 24
3974#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
3975#define DSPFW_PLANEC_SHIFT 16
3976#define DSPFW_PLANEC_MASK (0xff<<16)
3977#define DSPFW_CURSORC_WM1_SHIFT 8
 3978#define DSPFW_CURSORC_WM1_MASK (0x3f<<8)
3979#define DSPFW_CURSORC_SHIFT 0
3980#define DSPFW_CURSORC_MASK (0x3f<<0)
3981
3982/* vlv/chv high order bits */
3983#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
3984#define DSPFW_SR_HI_SHIFT 24
3985#define DSPFW_SR_HI_MASK (1<<24)
3986#define DSPFW_SPRITEF_HI_SHIFT 23
3987#define DSPFW_SPRITEF_HI_MASK (1<<23)
3988#define DSPFW_SPRITEE_HI_SHIFT 22
3989#define DSPFW_SPRITEE_HI_MASK (1<<22)
3990#define DSPFW_PLANEC_HI_SHIFT 21
3991#define DSPFW_PLANEC_HI_MASK (1<<21)
3992#define DSPFW_SPRITED_HI_SHIFT 20
3993#define DSPFW_SPRITED_HI_MASK (1<<20)
3994#define DSPFW_SPRITEC_HI_SHIFT 16
3995#define DSPFW_SPRITEC_HI_MASK (1<<16)
3996#define DSPFW_PLANEB_HI_SHIFT 12
3997#define DSPFW_PLANEB_HI_MASK (1<<12)
3998#define DSPFW_SPRITEB_HI_SHIFT 8
3999#define DSPFW_SPRITEB_HI_MASK (1<<8)
4000#define DSPFW_SPRITEA_HI_SHIFT 4
4001#define DSPFW_SPRITEA_HI_MASK (1<<4)
4002#define DSPFW_PLANEA_HI_SHIFT 0
4003#define DSPFW_PLANEA_HI_MASK (1<<0)
4004#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
4005#define DSPFW_SR_WM1_HI_SHIFT 24
4006#define DSPFW_SR_WM1_HI_MASK (1<<24)
4007#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
4008#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
4009#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
4010#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
4011#define DSPFW_PLANEC_WM1_HI_SHIFT 21
4012#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
4013#define DSPFW_SPRITED_WM1_HI_SHIFT 20
4014#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
4015#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
4016#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
4017#define DSPFW_PLANEB_WM1_HI_SHIFT 12
4018#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
4019#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
4020#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
4021#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
4022#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
4023#define DSPFW_PLANEA_WM1_HI_SHIFT 0
4024#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
3867 4025
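On vlv/chv the plane and sprite watermark fields above are 9 bits wide: bits 7:0 sit in the DSPFW registers and bit 8 in DSPHOWM/DSPHOWM1. A rough sketch (illustration only, not from this series; it assumes the DSPFW1 definition earlier in this header) of splitting a 9-bit plane A watermark across the two registers:

	static void vlv_write_wm_planea(struct drm_i915_private *dev_priv, u16 wm)
	{
		u32 fw = I915_READ(DSPFW1) & ~DSPFW_PLANEA_MASK_VLV;
		u32 hi = I915_READ(DSPHOWM) & ~DSPFW_PLANEA_HI_MASK;

		/* low 8 bits go to DSPFW1, the 9th bit to DSPHOWM */
		I915_WRITE(DSPFW1, fw | ((wm & 0xff) << DSPFW_PLANEA_SHIFT));
		I915_WRITE(DSPHOWM, hi | (((wm >> 8) & 0x1) << DSPFW_PLANEA_HI_SHIFT));
	}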
3868/* drain latency register values*/ 4026/* drain latency register values*/
3869#define DRAIN_LATENCY_PRECISION_32 32 4027#define DRAIN_LATENCY_PRECISION_32 32
3870#define DRAIN_LATENCY_PRECISION_64 64 4028#define DRAIN_LATENCY_PRECISION_64 64
3871#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) 4029#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
3872#define DDL_CURSORA_PRECISION_64 (1<<31) 4030#define DDL_CURSOR_PRECISION_64 (1<<31)
3873#define DDL_CURSORA_PRECISION_32 (0<<31) 4031#define DDL_CURSOR_PRECISION_32 (0<<31)
3874#define DDL_CURSORA_SHIFT 24 4032#define DDL_CURSOR_SHIFT 24
3875#define DDL_SPRITEB_PRECISION_64 (1<<23) 4033#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite)))
3876#define DDL_SPRITEB_PRECISION_32 (0<<23) 4034#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite)))
3877#define DDL_SPRITEB_SHIFT 16 4035#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
3878#define DDL_SPRITEA_PRECISION_64 (1<<15) 4036#define DDL_PLANE_PRECISION_64 (1<<7)
3879#define DDL_SPRITEA_PRECISION_32 (0<<15) 4037#define DDL_PLANE_PRECISION_32 (0<<7)
3880#define DDL_SPRITEA_SHIFT 8 4038#define DDL_PLANE_SHIFT 0
3881#define DDL_PLANEA_PRECISION_64 (1<<7) 4039#define DRAIN_LATENCY_MASK 0x7f
3882#define DDL_PLANEA_PRECISION_32 (0<<7)
3883#define DDL_PLANEA_SHIFT 0
3884
3885#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
3886#define DDL_CURSORB_PRECISION_64 (1<<31)
3887#define DDL_CURSORB_PRECISION_32 (0<<31)
3888#define DDL_CURSORB_SHIFT 24
3889#define DDL_SPRITED_PRECISION_64 (1<<23)
3890#define DDL_SPRITED_PRECISION_32 (0<<23)
3891#define DDL_SPRITED_SHIFT 16
3892#define DDL_SPRITEC_PRECISION_64 (1<<15)
3893#define DDL_SPRITEC_PRECISION_32 (0<<15)
3894#define DDL_SPRITEC_SHIFT 8
3895#define DDL_PLANEB_PRECISION_64 (1<<7)
3896#define DDL_PLANEB_PRECISION_32 (0<<7)
3897#define DDL_PLANEB_SHIFT 0
3898
3899#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
3900#define DDL_CURSORC_PRECISION_64 (1<<31)
3901#define DDL_CURSORC_PRECISION_32 (0<<31)
3902#define DDL_CURSORC_SHIFT 24
3903#define DDL_SPRITEF_PRECISION_64 (1<<23)
3904#define DDL_SPRITEF_PRECISION_32 (0<<23)
3905#define DDL_SPRITEF_SHIFT 16
3906#define DDL_SPRITEE_PRECISION_64 (1<<15)
3907#define DDL_SPRITEE_PRECISION_32 (0<<15)
3908#define DDL_SPRITEE_SHIFT 8
3909#define DDL_PLANEC_PRECISION_64 (1<<7)
3910#define DDL_PLANEC_PRECISION_32 (0<<7)
3911#define DDL_PLANEC_SHIFT 0
3912 4040
3913/* FIFO watermark sizes etc */ 4041/* FIFO watermark sizes etc */
3914#define G4X_FIFO_LINE_SIZE 64 4042#define G4X_FIFO_LINE_SIZE 64
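The per-pipe VLV_DDL1/2/3 registers and their per-plane DDL_* fields collapse into the parameterized forms above. As a hedged illustration only (drain_latency is a placeholder value, not from the patch), setting 64-precision drain latency for sprite 1 on pipe B, which used to be the DDL_SPRITED_* fields of VLV_DDL2:

	unsigned int drain_latency = 32;	/* placeholder value */
	u32 val = I915_READ(VLV_DDL(PIPE_B));

	val &= ~(DDL_SPRITE_PRECISION_64(1) |
		 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(1)));
	val |= DDL_SPRITE_PRECISION_64(1) |
	       (drain_latency << DDL_SPRITE_SHIFT(1));
	I915_WRITE(VLV_DDL(PIPE_B), val);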
@@ -4026,7 +4154,8 @@ enum punit_power_well {
4026/* Old style CUR*CNTR flags (desktop 8xx) */ 4154/* Old style CUR*CNTR flags (desktop 8xx) */
4027#define CURSOR_ENABLE 0x80000000 4155#define CURSOR_ENABLE 0x80000000
4028#define CURSOR_GAMMA_ENABLE 0x40000000 4156#define CURSOR_GAMMA_ENABLE 0x40000000
4029#define CURSOR_STRIDE_MASK 0x30000000 4157#define CURSOR_STRIDE_SHIFT 28
4158#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
4030#define CURSOR_PIPE_CSC_ENABLE (1<<24) 4159#define CURSOR_PIPE_CSC_ENABLE (1<<24)
4031#define CURSOR_FORMAT_SHIFT 24 4160#define CURSOR_FORMAT_SHIFT 24
4032#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) 4161#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
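CURSOR_STRIDE() packs the four legal cursor strides into the two bits the old CURSOR_STRIDE_MASK covered; since ffs() is 1-based, working the macro through gives:

	CURSOR_STRIDE(256)	/* ffs(256)  - 9 = 0  ->  0 << 28 */
	CURSOR_STRIDE(512)	/* ffs(512)  - 9 = 1  ->  1 << 28 */
	CURSOR_STRIDE(1024)	/* ffs(1024) - 9 = 2  ->  2 << 28 */
	CURSOR_STRIDE(2048)	/* ffs(2048) - 9 = 3  ->  3 << 28 */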
@@ -4111,6 +4240,7 @@ enum punit_power_well {
4111#define DISPPLANE_NO_LINE_DOUBLE 0 4240#define DISPPLANE_NO_LINE_DOUBLE 0
4112#define DISPPLANE_STEREO_POLARITY_FIRST 0 4241#define DISPPLANE_STEREO_POLARITY_FIRST 0
4113#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 4242#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
4243#define DISPPLANE_ROTATE_180 (1<<15)
4114#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 4244#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
4115#define DISPPLANE_TILED (1<<10) 4245#define DISPPLANE_TILED (1<<10)
4116#define _DSPAADDR 0x70184 4246#define _DSPAADDR 0x70184
@@ -4195,6 +4325,7 @@ enum punit_power_well {
4195#define DVS_YUV_ORDER_UYVY (1<<16) 4325#define DVS_YUV_ORDER_UYVY (1<<16)
4196#define DVS_YUV_ORDER_YVYU (2<<16) 4326#define DVS_YUV_ORDER_YVYU (2<<16)
4197#define DVS_YUV_ORDER_VYUY (3<<16) 4327#define DVS_YUV_ORDER_VYUY (3<<16)
4328#define DVS_ROTATE_180 (1<<15)
4198#define DVS_DEST_KEY (1<<2) 4329#define DVS_DEST_KEY (1<<2)
4199#define DVS_TRICKLE_FEED_DISABLE (1<<14) 4330#define DVS_TRICKLE_FEED_DISABLE (1<<14)
4200#define DVS_TILED (1<<10) 4331#define DVS_TILED (1<<10)
@@ -4265,6 +4396,7 @@ enum punit_power_well {
4265#define SPRITE_YUV_ORDER_UYVY (1<<16) 4396#define SPRITE_YUV_ORDER_UYVY (1<<16)
4266#define SPRITE_YUV_ORDER_YVYU (2<<16) 4397#define SPRITE_YUV_ORDER_YVYU (2<<16)
4267#define SPRITE_YUV_ORDER_VYUY (3<<16) 4398#define SPRITE_YUV_ORDER_VYUY (3<<16)
4399#define SPRITE_ROTATE_180 (1<<15)
4268#define SPRITE_TRICKLE_FEED_DISABLE (1<<14) 4400#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
4269#define SPRITE_INT_GAMMA_ENABLE (1<<13) 4401#define SPRITE_INT_GAMMA_ENABLE (1<<13)
4270#define SPRITE_TILED (1<<10) 4402#define SPRITE_TILED (1<<10)
@@ -4338,6 +4470,7 @@ enum punit_power_well {
4338#define SP_YUV_ORDER_UYVY (1<<16) 4470#define SP_YUV_ORDER_UYVY (1<<16)
4339#define SP_YUV_ORDER_YVYU (2<<16) 4471#define SP_YUV_ORDER_YVYU (2<<16)
4340#define SP_YUV_ORDER_VYUY (3<<16) 4472#define SP_YUV_ORDER_VYUY (3<<16)
4473#define SP_ROTATE_180 (1<<15)
4341#define SP_TILED (1<<10) 4474#define SP_TILED (1<<10)
4342#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) 4475#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
4343#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) 4476#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
@@ -5246,8 +5379,7 @@ enum punit_power_well {
5246#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) 5379#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
5247#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) 5380#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
5248#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) 5381#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
5249#define PANEL_PORT_SELECT_DPB_VLV (1 << 30) 5382#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
5250#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
5251#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) 5383#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
5252#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) 5384#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
5253 5385
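PANEL_PORT_SELECT_VLV(port) derives the old fixed encodings from the port number; assuming the usual enum port numbering elsewhere in the driver (PORT_A = 0, PORT_B = 1, PORT_C = 2), the two removed defines are recovered as:

	PANEL_PORT_SELECT_VLV(PORT_B)	/* (1 << 30), was PANEL_PORT_SELECT_DPB_VLV */
	PANEL_PORT_SELECT_VLV(PORT_C)	/* (2 << 30), was PANEL_PORT_SELECT_DPC_VLV */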
@@ -5407,7 +5539,6 @@ enum punit_power_well {
5407#define VLV_GTLC_ALLOWWAKEERR (1 << 1) 5539#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
5408#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) 5540#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
5409#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) 5541#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
5410#define VLV_GTLC_SURVIVABILITY_REG 0x130098
5411#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 5542#define FORCEWAKE_MT 0xa188 /* multi-threaded */
5412#define FORCEWAKE_KERNEL 0x1 5543#define FORCEWAKE_KERNEL 0x1
5413#define FORCEWAKE_USER 0x2 5544#define FORCEWAKE_USER 0x2
@@ -5545,12 +5676,6 @@ enum punit_power_well {
5545 GEN6_PM_RP_DOWN_THRESHOLD | \ 5676 GEN6_PM_RP_DOWN_THRESHOLD | \
5546 GEN6_PM_RP_DOWN_TIMEOUT) 5677 GEN6_PM_RP_DOWN_TIMEOUT)
5547 5678
5548#define CHV_CZ_CLOCK_FREQ_MODE_200 200
5549#define CHV_CZ_CLOCK_FREQ_MODE_267 267
5550#define CHV_CZ_CLOCK_FREQ_MODE_320 320
5551#define CHV_CZ_CLOCK_FREQ_MODE_333 333
5552#define CHV_CZ_CLOCK_FREQ_MODE_400 400
5553
5554#define GEN7_GT_SCRATCH_BASE 0x4F100 5679#define GEN7_GT_SCRATCH_BASE 0x4F100
5555#define GEN7_GT_SCRATCH_REG_NUM 8 5680#define GEN7_GT_SCRATCH_REG_NUM 8
5556 5681
@@ -5866,15 +5991,7 @@ enum punit_power_well {
5866#define DDI_BUF_CTL_B 0x64100 5991#define DDI_BUF_CTL_B 0x64100
5867#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) 5992#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
5868#define DDI_BUF_CTL_ENABLE (1<<31) 5993#define DDI_BUF_CTL_ENABLE (1<<31)
5869#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ 5994#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
5870#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
5871#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
5872#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
5873#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
5874#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
5875#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
5876#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
5877#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
5878#define DDI_BUF_EMP_MASK (0xf<<24) 5995#define DDI_BUF_EMP_MASK (0xf<<24)
5879#define DDI_BUF_PORT_REVERSAL (1<<16) 5996#define DDI_BUF_PORT_REVERSAL (1<<16)
5880#define DDI_BUF_IS_IDLE (1<<7) 5997#define DDI_BUF_IS_IDLE (1<<7)
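DDI_BUF_TRANS_SELECT(n) replaces the nine named DDI_BUF_EMP_* values with a plain index into the port's buffer translation table, so the same macro serves both the Haswell and Broadwell tables. A minimal illustration (not from the patch):

	/* was DDI_BUF_EMP_400MV_6DB_HSW, i.e. (2 << 24) */
	u32 val = DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(2);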
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ae7fd8fc27f0..503847f18fdd 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -540,7 +540,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
540 540
541 memset(&error_priv, 0, sizeof(error_priv)); 541 memset(&error_priv, 0, sizeof(error_priv));
542 542
543 ret = i915_error_state_buf_init(&error_str, count, off); 543 ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
544 if (ret) 544 if (ret)
545 return ret; 545 return ret;
546 546
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index afcc8dd40bdd..a4bd90f36a03 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -627,16 +627,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
627 627
628 switch (edp_link_params->preemphasis) { 628 switch (edp_link_params->preemphasis) {
629 case EDP_PREEMPHASIS_NONE: 629 case EDP_PREEMPHASIS_NONE:
630 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; 630 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
631 break; 631 break;
632 case EDP_PREEMPHASIS_3_5dB: 632 case EDP_PREEMPHASIS_3_5dB:
633 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; 633 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
634 break; 634 break;
635 case EDP_PREEMPHASIS_6dB: 635 case EDP_PREEMPHASIS_6dB:
636 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; 636 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
637 break; 637 break;
638 case EDP_PREEMPHASIS_9_5dB: 638 case EDP_PREEMPHASIS_9_5dB:
639 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; 639 dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
640 break; 640 break;
641 default: 641 default:
642 DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", 642 DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
@@ -646,16 +646,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
646 646
647 switch (edp_link_params->vswing) { 647 switch (edp_link_params->vswing) {
648 case EDP_VSWING_0_4V: 648 case EDP_VSWING_0_4V:
649 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; 649 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
650 break; 650 break;
651 case EDP_VSWING_0_6V: 651 case EDP_VSWING_0_6V:
652 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; 652 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
653 break; 653 break;
654 case EDP_VSWING_0_8V: 654 case EDP_VSWING_0_8V:
655 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; 655 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
656 break; 656 break;
657 case EDP_VSWING_1_2V: 657 case EDP_VSWING_1_2V:
658 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; 658 dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
659 break; 659 break;
660 default: 660 default:
661 DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", 661 DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
@@ -976,12 +976,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
976 if (bdb->version >= 158) { 976 if (bdb->version >= 158) {
977 /* The VBT HDMI level shift values match the table we have. */ 977 /* The VBT HDMI level shift values match the table we have. */
978 hdmi_level_shift = child->raw[7] & 0xF; 978 hdmi_level_shift = child->raw[7] & 0xF;
979 if (hdmi_level_shift < 0xC) { 979 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
980 DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n", 980 port_name(port),
981 port_name(port), 981 hdmi_level_shift);
982 hdmi_level_shift); 982 info->hdmi_level_shift = hdmi_level_shift;
983 info->hdmi_level_shift = hdmi_level_shift;
984 }
985 } 983 }
986} 984}
987 985
@@ -1114,8 +1112,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
1114 struct ddi_vbt_port_info *info = 1112 struct ddi_vbt_port_info *info =
1115 &dev_priv->vbt.ddi_port_info[port]; 1113 &dev_priv->vbt.ddi_port_info[port];
1116 1114
1117 /* Recommended BSpec default: 800mV 0dB. */ 1115 info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
1118 info->hdmi_level_shift = 6;
1119 1116
1120 info->supports_dvi = (port != PORT_A && port != PORT_E); 1117 info->supports_dvi = (port != PORT_A && port != PORT_E);
1121 info->supports_hdmi = info->supports_dvi; 1118 info->supports_hdmi = info->supports_dvi;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index b98667796337..905999bee2ac 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -802,7 +802,8 @@ struct mipi_config {
802 802
803 u16 rsvd4; 803 u16 rsvd4;
804 804
805 u8 rsvd5[5]; 805 u8 rsvd5;
806 u32 target_burst_mode_freq;
806 u32 dsi_ddr_clk; 807 u32 dsi_ddr_clk;
807 u32 bridge_ref_clk; 808 u32 bridge_ref_clk;
808 809
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5db0b5552e39..b63d4fa204a3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,87 +28,103 @@
28#include "i915_drv.h" 28#include "i915_drv.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31struct ddi_buf_trans {
32 u32 trans1; /* balance leg enable, de-emph level */
33 u32 trans2; /* vref sel, vswing */
34};
35
31/* HDMI/DVI modes ignore everything but the last 2 items. So we share 36/* HDMI/DVI modes ignore everything but the last 2 items. So we share
32 * them for both DP and FDI transports, allowing those ports to 37 * them for both DP and FDI transports, allowing those ports to
33 * automatically adapt to HDMI connections as well 38 * automatically adapt to HDMI connections as well
34 */ 39 */
35static const u32 hsw_ddi_translations_dp[] = { 40static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
36 0x00FFFFFF, 0x0006000E, /* DP parameters */ 41 { 0x00FFFFFF, 0x0006000E },
37 0x00D75FFF, 0x0005000A, 42 { 0x00D75FFF, 0x0005000A },
38 0x00C30FFF, 0x00040006, 43 { 0x00C30FFF, 0x00040006 },
39 0x80AAAFFF, 0x000B0000, 44 { 0x80AAAFFF, 0x000B0000 },
40 0x00FFFFFF, 0x0005000A, 45 { 0x00FFFFFF, 0x0005000A },
41 0x00D75FFF, 0x000C0004, 46 { 0x00D75FFF, 0x000C0004 },
42 0x80C30FFF, 0x000B0000, 47 { 0x80C30FFF, 0x000B0000 },
43 0x00FFFFFF, 0x00040006, 48 { 0x00FFFFFF, 0x00040006 },
44 0x80D75FFF, 0x000B0000, 49 { 0x80D75FFF, 0x000B0000 },
45}; 50};
46 51
47static const u32 hsw_ddi_translations_fdi[] = { 52static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
48 0x00FFFFFF, 0x0007000E, /* FDI parameters */ 53 { 0x00FFFFFF, 0x0007000E },
49 0x00D75FFF, 0x000F000A, 54 { 0x00D75FFF, 0x000F000A },
50 0x00C30FFF, 0x00060006, 55 { 0x00C30FFF, 0x00060006 },
51 0x00AAAFFF, 0x001E0000, 56 { 0x00AAAFFF, 0x001E0000 },
52 0x00FFFFFF, 0x000F000A, 57 { 0x00FFFFFF, 0x000F000A },
53 0x00D75FFF, 0x00160004, 58 { 0x00D75FFF, 0x00160004 },
54 0x00C30FFF, 0x001E0000, 59 { 0x00C30FFF, 0x001E0000 },
55 0x00FFFFFF, 0x00060006, 60 { 0x00FFFFFF, 0x00060006 },
56 0x00D75FFF, 0x001E0000, 61 { 0x00D75FFF, 0x001E0000 },
57}; 62};
58 63
59static const u32 hsw_ddi_translations_hdmi[] = { 64static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
60 /* Idx NT mV diff T mV diff db */ 65 /* Idx NT mV d T mV d db */
61 0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */ 66 { 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */
62 0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */ 67 { 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */
63 0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */ 68 { 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */
64 0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */ 69 { 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */
65 0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */ 70 { 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */
66 0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */ 71 { 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */
67 0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */ 72 { 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */
68 0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */ 73 { 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */
69 0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */ 74 { 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */
70 0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */ 75 { 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */
71 0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */ 76 { 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */
72 0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */ 77 { 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */
73}; 78};
74 79
75static const u32 bdw_ddi_translations_edp[] = { 80static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
76 0x00FFFFFF, 0x00000012, /* eDP parameters */ 81 { 0x00FFFFFF, 0x00000012 },
77 0x00EBAFFF, 0x00020011, 82 { 0x00EBAFFF, 0x00020011 },
78 0x00C71FFF, 0x0006000F, 83 { 0x00C71FFF, 0x0006000F },
79 0x00AAAFFF, 0x000E000A, 84 { 0x00AAAFFF, 0x000E000A },
80 0x00FFFFFF, 0x00020011, 85 { 0x00FFFFFF, 0x00020011 },
81 0x00DB6FFF, 0x0005000F, 86 { 0x00DB6FFF, 0x0005000F },
82 0x00BEEFFF, 0x000A000C, 87 { 0x00BEEFFF, 0x000A000C },
83 0x00FFFFFF, 0x0005000F, 88 { 0x00FFFFFF, 0x0005000F },
84 0x00DB6FFF, 0x000A000C, 89 { 0x00DB6FFF, 0x000A000C },
85 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
86}; 90};
87 91
88static const u32 bdw_ddi_translations_dp[] = { 92static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
89 0x00FFFFFF, 0x0007000E, /* DP parameters */ 93 { 0x00FFFFFF, 0x0007000E },
90 0x00D75FFF, 0x000E000A, 94 { 0x00D75FFF, 0x000E000A },
91 0x00BEFFFF, 0x00140006, 95 { 0x00BEFFFF, 0x00140006 },
92 0x80B2CFFF, 0x001B0002, 96 { 0x80B2CFFF, 0x001B0002 },
93 0x00FFFFFF, 0x000E000A, 97 { 0x00FFFFFF, 0x000E000A },
94 0x00D75FFF, 0x00180004, 98 { 0x00D75FFF, 0x00180004 },
95 0x80CB2FFF, 0x001B0002, 99 { 0x80CB2FFF, 0x001B0002 },
96 0x00F7DFFF, 0x00180004, 100 { 0x00F7DFFF, 0x00180004 },
97 0x80D75FFF, 0x001B0002, 101 { 0x80D75FFF, 0x001B0002 },
98 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
99}; 102};
100 103
101static const u32 bdw_ddi_translations_fdi[] = { 104static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
102 0x00FFFFFF, 0x0001000E, /* FDI parameters */ 105 { 0x00FFFFFF, 0x0001000E },
103 0x00D75FFF, 0x0004000A, 106 { 0x00D75FFF, 0x0004000A },
104 0x00C30FFF, 0x00070006, 107 { 0x00C30FFF, 0x00070006 },
105 0x00AAAFFF, 0x000C0000, 108 { 0x00AAAFFF, 0x000C0000 },
106 0x00FFFFFF, 0x0004000A, 109 { 0x00FFFFFF, 0x0004000A },
107 0x00D75FFF, 0x00090004, 110 { 0x00D75FFF, 0x00090004 },
108 0x00C30FFF, 0x000C0000, 111 { 0x00C30FFF, 0x000C0000 },
109 0x00FFFFFF, 0x00070006, 112 { 0x00FFFFFF, 0x00070006 },
110 0x00D75FFF, 0x000C0000, 113 { 0x00D75FFF, 0x000C0000 },
111 0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/ 114};
115
116static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
117 /* Idx NT mV d T mV df db */
118 { 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */
119 { 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */
120 { 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */
121 { 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */
122 { 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */
123 { 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */
124 { 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */
125 { 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */
126 { 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */
127 { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
112}; 128};
113 129
114enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) 130enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
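With the flat u32 arrays turned into arrays of struct ddi_buf_trans, the two-registers-per-voltage-level layout is explicit in the type. Roughly (illustration only; reg and level are placeholders, not taken from the patch), consuming one entry changes from index arithmetic to named members:

	int level = 0;				/* placeholder vswing/pre-emphasis level */
	u32 reg = DDI_BUF_TRANS(PORT_A);	/* placeholder port */
	const struct ddi_buf_trans *e = &hsw_ddi_translations_dp[level];

	/* was hsw_ddi_translations_dp[2 * level] and [2 * level + 1] */
	I915_WRITE(reg, e->trans1);
	I915_WRITE(reg + 4, e->trans2);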
@@ -145,26 +161,36 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
145{ 161{
146 struct drm_i915_private *dev_priv = dev->dev_private; 162 struct drm_i915_private *dev_priv = dev->dev_private;
147 u32 reg; 163 u32 reg;
148 int i; 164 int i, n_hdmi_entries, hdmi_800mV_0dB;
149 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; 165 int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
150 const u32 *ddi_translations_fdi; 166 const struct ddi_buf_trans *ddi_translations_fdi;
151 const u32 *ddi_translations_dp; 167 const struct ddi_buf_trans *ddi_translations_dp;
152 const u32 *ddi_translations_edp; 168 const struct ddi_buf_trans *ddi_translations_edp;
153 const u32 *ddi_translations; 169 const struct ddi_buf_trans *ddi_translations_hdmi;
170 const struct ddi_buf_trans *ddi_translations;
154 171
155 if (IS_BROADWELL(dev)) { 172 if (IS_BROADWELL(dev)) {
156 ddi_translations_fdi = bdw_ddi_translations_fdi; 173 ddi_translations_fdi = bdw_ddi_translations_fdi;
157 ddi_translations_dp = bdw_ddi_translations_dp; 174 ddi_translations_dp = bdw_ddi_translations_dp;
158 ddi_translations_edp = bdw_ddi_translations_edp; 175 ddi_translations_edp = bdw_ddi_translations_edp;
176 ddi_translations_hdmi = bdw_ddi_translations_hdmi;
177 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
178 hdmi_800mV_0dB = 7;
159 } else if (IS_HASWELL(dev)) { 179 } else if (IS_HASWELL(dev)) {
160 ddi_translations_fdi = hsw_ddi_translations_fdi; 180 ddi_translations_fdi = hsw_ddi_translations_fdi;
161 ddi_translations_dp = hsw_ddi_translations_dp; 181 ddi_translations_dp = hsw_ddi_translations_dp;
162 ddi_translations_edp = hsw_ddi_translations_dp; 182 ddi_translations_edp = hsw_ddi_translations_dp;
183 ddi_translations_hdmi = hsw_ddi_translations_hdmi;
184 n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
185 hdmi_800mV_0dB = 6;
163 } else { 186 } else {
164 WARN(1, "ddi translation table missing\n"); 187 WARN(1, "ddi translation table missing\n");
165 ddi_translations_edp = bdw_ddi_translations_dp; 188 ddi_translations_edp = bdw_ddi_translations_dp;
166 ddi_translations_fdi = bdw_ddi_translations_fdi; 189 ddi_translations_fdi = bdw_ddi_translations_fdi;
167 ddi_translations_dp = bdw_ddi_translations_dp; 190 ddi_translations_dp = bdw_ddi_translations_dp;
191 ddi_translations_hdmi = bdw_ddi_translations_hdmi;
192 n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
193 hdmi_800mV_0dB = 7;
168 } 194 }
169 195
170 switch (port) { 196 switch (port) {
@@ -190,14 +216,22 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
190 216
191 for (i = 0, reg = DDI_BUF_TRANS(port); 217 for (i = 0, reg = DDI_BUF_TRANS(port);
192 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { 218 i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
193 I915_WRITE(reg, ddi_translations[i]); 219 I915_WRITE(reg, ddi_translations[i].trans1);
194 reg += 4; 220 reg += 4;
195 } 221 I915_WRITE(reg, ddi_translations[i].trans2);
196 /* Entry 9 is for HDMI: */
197 for (i = 0; i < 2; i++) {
198 I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
199 reg += 4; 222 reg += 4;
200 } 223 }
224
225 /* Choose a good default if VBT is badly populated */
226 if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
227 hdmi_level >= n_hdmi_entries)
228 hdmi_level = hdmi_800mV_0dB;
229
230 /* Entry 9 is for HDMI: */
231 I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
232 reg += 4;
233 I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
234 reg += 4;
201} 235}
202 236
203/* Program DDI buffers translations for DP. By default, program ports A-D in DP 237/* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -214,18 +248,6 @@ void intel_prepare_ddi(struct drm_device *dev)
214 intel_prepare_ddi_buffers(dev, port); 248 intel_prepare_ddi_buffers(dev, port);
215} 249}
216 250
217static const long hsw_ddi_buf_ctl_values[] = {
218 DDI_BUF_EMP_400MV_0DB_HSW,
219 DDI_BUF_EMP_400MV_3_5DB_HSW,
220 DDI_BUF_EMP_400MV_6DB_HSW,
221 DDI_BUF_EMP_400MV_9_5DB_HSW,
222 DDI_BUF_EMP_600MV_0DB_HSW,
223 DDI_BUF_EMP_600MV_3_5DB_HSW,
224 DDI_BUF_EMP_600MV_6DB_HSW,
225 DDI_BUF_EMP_800MV_0DB_HSW,
226 DDI_BUF_EMP_800MV_3_5DB_HSW
227};
228
229static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, 251static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
230 enum port port) 252 enum port port)
231{ 253{
@@ -285,7 +307,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
285 307
286 /* Start the training iterating through available voltages and emphasis, 308 /* Start the training iterating through available voltages and emphasis,
287 * testing each value twice. */ 309 * testing each value twice. */
288 for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { 310 for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
289 /* Configure DP_TP_CTL with auto-training */ 311 /* Configure DP_TP_CTL with auto-training */
290 I915_WRITE(DP_TP_CTL(PORT_E), 312 I915_WRITE(DP_TP_CTL(PORT_E),
291 DP_TP_CTL_FDI_AUTOTRAIN | 313 DP_TP_CTL_FDI_AUTOTRAIN |
@@ -300,7 +322,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
300 I915_WRITE(DDI_BUF_CTL(PORT_E), 322 I915_WRITE(DDI_BUF_CTL(PORT_E),
301 DDI_BUF_CTL_ENABLE | 323 DDI_BUF_CTL_ENABLE |
302 ((intel_crtc->config.fdi_lanes - 1) << 1) | 324 ((intel_crtc->config.fdi_lanes - 1) << 1) |
303 hsw_ddi_buf_ctl_values[i / 2]); 325 DDI_BUF_TRANS_SELECT(i / 2));
304 POSTING_READ(DDI_BUF_CTL(PORT_E)); 326 POSTING_READ(DDI_BUF_CTL(PORT_E));
305 327
306 udelay(600); 328 udelay(600);
@@ -375,7 +397,7 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
375 enc_to_dig_port(&encoder->base); 397 enc_to_dig_port(&encoder->base);
376 398
377 intel_dp->DP = intel_dig_port->saved_port_bits | 399 intel_dp->DP = intel_dig_port->saved_port_bits |
378 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 400 DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
379 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); 401 intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
380 402
381} 403}
@@ -402,7 +424,7 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
402} 424}
403 425
404#define LC_FREQ 2700 426#define LC_FREQ 2700
405#define LC_FREQ_2K (LC_FREQ * 2000) 427#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
406 428
407#define P_MIN 2 429#define P_MIN 2
408#define P_MAX 64 430#define P_MAX 64
@@ -414,7 +436,11 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
414#define VCO_MIN 2400 436#define VCO_MIN 2400
415#define VCO_MAX 4800 437#define VCO_MAX 4800
416 438
417#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a)) 439#define abs_diff(a, b) ({ \
440 typeof(a) __a = (a); \
441 typeof(b) __b = (b); \
442 (void) (&__a == &__b); \
443 __a > __b ? (__a - __b) : (__b - __a); })
418 444
419struct wrpll_rnp { 445struct wrpll_rnp {
420 unsigned p, n2, r2; 446 unsigned p, n2, r2;
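Replacing ABS_DIFF() with abs_diff() is more than a rename: the statement-expression evaluates each argument exactly once, and the dummy pointer comparison makes the compiler warn when the two arguments have different types. A small usage sketch (illustrative only, reusing the wrpll_update_rnp() locals from the hunk below):

	/* each product is computed once, even though the macro uses __a/__b twice */
	u64 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);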
@@ -524,9 +550,9 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
524 */ 550 */
525 a = freq2k * budget * p * r2; 551 a = freq2k * budget * p * r2;
526 b = freq2k * budget * best->p * best->r2; 552 b = freq2k * budget * best->p * best->r2;
527 diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2)); 553 diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
528 diff_best = ABS_DIFF((freq2k * best->p * best->r2), 554 diff_best = abs_diff(freq2k * best->p * best->r2,
529 (LC_FREQ_2K * best->n2)); 555 LC_FREQ_2K * best->n2);
530 c = 1000000 * diff; 556 c = 1000000 * diff;
531 d = 1000000 * diff_best; 557 d = 1000000 * diff_best;
532 558
@@ -587,8 +613,8 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
587 return (refclk * n * 100) / (p * r); 613 return (refclk * n * 100) / (p * r);
588} 614}
589 615
590void intel_ddi_clock_get(struct intel_encoder *encoder, 616static void hsw_ddi_clock_get(struct intel_encoder *encoder,
591 struct intel_crtc_config *pipe_config) 617 struct intel_crtc_config *pipe_config)
592{ 618{
593 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 619 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
594 int link_clock = 0; 620 int link_clock = 0;
@@ -643,9 +669,15 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
643 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock; 669 pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
644} 670}
645 671
672void intel_ddi_clock_get(struct intel_encoder *encoder,
673 struct intel_crtc_config *pipe_config)
674{
675 hsw_ddi_clock_get(encoder, pipe_config);
676}
677
646static void 678static void
647intel_ddi_calculate_wrpll(int clock /* in Hz */, 679hsw_ddi_calculate_wrpll(int clock /* in Hz */,
648 unsigned *r2_out, unsigned *n2_out, unsigned *p_out) 680 unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
649{ 681{
650 uint64_t freq2k; 682 uint64_t freq2k;
651 unsigned p, n2, r2; 683 unsigned p, n2, r2;
@@ -708,27 +740,17 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */,
708 *r2_out = best.r2; 740 *r2_out = best.r2;
709} 741}
710 742
711/* 743static bool
712 * Tries to find a PLL for the CRTC. If it finds, it increases the refcount and 744hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
713 * stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to 745 struct intel_encoder *intel_encoder,
714 * steal the selected PLL. You need to call intel_ddi_pll_enable to actually 746 int clock)
715 * enable the PLL.
716 */
717bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
718{ 747{
719 struct drm_crtc *crtc = &intel_crtc->base; 748 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
720 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
721 int type = intel_encoder->type;
722 int clock = intel_crtc->config.port_clock;
723
724 intel_put_shared_dpll(intel_crtc);
725
726 if (type == INTEL_OUTPUT_HDMI) {
727 struct intel_shared_dpll *pll; 749 struct intel_shared_dpll *pll;
728 uint32_t val; 750 uint32_t val;
729 unsigned p, n2, r2; 751 unsigned p, n2, r2;
730 752
731 intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); 753 hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
732 754
733 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | 755 val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
734 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | 756 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -749,6 +771,25 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
749 return true; 771 return true;
750} 772}
751 773
774
775/*
776 * Tries to find a *shared* PLL for the CRTC and store it in
777 * intel_crtc->ddi_pll_sel.
778 *
779 * For private DPLLs, compute_config() should do the selection for us. This
780 * function should be folded into compute_config() eventually.
781 */
782bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
783{
784 struct drm_crtc *crtc = &intel_crtc->base;
785 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
786 int clock = intel_crtc->config.port_clock;
787
788 intel_put_shared_dpll(intel_crtc);
789
790 return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
791}
792
752void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) 793void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
753{ 794{
754 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 795 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -1183,31 +1224,52 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1183 } 1224 }
1184} 1225}
1185 1226
1186int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1227static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
1228{
1229 uint32_t lcpll = I915_READ(LCPLL_CTL);
1230 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
1231
1232 if (lcpll & LCPLL_CD_SOURCE_FCLK)
1233 return 800000;
1234 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
1235 return 450000;
1236 else if (freq == LCPLL_CLK_FREQ_450)
1237 return 450000;
1238 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
1239 return 540000;
1240 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
1241 return 337500;
1242 else
1243 return 675000;
1244}
1245
1246static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
1187{ 1247{
1188 struct drm_device *dev = dev_priv->dev; 1248 struct drm_device *dev = dev_priv->dev;
1189 uint32_t lcpll = I915_READ(LCPLL_CTL); 1249 uint32_t lcpll = I915_READ(LCPLL_CTL);
1190 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 1250 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
1191 1251
1192 if (lcpll & LCPLL_CD_SOURCE_FCLK) { 1252 if (lcpll & LCPLL_CD_SOURCE_FCLK)
1193 return 800000; 1253 return 800000;
1194 } else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) { 1254 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
1195 return 450000; 1255 return 450000;
1196 } else if (freq == LCPLL_CLK_FREQ_450) { 1256 else if (freq == LCPLL_CLK_FREQ_450)
1197 return 450000; 1257 return 450000;
1198 } else if (IS_HASWELL(dev)) { 1258 else if (IS_ULT(dev))
1199 if (IS_ULT(dev)) 1259 return 337500;
1200 return 337500; 1260 else
1201 else 1261 return 540000;
1202 return 540000; 1262}
1203 } else { 1263
1204 if (freq == LCPLL_CLK_FREQ_54O_BDW) 1264int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1205 return 540000; 1265{
1206 else if (freq == LCPLL_CLK_FREQ_337_5_BDW) 1266 struct drm_device *dev = dev_priv->dev;
1207 return 337500; 1267
1208 else 1268 if (IS_BROADWELL(dev))
1209 return 675000; 1269 return bdw_get_cdclk_freq(dev_priv);
1210 } 1270
1271 /* Haswell */
1272 return hsw_get_cdclk_freq(dev_priv);
1211} 1273}
1212 1274
1213static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, 1275static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1248,10 +1310,8 @@ static const char * const hsw_ddi_pll_names[] = {
1248 "WRPLL 2", 1310 "WRPLL 2",
1249}; 1311};
1250 1312
1251void intel_ddi_pll_init(struct drm_device *dev) 1313static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
1252{ 1314{
1253 struct drm_i915_private *dev_priv = dev->dev_private;
1254 uint32_t val = I915_READ(LCPLL_CTL);
1255 int i; 1315 int i;
1256 1316
1257 dev_priv->num_shared_dpll = 2; 1317 dev_priv->num_shared_dpll = 2;
@@ -1264,6 +1324,14 @@ void intel_ddi_pll_init(struct drm_device *dev)
1264 dev_priv->shared_dplls[i].get_hw_state = 1324 dev_priv->shared_dplls[i].get_hw_state =
1265 hsw_ddi_pll_get_hw_state; 1325 hsw_ddi_pll_get_hw_state;
1266 } 1326 }
1327}
1328
1329void intel_ddi_pll_init(struct drm_device *dev)
1330{
1331 struct drm_i915_private *dev_priv = dev->dev_private;
1332 uint32_t val = I915_READ(LCPLL_CTL);
1333
1334 hsw_shared_dplls_init(dev_priv);
1267 1335
1268 /* The LCPLL register should be turned on by the BIOS. For now let's 1336 /* The LCPLL register should be turned on by the BIOS. For now let's
1269 * just check its state and print errors in case something is wrong. 1337 * just check its state and print errors in case something is wrong.
@@ -1444,7 +1512,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1444 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; 1512 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1445 } 1513 }
1446 1514
1447 intel_ddi_clock_get(encoder, pipe_config); 1515 hsw_ddi_clock_get(encoder, pipe_config);
1448} 1516}
1449 1517
1450static void intel_ddi_destroy(struct drm_encoder *encoder) 1518static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d8324c69fa86..507370513f3d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -91,15 +91,16 @@ static int intel_framebuffer_init(struct drm_device *dev,
91 struct intel_framebuffer *ifb, 91 struct intel_framebuffer *ifb,
92 struct drm_mode_fb_cmd2 *mode_cmd, 92 struct drm_mode_fb_cmd2 *mode_cmd,
93 struct drm_i915_gem_object *obj); 93 struct drm_i915_gem_object *obj);
94static void intel_dp_set_m_n(struct intel_crtc *crtc);
95static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 94static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
96static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 95static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
97static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 96static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
98 struct intel_link_m_n *m_n); 97 struct intel_link_m_n *m_n,
98 struct intel_link_m_n *m2_n2);
99static void ironlake_set_pipeconf(struct drm_crtc *crtc); 99static void ironlake_set_pipeconf(struct drm_crtc *crtc);
100static void haswell_set_pipeconf(struct drm_crtc *crtc); 100static void haswell_set_pipeconf(struct drm_crtc *crtc);
101static void intel_set_pipe_csc(struct drm_crtc *crtc); 101static void intel_set_pipe_csc(struct drm_crtc *crtc);
102static void vlv_prepare_pll(struct intel_crtc *crtc); 102static void vlv_prepare_pll(struct intel_crtc *crtc);
103static void chv_prepare_pll(struct intel_crtc *crtc);
103 104
104static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) 105static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
105{ 106{
@@ -899,7 +900,8 @@ static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
899 frame = I915_READ(frame_reg); 900 frame = I915_READ(frame_reg);
900 901
901 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50)) 902 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
902 WARN(1, "vblank wait timed out\n"); 903 WARN(1, "vblank wait on pipe %c timed out\n",
904 pipe_name(pipe));
903} 905}
904 906
905/** 907/**
@@ -940,7 +942,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
940 if (wait_for(I915_READ(pipestat_reg) & 942 if (wait_for(I915_READ(pipestat_reg) &
941 PIPE_VBLANK_INTERRUPT_STATUS, 943 PIPE_VBLANK_INTERRUPT_STATUS,
942 50)) 944 50))
943 DRM_DEBUG_KMS("vblank wait timed out\n"); 945 DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
946 pipe_name(pipe));
944} 947}
945 948
946static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 949static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
@@ -964,8 +967,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
964 967
965/* 968/*
966 * intel_wait_for_pipe_off - wait for pipe to turn off 969 * intel_wait_for_pipe_off - wait for pipe to turn off
967 * @dev: drm device 970 * @crtc: crtc whose pipe to wait for
968 * @pipe: pipe to wait for
969 * 971 *
970 * After disabling a pipe, we can't wait for vblank in the usual way, 972 * After disabling a pipe, we can't wait for vblank in the usual way,
971 * spinning on the vblank interrupt status bit, since we won't actually 973 * spinning on the vblank interrupt status bit, since we won't actually
@@ -979,11 +981,12 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
979 * ends up stopping at the start of the next frame). 981 * ends up stopping at the start of the next frame).
980 * 982 *
981 */ 983 */
982void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 984static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
983{ 985{
986 struct drm_device *dev = crtc->base.dev;
984 struct drm_i915_private *dev_priv = dev->dev_private; 987 struct drm_i915_private *dev_priv = dev->dev_private;
985 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 988 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
986 pipe); 989 enum pipe pipe = crtc->pipe;
987 990
988 if (INTEL_INFO(dev)->gen >= 4) { 991 if (INTEL_INFO(dev)->gen >= 4) {
989 int reg = PIPECONF(cpu_transcoder); 992 int reg = PIPECONF(cpu_transcoder);
@@ -1192,27 +1195,40 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1192static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1195static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1193 enum pipe pipe) 1196 enum pipe pipe)
1194{ 1197{
1195 int pp_reg, lvds_reg; 1198 struct drm_device *dev = dev_priv->dev;
1199 int pp_reg;
1196 u32 val; 1200 u32 val;
1197 enum pipe panel_pipe = PIPE_A; 1201 enum pipe panel_pipe = PIPE_A;
1198 bool locked = true; 1202 bool locked = true;
1199 1203
1200 if (HAS_PCH_SPLIT(dev_priv->dev)) { 1204 if (WARN_ON(HAS_DDI(dev)))
1205 return;
1206
1207 if (HAS_PCH_SPLIT(dev)) {
1208 u32 port_sel;
1209
1201 pp_reg = PCH_PP_CONTROL; 1210 pp_reg = PCH_PP_CONTROL;
1202 lvds_reg = PCH_LVDS; 1211 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1212
1213 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1214 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1215 panel_pipe = PIPE_B;
1216 /* XXX: else fix for eDP */
1217 } else if (IS_VALLEYVIEW(dev)) {
1218 /* presumably write lock depends on pipe, not port select */
1219 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1220 panel_pipe = pipe;
1203 } else { 1221 } else {
1204 pp_reg = PP_CONTROL; 1222 pp_reg = PP_CONTROL;
1205 lvds_reg = LVDS; 1223 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1224 panel_pipe = PIPE_B;
1206 } 1225 }
1207 1226
1208 val = I915_READ(pp_reg); 1227 val = I915_READ(pp_reg);
1209 if (!(val & PANEL_POWER_ON) || 1228 if (!(val & PANEL_POWER_ON) ||
1210 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) 1229 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1211 locked = false; 1230 locked = false;
1212 1231
1213 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1214 panel_pipe = PIPE_B;
1215
1216 WARN(panel_pipe == pipe && locked, 1232 WARN(panel_pipe == pipe && locked,
1217 "panel assertion failure, pipe %c regs locked\n", 1233 "panel assertion failure, pipe %c regs locked\n",
1218 pipe_name(pipe)); 1234 pipe_name(pipe));
@@ -1245,8 +1261,9 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1245 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1261 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1246 pipe); 1262 pipe);
1247 1263
1248 /* if we need the pipe A quirk it must be always on */ 1264 /* if we need the pipe quirk it must be always on */
1249 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1265 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1266 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1250 state = true; 1267 state = true;
1251 1268
1252 if (!intel_display_power_enabled(dev_priv, 1269 if (!intel_display_power_enabled(dev_priv,
@@ -1300,7 +1317,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1300 } 1317 }
1301 1318
1302 /* Need to check both planes against the pipe */ 1319 /* Need to check both planes against the pipe */
1303 for_each_pipe(i) { 1320 for_each_pipe(dev_priv, i) {
1304 reg = DSPCNTR(i); 1321 reg = DSPCNTR(i);
1305 val = I915_READ(reg); 1322 val = I915_READ(reg);
1306 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1323 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1341,6 +1358,12 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1341 } 1358 }
1342} 1359}
1343 1360
1361static void assert_vblank_disabled(struct drm_crtc *crtc)
1362{
1363 if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1364 drm_crtc_vblank_put(crtc);
1365}
1366
1344static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1367static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1345{ 1368{
1346 u32 val; 1369 u32 val;
@@ -1513,34 +1536,6 @@ static void intel_init_dpio(struct drm_device *dev)
1513 } 1536 }
1514} 1537}
1515 1538
1516static void intel_reset_dpio(struct drm_device *dev)
1517{
1518 struct drm_i915_private *dev_priv = dev->dev_private;
1519
1520 if (IS_CHERRYVIEW(dev)) {
1521 enum dpio_phy phy;
1522 u32 val;
1523
1524 for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
1525 /* Poll for phypwrgood signal */
1526 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
1527 PHY_POWERGOOD(phy), 1))
1528 DRM_ERROR("Display PHY %d is not power up\n", phy);
1529
1530 /*
1531 * Deassert common lane reset for PHY.
1532 *
1533 * This should only be done on init and resume from S3
1534 * with both PLLs disabled, or we risk losing DPIO and
1535 * PLL synchronization.
1536 */
1537 val = I915_READ(DISPLAY_PHY_CONTROL);
1538 I915_WRITE(DISPLAY_PHY_CONTROL,
1539 PHY_COM_LANE_RESET_DEASSERT(phy, val));
1540 }
1541 }
1542}
1543
1544static void vlv_enable_pll(struct intel_crtc *crtc) 1539static void vlv_enable_pll(struct intel_crtc *crtc)
1545{ 1540{
1546 struct drm_device *dev = crtc->base.dev; 1541 struct drm_device *dev = crtc->base.dev;
@@ -1554,7 +1549,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
1554 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); 1549 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1555 1550
1556 /* PLL is protected by panel, make sure we can write it */ 1551 /* PLL is protected by panel, make sure we can write it */
1557 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) 1552 if (IS_MOBILE(dev_priv->dev))
1558 assert_panel_unlocked(dev_priv, crtc->pipe); 1553 assert_panel_unlocked(dev_priv, crtc->pipe);
1559 1554
1560 I915_WRITE(reg, dpll); 1555 I915_WRITE(reg, dpll);
@@ -1617,6 +1612,18 @@ static void chv_enable_pll(struct intel_crtc *crtc)
1617 mutex_unlock(&dev_priv->dpio_lock); 1612 mutex_unlock(&dev_priv->dpio_lock);
1618} 1613}
1619 1614
1615static int intel_num_dvo_pipes(struct drm_device *dev)
1616{
1617 struct intel_crtc *crtc;
1618 int count = 0;
1619
1620 for_each_intel_crtc(dev, crtc)
1621 count += crtc->active &&
1622 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO);
1623
1624 return count;
1625}
1626
1620static void i9xx_enable_pll(struct intel_crtc *crtc) 1627static void i9xx_enable_pll(struct intel_crtc *crtc)
1621{ 1628{
1622 struct drm_device *dev = crtc->base.dev; 1629 struct drm_device *dev = crtc->base.dev;
@@ -1633,7 +1640,18 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1633 if (IS_MOBILE(dev) && !IS_I830(dev)) 1640 if (IS_MOBILE(dev) && !IS_I830(dev))
1634 assert_panel_unlocked(dev_priv, crtc->pipe); 1641 assert_panel_unlocked(dev_priv, crtc->pipe);
1635 1642
1636 I915_WRITE(reg, dpll); 1643 /* Enable DVO 2x clock on both PLLs if necessary */
1644 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1645 /*
1646 * It appears to be important that we don't enable this
1647 * for the current pipe before otherwise configuring the
1648 * PLL. No idea how this should be handled if multiple
 1649 * DVO outputs are enabled simultaneously.
1650 */
1651 dpll |= DPLL_DVO_2X_MODE;
1652 I915_WRITE(DPLL(!crtc->pipe),
1653 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1654 }
1637 1655
1638 /* Wait for the clocks to stabilize. */ 1656 /* Wait for the clocks to stabilize. */
1639 POSTING_READ(reg); 1657 POSTING_READ(reg);
@@ -1672,10 +1690,25 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1672 * 1690 *
1673 * Note! This is for pre-ILK only. 1691 * Note! This is for pre-ILK only.
1674 */ 1692 */
1675static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) 1693static void i9xx_disable_pll(struct intel_crtc *crtc)
1676{ 1694{
1677 /* Don't disable pipe A or pipe A PLLs if needed */ 1695 struct drm_device *dev = crtc->base.dev;
1678 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1696 struct drm_i915_private *dev_priv = dev->dev_private;
1697 enum pipe pipe = crtc->pipe;
1698
1699 /* Disable DVO 2x clock on both PLLs if necessary */
1700 if (IS_I830(dev) &&
1701 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) &&
1702 intel_num_dvo_pipes(dev) == 1) {
1703 I915_WRITE(DPLL(PIPE_B),
1704 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1705 I915_WRITE(DPLL(PIPE_A),
1706 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1707 }
1708
1709 /* Don't disable pipe or pipe PLLs if needed */
1710 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1711 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1679 return; 1712 return;
1680 1713
1681 /* Make sure the pipe isn't still relying on us */ 1714 /* Make sure the pipe isn't still relying on us */
@@ -1712,7 +1745,7 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1712 assert_pipe_disabled(dev_priv, pipe); 1745 assert_pipe_disabled(dev_priv, pipe);
1713 1746
1714 /* Set PLL en = 0 */ 1747 /* Set PLL en = 0 */
1715 val = DPLL_SSC_REF_CLOCK_CHV; 1748 val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
1716 if (pipe != PIPE_A) 1749 if (pipe != PIPE_A)
1717 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1750 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1718 I915_WRITE(DPLL(pipe), val); 1751 I915_WRITE(DPLL(pipe), val);
@@ -1806,7 +1839,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1806 if (WARN_ON(pll->refcount == 0)) 1839 if (WARN_ON(pll->refcount == 0))
1807 return; 1840 return;
1808 1841
1809 DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n", 1842 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1810 pll->name, pll->active, pll->on, 1843 pll->name, pll->active, pll->on,
1811 crtc->base.base.id); 1844 crtc->base.base.id);
1812 1845
@@ -1824,7 +1857,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1824 pll->on = true; 1857 pll->on = true;
1825} 1858}
1826 1859
1827void intel_disable_shared_dpll(struct intel_crtc *crtc) 1860static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1828{ 1861{
1829 struct drm_device *dev = crtc->base.dev; 1862 struct drm_device *dev = crtc->base.dev;
1830 struct drm_i915_private *dev_priv = dev->dev_private; 1863 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1868,7 +1901,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1868 uint32_t reg, val, pipeconf_val; 1901 uint32_t reg, val, pipeconf_val;
1869 1902
1870 /* PCH only available on ILK+ */ 1903 /* PCH only available on ILK+ */
1871 BUG_ON(INTEL_INFO(dev)->gen < 5); 1904 BUG_ON(!HAS_PCH_SPLIT(dev));
1872 1905
1873 /* Make sure PCH DPLL is enabled */ 1906 /* Make sure PCH DPLL is enabled */
1874 assert_shared_dpll_enabled(dev_priv, 1907 assert_shared_dpll_enabled(dev_priv,
@@ -1921,7 +1954,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1921 u32 val, pipeconf_val; 1954 u32 val, pipeconf_val;
1922 1955
1923 /* PCH only available on ILK+ */ 1956 /* PCH only available on ILK+ */
1924 BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5); 1957 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
1925 1958
1926 /* FDI must be feeding us bits for PCH ports */ 1959 /* FDI must be feeding us bits for PCH ports */
1927 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); 1960 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
@@ -2043,8 +2076,8 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2043 reg = PIPECONF(cpu_transcoder); 2076 reg = PIPECONF(cpu_transcoder);
2044 val = I915_READ(reg); 2077 val = I915_READ(reg);
2045 if (val & PIPECONF_ENABLE) { 2078 if (val & PIPECONF_ENABLE) {
2046 WARN_ON(!(pipe == PIPE_A && 2079 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2047 dev_priv->quirks & QUIRK_PIPEA_FORCE)); 2080 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2048 return; 2081 return;
2049 } 2082 }
2050 2083
@@ -2054,21 +2087,19 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2054 2087
2055/** 2088/**
2056 * intel_disable_pipe - disable a pipe, asserting requirements 2089 * intel_disable_pipe - disable a pipe, asserting requirements
 2057 * @dev_priv: i915 private structure 2090 * @crtc: crtc whose pipe is to be disabled
2058 * @pipe: pipe to disable
2059 * 2091 *
2060 * Disable @pipe, making sure that various hardware specific requirements 2092 * Disable the pipe of @crtc, making sure that various hardware
2061 * are met, if applicable, e.g. plane disabled, panel fitter off, etc. 2093 * specific requirements are met, if applicable, e.g. plane
2062 * 2094 * disabled, panel fitter off, etc.
2063 * @pipe should be %PIPE_A or %PIPE_B.
2064 * 2095 *
2065 * Will wait until the pipe has shut down before returning. 2096 * Will wait until the pipe has shut down before returning.
2066 */ 2097 */
2067static void intel_disable_pipe(struct drm_i915_private *dev_priv, 2098static void intel_disable_pipe(struct intel_crtc *crtc)
2068 enum pipe pipe)
2069{ 2099{
2070 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2100 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2071 pipe); 2101 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
2102 enum pipe pipe = crtc->pipe;
2072 int reg; 2103 int reg;
2073 u32 val; 2104 u32 val;
2074 2105
@@ -2080,17 +2111,26 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
2080 assert_cursor_disabled(dev_priv, pipe); 2111 assert_cursor_disabled(dev_priv, pipe);
2081 assert_sprites_disabled(dev_priv, pipe); 2112 assert_sprites_disabled(dev_priv, pipe);
2082 2113
2083 /* Don't disable pipe A or pipe A PLLs if needed */
2084 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
2085 return;
2086
2087 reg = PIPECONF(cpu_transcoder); 2114 reg = PIPECONF(cpu_transcoder);
2088 val = I915_READ(reg); 2115 val = I915_READ(reg);
2089 if ((val & PIPECONF_ENABLE) == 0) 2116 if ((val & PIPECONF_ENABLE) == 0)
2090 return; 2117 return;
2091 2118
2092 I915_WRITE(reg, val & ~PIPECONF_ENABLE); 2119 /*
2093 intel_wait_for_pipe_off(dev_priv->dev, pipe); 2120 * Double wide has implications for planes
2121 * so best keep it disabled when not needed.
2122 */
2123 if (crtc->config.double_wide)
2124 val &= ~PIPECONF_DOUBLE_WIDE;
2125
2126 /* Don't disable pipe or pipe PLLs if needed */
2127 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2128 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2129 val &= ~PIPECONF_ENABLE;
2130
2131 I915_WRITE(reg, val);
2132 if ((val & PIPECONF_ENABLE) == 0)
2133 intel_wait_for_pipe_off(crtc);
2094} 2134}
2095 2135
2096/* 2136/*
@@ -2109,35 +2149,28 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2109 2149
2110/** 2150/**
2111 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe 2151 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
2112 * @dev_priv: i915 private structure 2152 * @plane: plane to be enabled
2113 * @plane: plane to enable 2153 * @crtc: crtc for the plane
2114 * @pipe: pipe being fed
2115 * 2154 *
2116 * Enable @plane on @pipe, making sure that @pipe is running first. 2155 * Enable @plane on @crtc, making sure that the pipe is running first.
2117 */ 2156 */
2118static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, 2157static void intel_enable_primary_hw_plane(struct drm_plane *plane,
2119 enum plane plane, enum pipe pipe) 2158 struct drm_crtc *crtc)
2120{ 2159{
2121 struct drm_device *dev = dev_priv->dev; 2160 struct drm_device *dev = plane->dev;
2122 struct intel_crtc *intel_crtc = 2161 struct drm_i915_private *dev_priv = dev->dev_private;
2123 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 2162 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2124 int reg;
2125 u32 val;
2126 2163
2127 /* If the pipe isn't enabled, we can't pump pixels and may hang */ 2164 /* If the pipe isn't enabled, we can't pump pixels and may hang */
2128 assert_pipe_enabled(dev_priv, pipe); 2165 assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2129 2166
2130 if (intel_crtc->primary_enabled) 2167 if (intel_crtc->primary_enabled)
2131 return; 2168 return;
2132 2169
2133 intel_crtc->primary_enabled = true; 2170 intel_crtc->primary_enabled = true;
2134 2171
2135 reg = DSPCNTR(plane); 2172 dev_priv->display.update_primary_plane(crtc, plane->fb,
2136 val = I915_READ(reg); 2173 crtc->x, crtc->y);
2137 WARN_ON(val & DISPLAY_PLANE_ENABLE);
2138
2139 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
2140 intel_flush_primary_plane(dev_priv, plane);
2141 2174
2142 /* 2175 /*
2143 * BDW signals flip done immediately if the plane 2176 * BDW signals flip done immediately if the plane
@@ -2150,31 +2183,27 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2150 2183
2151/** 2184/**
2152 * intel_disable_primary_hw_plane - disable the primary hardware plane 2185 * intel_disable_primary_hw_plane - disable the primary hardware plane
2153 * @dev_priv: i915 private structure 2186 * @plane: plane to be disabled
2154 * @plane: plane to disable 2187 * @crtc: crtc for the plane
2155 * @pipe: pipe consuming the data
2156 * 2188 *
2157 * Disable @plane; should be an independent operation. 2189 * Disable @plane on @crtc, making sure that the pipe is running first.
2158 */ 2190 */
2159static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv, 2191static void intel_disable_primary_hw_plane(struct drm_plane *plane,
2160 enum plane plane, enum pipe pipe) 2192 struct drm_crtc *crtc)
2161{ 2193{
2162 struct intel_crtc *intel_crtc = 2194 struct drm_device *dev = plane->dev;
2163 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 2195 struct drm_i915_private *dev_priv = dev->dev_private;
2164 int reg; 2196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2165 u32 val; 2197
2198 assert_pipe_enabled(dev_priv, intel_crtc->pipe);
2166 2199
2167 if (!intel_crtc->primary_enabled) 2200 if (!intel_crtc->primary_enabled)
2168 return; 2201 return;
2169 2202
2170 intel_crtc->primary_enabled = false; 2203 intel_crtc->primary_enabled = false;
2171 2204
2172 reg = DSPCNTR(plane); 2205 dev_priv->display.update_primary_plane(crtc, plane->fb,
2173 val = I915_READ(reg); 2206 crtc->x, crtc->y);
2174 WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
2175
2176 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
2177 intel_flush_primary_plane(dev_priv, plane);
2178} 2207}
2179 2208
2180static bool need_vtd_wa(struct drm_device *dev) 2209static bool need_vtd_wa(struct drm_device *dev)
@@ -2422,16 +2451,46 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2422 struct drm_device *dev = crtc->dev; 2451 struct drm_device *dev = crtc->dev;
2423 struct drm_i915_private *dev_priv = dev->dev_private; 2452 struct drm_i915_private *dev_priv = dev->dev_private;
2424 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2453 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2425 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2454 struct drm_i915_gem_object *obj;
2426 int plane = intel_crtc->plane; 2455 int plane = intel_crtc->plane;
2427 unsigned long linear_offset; 2456 unsigned long linear_offset;
2428 u32 dspcntr; 2457 u32 dspcntr;
2429 u32 reg; 2458 u32 reg = DSPCNTR(plane);
2459 int pixel_size;
2460
2461 if (!intel_crtc->primary_enabled) {
2462 I915_WRITE(reg, 0);
2463 if (INTEL_INFO(dev)->gen >= 4)
2464 I915_WRITE(DSPSURF(plane), 0);
2465 else
2466 I915_WRITE(DSPADDR(plane), 0);
2467 POSTING_READ(reg);
2468 return;
2469 }
2470
2471 obj = intel_fb_obj(fb);
2472 if (WARN_ON(obj == NULL))
2473 return;
2474
2475 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2476
2477 dspcntr = DISPPLANE_GAMMA_ENABLE;
2478
2479 dspcntr |= DISPLAY_PLANE_ENABLE;
2480
2481 if (INTEL_INFO(dev)->gen < 4) {
2482 if (intel_crtc->pipe == PIPE_B)
2483 dspcntr |= DISPPLANE_SEL_PIPE_B;
2484
2485 /* pipesrc and dspsize control the size that is scaled from,
2486 * which should always be the user's requested size.
2487 */
2488 I915_WRITE(DSPSIZE(plane),
2489 ((intel_crtc->config.pipe_src_h - 1) << 16) |
2490 (intel_crtc->config.pipe_src_w - 1));
2491 I915_WRITE(DSPPOS(plane), 0);
2492 }
2430 2493
2431 reg = DSPCNTR(plane);
2432 dspcntr = I915_READ(reg);
2433 /* Mask out pixel format bits in case we change it */
2434 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2435 switch (fb->pixel_format) { 2494 switch (fb->pixel_format) {
2436 case DRM_FORMAT_C8: 2495 case DRM_FORMAT_C8:
2437 dspcntr |= DISPPLANE_8BPP; 2496 dspcntr |= DISPPLANE_8BPP;
@@ -2463,30 +2522,40 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2463 BUG(); 2522 BUG();
2464 } 2523 }
2465 2524
2466 if (INTEL_INFO(dev)->gen >= 4) { 2525 if (INTEL_INFO(dev)->gen >= 4 &&
2467 if (obj->tiling_mode != I915_TILING_NONE) 2526 obj->tiling_mode != I915_TILING_NONE)
2468 dspcntr |= DISPPLANE_TILED; 2527 dspcntr |= DISPPLANE_TILED;
2469 else
2470 dspcntr &= ~DISPPLANE_TILED;
2471 }
2472 2528
2473 if (IS_G4X(dev)) 2529 if (IS_G4X(dev))
2474 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2530 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2475 2531
2476 I915_WRITE(reg, dspcntr); 2532 linear_offset = y * fb->pitches[0] + x * pixel_size;
2477
2478 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2479 2533
2480 if (INTEL_INFO(dev)->gen >= 4) { 2534 if (INTEL_INFO(dev)->gen >= 4) {
2481 intel_crtc->dspaddr_offset = 2535 intel_crtc->dspaddr_offset =
2482 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, 2536 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2483 fb->bits_per_pixel / 8, 2537 pixel_size,
2484 fb->pitches[0]); 2538 fb->pitches[0]);
2485 linear_offset -= intel_crtc->dspaddr_offset; 2539 linear_offset -= intel_crtc->dspaddr_offset;
2486 } else { 2540 } else {
2487 intel_crtc->dspaddr_offset = linear_offset; 2541 intel_crtc->dspaddr_offset = linear_offset;
2488 } 2542 }
2489 2543
2544 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
2545 dspcntr |= DISPPLANE_ROTATE_180;
2546
2547 x += (intel_crtc->config.pipe_src_w - 1);
2548 y += (intel_crtc->config.pipe_src_h - 1);
2549
2550 /* Finding the last pixel of the last line of the display
2551 data and adding to linear_offset*/
2552 linear_offset +=
2553 (intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
2554 (intel_crtc->config.pipe_src_w - 1) * pixel_size;
2555 }
2556
2557 I915_WRITE(reg, dspcntr);
2558
2490 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 2559 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2491 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, 2560 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2492 fb->pitches[0]); 2561 fb->pitches[0]);
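The 180 degree rotation path above is plain address arithmetic: in a packed linear framebuffer pixel (x, y) lives at y * stride + x * cpp, so pointing the base at the last pixel of the last visible line lets the hardware walk the buffer backwards. A standalone sketch of that arithmetic, using a hypothetical fb_layout struct rather than any driver structure:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical framebuffer description, not a driver structure. */
	struct fb_layout {
		uint32_t stride; /* bytes per scanline */
		uint32_t cpp;    /* bytes per pixel */
		uint32_t src_w;  /* visible width in pixels */
		uint32_t src_h;  /* visible height in pixels */
	};

	/* Byte offset of pixel (x, y) in a packed linear buffer. */
	static uint64_t linear_offset(const struct fb_layout *fb, uint32_t x, uint32_t y)
	{
		return (uint64_t)y * fb->stride + (uint64_t)x * fb->cpp;
	}

	int main(void)
	{
		struct fb_layout fb = { .stride = 1920 * 4, .cpp = 4, .src_w = 1920, .src_h = 1080 };
		uint64_t upright = linear_offset(&fb, 0, 0);
		/* 180 degree rotation: start scanout at the last pixel of the last line. */
		uint64_t rotated = upright +
			(uint64_t)(fb.src_h - 1) * fb.stride +
			(uint64_t)(fb.src_w - 1) * fb.cpp;

		printf("upright base offset: %llu, rotated base offset: %llu\n",
		       (unsigned long long)upright, (unsigned long long)rotated);
		return 0;
	}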
@@ -2508,16 +2577,33 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct drm_i915_gem_object *obj;
 	int plane = intel_crtc->plane;
 	unsigned long linear_offset;
 	u32 dspcntr;
-	u32 reg;
+	u32 reg = DSPCNTR(plane);
+	int pixel_size;
+
+	if (!intel_crtc->primary_enabled) {
+		I915_WRITE(reg, 0);
+		I915_WRITE(DSPSURF(plane), 0);
+		POSTING_READ(reg);
+		return;
+	}
+
+	obj = intel_fb_obj(fb);
+	if (WARN_ON(obj == NULL))
+		return;
+
+	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+
+	dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+	dspcntr |= DISPLAY_PLANE_ENABLE;
+
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
 
-	reg = DSPCNTR(plane);
-	dspcntr = I915_READ(reg);
-	/* Mask out pixel format bits in case we change it */
-	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
 	switch (fb->pixel_format) {
 	case DRM_FORMAT_C8:
 		dspcntr |= DISPPLANE_8BPP;
@@ -2547,22 +2633,32 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
 
 	if (obj->tiling_mode != I915_TILING_NONE)
 		dspcntr |= DISPPLANE_TILED;
-	else
-		dspcntr &= ~DISPPLANE_TILED;
 
-	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-		dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
-	else
+	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-	I915_WRITE(reg, dspcntr);
-
-	linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+	linear_offset = y * fb->pitches[0] + x * pixel_size;
 	intel_crtc->dspaddr_offset =
 		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
-					       fb->bits_per_pixel / 8,
+					       pixel_size,
					       fb->pitches[0]);
 	linear_offset -= intel_crtc->dspaddr_offset;
+	if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
+		dspcntr |= DISPPLANE_ROTATE_180;
+
+		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
+			x += (intel_crtc->config.pipe_src_w - 1);
+			y += (intel_crtc->config.pipe_src_h - 1);
+
+			/* Finding the last pixel of the last line of the display
+			   data and adding to linear_offset*/
+			linear_offset +=
+				(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
+				(intel_crtc->config.pipe_src_w - 1) * pixel_size;
+		}
+	}
+
+	I915_WRITE(reg, dspcntr);
 
 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
@@ -3346,23 +3442,54 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 	return false;
 }
 
+static void page_flip_completed(struct intel_crtc *intel_crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+	struct intel_unpin_work *work = intel_crtc->unpin_work;
+
+	/* ensure that the unpin work is consistent wrt ->pending. */
+	smp_rmb();
+	intel_crtc->unpin_work = NULL;
+
+	if (work->event)
+		drm_send_vblank_event(intel_crtc->base.dev,
+				      intel_crtc->pipe,
+				      work->event);
+
+	drm_crtc_vblank_put(&intel_crtc->base);
+
+	wake_up_all(&dev_priv->pending_flip_queue);
+	queue_work(dev_priv->wq, &work->work);
+
+	trace_i915_flip_complete(intel_crtc->plane,
+				 work->pending_flip_obj);
+}
+
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (crtc->primary->fb == NULL)
-		return;
-
 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
+	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
+				       !intel_crtc_has_pending_flip(crtc),
+				       60*HZ) == 0)) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+		unsigned long flags;
 
-	WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
-				   !intel_crtc_has_pending_flip(crtc),
-				   60*HZ) == 0);
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (intel_crtc->unpin_work) {
+			WARN_ONCE(1, "Removing stuck page flip\n");
+			page_flip_completed(intel_crtc);
+		}
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
 
-	mutex_lock(&dev->struct_mutex);
-	intel_finish_fb(crtc->primary->fb);
-	mutex_unlock(&dev->struct_mutex);
+	if (crtc->primary->fb) {
+		mutex_lock(&dev->struct_mutex);
+		intel_finish_fb(crtc->primary->fb);
+		mutex_unlock(&dev->struct_mutex);
+	}
 }
 
 /* Program iCLKIP clock to the desired frequency */
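The rework above bounds the wait for an outstanding flip and, on timeout, completes it by hand under the event lock so a lost interrupt cannot wedge the pipe forever. The general shape of that pattern, sketched in user-space C with pthreads; flip_state and its fields are illustrative stand-ins, not driver API:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	/* Illustrative stand-in for the per-CRTC flip bookkeeping. */
	struct flip_state {
		pthread_mutex_t lock;
		pthread_cond_t  done;
		bool            pending;
	};

	/* Wait up to timeout_sec for the flip to complete; if it never does,
	 * warn and complete it manually so the caller cannot block forever. */
	static void wait_for_pending_flip(struct flip_state *fs, int timeout_sec)
	{
		struct timespec deadline;
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += timeout_sec;

		pthread_mutex_lock(&fs->lock);
		while (fs->pending) {
			if (pthread_cond_timedwait(&fs->done, &fs->lock, &deadline) != 0) {
				if (fs->pending) {
					fprintf(stderr, "Removing stuck page flip\n");
					fs->pending = false; /* manual completion */
				}
				break;
			}
		}
		pthread_mutex_unlock(&fs->lock);
	}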
@@ -3911,14 +4038,14 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 static void intel_crtc_enable_planes(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
+
+	assert_vblank_disabled(crtc);
 
 	drm_vblank_on(dev, pipe);
 
-	intel_enable_primary_hw_plane(dev_priv, plane, pipe);
+	intel_enable_primary_hw_plane(crtc->primary, crtc);
 	intel_enable_planes(crtc);
 	intel_crtc_update_cursor(crtc, true);
 	intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3955,7 +4082,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	intel_crtc_dpms_overlay(intel_crtc, false);
 	intel_crtc_update_cursor(crtc, false);
 	intel_disable_planes(crtc);
-	intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+	intel_disable_primary_hw_plane(crtc->primary, crtc);
 
 	/*
 	 * FIXME: Once we grow proper nuclear flip support out of this we need
@@ -3965,6 +4092,8 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
 
 	drm_vblank_off(dev, pipe);
+
+	assert_vblank_disabled(crtc);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -3974,7 +4103,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	enum plane plane = intel_crtc->plane;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3991,18 +4119,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	if (intel_crtc->config.has_pch_encoder) {
 		intel_cpu_transcoder_set_m_n(intel_crtc,
-					     &intel_crtc->config.fdi_m_n);
+					     &intel_crtc->config.fdi_m_n, NULL);
 	}
 
 	ironlake_set_pipeconf(crtc);
 
-	/* Set up the display plane register */
-	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4087,7 +4208,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	enum plane plane = intel_crtc->plane;
 
 	WARN_ON(!crtc->enabled);
 
@@ -4102,22 +4222,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
 
 	intel_set_pipe_timings(intel_crtc);
 
+	if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
+		I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
+			   intel_crtc->config.pixel_multiplier - 1);
+	}
+
 	if (intel_crtc->config.has_pch_encoder) {
 		intel_cpu_transcoder_set_m_n(intel_crtc,
-					     &intel_crtc->config.fdi_m_n);
+					     &intel_crtc->config.fdi_m_n, NULL);
 	}
 
 	haswell_set_pipeconf(crtc);
 
 	intel_set_pipe_csc(crtc);
 
-	/* Set up the display plane register */
-	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4198,7 +4316,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
 
-	intel_disable_pipe(dev_priv, pipe);
+	intel_disable_pipe(intel_crtc);
+
 	ironlake_pfit_disable(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4246,7 +4365,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
-	int pipe = intel_crtc->pipe;
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
 	if (!intel_crtc->active)
@@ -4261,7 +4379,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
 	if (intel_crtc->config.has_pch_encoder)
 		intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
-	intel_disable_pipe(dev_priv, pipe);
+	intel_disable_pipe(intel_crtc);
 
 	if (intel_crtc->config.dp_encoder_is_mst)
 		intel_ddi_set_vc_payload_alloc(crtc, false);
@@ -4539,12 +4657,57 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
 	vlv_update_cdclk(dev);
 }
 
+static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val, cmd;
+
+	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+
+	switch (cdclk) {
+	case 400000:
+		cmd = 3;
+		break;
+	case 333333:
+	case 320000:
+		cmd = 2;
+		break;
+	case 266667:
+		cmd = 1;
+		break;
+	case 200000:
+		cmd = 0;
+		break;
+	default:
+		WARN_ON(1);
+		return;
+	}
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+	val &= ~DSPFREQGUAR_MASK_CHV;
+	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+		     50)) {
+		DRM_ERROR("timed out waiting for CDclk change\n");
+	}
+	mutex_unlock(&dev_priv->rps.hw_lock);
+
+	vlv_update_cdclk(dev);
+}
+
 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
 {
 	int vco = valleyview_get_vco(dev_priv);
 	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
 
+	/* FIXME: Punit isn't quite ready yet */
+	if (IS_CHERRYVIEW(dev_priv->dev))
+		return 400000;
+
 	/*
 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
 	 *   200MHz
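cherryview_set_cdclk() above encodes the requested display clock as a small command value before handing it to the Punit. The frequency-to-command mapping on its own, as a user-space sketch mirroring that switch (punit register access is deliberately left out; chv_cdclk_to_cmd is a made-up helper name):

	#include <stdio.h>

	/* Map a CHV cdclk frequency (kHz) to the Punit DSPFREQ command value,
	 * mirroring the switch in cherryview_set_cdclk(); -1 means unsupported. */
	static int chv_cdclk_to_cmd(int cdclk_khz)
	{
		switch (cdclk_khz) {
		case 400000: return 3;
		case 333333:
		case 320000: return 2;
		case 266667: return 1;
		case 200000: return 0;
		default:     return -1;
		}
	}

	int main(void)
	{
		static const int freqs[] = { 400000, 333333, 320000, 266667, 200000, 123456 };
		for (unsigned int i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
			printf("%d kHz -> cmd %d\n", freqs[i], chv_cdclk_to_cmd(freqs[i]));
		return 0;
	}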
@@ -4607,21 +4770,23 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
 	int max_pixclk = intel_mode_max_pixclk(dev_priv);
 	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 
-	if (req_cdclk != dev_priv->vlv_cdclk_freq)
-		valleyview_set_cdclk(dev, req_cdclk);
+	if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+		if (IS_CHERRYVIEW(dev))
+			cherryview_set_cdclk(dev, req_cdclk);
+		else
+			valleyview_set_cdclk(dev, req_cdclk);
+	}
 
 	modeset_update_crtc_power_domains(dev);
 }
 
 static void valleyview_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
 	bool is_dsi;
-	u32 dspcntr;
 
 	WARN_ON(!crtc->enabled);
 
@@ -4630,33 +4795,20 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
 	is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
 
-	if (!is_dsi && !IS_CHERRYVIEW(dev))
-		vlv_prepare_pll(intel_crtc);
-
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
+	if (!is_dsi) {
+		if (IS_CHERRYVIEW(dev))
+			chv_prepare_pll(intel_crtc);
+		else
+			vlv_prepare_pll(intel_crtc);
+	}
 
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
 	intel_set_pipe_timings(intel_crtc);
 
-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
-	 */
-	I915_WRITE(DSPSIZE(plane),
-		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
-		   (intel_crtc->config.pipe_src_w - 1));
-	I915_WRITE(DSPPOS(plane), 0);
-
 	i9xx_set_pipeconf(intel_crtc);
 
-	I915_WRITE(DSPCNTR(plane), dspcntr);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
@@ -4704,12 +4856,9 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
 static void i9xx_crtc_enable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-	u32 dspcntr;
 
 	WARN_ON(!crtc->enabled);
 
@@ -4718,35 +4867,13 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
 	i9xx_set_pll_dividers(intel_crtc);
 
-	/* Set up the display plane register */
-	dspcntr = DISPPLANE_GAMMA_ENABLE;
-
-	if (pipe == 0)
-		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
-	else
-		dspcntr |= DISPPLANE_SEL_PIPE_B;
-
 	if (intel_crtc->config.has_dp_encoder)
 		intel_dp_set_m_n(intel_crtc);
 
 	intel_set_pipe_timings(intel_crtc);
 
-	/* pipesrc and dspsize control the size that is scaled from,
-	 * which should always be the user's requested size.
-	 */
-	I915_WRITE(DSPSIZE(plane),
-		   ((intel_crtc->config.pipe_src_h - 1) << 16) |
-		   (intel_crtc->config.pipe_src_w - 1));
-	I915_WRITE(DSPPOS(plane), 0);
-
 	i9xx_set_pipeconf(intel_crtc);
 
-	I915_WRITE(DSPCNTR(plane), dspcntr);
-	POSTING_READ(DSPCNTR(plane));
-
-	dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
-					       crtc->x, crtc->y);
-
 	intel_crtc->active = true;
 
 	if (!IS_GEN2(dev))
@@ -4842,7 +4969,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 	 */
 	intel_wait_for_vblank(dev, pipe);
 
-	intel_disable_pipe(dev_priv, pipe);
+	intel_disable_pipe(intel_crtc);
 
 	i9xx_pfit_disable(intel_crtc);
 
@@ -4856,7 +4983,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 		else if (IS_VALLEYVIEW(dev))
 			vlv_disable_pll(dev_priv, pipe);
 		else
-			i9xx_disable_pll(dev_priv, pipe);
+			i9xx_disable_pll(intel_crtc);
 	}
 
 	if (!IS_GEN2(dev))
@@ -5275,6 +5402,10 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
 	u32 val;
 	int divider;
 
+	/* FIXME: Punit isn't quite ready yet */
+	if (IS_CHERRYVIEW(dev))
+		return 400000;
+
 	mutex_lock(&dev_priv->dpio_lock);
 	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
 	mutex_unlock(&dev_priv->dpio_lock);
@@ -5519,7 +5650,8 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
 }
 
 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
-					 struct intel_link_m_n *m_n)
+					 struct intel_link_m_n *m_n,
+					 struct intel_link_m_n *m2_n2)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5531,6 +5663,18 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
+		 * for gen < 8) and if DRRS is supported (to make sure the
+		 * registers are not unnecessarily accessed).
+		 */
+		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+			crtc->config.has_drrs) {
+			I915_WRITE(PIPE_DATA_M2(transcoder),
+					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
+			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
+			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+		}
 	} else {
 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
@@ -5539,12 +5683,13 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
 	}
 }
 
-static void intel_dp_set_m_n(struct intel_crtc *crtc)
+void intel_dp_set_m_n(struct intel_crtc *crtc)
 {
 	if (crtc->config.has_pch_encoder)
 		intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
 	else
-		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+		intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
+					     &crtc->config.dp_m2_n2);
 }
 
 static void vlv_update_pll(struct intel_crtc *crtc)
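The second set of link M/N registers is only touched when it both exists and is useful: gen < 8 hardware exposes M2/N2, and DRRS is what consumes them. A compact sketch of that gating decision; gen, has_drrs and have_m2_n2_values are plain inputs here, not the driver's config struct:

	#include <stdbool.h>
	#include <stdio.h>

	/* Decide whether the M2/N2 link registers should be programmed:
	 * they only exist on gen < 8, and only matter when DRRS is in use. */
	static bool want_m2_n2(int gen, bool has_drrs, bool have_m2_n2_values)
	{
		return have_m2_n2_values && gen < 8 && has_drrs;
	}

	int main(void)
	{
		printf("gen 7, DRRS on : %d\n", want_m2_n2(7, true, true));  /* 1 */
		printf("gen 8, DRRS on : %d\n", want_m2_n2(8, true, true));  /* 0 */
		printf("gen 7, DRRS off: %d\n", want_m2_n2(7, false, true)); /* 0 */
		return 0;
	}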
@@ -5662,6 +5807,18 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
 
 static void chv_update_pll(struct intel_crtc *crtc)
 {
+	crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+		DPLL_VCO_ENABLE;
+	if (crtc->pipe != PIPE_A)
+		crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+	crtc->config.dpll_hw_state.dpll_md =
+		(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_prepare_pll(struct intel_crtc *crtc)
+{
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int pipe = crtc->pipe;
@@ -5671,15 +5828,6 @@ static void chv_update_pll(struct intel_crtc *crtc)
 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
 	int refclk;
 
-	crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
-		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
-		DPLL_VCO_ENABLE;
-	if (pipe != PIPE_A)
-		crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
-	crtc->config.dpll_hw_state.dpll_md =
-		(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-
 	bestn = crtc->config.dpll.n;
 	bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
 	bestm1 = crtc->config.dpll.m1;
@@ -5839,7 +5987,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
 			dpll |= PLL_P2_DIVIDE_BY_4;
 	}
 
-	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+	if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
 		dpll |= DPLL_DVO_2X_MODE;
 
 	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
@@ -5990,9 +6138,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
 
 	pipeconf = 0;
 
-	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
-	    I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
-		pipeconf |= PIPECONF_ENABLE;
+	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
 
 	if (intel_crtc->config.double_wide)
 		pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -6235,7 +6383,7 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
 	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
 
 	val = I915_READ(DSPSTRIDE(pipe));
-	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
 
 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);
@@ -6345,6 +6493,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 	}
 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
 	if (!IS_VALLEYVIEW(dev)) {
+		/*
+		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
+		 * on 830. Filter it out here so that we don't
+		 * report errors due to that.
+		 */
+		if (IS_I830(dev))
+			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
+
 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
 	} else {
@@ -6367,7 +6523,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
 static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 	u32 val, final;
 	bool has_lvds = false;
@@ -6377,8 +6532,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 	bool can_ssc = false;
 
 	/* We need to take the global config into account */
-	list_for_each_entry(encoder, &mode_config->encoder_list,
-			    base.head) {
+	for_each_intel_encoder(dev, encoder) {
 		switch (encoder->type) {
 		case INTEL_OUTPUT_LVDS:
 			has_panel = true;
@@ -6685,11 +6839,10 @@ static void lpt_disable_clkout_dp(struct drm_device *dev)
 
 static void lpt_init_pch_refclk(struct drm_device *dev)
 {
-	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct intel_encoder *encoder;
 	bool has_vga = false;
 
-	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+	for_each_intel_encoder(dev, encoder) {
 		switch (encoder->type) {
 		case INTEL_OUTPUT_ANALOG:
 			has_vga = true;
@@ -7145,7 +7298,8 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
 
 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
-					 struct intel_link_m_n *m_n)
+					 struct intel_link_m_n *m_n,
+					 struct intel_link_m_n *m2_n2)
 {
 	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7159,6 +7313,20 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
+		 * gen < 8) and if DRRS is supported (to make sure the
+		 * registers are not unnecessarily read).
+		 */
+		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+			crtc->config.has_drrs) {
+			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
+			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
+			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
+					& ~TU_SIZE_MASK;
+			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
+			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+		}
 	} else {
 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
@@ -7177,14 +7345,15 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
 	else
 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
-					     &pipe_config->dp_m_n);
+					     &pipe_config->dp_m_n,
+					     &pipe_config->dp_m2_n2);
 }
 
 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_config *pipe_config)
 {
 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
-				     &pipe_config->fdi_m_n);
+				     &pipe_config->fdi_m_n, NULL);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -7255,7 +7424,7 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
 	crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
 
 	val = I915_READ(DSPSTRIDE(pipe));
-	crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+	crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
 
 	aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
					    plane_config->tiled);
@@ -7615,6 +7784,22 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 	return 0;
 }
 
+static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
+				enum port port,
+				struct intel_crtc_config *pipe_config)
+{
+	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+	switch (pipe_config->ddi_pll_sel) {
+	case PORT_CLK_SEL_WRPLL1:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+		break;
+	case PORT_CLK_SEL_WRPLL2:
+		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+		break;
+	}
+}
+
 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_config *pipe_config)
 {
@@ -7628,16 +7813,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
 
 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
-	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
-	switch (pipe_config->ddi_pll_sel) {
-	case PORT_CLK_SEL_WRPLL1:
-		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
-		break;
-	case PORT_CLK_SEL_WRPLL2:
-		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
-		break;
-	}
+	haswell_get_ddi_pll(dev_priv, port, pipe_config);
 
 	if (pipe_config->shared_dpll >= 0) {
 		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -7719,7 +7895,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
 	pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
 		(I915_READ(IPS_CTL) & IPS_ENABLE);
 
-	pipe_config->pixel_multiplier = 1;
+	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+		pipe_config->pixel_multiplier =
+			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+	} else {
+		pipe_config->pixel_multiplier = 1;
+	}
 
 	return true;
 }
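PIPE_MULT stores the pixel multiplier minus one, so the enable path earlier in this diff writes multiplier - 1 and the readout path above adds one back. A trivial sketch of that encode/decode convention, with no register access involved:

	#include <stdint.h>
	#include <assert.h>

	/* PIPE_MULT-style encoding: the register field holds (multiplier - 1). */
	static uint32_t encode_pixel_mult(uint32_t mult) { return mult - 1; }
	static uint32_t decode_pixel_mult(uint32_t reg)  { return reg + 1; }

	int main(void)
	{
		for (uint32_t mult = 1; mult <= 4; mult++)
			assert(decode_pixel_mult(encode_pixel_mult(mult)) == mult);
		return 0;
	}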
@@ -8037,74 +8218,62 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t cntl;
+	uint32_t cntl = 0, size = 0;
 
-	if (base != intel_crtc->cursor_base) {
-		/* On these chipsets we can only modify the base whilst
-		 * the cursor is disabled.
-		 */
-		if (intel_crtc->cursor_cntl) {
-			I915_WRITE(_CURACNTR, 0);
-			POSTING_READ(_CURACNTR);
-			intel_crtc->cursor_cntl = 0;
+	if (base) {
+		unsigned int width = intel_crtc->cursor_width;
+		unsigned int height = intel_crtc->cursor_height;
+		unsigned int stride = roundup_pow_of_two(width) * 4;
+
+		switch (stride) {
+		default:
+			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+				  width, stride);
+			stride = 256;
+			/* fallthrough */
+		case 256:
+		case 512:
+		case 1024:
+		case 2048:
+			break;
 		}
 
-		I915_WRITE(_CURABASE, base);
-		POSTING_READ(_CURABASE);
+		cntl |= CURSOR_ENABLE |
+			CURSOR_GAMMA_ENABLE |
+			CURSOR_FORMAT_ARGB |
+			CURSOR_STRIDE(stride);
+
+		size = (height << 12) | width;
 	}
 
-	/* XXX width must be 64, stride 256 => 0x00 << 28 */
-	cntl = 0;
-	if (base)
-		cntl = (CURSOR_ENABLE |
-			CURSOR_GAMMA_ENABLE |
-			CURSOR_FORMAT_ARGB);
-	if (intel_crtc->cursor_cntl != cntl) {
-		I915_WRITE(_CURACNTR, cntl);
+	if (intel_crtc->cursor_cntl != 0 &&
+	    (intel_crtc->cursor_base != base ||
+	     intel_crtc->cursor_size != size ||
+	     intel_crtc->cursor_cntl != cntl)) {
+		/* On these chipsets we can only modify the base/size/stride
+		 * whilst the cursor is disabled.
+		 */
+		I915_WRITE(_CURACNTR, 0);
 		POSTING_READ(_CURACNTR);
-		intel_crtc->cursor_cntl = cntl;
+		intel_crtc->cursor_cntl = 0;
 	}
-}
 
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
-{
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	uint32_t cntl;
+	if (intel_crtc->cursor_base != base)
+		I915_WRITE(_CURABASE, base);
 
-	cntl = 0;
-	if (base) {
-		cntl = MCURSOR_GAMMA_ENABLE;
-		switch (intel_crtc->cursor_width) {
-		case 64:
-			cntl |= CURSOR_MODE_64_ARGB_AX;
-			break;
-		case 128:
-			cntl |= CURSOR_MODE_128_ARGB_AX;
-			break;
-		case 256:
-			cntl |= CURSOR_MODE_256_ARGB_AX;
-			break;
-		default:
-			WARN_ON(1);
-			return;
-		}
-		cntl |= pipe << 28; /* Connect to correct pipe */
+	if (intel_crtc->cursor_size != size) {
+		I915_WRITE(CURSIZE, size);
+		intel_crtc->cursor_size = size;
 	}
+
 	if (intel_crtc->cursor_cntl != cntl) {
-		I915_WRITE(CURCNTR(pipe), cntl);
-		POSTING_READ(CURCNTR(pipe));
+		I915_WRITE(_CURACNTR, cntl);
+		POSTING_READ(_CURACNTR);
 		intel_crtc->cursor_cntl = cntl;
 	}
-
-	/* and commit changes on next vblank */
-	I915_WRITE(CURBASE(pipe), base);
-	POSTING_READ(CURBASE(pipe));
 }
 
-static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
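On 845G/865G the cursor stride is the width rounded up to a power of two times four bytes, only 256/512/1024/2048 byte strides are accepted, and the size register packs height and width into a single word. A user-space sketch of those two computations; roundup_pow2() is a local reimplementation standing in for the kernel's roundup_pow_of_two() helper:

	#include <stdint.h>
	#include <stdio.h>

	/* Round v up to the next power of two (v > 0). */
	static uint32_t roundup_pow2(uint32_t v)
	{
		uint32_t p = 1;
		while (p < v)
			p <<= 1;
		return p;
	}

	/* ARGB cursor stride on 845G/865G: power-of-two width times 4 bytes. */
	static uint32_t i845_cursor_stride(uint32_t width)
	{
		return roundup_pow2(width) * 4;
	}

	/* CURSIZE-style packing: height in bits 12 and up, width in the low bits. */
	static uint32_t i845_cursor_size(uint32_t width, uint32_t height)
	{
		return (height << 12) | width;
	}

	int main(void)
	{
		printf("64x64:  stride %u, size 0x%05x\n", i845_cursor_stride(64),  i845_cursor_size(64, 64));
		printf("100x50: stride %u, size 0x%05x\n", i845_cursor_stride(100), i845_cursor_size(100, 50));
		return 0;
	}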
@@ -8129,6 +8298,7 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
 			WARN_ON(1);
 			return;
 		}
+		cntl |= pipe << 28; /* Connect to correct pipe */
 	}
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		cntl |= CURSOR_PIPE_CSC_ENABLE;
@@ -8188,15 +8358,50 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 
 	I915_WRITE(CURPOS(pipe), pos);
 
-	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
-		ivb_update_cursor(crtc, base);
-	else if (IS_845G(dev) || IS_I865G(dev))
+	if (IS_845G(dev) || IS_I865G(dev))
 		i845_update_cursor(crtc, base);
 	else
 		i9xx_update_cursor(crtc, base);
 	intel_crtc->cursor_base = base;
 }
 
+static bool cursor_size_ok(struct drm_device *dev,
+			   uint32_t width, uint32_t height)
+{
+	if (width == 0 || height == 0)
+		return false;
+
+	/*
+	 * 845g/865g are special in that they are only limited by
+	 * the width of their cursors, the height is arbitrary up to
+	 * the precision of the register. Everything else requires
+	 * square cursors, limited to a few power-of-two sizes.
+	 */
+	if (IS_845G(dev) || IS_I865G(dev)) {
+		if ((width & 63) != 0)
+			return false;
+
+		if (width > (IS_845G(dev) ? 64 : 512))
+			return false;
+
+		if (height > 1023)
+			return false;
+	} else {
+		switch (width | height) {
+		case 256:
+		case 128:
+			if (IS_GEN2(dev))
+				return false;
+		case 64:
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return true;
+}
+
 /*
  * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
  *
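cursor_size_ok() above splits the policy in two: 845G/865G only constrain the width (a multiple of 64, capped at 64 or 512) with a nearly arbitrary height, while everything else wants square power-of-two cursors, with 128/256 unavailable on gen2. A standalone restatement of those rules for quick experimentation; the is_845g/is_865g/is_gen2 flags are plain booleans here, not device queries:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirror of the cursor_size_ok() rules, parameterised by platform flags. */
	static bool cursor_dims_ok(bool is_845g, bool is_865g, bool is_gen2,
				   uint32_t w, uint32_t h)
	{
		if (w == 0 || h == 0)
			return false;

		if (is_845g || is_865g) {
			if (w & 63)
				return false;
			if (w > (is_845g ? 64 : 512))
				return false;
			return h <= 1023;
		}

		/* Everyone else: square, 64/128/256, and no 128/256 on gen2. */
		if (w != h)
			return false;
		if (w == 64)
			return true;
		if ((w == 128 || w == 256) && !is_gen2)
			return true;
		return false;
	}

	int main(void)
	{
		printf("865g 512x300: %d\n", cursor_dims_ok(false, true, false, 512, 300));  /* 1 */
		printf("gen2 128x128: %d\n", cursor_dims_ok(false, false, true, 128, 128));  /* 0 */
		printf("gen4 256x256: %d\n", cursor_dims_ok(false, false, false, 256, 256)); /* 1 */
		return 0;
	}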
@@ -8212,7 +8417,7 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	unsigned old_width;
+	unsigned old_width, stride;
 	uint32_t addr;
 	int ret;
 
@@ -8220,20 +8425,18 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 	if (!obj) {
 		DRM_DEBUG_KMS("cursor off\n");
 		addr = 0;
-		obj = NULL;
 		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
 
 	/* Check for which cursor types we support */
-	if (!((width == 64 && height == 64) ||
-			(width == 128 && height == 128 && !IS_GEN2(dev)) ||
-			(width == 256 && height == 256 && !IS_GEN2(dev)))) {
+	if (!cursor_size_ok(dev, width, height)) {
 		DRM_DEBUG("Cursor dimension not supported\n");
 		return -EINVAL;
 	}
 
-	if (obj->base.size < width * height * 4) {
+	stride = roundup_pow_of_two(width) * 4;
+	if (obj->base.size < stride * height) {
 		DRM_DEBUG_KMS("buffer is too small\n");
 		ret = -ENOMEM;
 		goto fail;
@@ -8295,9 +8498,6 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
 		addr = obj->phys_handle->busaddr;
 	}
 
-	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, (height << 12) | width);
-
  finish:
 	if (intel_crtc->cursor_bo) {
 		if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -8944,12 +9144,13 @@ static void intel_mark_fb_busy(struct drm_device *dev,
			       unsigned frontbuffer_bits,
			       struct intel_engine_cs *ring)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
 
 	if (!i915.powersave)
 		return;
 
-	for_each_pipe(pipe) {
+	for_each_pipe(dev_priv, pipe) {
 		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
 			continue;
 
@@ -9019,6 +9220,14 @@ void intel_frontbuffer_flush(struct drm_device *dev,
 	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 
 	intel_edp_psr_flush(dev, frontbuffer_bits);
+
+	/*
+	 * FIXME: Unconditional fbc flushing here is a rather gross hack and
+	 * needs to be reworked into a proper frontbuffer tracking scheme like
+	 * psr employs.
+	 */
+	if (IS_BROADWELL(dev))
+		gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
 }
 
 /**
@@ -9151,7 +9360,6 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags;
@@ -9171,23 +9379,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 		return;
 	}
 
-	/* and that the unpin work is consistent wrt ->pending. */
-	smp_rmb();
-
-	intel_crtc->unpin_work = NULL;
-
-	if (work->event)
-		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
-
-	drm_crtc_vblank_put(crtc);
+	page_flip_completed(intel_crtc);
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
-
-	wake_up_all(&dev_priv->pending_flip_queue);
-
-	queue_work(dev_priv->wq, &work->work);
-
-	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
 
 void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -9532,6 +9726,8 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
 		return false;
 	else if (i915.use_mmio_flip > 0)
 		return true;
+	else if (i915.enable_execlists)
+		return true;
 	else
 		return ring != obj->ring;
 }
@@ -9665,6 +9861,65 @@ static int intel_default_queue_flip(struct drm_device *dev,
 	return -ENODEV;
 }
 
+static bool __intel_pageflip_stall_check(struct drm_device *dev,
+					 struct drm_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work = intel_crtc->unpin_work;
+	u32 addr;
+
+	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
+		return true;
+
+	if (!work->enable_stall_check)
+		return false;
+
+	if (work->flip_ready_vblank == 0) {
+		if (work->flip_queued_ring &&
+		    !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
+				       work->flip_queued_seqno))
+			return false;
+
+		work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+	}
+
+	if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
+		return false;
+
+	/* Potential stall - if we see that the flip has happened,
+	 * assume a missed interrupt. */
+	if (INTEL_INFO(dev)->gen >= 4)
+		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
+	else
+		addr = I915_READ(DSPADDR(intel_crtc->plane));
+
+	/* There is a potential issue here with a false positive after a flip
+	 * to the same address. We could address this by checking for a
+	 * non-incrementing frame counter.
+	 */
+	return addr == work->gtt_offset;
+}
+
+void intel_check_page_flip(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	unsigned long flags;
+
+	if (crtc == NULL)
+		return;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
+		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
+			  intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
+		page_flip_completed(intel_crtc);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
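__intel_pageflip_stall_check() above treats a flip as potentially stuck once the render work it depends on is done, roughly three vblanks have passed without a flip-done interrupt, and the latched surface address already shows the new framebuffer. The vblank-counting half of that heuristic in isolation; frame counters and addresses are plain integers here, and the threshold of 3 mirrors the code above:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative flip-stall heuristic: the flip became "ready" at
	 * ready_vblank; if at least 3 vblanks have elapsed since then and the
	 * scanout base already shows the new address, assume the completion
	 * interrupt was missed. */
	static bool flip_looks_stuck(uint32_t now_vblank, uint32_t ready_vblank,
				     uint64_t scanout_addr, uint64_t new_addr)
	{
		if (now_vblank - ready_vblank < 3)
			return false;
		return scanout_addr == new_addr;
	}

	int main(void)
	{
		printf("%d\n", flip_looks_stuck(10, 9, 0x1000, 0x1000)); /* 0: too soon */
		printf("%d\n", flip_looks_stuck(13, 9, 0x1000, 0x1000)); /* 1: stuck */
		printf("%d\n", flip_looks_stuck(13, 9, 0x2000, 0x1000)); /* 0: not flipped yet */
		return 0;
	}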
@@ -9721,12 +9976,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	/* We borrow the event spin lock for protecting unpin_work */
 	spin_lock_irqsave(&dev->event_lock, flags);
 	if (intel_crtc->unpin_work) {
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-		kfree(work);
-		drm_crtc_vblank_put(crtc);
+		/* Before declaring the flip queue wedged, check if
+		 * the hardware completed the operation behind our backs.
+		 */
+		if (__intel_pageflip_stall_check(dev, crtc)) {
+			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
+			page_flip_completed(intel_crtc);
+		} else {
+			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+			spin_unlock_irqrestore(&dev->event_lock, flags);
 
-		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
-		return -EBUSY;
+			drm_crtc_vblank_put(crtc);
+			kfree(work);
+			return -EBUSY;
+		}
 	}
 	intel_crtc->unpin_work = work;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -9746,8 +10009,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	work->pending_flip_obj = obj;
 
-	work->enable_stall_check = true;
-
 	atomic_inc(&intel_crtc->unpin_work_count);
 	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
@@ -9776,14 +10037,26 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	work->gtt_offset =
 		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
 
-	if (use_mmio_flip(ring, obj))
+	if (use_mmio_flip(ring, obj)) {
 		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
					    page_flip_flags);
-	else
+		if (ret)
+			goto cleanup_unpin;
+
+		work->flip_queued_seqno = obj->last_write_seqno;
+		work->flip_queued_ring = obj->ring;
+	} else {
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
						   page_flip_flags);
-	if (ret)
-		goto cleanup_unpin;
+		if (ret)
+			goto cleanup_unpin;
+
+		work->flip_queued_seqno = intel_ring_get_seqno(ring);
+		work->flip_queued_ring = ring;
+	}
+
+	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
+	work->enable_stall_check = true;
 
 	i915_gem_track_fb(work->old_fb_obj, obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));
@@ -9818,8 +10091,11 @@ free_work:
 out_hang:
 	intel_crtc_wait_for_pending_flips(crtc);
 	ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
-	if (ret == 0 && event)
+	if (ret == 0 && event) {
+		spin_lock_irqsave(&dev->event_lock, flags);
 		drm_send_vblank_event(dev, pipe, event);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
10098 }
9823 } 10099 }
9824 return ret; 10100 return ret;
9825} 10101}
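The out_hang change above sends the completion event only while holding dev->event_lock, so the fallback path and the normal completion path can never both deliver or free it. A hypothetical user-space analogue of that pattern (pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_event { int data; };

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pending_event *pending;   /* also consumed by the "interrupt" path */

/* Deliver the event while holding the same lock the normal completion
 * path takes, so exactly one path claims and frees it. */
static void send_event_locked(struct pending_event *ev)
{
	pthread_mutex_lock(&event_lock);
	if (pending == ev)
		pending = NULL;              /* claim it before handing it off */
	printf("event %d delivered\n", ev->data);
	free(ev);
	pthread_mutex_unlock(&event_lock);
}

int main(void)
{
	struct pending_event *ev = malloc(sizeof(*ev));

	ev->data = 42;
	pending = ev;
	send_event_locked(ev);
	return 0;
}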
@@ -9847,8 +10123,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev)
9847 to_intel_encoder(connector->base.encoder); 10123 to_intel_encoder(connector->base.encoder);
9848 } 10124 }
9849 10125
9850 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10126 for_each_intel_encoder(dev, encoder) {
9851 base.head) {
9852 encoder->new_crtc = 10127 encoder->new_crtc =
9853 to_intel_crtc(encoder->base.crtc); 10128 to_intel_crtc(encoder->base.crtc);
9854 } 10129 }
@@ -9879,8 +10154,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev)
9879 connector->base.encoder = &connector->new_encoder->base; 10154 connector->base.encoder = &connector->new_encoder->base;
9880 } 10155 }
9881 10156
9882 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10157 for_each_intel_encoder(dev, encoder) {
9883 base.head) {
9884 encoder->base.crtc = &encoder->new_crtc->base; 10158 encoder->base.crtc = &encoder->new_crtc->base;
9885 } 10159 }
9886 10160
@@ -10007,6 +10281,15 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
10007 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, 10281 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
10008 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, 10282 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
10009 pipe_config->dp_m_n.tu); 10283 pipe_config->dp_m_n.tu);
10284
10285 DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
10286 pipe_config->has_dp_encoder,
10287 pipe_config->dp_m2_n2.gmch_m,
10288 pipe_config->dp_m2_n2.gmch_n,
10289 pipe_config->dp_m2_n2.link_m,
10290 pipe_config->dp_m2_n2.link_n,
10291 pipe_config->dp_m2_n2.tu);
10292
10010 DRM_DEBUG_KMS("requested mode:\n"); 10293 DRM_DEBUG_KMS("requested mode:\n");
10011 drm_mode_debug_printmodeline(&pipe_config->requested_mode); 10294 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
10012 DRM_DEBUG_KMS("adjusted mode:\n"); 10295 DRM_DEBUG_KMS("adjusted mode:\n");
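For context on the dp_m_n / dp_m2_n2 values dumped above: they encode the data and link M/N ratios for the primary link configuration and, when DRRS is used, for the downclocked one. The sketch below shows how such ratios are typically derived (data M/N from pixel bandwidth versus link bandwidth, link M/N from pixel clock versus link clock); the reduction strategy and TU value are illustrative assumptions, not a copy of the driver's intel_link_compute_m_n():

#include <stdint.h>
#include <stdio.h>

struct link_m_n {
	uint32_t tu;
	uint32_t gmch_m, gmch_n;   /* data M/N:  (pixel_clock * bpp) : (link_clock * lanes * 8) */
	uint32_t link_m, link_n;   /* link M/N:  pixel_clock : link_clock */
};

static uint64_t gcd64(uint64_t a, uint64_t b)
{
	while (b) { uint64_t t = a % b; a = b; b = t; }
	return a;
}

static void reduce(uint64_t m, uint64_t n, uint32_t *out_m, uint32_t *out_n)
{
	uint64_t g = gcd64(m, n);

	*out_m = (uint32_t)(m / g);
	*out_n = (uint32_t)(n / g);
}

static void compute_m_n(int bpp, int lanes, int pixel_khz, int link_khz,
			struct link_m_n *mn)
{
	mn->tu = 64;
	reduce((uint64_t)pixel_khz * bpp, (uint64_t)link_khz * lanes * 8,
	       &mn->gmch_m, &mn->gmch_n);
	reduce(pixel_khz, link_khz, &mn->link_m, &mn->link_n);
}

int main(void)
{
	struct link_m_n full, drrs;

	/* 1920x1080@60 (~148.5 MHz) over 2 lanes of HBR (270 MHz symbol clock). */
	compute_m_n(24, 2, 148500, 270000, &full);
	/* Hypothetical DRRS downclocked mode at ~74.25 MHz on the same link. */
	compute_m_n(24, 2, 74250, 270000, &drrs);

	printf("M1/N1 data %u/%u link %u/%u\n",
	       full.gmch_m, full.gmch_n, full.link_m, full.link_n);
	printf("M2/N2 data %u/%u link %u/%u\n",
	       drrs.gmch_m, drrs.gmch_n, drrs.link_m, drrs.link_n);
	return 0;
}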
@@ -10041,8 +10324,7 @@ static bool check_single_encoder_cloning(struct intel_crtc *crtc,
10041 struct drm_device *dev = crtc->base.dev; 10324 struct drm_device *dev = crtc->base.dev;
10042 struct intel_encoder *source_encoder; 10325 struct intel_encoder *source_encoder;
10043 10326
10044 list_for_each_entry(source_encoder, 10327 for_each_intel_encoder(dev, source_encoder) {
10045 &dev->mode_config.encoder_list, base.head) {
10046 if (source_encoder->new_crtc != crtc) 10328 if (source_encoder->new_crtc != crtc)
10047 continue; 10329 continue;
10048 10330
@@ -10058,8 +10340,7 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
10058 struct drm_device *dev = crtc->base.dev; 10340 struct drm_device *dev = crtc->base.dev;
10059 struct intel_encoder *encoder; 10341 struct intel_encoder *encoder;
10060 10342
10061 list_for_each_entry(encoder, 10343 for_each_intel_encoder(dev, encoder) {
10062 &dev->mode_config.encoder_list, base.head) {
10063 if (encoder->new_crtc != crtc) 10344 if (encoder->new_crtc != crtc)
10064 continue; 10345 continue;
10065 10346
@@ -10143,8 +10424,7 @@ encoder_retry:
10143 * adjust it according to limitations or connector properties, and also 10424 * adjust it according to limitations or connector properties, and also
10144 * a chance to reject the mode entirely. 10425 * a chance to reject the mode entirely.
10145 */ 10426 */
10146 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10427 for_each_intel_encoder(dev, encoder) {
10147 base.head) {
10148 10428
10149 if (&encoder->new_crtc->base != crtc) 10429 if (&encoder->new_crtc->base != crtc)
10150 continue; 10430 continue;
@@ -10222,8 +10502,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
10222 1 << connector->new_encoder->new_crtc->pipe; 10502 1 << connector->new_encoder->new_crtc->pipe;
10223 } 10503 }
10224 10504
10225 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10505 for_each_intel_encoder(dev, encoder) {
10226 base.head) {
10227 if (encoder->base.crtc == &encoder->new_crtc->base) 10506 if (encoder->base.crtc == &encoder->new_crtc->base)
10228 continue; 10507 continue;
10229 10508
@@ -10297,8 +10576,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10297 struct intel_crtc *intel_crtc; 10576 struct intel_crtc *intel_crtc;
10298 struct drm_connector *connector; 10577 struct drm_connector *connector;
10299 10578
10300 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list, 10579 for_each_intel_encoder(dev, intel_encoder) {
10301 base.head) {
10302 if (!intel_encoder->base.crtc) 10580 if (!intel_encoder->base.crtc)
10303 continue; 10581 continue;
10304 10582
@@ -10387,6 +10665,22 @@ intel_pipe_config_compare(struct drm_device *dev,
10387 return false; \ 10665 return false; \
10388 } 10666 }
10389 10667
10668/* This is required for BDW+ where there is only one set of registers for
10669 * switching between high and low RR.
10670 * This macro can be used whenever a comparison has to be made between one
10671 * hw state and multiple sw state variables.
10672 */
10673#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
10674 if ((current_config->name != pipe_config->name) && \
10675 (current_config->alt_name != pipe_config->name)) { \
10676 DRM_ERROR("mismatch in " #name " " \
10677 "(expected %i or %i, found %i)\n", \
10678 current_config->name, \
10679 current_config->alt_name, \
10680 pipe_config->name); \
10681 return false; \
10682 }
10683
10390#define PIPE_CONF_CHECK_FLAGS(name, mask) \ 10684#define PIPE_CONF_CHECK_FLAGS(name, mask) \
10391 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 10685 if ((current_config->name ^ pipe_config->name) & (mask)) { \
10392 DRM_ERROR("mismatch in " #name "(" #mask ") " \ 10686 DRM_ERROR("mismatch in " #name "(" #mask ") " \
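A minimal stand-alone demonstration of the "either of two expected values" comparison that the new PIPE_CONF_CHECK_I_ALT macro performs, with printf standing in for DRM_ERROR and made-up field names:

#include <stdio.h>

struct conf { int m, m2; };

#define CHECK_I_ALT(cur, found, name, alt_name)                              \
	do {                                                                 \
		if ((cur)->name != (found)->name &&                          \
		    (cur)->alt_name != (found)->name) {                      \
			printf("mismatch in " #name                          \
			       " (expected %d or %d, found %d)\n",           \
			       (cur)->name, (cur)->alt_name, (found)->name); \
		}                                                            \
	} while (0)

int main(void)
{
	struct conf sw = { .m = 100, .m2 = 50 };   /* high and low refresh-rate values */
	struct conf hw = { .m = 50 };              /* hardware currently on the low-RR set */

	CHECK_I_ALT(&sw, &hw, m, m2);              /* matches m2: nothing printed */
	hw.m = 75;
	CHECK_I_ALT(&sw, &hw, m, m2);              /* matches neither: prints a mismatch */
	return 0;
}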
@@ -10419,11 +10713,28 @@ intel_pipe_config_compare(struct drm_device *dev,
10419 PIPE_CONF_CHECK_I(fdi_m_n.tu); 10713 PIPE_CONF_CHECK_I(fdi_m_n.tu);
10420 10714
10421 PIPE_CONF_CHECK_I(has_dp_encoder); 10715 PIPE_CONF_CHECK_I(has_dp_encoder);
10422 PIPE_CONF_CHECK_I(dp_m_n.gmch_m); 10716
10423 PIPE_CONF_CHECK_I(dp_m_n.gmch_n); 10717 if (INTEL_INFO(dev)->gen < 8) {
10424 PIPE_CONF_CHECK_I(dp_m_n.link_m); 10718 PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
10425 PIPE_CONF_CHECK_I(dp_m_n.link_n); 10719 PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
10426 PIPE_CONF_CHECK_I(dp_m_n.tu); 10720 PIPE_CONF_CHECK_I(dp_m_n.link_m);
10721 PIPE_CONF_CHECK_I(dp_m_n.link_n);
10722 PIPE_CONF_CHECK_I(dp_m_n.tu);
10723
10724 if (current_config->has_drrs) {
10725 PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
10726 PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
10727 PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
10728 PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
10729 PIPE_CONF_CHECK_I(dp_m2_n2.tu);
10730 }
10731 } else {
10732 PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
10733 PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
10734 PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
10735 PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
10736 PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
10737 }
10427 10738
10428 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); 10739 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
10429 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); 10740 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
@@ -10509,6 +10820,7 @@ intel_pipe_config_compare(struct drm_device *dev,
10509 10820
10510#undef PIPE_CONF_CHECK_X 10821#undef PIPE_CONF_CHECK_X
10511#undef PIPE_CONF_CHECK_I 10822#undef PIPE_CONF_CHECK_I
10823#undef PIPE_CONF_CHECK_I_ALT
10512#undef PIPE_CONF_CHECK_FLAGS 10824#undef PIPE_CONF_CHECK_FLAGS
10513#undef PIPE_CONF_CHECK_CLOCK_FUZZY 10825#undef PIPE_CONF_CHECK_CLOCK_FUZZY
10514#undef PIPE_CONF_QUIRK 10826#undef PIPE_CONF_QUIRK
@@ -10538,8 +10850,7 @@ check_encoder_state(struct drm_device *dev)
10538 struct intel_encoder *encoder; 10850 struct intel_encoder *encoder;
10539 struct intel_connector *connector; 10851 struct intel_connector *connector;
10540 10852
10541 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10853 for_each_intel_encoder(dev, encoder) {
10542 base.head) {
10543 bool enabled = false; 10854 bool enabled = false;
10544 bool active = false; 10855 bool active = false;
10545 enum pipe pipe, tracked_pipe; 10856 enum pipe pipe, tracked_pipe;
@@ -10618,8 +10929,7 @@ check_crtc_state(struct drm_device *dev)
10618 WARN(crtc->active && !crtc->base.enabled, 10929 WARN(crtc->active && !crtc->base.enabled,
10619 "active crtc, but not enabled in sw tracking\n"); 10930 "active crtc, but not enabled in sw tracking\n");
10620 10931
10621 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10932 for_each_intel_encoder(dev, encoder) {
10622 base.head) {
10623 if (encoder->base.crtc != &crtc->base) 10933 if (encoder->base.crtc != &crtc->base)
10624 continue; 10934 continue;
10625 enabled = true; 10935 enabled = true;
@@ -10637,12 +10947,12 @@ check_crtc_state(struct drm_device *dev)
10637 active = dev_priv->display.get_pipe_config(crtc, 10947 active = dev_priv->display.get_pipe_config(crtc,
10638 &pipe_config); 10948 &pipe_config);
10639 10949
10640 /* hw state is inconsistent with the pipe A quirk */ 10950 /* hw state is inconsistent with the pipe quirk */
10641 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 10951 if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
10952 (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
10642 active = crtc->active; 10953 active = crtc->active;
10643 10954
10644 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 10955 for_each_intel_encoder(dev, encoder) {
10645 base.head) {
10646 enum pipe pipe; 10956 enum pipe pipe;
10647 if (encoder->base.crtc != &crtc->base) 10957 if (encoder->base.crtc != &crtc->base)
10648 continue; 10958 continue;
@@ -11010,7 +11320,7 @@ static void intel_set_config_restore_state(struct drm_device *dev,
11010 } 11320 }
11011 11321
11012 count = 0; 11322 count = 0;
11013 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 11323 for_each_intel_encoder(dev, encoder) {
11014 encoder->new_crtc = 11324 encoder->new_crtc =
11015 to_intel_crtc(config->save_encoder_crtcs[count++]); 11325 to_intel_crtc(config->save_encoder_crtcs[count++]);
11016 } 11326 }
@@ -11169,8 +11479,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
11169 } 11479 }
11170 11480
11171 /* Check for any encoders that needs to be disabled. */ 11481 /* Check for any encoders that needs to be disabled. */
11172 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 11482 for_each_intel_encoder(dev, encoder) {
11173 base.head) {
11174 int num_connectors = 0; 11483 int num_connectors = 0;
11175 list_for_each_entry(connector, 11484 list_for_each_entry(connector,
11176 &dev->mode_config.connector_list, 11485 &dev->mode_config.connector_list,
@@ -11203,9 +11512,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
11203 for_each_intel_crtc(dev, crtc) { 11512 for_each_intel_crtc(dev, crtc) {
11204 crtc->new_enabled = false; 11513 crtc->new_enabled = false;
11205 11514
11206 list_for_each_entry(encoder, 11515 for_each_intel_encoder(dev, encoder) {
11207 &dev->mode_config.encoder_list,
11208 base.head) {
11209 if (encoder->new_crtc == crtc) { 11516 if (encoder->new_crtc == crtc) {
11210 crtc->new_enabled = true; 11517 crtc->new_enabled = true;
11211 break; 11518 break;
@@ -11242,7 +11549,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc)
11242 connector->new_encoder = NULL; 11549 connector->new_encoder = NULL;
11243 } 11550 }
11244 11551
11245 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 11552 for_each_intel_encoder(dev, encoder) {
11246 if (encoder->new_crtc == crtc) 11553 if (encoder->new_crtc == crtc)
11247 encoder->new_crtc = NULL; 11554 encoder->new_crtc = NULL;
11248 } 11555 }
@@ -11305,7 +11612,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11305 ret = intel_set_mode(set->crtc, set->mode, 11612 ret = intel_set_mode(set->crtc, set->mode,
11306 set->x, set->y, set->fb); 11613 set->x, set->y, set->fb);
11307 } else if (config->fb_changed) { 11614 } else if (config->fb_changed) {
11308 struct drm_i915_private *dev_priv = dev->dev_private;
11309 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); 11615 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
11310 11616
11311 intel_crtc_wait_for_pending_flips(set->crtc); 11617 intel_crtc_wait_for_pending_flips(set->crtc);
@@ -11319,8 +11625,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11319 */ 11625 */
11320 if (!intel_crtc->primary_enabled && ret == 0) { 11626 if (!intel_crtc->primary_enabled && ret == 0) {
11321 WARN_ON(!intel_crtc->active); 11627 WARN_ON(!intel_crtc->active);
11322 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, 11628 intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
11323 intel_crtc->pipe);
11324 } 11629 }
11325 11630
11326 /* 11631 /*
@@ -11473,8 +11778,6 @@ static int
11473intel_primary_plane_disable(struct drm_plane *plane) 11778intel_primary_plane_disable(struct drm_plane *plane)
11474{ 11779{
11475 struct drm_device *dev = plane->dev; 11780 struct drm_device *dev = plane->dev;
11476 struct drm_i915_private *dev_priv = dev->dev_private;
11477 struct intel_plane *intel_plane = to_intel_plane(plane);
11478 struct intel_crtc *intel_crtc; 11781 struct intel_crtc *intel_crtc;
11479 11782
11480 if (!plane->fb) 11783 if (!plane->fb)
@@ -11497,8 +11800,8 @@ intel_primary_plane_disable(struct drm_plane *plane)
11497 goto disable_unpin; 11800 goto disable_unpin;
11498 11801
11499 intel_crtc_wait_for_pending_flips(plane->crtc); 11802 intel_crtc_wait_for_pending_flips(plane->crtc);
11500 intel_disable_primary_hw_plane(dev_priv, intel_plane->plane, 11803 intel_disable_primary_hw_plane(plane, plane->crtc);
11501 intel_plane->pipe); 11804
11502disable_unpin: 11805disable_unpin:
11503 mutex_lock(&dev->struct_mutex); 11806 mutex_lock(&dev->struct_mutex);
11504 i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, 11807 i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
@@ -11520,7 +11823,6 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11520 struct drm_device *dev = crtc->dev; 11823 struct drm_device *dev = crtc->dev;
11521 struct drm_i915_private *dev_priv = dev->dev_private; 11824 struct drm_i915_private *dev_priv = dev->dev_private;
11522 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11825 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11523 struct intel_plane *intel_plane = to_intel_plane(plane);
11524 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11826 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11525 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 11827 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11526 struct drm_rect dest = { 11828 struct drm_rect dest = {
@@ -11542,6 +11844,21 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11542 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, 11844 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11543 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, 11845 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11544 }; 11846 };
11847 const struct {
11848 int crtc_x, crtc_y;
11849 unsigned int crtc_w, crtc_h;
11850 uint32_t src_x, src_y, src_w, src_h;
11851 } orig = {
11852 .crtc_x = crtc_x,
11853 .crtc_y = crtc_y,
11854 .crtc_w = crtc_w,
11855 .crtc_h = crtc_h,
11856 .src_x = src_x,
11857 .src_y = src_y,
11858 .src_w = src_w,
11859 .src_h = src_h,
11860 };
11861 struct intel_plane *intel_plane = to_intel_plane(plane);
11545 bool visible; 11862 bool visible;
11546 int ret; 11863 int ret;
11547 11864
@@ -11607,9 +11924,7 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11607 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11924 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11608 11925
11609 if (intel_crtc->primary_enabled) 11926 if (intel_crtc->primary_enabled)
11610 intel_disable_primary_hw_plane(dev_priv, 11927 intel_disable_primary_hw_plane(plane, crtc);
11611 intel_plane->plane,
11612 intel_plane->pipe);
11613 11928
11614 11929
11615 if (plane->fb != fb) 11930 if (plane->fb != fb)
@@ -11618,16 +11933,42 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11618 11933
11619 mutex_unlock(&dev->struct_mutex); 11934 mutex_unlock(&dev->struct_mutex);
11620 11935
11621 return 0; 11936 } else {
11622 } 11937 if (intel_crtc && intel_crtc->active &&
11938 intel_crtc->primary_enabled) {
11939 /*
11940 * FBC does not work on some platforms for rotated
11941 * planes, so disable it when rotation is not 0 and
11942 * update it when rotation is set back to 0.
11943 *
11944 * FIXME: This is redundant with the fbc update done in
11945 * the primary plane enable function except that that
11946 * one is done too late. We eventually need to unify
11947 * this.
11948 */
11949 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11950 dev_priv->fbc.plane == intel_crtc->plane &&
11951 intel_plane->rotation != BIT(DRM_ROTATE_0)) {
11952 intel_disable_fbc(dev);
11953 }
11954 }
11955 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
11956 if (ret)
11957 return ret;
11623 11958
11624 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb); 11959 if (!intel_crtc->primary_enabled)
11625 if (ret) 11960 intel_enable_primary_hw_plane(plane, crtc);
11626 return ret; 11961 }
11627 11962
11628 if (!intel_crtc->primary_enabled) 11963 intel_plane->crtc_x = orig.crtc_x;
11629 intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane, 11964 intel_plane->crtc_y = orig.crtc_y;
11630 intel_crtc->pipe); 11965 intel_plane->crtc_w = orig.crtc_w;
11966 intel_plane->crtc_h = orig.crtc_h;
11967 intel_plane->src_x = orig.src_x;
11968 intel_plane->src_y = orig.src_y;
11969 intel_plane->src_w = orig.src_w;
11970 intel_plane->src_h = orig.src_h;
11971 intel_plane->obj = obj;
11631 11972
11632 return 0; 11973 return 0;
11633} 11974}
@@ -11644,6 +11985,7 @@ static const struct drm_plane_funcs intel_primary_plane_funcs = {
11644 .update_plane = intel_primary_plane_setplane, 11985 .update_plane = intel_primary_plane_setplane,
11645 .disable_plane = intel_primary_plane_disable, 11986 .disable_plane = intel_primary_plane_disable,
11646 .destroy = intel_plane_destroy, 11987 .destroy = intel_plane_destroy,
11988 .set_property = intel_plane_set_property
11647}; 11989};
11648 11990
11649static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, 11991static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
@@ -11661,6 +12003,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11661 primary->max_downscale = 1; 12003 primary->max_downscale = 1;
11662 primary->pipe = pipe; 12004 primary->pipe = pipe;
11663 primary->plane = pipe; 12005 primary->plane = pipe;
12006 primary->rotation = BIT(DRM_ROTATE_0);
11664 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) 12007 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
11665 primary->plane = !pipe; 12008 primary->plane = !pipe;
11666 12009
@@ -11676,6 +12019,19 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11676 &intel_primary_plane_funcs, 12019 &intel_primary_plane_funcs,
11677 intel_primary_formats, num_formats, 12020 intel_primary_formats, num_formats,
11678 DRM_PLANE_TYPE_PRIMARY); 12021 DRM_PLANE_TYPE_PRIMARY);
12022
12023 if (INTEL_INFO(dev)->gen >= 4) {
12024 if (!dev->mode_config.rotation_property)
12025 dev->mode_config.rotation_property =
12026 drm_mode_create_rotation_property(dev,
12027 BIT(DRM_ROTATE_0) |
12028 BIT(DRM_ROTATE_180));
12029 if (dev->mode_config.rotation_property)
12030 drm_object_attach_property(&primary->base.base,
12031 dev->mode_config.rotation_property,
12032 primary->rotation);
12033 }
12034
11679 return &primary->base; 12035 return &primary->base;
11680} 12036}
11681 12037
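The rotation-property hunk above follows a common KMS pattern: create one shared property object per device, then attach it to each plane with that plane's current value. A hedged generic sketch of the same pattern with stand-in types (not the DRM API):

#include <stdio.h>
#include <stdlib.h>

struct property { const char *name; };

struct device {
	struct property *rotation_prop;   /* shared, created once per device */
};

struct plane {
	struct device *dev;
	struct property *attached;
	unsigned int rotation;            /* current value, e.g. bit 0 = 0 degrees */
};

static void plane_init(struct plane *p, struct device *dev)
{
	p->dev = dev;
	p->rotation = 1u << 0;                       /* default: no rotation */

	if (!dev->rotation_prop) {                   /* lazily create the shared property */
		dev->rotation_prop = malloc(sizeof(*dev->rotation_prop));
		dev->rotation_prop->name = "rotation";
	}
	if (dev->rotation_prop) {                    /* attach with the plane's initial value */
		p->attached = dev->rotation_prop;
		printf("attached %s=%u\n", p->attached->name, p->rotation);
	}
}

int main(void)
{
	struct device dev = { 0 };
	struct plane a, b;

	plane_init(&a, &dev);   /* creates the property */
	plane_init(&b, &dev);   /* reuses the same property object */
	free(dev.rotation_prop);
	return 0;
}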
@@ -11736,6 +12092,10 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
11736 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); 12092 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
11737 } else { 12093 } else {
11738 intel_crtc_update_cursor(crtc, visible); 12094 intel_crtc_update_cursor(crtc, visible);
12095
12096 intel_frontbuffer_flip(crtc->dev,
12097 INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
12098
11739 return 0; 12099 return 0;
11740 } 12100 }
11741} 12101}
@@ -11812,8 +12172,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
11812 12172
11813 intel_crtc->cursor_base = ~0; 12173 intel_crtc->cursor_base = ~0;
11814 intel_crtc->cursor_cntl = ~0; 12174 intel_crtc->cursor_cntl = ~0;
11815 12175 intel_crtc->cursor_size = ~0;
11816 init_waitqueue_head(&intel_crtc->vbl_wait);
11817 12176
11818 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 12177 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
11819 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 12178 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
@@ -11876,8 +12235,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
11876 int index_mask = 0; 12235 int index_mask = 0;
11877 int entry = 0; 12236 int entry = 0;
11878 12237
11879 list_for_each_entry(source_encoder, 12238 for_each_intel_encoder(dev, source_encoder) {
11880 &dev->mode_config.encoder_list, base.head) {
11881 if (encoders_cloneable(encoder, source_encoder)) 12239 if (encoders_cloneable(encoder, source_encoder))
11882 index_mask |= (1 << entry); 12240 index_mask |= (1 << entry);
11883 12241
@@ -12066,7 +12424,7 @@ static void intel_setup_outputs(struct drm_device *dev)
12066 12424
12067 intel_edp_psr_init(dev); 12425 intel_edp_psr_init(dev);
12068 12426
12069 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 12427 for_each_intel_encoder(dev, encoder) {
12070 encoder->base.possible_crtcs = encoder->crtc_mask; 12428 encoder->base.possible_crtcs = encoder->crtc_mask;
12071 encoder->base.possible_clones = 12429 encoder->base.possible_clones =
12072 intel_encoder_clones(encoder); 12430 intel_encoder_clones(encoder);
@@ -12332,29 +12690,27 @@ static void intel_init_display(struct drm_device *dev)
12332 dev_priv->display.get_display_clock_speed = 12690 dev_priv->display.get_display_clock_speed =
12333 i830_get_display_clock_speed; 12691 i830_get_display_clock_speed;
12334 12692
12335 if (HAS_PCH_SPLIT(dev)) { 12693 if (IS_G4X(dev)) {
12336 if (IS_GEN5(dev)) {
12337 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12338 dev_priv->display.write_eld = ironlake_write_eld;
12339 } else if (IS_GEN6(dev)) {
12340 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12341 dev_priv->display.write_eld = ironlake_write_eld;
12342 dev_priv->display.modeset_global_resources =
12343 snb_modeset_global_resources;
12344 } else if (IS_IVYBRIDGE(dev)) {
12345 /* FIXME: detect B0+ stepping and use auto training */
12346 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12347 dev_priv->display.write_eld = ironlake_write_eld;
12348 dev_priv->display.modeset_global_resources =
12349 ivb_modeset_global_resources;
12350 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
12351 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12352 dev_priv->display.write_eld = haswell_write_eld;
12353 dev_priv->display.modeset_global_resources =
12354 haswell_modeset_global_resources;
12355 }
12356 } else if (IS_G4X(dev)) {
12357 dev_priv->display.write_eld = g4x_write_eld; 12694 dev_priv->display.write_eld = g4x_write_eld;
12695 } else if (IS_GEN5(dev)) {
12696 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12697 dev_priv->display.write_eld = ironlake_write_eld;
12698 } else if (IS_GEN6(dev)) {
12699 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12700 dev_priv->display.write_eld = ironlake_write_eld;
12701 dev_priv->display.modeset_global_resources =
12702 snb_modeset_global_resources;
12703 } else if (IS_IVYBRIDGE(dev)) {
12704 /* FIXME: detect B0+ stepping and use auto training */
12705 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12706 dev_priv->display.write_eld = ironlake_write_eld;
12707 dev_priv->display.modeset_global_resources =
12708 ivb_modeset_global_resources;
12709 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
12710 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12711 dev_priv->display.write_eld = haswell_write_eld;
12712 dev_priv->display.modeset_global_resources =
12713 haswell_modeset_global_resources;
12358 } else if (IS_VALLEYVIEW(dev)) { 12714 } else if (IS_VALLEYVIEW(dev)) {
12359 dev_priv->display.modeset_global_resources = 12715 dev_priv->display.modeset_global_resources =
12360 valleyview_modeset_global_resources; 12716 valleyview_modeset_global_resources;
@@ -12388,6 +12744,8 @@ static void intel_init_display(struct drm_device *dev)
12388 } 12744 }
12389 12745
12390 intel_panel_init_backlight_funcs(dev); 12746 intel_panel_init_backlight_funcs(dev);
12747
12748 mutex_init(&dev_priv->pps_mutex);
12391} 12749}
12392 12750
12393/* 12751/*
@@ -12403,6 +12761,14 @@ static void quirk_pipea_force(struct drm_device *dev)
12403 DRM_INFO("applying pipe a force quirk\n"); 12761 DRM_INFO("applying pipe a force quirk\n");
12404} 12762}
12405 12763
12764static void quirk_pipeb_force(struct drm_device *dev)
12765{
12766 struct drm_i915_private *dev_priv = dev->dev_private;
12767
12768 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
12769 DRM_INFO("applying pipe b force quirk\n");
12770}
12771
12406/* 12772/*
12407 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 12773 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
12408 */ 12774 */
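The two new 830 entries above extend the usual quirk-table pattern: match on PCI device and subsystem IDs (with wildcards) and run a hook for every entry that matches, so one device can pick up several quirks. A small stand-alone illustration of that pattern, with made-up IDs:

#include <stdio.h>

#define ANY_ID 0xffff

struct device { unsigned short device, subvendor, subdevice; };

struct quirk {
	unsigned short device, subvendor, subdevice;
	void (*hook)(struct device *dev);
};

static void quirk_pipea_force(struct device *dev) { printf("pipe A force quirk\n"); }
static void quirk_pipeb_force(struct device *dev) { printf("pipe B force quirk\n"); }

static const struct quirk quirks[] = {
	/* one device can match several entries; every matching hook runs */
	{ 0x3577, ANY_ID, ANY_ID, quirk_pipea_force },
	{ 0x3577, ANY_ID, ANY_ID, quirk_pipeb_force },
};

static void apply_quirks(struct device *dev)
{
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->device == dev->device &&
		    (q->subvendor == dev->subvendor || q->subvendor == ANY_ID) &&
		    (q->subdevice == dev->subdevice || q->subdevice == ANY_ID))
			q->hook(dev);
	}
}

int main(void)
{
	struct device i830 = { 0x3577, 0x1028, 0x0001 };

	apply_quirks(&i830);   /* applies both the pipe A and pipe B force quirks */
	return 0;
}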
@@ -12477,6 +12843,12 @@ static struct intel_quirk intel_quirks[] = {
12477 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 12843 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
12478 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 12844 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
12479 12845
12846 /* 830 needs to leave pipe A & dpll A up */
12847 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
12848
12849 /* 830 needs to leave pipe B & dpll B up */
12850 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
12851
12480 /* Lenovo U160 cannot use SSC on LVDS */ 12852 /* Lenovo U160 cannot use SSC on LVDS */
12481 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 12853 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
12482 12854
@@ -12550,7 +12922,11 @@ static void i915_disable_vga(struct drm_device *dev)
12550 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 12922 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
12551 udelay(300); 12923 udelay(300);
12552 12924
12553 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 12925 /*
12926 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
12927 * from S3 without preserving (some of?) the other bits.
12928 */
12929 I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
12554 POSTING_READ(vga_reg); 12930 POSTING_READ(vga_reg);
12555} 12931}
12556 12932
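The VGA-disable change above stops clobbering whatever else the BIOS left in the register: the value is saved once at init and only the disable bit is OR'ed into it on writeback. A tiny sketch of that save-then-read-modify-write idea, with a fake register and a hypothetical bit name:

#include <stdint.h>
#include <stdio.h>

#define VGA_DISP_DISABLE (1u << 31)

static uint32_t fake_vgacntrl = 0x0000beef;  /* pretend the BIOS programmed this */

static uint32_t reg_read(void)        { return fake_vgacntrl; }
static void     reg_write(uint32_t v) { fake_vgacntrl = v; }

int main(void)
{
	/* Save the BIOS value once, before the driver touches the register. */
	uint32_t bios_vgacntr = reg_read();

	/* Old behaviour: write only the disable bit, losing the BIOS setup. */
	/* reg_write(VGA_DISP_DISABLE); */

	/* New behaviour: keep the BIOS bits and just add the disable bit. */
	reg_write(bios_vgacntr | VGA_DISP_DISABLE);

	printf("VGACNTRL now 0x%08x (BIOS bits preserved)\n", reg_read());
	return 0;
}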
@@ -12563,8 +12939,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
12563 12939
12564 intel_init_clock_gating(dev); 12940 intel_init_clock_gating(dev);
12565 12941
12566 intel_reset_dpio(dev);
12567
12568 intel_enable_gt_powersave(dev); 12942 intel_enable_gt_powersave(dev);
12569} 12943}
12570 12944
@@ -12610,7 +12984,10 @@ void intel_modeset_init(struct drm_device *dev)
12610 dev->mode_config.max_height = 8192; 12984 dev->mode_config.max_height = 8192;
12611 } 12985 }
12612 12986
12613 if (IS_GEN2(dev)) { 12987 if (IS_845G(dev) || IS_I865G(dev)) {
12988 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
12989 dev->mode_config.cursor_height = 1023;
12990 } else if (IS_GEN2(dev)) {
12614 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 12991 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
12615 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 12992 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
12616 } else { 12993 } else {
@@ -12624,7 +13001,7 @@ void intel_modeset_init(struct drm_device *dev)
12624 INTEL_INFO(dev)->num_pipes, 13001 INTEL_INFO(dev)->num_pipes,
12625 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 13002 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
12626 13003
12627 for_each_pipe(pipe) { 13004 for_each_pipe(dev_priv, pipe) {
12628 intel_crtc_init(dev, pipe); 13005 intel_crtc_init(dev, pipe);
12629 for_each_sprite(pipe, sprite) { 13006 for_each_sprite(pipe, sprite) {
12630 ret = intel_plane_init(dev, pipe, sprite); 13007 ret = intel_plane_init(dev, pipe, sprite);
@@ -12635,10 +13012,11 @@ void intel_modeset_init(struct drm_device *dev)
12635 } 13012 }
12636 13013
12637 intel_init_dpio(dev); 13014 intel_init_dpio(dev);
12638 intel_reset_dpio(dev);
12639 13015
12640 intel_shared_dpll_init(dev); 13016 intel_shared_dpll_init(dev);
12641 13017
13018 /* save the BIOS value before clobbering it */
13019 dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
12642 /* Just disable it once at startup */ 13020 /* Just disable it once at startup */
12643 i915_disable_vga(dev); 13021 i915_disable_vga(dev);
12644 intel_setup_outputs(dev); 13022 intel_setup_outputs(dev);
@@ -12730,9 +13108,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
12730 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 13108 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
12731 13109
12732 /* restore vblank interrupts to correct state */ 13110 /* restore vblank interrupts to correct state */
12733 if (crtc->active) 13111 if (crtc->active) {
13112 update_scanline_offset(crtc);
12734 drm_vblank_on(dev, crtc->pipe); 13113 drm_vblank_on(dev, crtc->pipe);
12735 else 13114 } else
12736 drm_vblank_off(dev, crtc->pipe); 13115 drm_vblank_off(dev, crtc->pipe);
12737 13116
12738 /* We need to sanitize the plane -> pipe mapping first because this will 13117 /* We need to sanitize the plane -> pipe mapping first because this will
@@ -12815,7 +13194,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
12815 } 13194 }
12816 } 13195 }
12817 13196
12818 if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) { 13197 if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
12819 /* 13198 /*
12820 * We start out with underrun reporting disabled to avoid races. 13199 * We start out with underrun reporting disabled to avoid races.
12821 * For correct bookkeeping mark this on active crtcs. 13200 * For correct bookkeeping mark this on active crtcs.
@@ -12831,8 +13210,6 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
12831 */ 13210 */
12832 crtc->cpu_fifo_underrun_disabled = true; 13211 crtc->cpu_fifo_underrun_disabled = true;
12833 crtc->pch_fifo_underrun_disabled = true; 13212 crtc->pch_fifo_underrun_disabled = true;
12834
12835 update_scanline_offset(crtc);
12836 } 13213 }
12837} 13214}
12838 13215
@@ -12964,8 +13341,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
12964 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 13341 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
12965 } 13342 }
12966 13343
12967 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 13344 for_each_intel_encoder(dev, encoder) {
12968 base.head) {
12969 pipe = 0; 13345 pipe = 0;
12970 13346
12971 if (encoder->get_hw_state(encoder, &pipe)) { 13347 if (encoder->get_hw_state(encoder, &pipe)) {
@@ -13029,12 +13405,11 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13029 } 13405 }
13030 13406
13031 /* HW state is read out, now we need to sanitize this mess. */ 13407 /* HW state is read out, now we need to sanitize this mess. */
13032 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 13408 for_each_intel_encoder(dev, encoder) {
13033 base.head) {
13034 intel_sanitize_encoder(encoder); 13409 intel_sanitize_encoder(encoder);
13035 } 13410 }
13036 13411
13037 for_each_pipe(pipe) { 13412 for_each_pipe(dev_priv, pipe) {
13038 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 13413 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
13039 intel_sanitize_crtc(crtc); 13414 intel_sanitize_crtc(crtc);
13040 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]"); 13415 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
@@ -13062,7 +13437,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13062 * We need to use raw interfaces for restoring state to avoid 13437 * We need to use raw interfaces for restoring state to avoid
13063 * checking (bogus) intermediate states. 13438 * checking (bogus) intermediate states.
13064 */ 13439 */
13065 for_each_pipe(pipe) { 13440 for_each_pipe(dev_priv, pipe) {
13066 struct drm_crtc *crtc = 13441 struct drm_crtc *crtc =
13067 dev_priv->pipe_to_crtc_mapping[pipe]; 13442 dev_priv->pipe_to_crtc_mapping[pipe];
13068 13443
@@ -13283,7 +13658,7 @@ intel_display_capture_error_state(struct drm_device *dev)
13283 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 13658 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13284 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 13659 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
13285 13660
13286 for_each_pipe(i) { 13661 for_each_pipe(dev_priv, i) {
13287 error->pipe[i].power_domain_on = 13662 error->pipe[i].power_domain_on =
13288 intel_display_power_enabled_unlocked(dev_priv, 13663 intel_display_power_enabled_unlocked(dev_priv,
13289 POWER_DOMAIN_PIPE(i)); 13664 POWER_DOMAIN_PIPE(i));
@@ -13347,6 +13722,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13347 struct drm_device *dev, 13722 struct drm_device *dev,
13348 struct intel_display_error_state *error) 13723 struct intel_display_error_state *error)
13349{ 13724{
13725 struct drm_i915_private *dev_priv = dev->dev_private;
13350 int i; 13726 int i;
13351 13727
13352 if (!error) 13728 if (!error)
@@ -13356,7 +13732,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13356 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 13732 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
13357 err_printf(m, "PWR_WELL_CTL2: %08x\n", 13733 err_printf(m, "PWR_WELL_CTL2: %08x\n",
13358 error->power_well_driver); 13734 error->power_well_driver);
13359 for_each_pipe(i) { 13735 for_each_pipe(dev_priv, i) {
13360 err_printf(m, "Pipe [%d]:\n", i); 13736 err_printf(m, "Pipe [%d]:\n", i);
13361 err_printf(m, " Power: %s\n", 13737 err_printf(m, " Power: %s\n",
13362 error->pipe[i].power_domain_on ? "on" : "off"); 13738 error->pipe[i].power_domain_on ? "on" : "off");
@@ -13397,3 +13773,25 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
13397 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 13773 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
13398 } 13774 }
13399} 13775}
13776
13777void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13778{
13779 struct intel_crtc *crtc;
13780
13781 for_each_intel_crtc(dev, crtc) {
13782 struct intel_unpin_work *work;
13783 unsigned long irqflags;
13784
13785 spin_lock_irqsave(&dev->event_lock, irqflags);
13786
13787 work = crtc->unpin_work;
13788
13789 if (work && work->event &&
13790 work->event->base.file_priv == file) {
13791 kfree(work->event);
13792 work->event = NULL;
13793 }
13794
13795 spin_unlock_irqrestore(&dev->event_lock, irqflags);
13796 }
13797}
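intel_modeset_preclose() above makes sure a pending flip never tries to deliver its event to a file descriptor that is being closed: the event is dropped under the event lock while the flip itself still completes. A simplified stand-alone model of that sweep, with a pthread mutex standing in for the spinlock and plain structs for the DRM objects:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct file_ctx { int id; };

struct flip_event { struct file_ctx *owner; };

struct crtc {
	pthread_mutex_t event_lock;
	struct flip_event *pending;   /* event the next flip completion would send */
};

/* Drop any queued completion event that belongs to the closing file. */
static void preclose(struct crtc *crtcs, int ncrtcs, struct file_ctx *file)
{
	for (int i = 0; i < ncrtcs; i++) {
		struct crtc *c = &crtcs[i];

		pthread_mutex_lock(&c->event_lock);
		if (c->pending && c->pending->owner == file) {
			free(c->pending);
			c->pending = NULL;   /* flip still completes, just silently */
		}
		pthread_mutex_unlock(&c->event_lock);
	}
}

int main(void)
{
	struct file_ctx client = { 1 };
	struct crtc crtcs[2] = {
		{ PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(struct flip_event)) },
		{ PTHREAD_MUTEX_INITIALIZER, NULL },
	};

	crtcs[0].pending->owner = &client;
	preclose(crtcs, 2, &client);
	printf("crtc0 pending: %p\n", (void *)crtcs[0].pending);  /* (nil) */
	return 0;
}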
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fdff1d420c14..f6a3fdd5589e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -111,7 +111,7 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
111} 111}
112 112
113static void intel_dp_link_down(struct intel_dp *intel_dp); 113static void intel_dp_link_down(struct intel_dp *intel_dp);
114static bool _edp_panel_vdd_on(struct intel_dp *intel_dp); 114static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
116 116
117int 117int
@@ -290,32 +290,201 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290 struct intel_dp *intel_dp, 290 struct intel_dp *intel_dp,
291 struct edp_power_seq *out); 291 struct edp_power_seq *out);
292 292
293static void pps_lock(struct intel_dp *intel_dp)
294{
295 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
296 struct intel_encoder *encoder = &intel_dig_port->base;
297 struct drm_device *dev = encoder->base.dev;
298 struct drm_i915_private *dev_priv = dev->dev_private;
299 enum intel_display_power_domain power_domain;
300
301 /*
302 * See vlv_power_sequencer_reset() why we need
303 * a power domain reference here.
304 */
305 power_domain = intel_display_port_power_domain(encoder);
306 intel_display_power_get(dev_priv, power_domain);
307
308 mutex_lock(&dev_priv->pps_mutex);
309}
310
311static void pps_unlock(struct intel_dp *intel_dp)
312{
313 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
314 struct intel_encoder *encoder = &intel_dig_port->base;
315 struct drm_device *dev = encoder->base.dev;
316 struct drm_i915_private *dev_priv = dev->dev_private;
317 enum intel_display_power_domain power_domain;
318
319 mutex_unlock(&dev_priv->pps_mutex);
320
321 power_domain = intel_display_port_power_domain(encoder);
322 intel_display_power_put(dev_priv, power_domain);
323}
324
293static enum pipe 325static enum pipe
294vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 326vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
295{ 327{
296 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
297 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
298 struct drm_device *dev = intel_dig_port->base.base.dev; 329 struct drm_device *dev = intel_dig_port->base.base.dev;
299 struct drm_i915_private *dev_priv = dev->dev_private; 330 struct drm_i915_private *dev_priv = dev->dev_private;
300 enum port port = intel_dig_port->port; 331 struct intel_encoder *encoder;
301 enum pipe pipe; 332 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
333 struct edp_power_seq power_seq;
334
335 lockdep_assert_held(&dev_priv->pps_mutex);
336
337 if (intel_dp->pps_pipe != INVALID_PIPE)
338 return intel_dp->pps_pipe;
339
340 /*
341 * We don't have power sequencer currently.
342 * Pick one that's not used by other ports.
343 */
344 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
345 base.head) {
346 struct intel_dp *tmp;
347
348 if (encoder->type != INTEL_OUTPUT_EDP)
349 continue;
350
351 tmp = enc_to_intel_dp(&encoder->base);
352
353 if (tmp->pps_pipe != INVALID_PIPE)
354 pipes &= ~(1 << tmp->pps_pipe);
355 }
356
357 /*
358 * Didn't find one. This should not happen since there
359 * are two power sequencers and up to two eDP ports.
360 */
361 if (WARN_ON(pipes == 0))
362 return PIPE_A;
363
364 intel_dp->pps_pipe = ffs(pipes) - 1;
365
366 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
367 pipe_name(intel_dp->pps_pipe),
368 port_name(intel_dig_port->port));
302 369
303 /* modeset should have pipe */ 370 /* init power sequencer on this pipe and port */
304 if (crtc) 371 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
305 return to_intel_crtc(crtc)->pipe; 372 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
373 &power_seq);
374
375 return intel_dp->pps_pipe;
376}
377
378typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
379 enum pipe pipe);
380
381static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
382 enum pipe pipe)
383{
384 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
385}
386
387static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
388 enum pipe pipe)
389{
390 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
391}
392
393static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
394 enum pipe pipe)
395{
396 return true;
397}
398
399static enum pipe
400vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
401 enum port port,
402 vlv_pipe_check pipe_check)
403{
404 enum pipe pipe;
306 405
307 /* init time, try to find a pipe with this port selected */
308 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { 406 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
309 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & 407 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
310 PANEL_PORT_SELECT_MASK; 408 PANEL_PORT_SELECT_MASK;
311 if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B) 409
312 return pipe; 410 if (port_sel != PANEL_PORT_SELECT_VLV(port))
313 if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C) 411 continue;
314 return pipe; 412
413 if (!pipe_check(dev_priv, pipe))
414 continue;
415
416 return pipe;
417 }
418
419 return INVALID_PIPE;
420}
421
422static void
423vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
424{
425 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
426 struct drm_device *dev = intel_dig_port->base.base.dev;
427 struct drm_i915_private *dev_priv = dev->dev_private;
428 struct edp_power_seq power_seq;
429 enum port port = intel_dig_port->port;
430
431 lockdep_assert_held(&dev_priv->pps_mutex);
432
433 /* try to find a pipe with this port selected */
434 /* first pick one where the panel is on */
435 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
436 vlv_pipe_has_pp_on);
437 /* didn't find one? pick one where vdd is on */
438 if (intel_dp->pps_pipe == INVALID_PIPE)
439 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
440 vlv_pipe_has_vdd_on);
441 /* didn't find one? pick one with just the correct port */
442 if (intel_dp->pps_pipe == INVALID_PIPE)
443 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
444 vlv_pipe_any);
445
446 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
447 if (intel_dp->pps_pipe == INVALID_PIPE) {
448 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
449 port_name(port));
450 return;
315 } 451 }
316 452
317 /* shrug */ 453 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
318 return PIPE_A; 454 port_name(port), pipe_name(intel_dp->pps_pipe));
455
456 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
457 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
458 &power_seq);
459}
460
461void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
462{
463 struct drm_device *dev = dev_priv->dev;
464 struct intel_encoder *encoder;
465
466 if (WARN_ON(!IS_VALLEYVIEW(dev)))
467 return;
468
469 /*
470 * We can't grab pps_mutex here due to deadlock with power_domain
471 * mutex when power_domain functions are called while holding pps_mutex.
472 * That also means that in order to use pps_pipe the code needs to
473 * hold both a power domain reference and pps_mutex, and the power domain
474 * reference get/put must be done while _not_ holding pps_mutex.
475 * pps_{lock,unlock}() do these steps in the correct order, so one
476 * should use them always.
477 */
478
479 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
480 struct intel_dp *intel_dp;
481
482 if (encoder->type != INTEL_OUTPUT_EDP)
483 continue;
484
485 intel_dp = enc_to_intel_dp(&encoder->base);
486 intel_dp->pps_pipe = INVALID_PIPE;
487 }
319} 488}
320 489
321static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) 490static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
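The new pps_lock()/pps_unlock() helpers above encode a strict ordering: take the display power reference first, then the pps mutex, and release them in the opposite order, so the power-domain code itself never runs with pps_mutex held. A hedged sketch of that ordering with stand-in primitives (ordinary mutexes and a refcount instead of the real power-domain machinery):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t power_domain_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pps_mutex = PTHREAD_MUTEX_INITIALIZER;
static int power_refcount;

static void power_get(void)
{
	pthread_mutex_lock(&power_domain_lock);
	if (power_refcount++ == 0)
		printf("power well on\n");
	pthread_mutex_unlock(&power_domain_lock);
}

static void power_put(void)
{
	pthread_mutex_lock(&power_domain_lock);
	if (--power_refcount == 0)
		printf("power well off\n");
	pthread_mutex_unlock(&power_domain_lock);
}

/* Lock order: power reference first, pps_mutex second ... */
static void pps_lock(void)   { power_get(); pthread_mutex_lock(&pps_mutex); }
/* ... and drop them in reverse, so power_get()/power_put() never run
 * while pps_mutex is held and the two locks cannot deadlock. */
static void pps_unlock(void) { pthread_mutex_unlock(&pps_mutex); power_put(); }

int main(void)
{
	pps_lock();
	printf("panel power sequencer state is stable here\n");
	pps_unlock();
	return 0;
}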
@@ -349,12 +518,15 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
349 struct drm_i915_private *dev_priv = dev->dev_private; 518 struct drm_i915_private *dev_priv = dev->dev_private;
350 u32 pp_div; 519 u32 pp_div;
351 u32 pp_ctrl_reg, pp_div_reg; 520 u32 pp_ctrl_reg, pp_div_reg;
352 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
353 521
354 if (!is_edp(intel_dp) || code != SYS_RESTART) 522 if (!is_edp(intel_dp) || code != SYS_RESTART)
355 return 0; 523 return 0;
356 524
525 pps_lock(intel_dp);
526
357 if (IS_VALLEYVIEW(dev)) { 527 if (IS_VALLEYVIEW(dev)) {
528 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
529
358 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 530 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
359 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); 531 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
360 pp_div = I915_READ(pp_div_reg); 532 pp_div = I915_READ(pp_div_reg);
@@ -366,6 +538,8 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
366 msleep(intel_dp->panel_power_cycle_delay); 538 msleep(intel_dp->panel_power_cycle_delay);
367 } 539 }
368 540
541 pps_unlock(intel_dp);
542
369 return 0; 543 return 0;
370} 544}
371 545
@@ -374,6 +548,8 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
374 struct drm_device *dev = intel_dp_to_dev(intel_dp); 548 struct drm_device *dev = intel_dp_to_dev(intel_dp);
375 struct drm_i915_private *dev_priv = dev->dev_private; 549 struct drm_i915_private *dev_priv = dev->dev_private;
376 550
551 lockdep_assert_held(&dev_priv->pps_mutex);
552
377 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 553 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
378} 554}
379 555
@@ -381,13 +557,10 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
381{ 557{
382 struct drm_device *dev = intel_dp_to_dev(intel_dp); 558 struct drm_device *dev = intel_dp_to_dev(intel_dp);
383 struct drm_i915_private *dev_priv = dev->dev_private; 559 struct drm_i915_private *dev_priv = dev->dev_private;
384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
385 struct intel_encoder *intel_encoder = &intel_dig_port->base;
386 enum intel_display_power_domain power_domain;
387 560
388 power_domain = intel_display_port_power_domain(intel_encoder); 561 lockdep_assert_held(&dev_priv->pps_mutex);
389 return intel_display_power_enabled(dev_priv, power_domain) && 562
390 (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; 563 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
391} 564}
392 565
393static void 566static void
@@ -535,7 +708,15 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
535 bool has_aux_irq = HAS_AUX_IRQ(dev); 708 bool has_aux_irq = HAS_AUX_IRQ(dev);
536 bool vdd; 709 bool vdd;
537 710
538 vdd = _edp_panel_vdd_on(intel_dp); 711 pps_lock(intel_dp);
712
713 /*
714 * We will be called with VDD already enabled for dpcd/edid/oui reads.
715 * In such cases we want to leave VDD enabled and it's up to upper layers
716 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
717 * ourselves.
718 */
719 vdd = edp_panel_vdd_on(intel_dp);
539 720
540 /* dp aux is extremely sensitive to irq latency, hence request the 721 /* dp aux is extremely sensitive to irq latency, hence request the
541 * lowest possible wakeup latency and so prevent the cpu from going into 722 * lowest possible wakeup latency and so prevent the cpu from going into
@@ -644,6 +825,8 @@ out:
644 if (vdd) 825 if (vdd)
645 edp_panel_vdd_off(intel_dp, false); 826 edp_panel_vdd_off(intel_dp, false);
646 827
828 pps_unlock(intel_dp);
829
647 return ret; 830 return ret;
648} 831}
649 832
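The AUX path above relies on a "turn VDD on only if it was off, and only then turn it back off" convention, so nested users (DPCD/EDID reads that already hold VDD) are left untouched. A small stand-alone model of that convention, with plain booleans standing in for the panel power state:

#include <stdbool.h>
#include <stdio.h>

static bool vdd_on;          /* panel VDD force state */
static bool want_vdd;        /* some higher layer already requested VDD */

/* Returns true if this call actually enabled VDD and the caller must disable it. */
static bool vdd_on_if_needed(void)
{
	bool need_to_disable = !want_vdd;

	want_vdd = true;
	if (!vdd_on) {
		vdd_on = true;
		printf("VDD forced on\n");
	}
	return need_to_disable;
}

static void vdd_off(void)
{
	want_vdd = false;
	vdd_on = false;
	printf("VDD forced off\n");
}

static void aux_transfer(void)
{
	bool vdd = vdd_on_if_needed();   /* may already be on for an EDID read */

	printf("AUX transaction\n");

	if (vdd)                         /* only undo what we ourselves did */
		vdd_off();
}

int main(void)
{
	aux_transfer();                  /* standalone access: toggles VDD around it */

	want_vdd = vdd_on = true;        /* pretend a DPCD read already holds VDD */
	aux_transfer();                  /* leaves VDD to the outer user */
	return 0;
}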
@@ -828,20 +1011,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
828 } 1011 }
829} 1012}
830 1013
831static void
832intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
833{
834 struct drm_device *dev = crtc->base.dev;
835 struct drm_i915_private *dev_priv = dev->dev_private;
836 enum transcoder transcoder = crtc->config.cpu_transcoder;
837
838 I915_WRITE(PIPE_DATA_M2(transcoder),
839 TU_SIZE(m_n->tu) | m_n->gmch_m);
840 I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
841 I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
842 I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
843}
844
845bool 1014bool
846intel_dp_compute_config(struct intel_encoder *encoder, 1015intel_dp_compute_config(struct intel_encoder *encoder,
847 struct intel_crtc_config *pipe_config) 1016 struct intel_crtc_config *pipe_config)
@@ -867,6 +1036,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
867 pipe_config->has_pch_encoder = true; 1036 pipe_config->has_pch_encoder = true;
868 1037
869 pipe_config->has_dp_encoder = true; 1038 pipe_config->has_dp_encoder = true;
1039 pipe_config->has_drrs = false;
870 pipe_config->has_audio = intel_dp->has_audio; 1040 pipe_config->has_audio = intel_dp->has_audio;
871 1041
872 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1042 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
@@ -898,23 +1068,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
898 bpp = dev_priv->vbt.edp_bpp; 1068 bpp = dev_priv->vbt.edp_bpp;
899 } 1069 }
900 1070
901 if (IS_BROADWELL(dev)) { 1071 /*
902 /* Yes, it's an ugly hack. */ 1072 * Use the maximum clock and number of lanes the eDP panel
903 min_lane_count = max_lane_count; 1073 * advertises being capable of. The panels are generally
904 DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", 1074 * designed to support only a single clock and lane
905 min_lane_count); 1075 * configuration, and typically these values correspond to the
906 } else if (dev_priv->vbt.edp_lanes) { 1076 * native resolution of the panel.
907 min_lane_count = min(dev_priv->vbt.edp_lanes, 1077 */
908 max_lane_count); 1078 min_lane_count = max_lane_count;
909 DRM_DEBUG_KMS("using min %u lanes per VBT\n", 1079 min_clock = max_clock;
910 min_lane_count);
911 }
912
913 if (dev_priv->vbt.edp_rate) {
914 min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
915 DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
916 bws[min_clock]);
917 }
918 } 1080 }
919 1081
920 for (; bpp >= 6*3; bpp -= 2*3) { 1082 for (; bpp >= 6*3; bpp -= 2*3) {
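With the change above the eDP configuration always starts from the panel's maximum lane count and link rate, so the remaining search in the loop below is only over bpp. A stand-alone sketch of the bandwidth check that loop performs (rates in kHz; the /10 scaling mirrors the usual 8b/10b accounting, but the helper names and example numbers are illustrative):

#include <stdio.h>

/* Bandwidth needed by the mode, in units of 10 kbit/s (kHz * bpp / 10). */
static int link_required(int pixel_khz, int bpp)
{
	return (pixel_khz * bpp + 9) / 10;
}

/* Bandwidth a DP link provides after 8b/10b coding, in the same units. */
static int link_available(int link_khz, int lanes)
{
	return (link_khz * lanes * 8) / 10;
}

int main(void)
{
	const int pixel_khz = 148500;            /* 1920x1080@60 */
	const int lanes = 2, link_khz = 270000;  /* panel maximum: 2 lanes of HBR */

	/* Step bpp down from 24 until the mode fits on the fixed link config. */
	for (int bpp = 24; bpp >= 18; bpp -= 6) {
		if (link_required(pixel_khz, bpp) <= link_available(link_khz, lanes)) {
			printf("using %d bpp on %d lanes @ %d kHz\n",
			       bpp, lanes, link_khz);
			return 0;
		}
	}
	printf("mode does not fit\n");
	return 1;
}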
@@ -970,13 +1132,14 @@ found:
970 1132
971 if (intel_connector->panel.downclock_mode != NULL && 1133 if (intel_connector->panel.downclock_mode != NULL &&
972 intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) { 1134 intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
1135 pipe_config->has_drrs = true;
973 intel_link_compute_m_n(bpp, lane_count, 1136 intel_link_compute_m_n(bpp, lane_count,
974 intel_connector->panel.downclock_mode->clock, 1137 intel_connector->panel.downclock_mode->clock,
975 pipe_config->port_clock, 1138 pipe_config->port_clock,
976 &pipe_config->dp_m2_n2); 1139 &pipe_config->dp_m2_n2);
977 } 1140 }
978 1141
979 if (HAS_DDI(dev)) 1142 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
980 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); 1143 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
981 else 1144 else
982 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 1145 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -1110,6 +1273,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
1110 struct drm_i915_private *dev_priv = dev->dev_private; 1273 struct drm_i915_private *dev_priv = dev->dev_private;
1111 u32 pp_stat_reg, pp_ctrl_reg; 1274 u32 pp_stat_reg, pp_ctrl_reg;
1112 1275
1276 lockdep_assert_held(&dev_priv->pps_mutex);
1277
1113 pp_stat_reg = _pp_stat_reg(intel_dp); 1278 pp_stat_reg = _pp_stat_reg(intel_dp);
1114 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1279 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1115 1280
@@ -1173,13 +1338,20 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1173 struct drm_i915_private *dev_priv = dev->dev_private; 1338 struct drm_i915_private *dev_priv = dev->dev_private;
1174 u32 control; 1339 u32 control;
1175 1340
1341 lockdep_assert_held(&dev_priv->pps_mutex);
1342
1176 control = I915_READ(_pp_ctrl_reg(intel_dp)); 1343 control = I915_READ(_pp_ctrl_reg(intel_dp));
1177 control &= ~PANEL_UNLOCK_MASK; 1344 control &= ~PANEL_UNLOCK_MASK;
1178 control |= PANEL_UNLOCK_REGS; 1345 control |= PANEL_UNLOCK_REGS;
1179 return control; 1346 return control;
1180} 1347}
1181 1348
1182static bool _edp_panel_vdd_on(struct intel_dp *intel_dp) 1349/*
1350 * Must be paired with edp_panel_vdd_off().
1351 * Must hold pps_mutex around the whole on/off sequence.
1352 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1353 */
1354static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1183{ 1355{
1184 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1356 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1185 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1357 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -1190,6 +1362,8 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1190 u32 pp_stat_reg, pp_ctrl_reg; 1362 u32 pp_stat_reg, pp_ctrl_reg;
1191 bool need_to_disable = !intel_dp->want_panel_vdd; 1363 bool need_to_disable = !intel_dp->want_panel_vdd;
1192 1364
1365 lockdep_assert_held(&dev_priv->pps_mutex);
1366
1193 if (!is_edp(intel_dp)) 1367 if (!is_edp(intel_dp))
1194 return false; 1368 return false;
1195 1369
@@ -1227,62 +1401,76 @@ static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
1227 return need_to_disable; 1401 return need_to_disable;
1228} 1402}
1229 1403
1404/*
1405 * Must be paired with intel_edp_panel_vdd_off() or
1406 * intel_edp_panel_off().
1407 * Nested calls to these functions are not allowed since
1408 * we drop the lock. Caller must use some higher level
1409 * locking to prevent nested calls from other threads.
1410 */
1230void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) 1411void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1231{ 1412{
1232 if (is_edp(intel_dp)) { 1413 bool vdd;
1233 bool vdd = _edp_panel_vdd_on(intel_dp);
1234 1414
1235 WARN(!vdd, "eDP VDD already requested on\n"); 1415 if (!is_edp(intel_dp))
1236 } 1416 return;
1417
1418 pps_lock(intel_dp);
1419 vdd = edp_panel_vdd_on(intel_dp);
1420 pps_unlock(intel_dp);
1421
1422 WARN(!vdd, "eDP VDD already requested on\n");
1237} 1423}
1238 1424
1239static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 1425static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1240{ 1426{
1241 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1427 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1242 struct drm_i915_private *dev_priv = dev->dev_private; 1428 struct drm_i915_private *dev_priv = dev->dev_private;
1429 struct intel_digital_port *intel_dig_port =
1430 dp_to_dig_port(intel_dp);
1431 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1432 enum intel_display_power_domain power_domain;
1243 u32 pp; 1433 u32 pp;
1244 u32 pp_stat_reg, pp_ctrl_reg; 1434 u32 pp_stat_reg, pp_ctrl_reg;
1245 1435
1246 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 1436 lockdep_assert_held(&dev_priv->pps_mutex);
1247 1437
1248 if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) { 1438 WARN_ON(intel_dp->want_panel_vdd);
1249 struct intel_digital_port *intel_dig_port = 1439
1250 dp_to_dig_port(intel_dp); 1440 if (!edp_have_panel_vdd(intel_dp))
1251 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1441 return;
1252 enum intel_display_power_domain power_domain;
1253 1442
1254 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1443 DRM_DEBUG_KMS("Turning eDP VDD off\n");
1255 1444
1256 pp = ironlake_get_pp_control(intel_dp); 1445 pp = ironlake_get_pp_control(intel_dp);
1257 pp &= ~EDP_FORCE_VDD; 1446 pp &= ~EDP_FORCE_VDD;
1258 1447
1259 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1448 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1260 pp_stat_reg = _pp_stat_reg(intel_dp); 1449 pp_stat_reg = _pp_stat_reg(intel_dp);
1261 1450
1262 I915_WRITE(pp_ctrl_reg, pp); 1451 I915_WRITE(pp_ctrl_reg, pp);
1263 POSTING_READ(pp_ctrl_reg); 1452 POSTING_READ(pp_ctrl_reg);
1264 1453
1265 /* Make sure sequencer is idle before allowing subsequent activity */ 1454 /* Make sure sequencer is idle before allowing subsequent activity */
1266 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 1455 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1267 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1456 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1268 1457
1269 if ((pp & POWER_TARGET_ON) == 0) 1458 if ((pp & POWER_TARGET_ON) == 0)
1270 intel_dp->last_power_cycle = jiffies; 1459 intel_dp->last_power_cycle = jiffies;
1271 1460
1272 power_domain = intel_display_port_power_domain(intel_encoder); 1461 power_domain = intel_display_port_power_domain(intel_encoder);
1273 intel_display_power_put(dev_priv, power_domain); 1462 intel_display_power_put(dev_priv, power_domain);
1274 }
1275} 1463}
1276 1464
1277static void edp_panel_vdd_work(struct work_struct *__work) 1465static void edp_panel_vdd_work(struct work_struct *__work)
1278{ 1466{
1279 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1467 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1280 struct intel_dp, panel_vdd_work); 1468 struct intel_dp, panel_vdd_work);
1281 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1282 1469
1283 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1470 pps_lock(intel_dp);
1284 edp_panel_vdd_off_sync(intel_dp); 1471 if (!intel_dp->want_panel_vdd)
1285 drm_modeset_unlock(&dev->mode_config.connection_mutex); 1472 edp_panel_vdd_off_sync(intel_dp);
1473 pps_unlock(intel_dp);
1286} 1474}
1287 1475
1288static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) 1476static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
@@ -1298,8 +1486,18 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1298 schedule_delayed_work(&intel_dp->panel_vdd_work, delay); 1486 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1299} 1487}
1300 1488
1489/*
1490 * Must be paired with edp_panel_vdd_on().
1491 * Must hold pps_mutex around the whole on/off sequence.
1492 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1493 */
1301static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1494static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1302{ 1495{
1496 struct drm_i915_private *dev_priv =
1497 intel_dp_to_dev(intel_dp)->dev_private;
1498
1499 lockdep_assert_held(&dev_priv->pps_mutex);
1500
1303 if (!is_edp(intel_dp)) 1501 if (!is_edp(intel_dp))
1304 return; 1502 return;
1305 1503
@@ -1313,6 +1511,22 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1313 edp_panel_vdd_schedule_off(intel_dp); 1511 edp_panel_vdd_schedule_off(intel_dp);
1314} 1512}
1315 1513
1514/*
1515 * Must be paired with intel_edp_panel_vdd_on().
1516 * Nested calls to these functions are not allowed since
1517 * we drop the lock. Caller must use some higher level
1518 * locking to prevent nested calls from other threads.
1519 */
1520static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1521{
1522 if (!is_edp(intel_dp))
1523 return;
1524
1525 pps_lock(intel_dp);
1526 edp_panel_vdd_off(intel_dp, sync);
1527 pps_unlock(intel_dp);
1528}
1529
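
A minimal usage sketch of the pairing rule spelled out in the comments above: external code brackets its AUX work with the locked wrappers and never nests them, while the lock-free edp_panel_vdd_on()/edp_panel_vdd_off() variants are reserved for callers that already hold pps_mutex. The AUX read below is only illustrative; the shape mirrors the probe helpers converted later in this patch.

	/* sketch, not part of the patch */
	intel_edp_panel_vdd_on(intel_dp);
	ret = intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV, buf, 1);
	intel_edp_panel_vdd_off(intel_dp, false);	/* false: let the delayed work turn VDD off */
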
1316void intel_edp_panel_on(struct intel_dp *intel_dp) 1530void intel_edp_panel_on(struct intel_dp *intel_dp)
1317{ 1531{
1318 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1532 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -1325,9 +1539,11 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
1325 1539
1326 DRM_DEBUG_KMS("Turn eDP power on\n"); 1540 DRM_DEBUG_KMS("Turn eDP power on\n");
1327 1541
1542 pps_lock(intel_dp);
1543
1328 if (edp_have_panel_power(intel_dp)) { 1544 if (edp_have_panel_power(intel_dp)) {
1329 DRM_DEBUG_KMS("eDP power already on\n"); 1545 DRM_DEBUG_KMS("eDP power already on\n");
1330 return; 1546 goto out;
1331 } 1547 }
1332 1548
1333 wait_panel_power_cycle(intel_dp); 1549 wait_panel_power_cycle(intel_dp);
@@ -1356,6 +1572,9 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
1356 I915_WRITE(pp_ctrl_reg, pp); 1572 I915_WRITE(pp_ctrl_reg, pp);
1357 POSTING_READ(pp_ctrl_reg); 1573 POSTING_READ(pp_ctrl_reg);
1358 } 1574 }
1575
1576 out:
1577 pps_unlock(intel_dp);
1359} 1578}
1360 1579
1361void intel_edp_panel_off(struct intel_dp *intel_dp) 1580void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -1373,6 +1592,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1373 1592
1374 DRM_DEBUG_KMS("Turn eDP power off\n"); 1593 DRM_DEBUG_KMS("Turn eDP power off\n");
1375 1594
1595 pps_lock(intel_dp);
1596
1376 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1597 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
1377 1598
1378 pp = ironlake_get_pp_control(intel_dp); 1599 pp = ironlake_get_pp_control(intel_dp);
@@ -1394,9 +1615,12 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1394 /* We got a reference when we enabled the VDD. */ 1615 /* We got a reference when we enabled the VDD. */
1395 power_domain = intel_display_port_power_domain(intel_encoder); 1616 power_domain = intel_display_port_power_domain(intel_encoder);
1396 intel_display_power_put(dev_priv, power_domain); 1617 intel_display_power_put(dev_priv, power_domain);
1618
1619 pps_unlock(intel_dp);
1397} 1620}
1398 1621
1399void intel_edp_backlight_on(struct intel_dp *intel_dp) 1622/* Enable backlight in the panel power control. */
1623static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
1400{ 1624{
1401 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1625 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1402 struct drm_device *dev = intel_dig_port->base.base.dev; 1626 struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1404,13 +1628,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1404 u32 pp; 1628 u32 pp;
1405 u32 pp_ctrl_reg; 1629 u32 pp_ctrl_reg;
1406 1630
1407 if (!is_edp(intel_dp))
1408 return;
1409
1410 DRM_DEBUG_KMS("\n");
1411
1412 intel_panel_enable_backlight(intel_dp->attached_connector);
1413
1414 /* 1631 /*
1415 * If we enable the backlight right away following a panel power 1632 * If we enable the backlight right away following a panel power
1416 * on, we may see slight flicker as the panel syncs with the eDP 1633 * on, we may see slight flicker as the panel syncs with the eDP
@@ -1418,6 +1635,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1418 * allowing it to appear. 1635 * allowing it to appear.
1419 */ 1636 */
1420 wait_backlight_on(intel_dp); 1637 wait_backlight_on(intel_dp);
1638
1639 pps_lock(intel_dp);
1640
1421 pp = ironlake_get_pp_control(intel_dp); 1641 pp = ironlake_get_pp_control(intel_dp);
1422 pp |= EDP_BLC_ENABLE; 1642 pp |= EDP_BLC_ENABLE;
1423 1643
@@ -1425,9 +1645,24 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
1425 1645
1426 I915_WRITE(pp_ctrl_reg, pp); 1646 I915_WRITE(pp_ctrl_reg, pp);
1427 POSTING_READ(pp_ctrl_reg); 1647 POSTING_READ(pp_ctrl_reg);
1648
1649 pps_unlock(intel_dp);
1428} 1650}
1429 1651
1430void intel_edp_backlight_off(struct intel_dp *intel_dp) 1652/* Enable backlight PWM and backlight PP control. */
1653void intel_edp_backlight_on(struct intel_dp *intel_dp)
1654{
1655 if (!is_edp(intel_dp))
1656 return;
1657
1658 DRM_DEBUG_KMS("\n");
1659
1660 intel_panel_enable_backlight(intel_dp->attached_connector);
1661 _intel_edp_backlight_on(intel_dp);
1662}
1663
1664/* Disable backlight in the panel power control. */
1665static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
1431{ 1666{
1432 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1667 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1433 struct drm_i915_private *dev_priv = dev->dev_private; 1668 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1437,7 +1672,8 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1437 if (!is_edp(intel_dp)) 1672 if (!is_edp(intel_dp))
1438 return; 1673 return;
1439 1674
1440 DRM_DEBUG_KMS("\n"); 1675 pps_lock(intel_dp);
1676
1441 pp = ironlake_get_pp_control(intel_dp); 1677 pp = ironlake_get_pp_control(intel_dp);
1442 pp &= ~EDP_BLC_ENABLE; 1678 pp &= ~EDP_BLC_ENABLE;
1443 1679
@@ -1445,13 +1681,51 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
1445 1681
1446 I915_WRITE(pp_ctrl_reg, pp); 1682 I915_WRITE(pp_ctrl_reg, pp);
1447 POSTING_READ(pp_ctrl_reg); 1683 POSTING_READ(pp_ctrl_reg);
1448 intel_dp->last_backlight_off = jiffies;
1449 1684
1685 pps_unlock(intel_dp);
1686
1687 intel_dp->last_backlight_off = jiffies;
1450 edp_wait_backlight_off(intel_dp); 1688 edp_wait_backlight_off(intel_dp);
1689}
1690
1691/* Disable backlight PP control and backlight PWM. */
1692void intel_edp_backlight_off(struct intel_dp *intel_dp)
1693{
1694 if (!is_edp(intel_dp))
1695 return;
1696
1697 DRM_DEBUG_KMS("\n");
1451 1698
1699 _intel_edp_backlight_off(intel_dp);
1452 intel_panel_disable_backlight(intel_dp->attached_connector); 1700 intel_panel_disable_backlight(intel_dp->attached_connector);
1453} 1701}
1454 1702
1703/*
1704 * Hook for controlling the panel power control backlight through the bl_power
1705 * sysfs attribute. Take care to handle multiple calls.
1706 */
1707static void intel_edp_backlight_power(struct intel_connector *connector,
1708 bool enable)
1709{
1710 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
1711 bool is_enabled;
1712
1713 pps_lock(intel_dp);
1714 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
1715 pps_unlock(intel_dp);
1716
1717 if (is_enabled == enable)
1718 return;
1719
1720 DRM_DEBUG_KMS("panel power control backlight %s\n",
1721 enable ? "enable" : "disable");
1722
1723 if (enable)
1724 _intel_edp_backlight_on(intel_dp);
1725 else
1726 _intel_edp_backlight_off(intel_dp);
1727}
1728
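
The intel_edp_backlight_power() hook above touches only the panel power control backlight bit (the PWM side stays with intel_panel_*_backlight()) and reads the current state first, so redundant calls are harmless. An illustrative caller, with the surrounding wiring assumed rather than taken from this hunk:

	/* sketch (assumed caller); only intel_edp_backlight_power() comes from this diff */
	intel_edp_backlight_power(connector, false);	/* blank: clears EDP_BLC_ENABLE */
	intel_edp_backlight_power(connector, false);	/* repeated call is a no-op */
	intel_edp_backlight_power(connector, true);	/* unblank: sets EDP_BLC_ENABLE again */
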
1455static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1729static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1456{ 1730{
1457 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1731 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -1515,8 +1789,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1515 if (mode != DRM_MODE_DPMS_ON) { 1789 if (mode != DRM_MODE_DPMS_ON) {
1516 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, 1790 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
1517 DP_SET_POWER_D3); 1791 DP_SET_POWER_D3);
1518 if (ret != 1)
1519 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1520 } else { 1792 } else {
1521 /* 1793 /*
1522 * When turning on, we need to retry for 1ms to give the sink 1794 * When turning on, we need to retry for 1ms to give the sink
@@ -1530,6 +1802,10 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1530 msleep(1); 1802 msleep(1);
1531 } 1803 }
1532 } 1804 }
1805
1806 if (ret != 1)
1807 DRM_DEBUG_KMS("failed to %s sink power state\n",
1808 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
1533} 1809}
1534 1810
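
The wake-up retry the comment refers to sits in the context elided from this hunk; its shape is roughly the sketch below, with the retry count an assumption rather than something shown here.

	/* sketch: retry DP_SET_POWER_D0 while the sink wakes up (count assumed) */
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D0);
		if (ret == 1)
			break;
		msleep(1);
	}
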
1535static bool intel_dp_get_hw_state(struct intel_encoder *encoder, 1811static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
@@ -1576,7 +1852,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1576 return true; 1852 return true;
1577 } 1853 }
1578 1854
1579 for_each_pipe(i) { 1855 for_each_pipe(dev_priv, i) {
1580 trans_dp = I915_READ(TRANS_DP_CTL(i)); 1856 trans_dp = I915_READ(TRANS_DP_CTL(i));
1581 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) { 1857 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1582 *pipe = i; 1858 *pipe = i;
@@ -2036,7 +2312,6 @@ void intel_edp_psr_init(struct drm_device *dev)
2036static void intel_disable_dp(struct intel_encoder *encoder) 2312static void intel_disable_dp(struct intel_encoder *encoder)
2037{ 2313{
2038 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2314 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2039 enum port port = dp_to_dig_port(intel_dp)->port;
2040 struct drm_device *dev = encoder->base.dev; 2315 struct drm_device *dev = encoder->base.dev;
2041 2316
2042 /* Make sure the panel is off before trying to change the mode. But also 2317 /* Make sure the panel is off before trying to change the mode. But also
@@ -2046,21 +2321,19 @@ static void intel_disable_dp(struct intel_encoder *encoder)
2046 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 2321 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2047 intel_edp_panel_off(intel_dp); 2322 intel_edp_panel_off(intel_dp);
2048 2323
2049 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */ 2324 /* disable the port before the pipe on g4x */
2050 if (!(port == PORT_A || IS_VALLEYVIEW(dev))) 2325 if (INTEL_INFO(dev)->gen < 5)
2051 intel_dp_link_down(intel_dp); 2326 intel_dp_link_down(intel_dp);
2052} 2327}
2053 2328
2054static void g4x_post_disable_dp(struct intel_encoder *encoder) 2329static void ilk_post_disable_dp(struct intel_encoder *encoder)
2055{ 2330{
2056 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2331 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2057 enum port port = dp_to_dig_port(intel_dp)->port; 2332 enum port port = dp_to_dig_port(intel_dp)->port;
2058 2333
2059 if (port != PORT_A)
2060 return;
2061
2062 intel_dp_link_down(intel_dp); 2334 intel_dp_link_down(intel_dp);
2063 ironlake_edp_pll_off(intel_dp); 2335 if (port == PORT_A)
2336 ironlake_edp_pll_off(intel_dp);
2064} 2337}
2065 2338
2066static void vlv_post_disable_dp(struct intel_encoder *encoder) 2339static void vlv_post_disable_dp(struct intel_encoder *encoder)
@@ -2106,6 +2379,104 @@ static void chv_post_disable_dp(struct intel_encoder *encoder)
2106 mutex_unlock(&dev_priv->dpio_lock); 2379 mutex_unlock(&dev_priv->dpio_lock);
2107} 2380}
2108 2381
2382static void
2383_intel_dp_set_link_train(struct intel_dp *intel_dp,
2384 uint32_t *DP,
2385 uint8_t dp_train_pat)
2386{
2387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2388 struct drm_device *dev = intel_dig_port->base.base.dev;
2389 struct drm_i915_private *dev_priv = dev->dev_private;
2390 enum port port = intel_dig_port->port;
2391
2392 if (HAS_DDI(dev)) {
2393 uint32_t temp = I915_READ(DP_TP_CTL(port));
2394
2395 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2396 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2397 else
2398 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2399
2400 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2401 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2402 case DP_TRAINING_PATTERN_DISABLE:
2403 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2404
2405 break;
2406 case DP_TRAINING_PATTERN_1:
2407 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2408 break;
2409 case DP_TRAINING_PATTERN_2:
2410 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2411 break;
2412 case DP_TRAINING_PATTERN_3:
2413 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2414 break;
2415 }
2416 I915_WRITE(DP_TP_CTL(port), temp);
2417
2418 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2419 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2420
2421 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2422 case DP_TRAINING_PATTERN_DISABLE:
2423 *DP |= DP_LINK_TRAIN_OFF_CPT;
2424 break;
2425 case DP_TRAINING_PATTERN_1:
2426 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2427 break;
2428 case DP_TRAINING_PATTERN_2:
2429 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2430 break;
2431 case DP_TRAINING_PATTERN_3:
2432 DRM_ERROR("DP training pattern 3 not supported\n");
2433 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2434 break;
2435 }
2436
2437 } else {
2438 if (IS_CHERRYVIEW(dev))
2439 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2440 else
2441 *DP &= ~DP_LINK_TRAIN_MASK;
2442
2443 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2444 case DP_TRAINING_PATTERN_DISABLE:
2445 *DP |= DP_LINK_TRAIN_OFF;
2446 break;
2447 case DP_TRAINING_PATTERN_1:
2448 *DP |= DP_LINK_TRAIN_PAT_1;
2449 break;
2450 case DP_TRAINING_PATTERN_2:
2451 *DP |= DP_LINK_TRAIN_PAT_2;
2452 break;
2453 case DP_TRAINING_PATTERN_3:
2454 if (IS_CHERRYVIEW(dev)) {
2455 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2456 } else {
2457 DRM_ERROR("DP training pattern 3 not supported\n");
2458 *DP |= DP_LINK_TRAIN_PAT_2;
2459 }
2460 break;
2461 }
2462 }
2463}
2464
2465static void intel_dp_enable_port(struct intel_dp *intel_dp)
2466{
2467 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2468 struct drm_i915_private *dev_priv = dev->dev_private;
2469
2470 intel_dp->DP |= DP_PORT_EN;
2471
2472 /* enable with pattern 1 (as per spec) */
2473 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2474 DP_TRAINING_PATTERN_1);
2475
2476 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2477 POSTING_READ(intel_dp->output_reg);
2478}
2479
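
intel_dp_enable_port() brings the port up already driving training pattern 1, as the comment notes. For orientation, a simplified sketch of how the training code then walks the shared helper through the rest of the sequence (a sketch of the flow, not code from this patch):

	/* clock recovery on pattern 1, equalization on pattern 2 (or 3), then normal video */
	intel_dp_set_link_train(intel_dp, &DP,
				DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
	/* ... adjust vswing/pre-emphasis until clock recovery locks ... */
	intel_dp_set_link_train(intel_dp, &DP,
				DP_TRAINING_PATTERN_2 | DP_LINK_SCRAMBLING_DISABLE);
	/* ... wait for channel equalization ... */
	intel_dp_set_link_train(intel_dp, &DP, DP_TRAINING_PATTERN_DISABLE);
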
2109static void intel_enable_dp(struct intel_encoder *encoder) 2480static void intel_enable_dp(struct intel_encoder *encoder)
2110{ 2481{
2111 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2482 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2116,11 +2487,12 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2116 if (WARN_ON(dp_reg & DP_PORT_EN)) 2487 if (WARN_ON(dp_reg & DP_PORT_EN))
2117 return; 2488 return;
2118 2489
2490 intel_dp_enable_port(intel_dp);
2119 intel_edp_panel_vdd_on(intel_dp); 2491 intel_edp_panel_vdd_on(intel_dp);
2492 intel_edp_panel_on(intel_dp);
2493 intel_edp_panel_vdd_off(intel_dp, true);
2120 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2494 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2121 intel_dp_start_link_train(intel_dp); 2495 intel_dp_start_link_train(intel_dp);
2122 intel_edp_panel_on(intel_dp);
2123 edp_panel_vdd_off(intel_dp, true);
2124 intel_dp_complete_link_train(intel_dp); 2496 intel_dp_complete_link_train(intel_dp);
2125 intel_dp_stop_link_train(intel_dp); 2497 intel_dp_stop_link_train(intel_dp);
2126} 2498}
@@ -2154,6 +2526,78 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2154 } 2526 }
2155} 2527}
2156 2528
2529static void vlv_steal_power_sequencer(struct drm_device *dev,
2530 enum pipe pipe)
2531{
2532 struct drm_i915_private *dev_priv = dev->dev_private;
2533 struct intel_encoder *encoder;
2534
2535 lockdep_assert_held(&dev_priv->pps_mutex);
2536
2537 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2538 base.head) {
2539 struct intel_dp *intel_dp;
2540 enum port port;
2541
2542 if (encoder->type != INTEL_OUTPUT_EDP)
2543 continue;
2544
2545 intel_dp = enc_to_intel_dp(&encoder->base);
2546 port = dp_to_dig_port(intel_dp)->port;
2547
2548 if (intel_dp->pps_pipe != pipe)
2549 continue;
2550
2551 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2552 pipe_name(pipe), port_name(port));
2553
2554 /* make sure vdd is off before we steal it */
2555 edp_panel_vdd_off_sync(intel_dp);
2556
2557 intel_dp->pps_pipe = INVALID_PIPE;
2558 }
2559}
2560
2561static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2562{
2563 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2564 struct intel_encoder *encoder = &intel_dig_port->base;
2565 struct drm_device *dev = encoder->base.dev;
2566 struct drm_i915_private *dev_priv = dev->dev_private;
2567 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2568 struct edp_power_seq power_seq;
2569
2570 lockdep_assert_held(&dev_priv->pps_mutex);
2571
2572 if (intel_dp->pps_pipe == crtc->pipe)
2573 return;
2574
2575 /*
2576 * If another power sequencer was being used on this
2577 * port previously make sure to turn off vdd there while
2578 * we still have control of it.
2579 */
2580 if (intel_dp->pps_pipe != INVALID_PIPE)
2581 edp_panel_vdd_off_sync(intel_dp);
2582
2583 /*
2584 * We may be stealing the power
2585 * sequencer from another port.
2586 */
2587 vlv_steal_power_sequencer(dev, crtc->pipe);
2588
2589 /* now it's all ours */
2590 intel_dp->pps_pipe = crtc->pipe;
2591
2592 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2593 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2594
2595 /* init power sequencer on this pipe and port */
2596 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
2597 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
2598 &power_seq);
2599}
2600
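
The two helpers above maintain the invariant that a pipe's panel power sequencer serves at most one eDP port, and that VDD is always dropped on a sequencer before its ownership changes. Condensed into one sketch (the real code splits this across the helpers, with callers holding pps_mutex as the lockdep asserts require):

	/* sketch of the claim order under pps_mutex */
	edp_panel_vdd_off_sync(intel_dp);		/* drop VDD on our previous sequencer */
	vlv_steal_power_sequencer(dev, crtc->pipe);	/* evict any other port using this pipe */
	intel_dp->pps_pipe = crtc->pipe;		/* claim it */
	intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
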
2157static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2601static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2158{ 2602{
2159 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2603 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@@ -2163,7 +2607,6 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2163 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 2607 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2164 enum dpio_channel port = vlv_dport_to_channel(dport); 2608 enum dpio_channel port = vlv_dport_to_channel(dport);
2165 int pipe = intel_crtc->pipe; 2609 int pipe = intel_crtc->pipe;
2166 struct edp_power_seq power_seq;
2167 u32 val; 2610 u32 val;
2168 2611
2169 mutex_lock(&dev_priv->dpio_lock); 2612 mutex_lock(&dev_priv->dpio_lock);
@@ -2182,10 +2625,9 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2182 mutex_unlock(&dev_priv->dpio_lock); 2625 mutex_unlock(&dev_priv->dpio_lock);
2183 2626
2184 if (is_edp(intel_dp)) { 2627 if (is_edp(intel_dp)) {
2185 /* init power sequencer on this pipe and port */ 2628 pps_lock(intel_dp);
2186 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2629 vlv_init_panel_power_sequencer(intel_dp);
2187 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2630 pps_unlock(intel_dp);
2188 &power_seq);
2189 } 2631 }
2190 2632
2191 intel_enable_dp(encoder); 2633 intel_enable_dp(encoder);
@@ -2229,7 +2671,6 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2229 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2671 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2230 struct drm_device *dev = encoder->base.dev; 2672 struct drm_device *dev = encoder->base.dev;
2231 struct drm_i915_private *dev_priv = dev->dev_private; 2673 struct drm_i915_private *dev_priv = dev->dev_private;
2232 struct edp_power_seq power_seq;
2233 struct intel_crtc *intel_crtc = 2674 struct intel_crtc *intel_crtc =
2234 to_intel_crtc(encoder->base.crtc); 2675 to_intel_crtc(encoder->base.crtc);
2235 enum dpio_channel ch = vlv_dport_to_channel(dport); 2676 enum dpio_channel ch = vlv_dport_to_channel(dport);
@@ -2275,10 +2716,9 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2275 mutex_unlock(&dev_priv->dpio_lock); 2716 mutex_unlock(&dev_priv->dpio_lock);
2276 2717
2277 if (is_edp(intel_dp)) { 2718 if (is_edp(intel_dp)) {
2278 /* init power sequencer on this pipe and port */ 2719 pps_lock(intel_dp);
2279 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2720 vlv_init_panel_power_sequencer(intel_dp);
2280 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2721 pps_unlock(intel_dp);
2281 &power_seq);
2282 } 2722 }
2283 2723
2284 intel_enable_dp(encoder); 2724 intel_enable_dp(encoder);
@@ -2297,6 +2737,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2297 enum pipe pipe = intel_crtc->pipe; 2737 enum pipe pipe = intel_crtc->pipe;
2298 u32 val; 2738 u32 val;
2299 2739
2740 intel_dp_prepare(encoder);
2741
2300 mutex_lock(&dev_priv->dpio_lock); 2742 mutex_lock(&dev_priv->dpio_lock);
2301 2743
2302 /* program left/right clock distribution */ 2744 /* program left/right clock distribution */
@@ -2395,13 +2837,13 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
2395 enum port port = dp_to_dig_port(intel_dp)->port; 2837 enum port port = dp_to_dig_port(intel_dp)->port;
2396 2838
2397 if (IS_VALLEYVIEW(dev)) 2839 if (IS_VALLEYVIEW(dev))
2398 return DP_TRAIN_VOLTAGE_SWING_1200; 2840 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2399 else if (IS_GEN7(dev) && port == PORT_A) 2841 else if (IS_GEN7(dev) && port == PORT_A)
2400 return DP_TRAIN_VOLTAGE_SWING_800; 2842 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2401 else if (HAS_PCH_CPT(dev) && port != PORT_A) 2843 else if (HAS_PCH_CPT(dev) && port != PORT_A)
2402 return DP_TRAIN_VOLTAGE_SWING_1200; 2844 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2403 else 2845 else
2404 return DP_TRAIN_VOLTAGE_SWING_800; 2846 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2405} 2847}
2406 2848
2407static uint8_t 2849static uint8_t
@@ -2412,49 +2854,49 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2412 2854
2413 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2855 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2414 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2856 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2415 case DP_TRAIN_VOLTAGE_SWING_400: 2857 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2416 return DP_TRAIN_PRE_EMPHASIS_9_5; 2858 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2417 case DP_TRAIN_VOLTAGE_SWING_600: 2859 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2418 return DP_TRAIN_PRE_EMPHASIS_6; 2860 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2419 case DP_TRAIN_VOLTAGE_SWING_800: 2861 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2420 return DP_TRAIN_PRE_EMPHASIS_3_5; 2862 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2421 case DP_TRAIN_VOLTAGE_SWING_1200: 2863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2422 default: 2864 default:
2423 return DP_TRAIN_PRE_EMPHASIS_0; 2865 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2424 } 2866 }
2425 } else if (IS_VALLEYVIEW(dev)) { 2867 } else if (IS_VALLEYVIEW(dev)) {
2426 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2868 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2427 case DP_TRAIN_VOLTAGE_SWING_400: 2869 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2428 return DP_TRAIN_PRE_EMPHASIS_9_5; 2870 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2429 case DP_TRAIN_VOLTAGE_SWING_600: 2871 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2430 return DP_TRAIN_PRE_EMPHASIS_6; 2872 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2431 case DP_TRAIN_VOLTAGE_SWING_800: 2873 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2432 return DP_TRAIN_PRE_EMPHASIS_3_5; 2874 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2433 case DP_TRAIN_VOLTAGE_SWING_1200: 2875 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2434 default: 2876 default:
2435 return DP_TRAIN_PRE_EMPHASIS_0; 2877 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2436 } 2878 }
2437 } else if (IS_GEN7(dev) && port == PORT_A) { 2879 } else if (IS_GEN7(dev) && port == PORT_A) {
2438 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2880 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2439 case DP_TRAIN_VOLTAGE_SWING_400: 2881 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2440 return DP_TRAIN_PRE_EMPHASIS_6; 2882 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2441 case DP_TRAIN_VOLTAGE_SWING_600: 2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2442 case DP_TRAIN_VOLTAGE_SWING_800: 2884 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2443 return DP_TRAIN_PRE_EMPHASIS_3_5; 2885 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2444 default: 2886 default:
2445 return DP_TRAIN_PRE_EMPHASIS_0; 2887 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2446 } 2888 }
2447 } else { 2889 } else {
2448 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2890 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2449 case DP_TRAIN_VOLTAGE_SWING_400: 2891 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2450 return DP_TRAIN_PRE_EMPHASIS_6; 2892 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2451 case DP_TRAIN_VOLTAGE_SWING_600: 2893 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2452 return DP_TRAIN_PRE_EMPHASIS_6; 2894 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2453 case DP_TRAIN_VOLTAGE_SWING_800: 2895 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2454 return DP_TRAIN_PRE_EMPHASIS_3_5; 2896 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2455 case DP_TRAIN_VOLTAGE_SWING_1200: 2897 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2456 default: 2898 default:
2457 return DP_TRAIN_PRE_EMPHASIS_0; 2899 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2458 } 2900 }
2459 } 2901 }
2460} 2902}
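
This hunk and the ones that follow are a mechanical rename of the DP training constants from millivolt/dB spellings to DP-spec level numbers. The correspondence, as implied by the rename itself:

	/*
	 * DP_TRAIN_VOLTAGE_SWING_LEVEL_0  (was ..._SWING_400,  0.4 V nominal)
	 * DP_TRAIN_VOLTAGE_SWING_LEVEL_1  (was ..._SWING_600,  0.6 V nominal)
	 * DP_TRAIN_VOLTAGE_SWING_LEVEL_2  (was ..._SWING_800,  0.8 V nominal)
	 * DP_TRAIN_VOLTAGE_SWING_LEVEL_3  (was ..._SWING_1200, 1.2 V nominal)
	 *
	 * DP_TRAIN_PRE_EMPH_LEVEL_0  (was DP_TRAIN_PRE_EMPHASIS_0,   0 dB)
	 * DP_TRAIN_PRE_EMPH_LEVEL_1  (was DP_TRAIN_PRE_EMPHASIS_3_5, 3.5 dB)
	 * DP_TRAIN_PRE_EMPH_LEVEL_2  (was DP_TRAIN_PRE_EMPHASIS_6,   6 dB)
	 * DP_TRAIN_PRE_EMPH_LEVEL_3  (was DP_TRAIN_PRE_EMPHASIS_9_5, 9.5 dB)
	 */
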
@@ -2473,22 +2915,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2473 int pipe = intel_crtc->pipe; 2915 int pipe = intel_crtc->pipe;
2474 2916
2475 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2917 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2476 case DP_TRAIN_PRE_EMPHASIS_0: 2918 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2477 preemph_reg_value = 0x0004000; 2919 preemph_reg_value = 0x0004000;
2478 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2920 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2479 case DP_TRAIN_VOLTAGE_SWING_400: 2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2480 demph_reg_value = 0x2B405555; 2922 demph_reg_value = 0x2B405555;
2481 uniqtranscale_reg_value = 0x552AB83A; 2923 uniqtranscale_reg_value = 0x552AB83A;
2482 break; 2924 break;
2483 case DP_TRAIN_VOLTAGE_SWING_600: 2925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2484 demph_reg_value = 0x2B404040; 2926 demph_reg_value = 0x2B404040;
2485 uniqtranscale_reg_value = 0x5548B83A; 2927 uniqtranscale_reg_value = 0x5548B83A;
2486 break; 2928 break;
2487 case DP_TRAIN_VOLTAGE_SWING_800: 2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2488 demph_reg_value = 0x2B245555; 2930 demph_reg_value = 0x2B245555;
2489 uniqtranscale_reg_value = 0x5560B83A; 2931 uniqtranscale_reg_value = 0x5560B83A;
2490 break; 2932 break;
2491 case DP_TRAIN_VOLTAGE_SWING_1200: 2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2492 demph_reg_value = 0x2B405555; 2934 demph_reg_value = 0x2B405555;
2493 uniqtranscale_reg_value = 0x5598DA3A; 2935 uniqtranscale_reg_value = 0x5598DA3A;
2494 break; 2936 break;
@@ -2496,18 +2938,18 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2496 return 0; 2938 return 0;
2497 } 2939 }
2498 break; 2940 break;
2499 case DP_TRAIN_PRE_EMPHASIS_3_5: 2941 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2500 preemph_reg_value = 0x0002000; 2942 preemph_reg_value = 0x0002000;
2501 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2943 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2502 case DP_TRAIN_VOLTAGE_SWING_400: 2944 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2503 demph_reg_value = 0x2B404040; 2945 demph_reg_value = 0x2B404040;
2504 uniqtranscale_reg_value = 0x5552B83A; 2946 uniqtranscale_reg_value = 0x5552B83A;
2505 break; 2947 break;
2506 case DP_TRAIN_VOLTAGE_SWING_600: 2948 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2507 demph_reg_value = 0x2B404848; 2949 demph_reg_value = 0x2B404848;
2508 uniqtranscale_reg_value = 0x5580B83A; 2950 uniqtranscale_reg_value = 0x5580B83A;
2509 break; 2951 break;
2510 case DP_TRAIN_VOLTAGE_SWING_800: 2952 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2511 demph_reg_value = 0x2B404040; 2953 demph_reg_value = 0x2B404040;
2512 uniqtranscale_reg_value = 0x55ADDA3A; 2954 uniqtranscale_reg_value = 0x55ADDA3A;
2513 break; 2955 break;
@@ -2515,14 +2957,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2515 return 0; 2957 return 0;
2516 } 2958 }
2517 break; 2959 break;
2518 case DP_TRAIN_PRE_EMPHASIS_6: 2960 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2519 preemph_reg_value = 0x0000000; 2961 preemph_reg_value = 0x0000000;
2520 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2962 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2521 case DP_TRAIN_VOLTAGE_SWING_400: 2963 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2522 demph_reg_value = 0x2B305555; 2964 demph_reg_value = 0x2B305555;
2523 uniqtranscale_reg_value = 0x5570B83A; 2965 uniqtranscale_reg_value = 0x5570B83A;
2524 break; 2966 break;
2525 case DP_TRAIN_VOLTAGE_SWING_600: 2967 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2526 demph_reg_value = 0x2B2B4040; 2968 demph_reg_value = 0x2B2B4040;
2527 uniqtranscale_reg_value = 0x55ADDA3A; 2969 uniqtranscale_reg_value = 0x55ADDA3A;
2528 break; 2970 break;
@@ -2530,10 +2972,10 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2530 return 0; 2972 return 0;
2531 } 2973 }
2532 break; 2974 break;
2533 case DP_TRAIN_PRE_EMPHASIS_9_5: 2975 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2534 preemph_reg_value = 0x0006000; 2976 preemph_reg_value = 0x0006000;
2535 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2977 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2536 case DP_TRAIN_VOLTAGE_SWING_400: 2978 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2537 demph_reg_value = 0x1B405555; 2979 demph_reg_value = 0x1B405555;
2538 uniqtranscale_reg_value = 0x55ADDA3A; 2980 uniqtranscale_reg_value = 0x55ADDA3A;
2539 break; 2981 break;
@@ -2572,21 +3014,21 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2572 int i; 3014 int i;
2573 3015
2574 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3016 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2575 case DP_TRAIN_PRE_EMPHASIS_0: 3017 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2576 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3018 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2577 case DP_TRAIN_VOLTAGE_SWING_400: 3019 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2578 deemph_reg_value = 128; 3020 deemph_reg_value = 128;
2579 margin_reg_value = 52; 3021 margin_reg_value = 52;
2580 break; 3022 break;
2581 case DP_TRAIN_VOLTAGE_SWING_600: 3023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2582 deemph_reg_value = 128; 3024 deemph_reg_value = 128;
2583 margin_reg_value = 77; 3025 margin_reg_value = 77;
2584 break; 3026 break;
2585 case DP_TRAIN_VOLTAGE_SWING_800: 3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2586 deemph_reg_value = 128; 3028 deemph_reg_value = 128;
2587 margin_reg_value = 102; 3029 margin_reg_value = 102;
2588 break; 3030 break;
2589 case DP_TRAIN_VOLTAGE_SWING_1200: 3031 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2590 deemph_reg_value = 128; 3032 deemph_reg_value = 128;
2591 margin_reg_value = 154; 3033 margin_reg_value = 154;
2592 /* FIXME extra to set for 1200 */ 3034 /* FIXME extra to set for 1200 */
@@ -2595,17 +3037,17 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2595 return 0; 3037 return 0;
2596 } 3038 }
2597 break; 3039 break;
2598 case DP_TRAIN_PRE_EMPHASIS_3_5: 3040 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2599 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3041 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2600 case DP_TRAIN_VOLTAGE_SWING_400: 3042 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2601 deemph_reg_value = 85; 3043 deemph_reg_value = 85;
2602 margin_reg_value = 78; 3044 margin_reg_value = 78;
2603 break; 3045 break;
2604 case DP_TRAIN_VOLTAGE_SWING_600: 3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2605 deemph_reg_value = 85; 3047 deemph_reg_value = 85;
2606 margin_reg_value = 116; 3048 margin_reg_value = 116;
2607 break; 3049 break;
2608 case DP_TRAIN_VOLTAGE_SWING_800: 3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2609 deemph_reg_value = 85; 3051 deemph_reg_value = 85;
2610 margin_reg_value = 154; 3052 margin_reg_value = 154;
2611 break; 3053 break;
@@ -2613,13 +3055,13 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2613 return 0; 3055 return 0;
2614 } 3056 }
2615 break; 3057 break;
2616 case DP_TRAIN_PRE_EMPHASIS_6: 3058 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2617 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3059 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2618 case DP_TRAIN_VOLTAGE_SWING_400: 3060 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2619 deemph_reg_value = 64; 3061 deemph_reg_value = 64;
2620 margin_reg_value = 104; 3062 margin_reg_value = 104;
2621 break; 3063 break;
2622 case DP_TRAIN_VOLTAGE_SWING_600: 3064 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2623 deemph_reg_value = 64; 3065 deemph_reg_value = 64;
2624 margin_reg_value = 154; 3066 margin_reg_value = 154;
2625 break; 3067 break;
@@ -2627,9 +3069,9 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2627 return 0; 3069 return 0;
2628 } 3070 }
2629 break; 3071 break;
2630 case DP_TRAIN_PRE_EMPHASIS_9_5: 3072 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2631 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3073 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2632 case DP_TRAIN_VOLTAGE_SWING_400: 3074 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2633 deemph_reg_value = 43; 3075 deemph_reg_value = 43;
2634 margin_reg_value = 154; 3076 margin_reg_value = 154;
2635 break; 3077 break;
@@ -2663,8 +3105,8 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2663 /* Program swing margin */ 3105 /* Program swing margin */
2664 for (i = 0; i < 4; i++) { 3106 for (i = 0; i < 4; i++) {
2665 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 3107 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2666 val &= ~DPIO_SWING_MARGIN_MASK; 3108 val &= ~DPIO_SWING_MARGIN000_MASK;
2667 val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT; 3109 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
2668 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 3110 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2669 } 3111 }
2670 3112
@@ -2676,9 +3118,9 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
2676 } 3118 }
2677 3119
2678 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK) 3120 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
2679 == DP_TRAIN_PRE_EMPHASIS_0) && 3121 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
2680 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK) 3122 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
2681 == DP_TRAIN_VOLTAGE_SWING_1200)) { 3123 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
2682 3124
2683 /* 3125 /*
2684 * The document said it needs to set bit 27 for ch0 and bit 26 3126 * The document said it needs to set bit 27 for ch0 and bit 26
@@ -2757,32 +3199,32 @@ intel_gen4_signal_levels(uint8_t train_set)
2757 uint32_t signal_levels = 0; 3199 uint32_t signal_levels = 0;
2758 3200
2759 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 3201 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
2760 case DP_TRAIN_VOLTAGE_SWING_400: 3202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2761 default: 3203 default:
2762 signal_levels |= DP_VOLTAGE_0_4; 3204 signal_levels |= DP_VOLTAGE_0_4;
2763 break; 3205 break;
2764 case DP_TRAIN_VOLTAGE_SWING_600: 3206 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2765 signal_levels |= DP_VOLTAGE_0_6; 3207 signal_levels |= DP_VOLTAGE_0_6;
2766 break; 3208 break;
2767 case DP_TRAIN_VOLTAGE_SWING_800: 3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2768 signal_levels |= DP_VOLTAGE_0_8; 3210 signal_levels |= DP_VOLTAGE_0_8;
2769 break; 3211 break;
2770 case DP_TRAIN_VOLTAGE_SWING_1200: 3212 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2771 signal_levels |= DP_VOLTAGE_1_2; 3213 signal_levels |= DP_VOLTAGE_1_2;
2772 break; 3214 break;
2773 } 3215 }
2774 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3216 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
2775 case DP_TRAIN_PRE_EMPHASIS_0: 3217 case DP_TRAIN_PRE_EMPH_LEVEL_0:
2776 default: 3218 default:
2777 signal_levels |= DP_PRE_EMPHASIS_0; 3219 signal_levels |= DP_PRE_EMPHASIS_0;
2778 break; 3220 break;
2779 case DP_TRAIN_PRE_EMPHASIS_3_5: 3221 case DP_TRAIN_PRE_EMPH_LEVEL_1:
2780 signal_levels |= DP_PRE_EMPHASIS_3_5; 3222 signal_levels |= DP_PRE_EMPHASIS_3_5;
2781 break; 3223 break;
2782 case DP_TRAIN_PRE_EMPHASIS_6: 3224 case DP_TRAIN_PRE_EMPH_LEVEL_2:
2783 signal_levels |= DP_PRE_EMPHASIS_6; 3225 signal_levels |= DP_PRE_EMPHASIS_6;
2784 break; 3226 break;
2785 case DP_TRAIN_PRE_EMPHASIS_9_5: 3227 case DP_TRAIN_PRE_EMPH_LEVEL_3:
2786 signal_levels |= DP_PRE_EMPHASIS_9_5; 3228 signal_levels |= DP_PRE_EMPHASIS_9_5;
2787 break; 3229 break;
2788 } 3230 }
@@ -2796,19 +3238,19 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
2796 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3238 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2797 DP_TRAIN_PRE_EMPHASIS_MASK); 3239 DP_TRAIN_PRE_EMPHASIS_MASK);
2798 switch (signal_levels) { 3240 switch (signal_levels) {
2799 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2800 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 3242 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2801 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 3243 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2802 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 3244 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2803 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 3245 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
2804 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 3246 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2805 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 3247 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2806 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 3248 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
2807 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2808 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 3250 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2809 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 3251 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
2810 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2811 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2812 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 3254 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
2813 default: 3255 default:
2814 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3256 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
@@ -2824,21 +3266,21 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
2824 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3266 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2825 DP_TRAIN_PRE_EMPHASIS_MASK); 3267 DP_TRAIN_PRE_EMPHASIS_MASK);
2826 switch (signal_levels) { 3268 switch (signal_levels) {
2827 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2828 return EDP_LINK_TRAIN_400MV_0DB_IVB; 3270 return EDP_LINK_TRAIN_400MV_0DB_IVB;
2829 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2830 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 3272 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2831 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 3273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2832 return EDP_LINK_TRAIN_400MV_6DB_IVB; 3274 return EDP_LINK_TRAIN_400MV_6DB_IVB;
2833 3275
2834 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2835 return EDP_LINK_TRAIN_600MV_0DB_IVB; 3277 return EDP_LINK_TRAIN_600MV_0DB_IVB;
2836 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2837 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 3279 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2838 3280
2839 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2840 return EDP_LINK_TRAIN_800MV_0DB_IVB; 3282 return EDP_LINK_TRAIN_800MV_0DB_IVB;
2841 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2842 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 3284 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2843 3285
2844 default: 3286 default:
@@ -2855,30 +3297,30 @@ intel_hsw_signal_levels(uint8_t train_set)
2855 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 3297 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2856 DP_TRAIN_PRE_EMPHASIS_MASK); 3298 DP_TRAIN_PRE_EMPHASIS_MASK);
2857 switch (signal_levels) { 3299 switch (signal_levels) {
2858 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2859 return DDI_BUF_EMP_400MV_0DB_HSW; 3301 return DDI_BUF_TRANS_SELECT(0);
2860 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2861 return DDI_BUF_EMP_400MV_3_5DB_HSW; 3303 return DDI_BUF_TRANS_SELECT(1);
2862 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2863 return DDI_BUF_EMP_400MV_6DB_HSW; 3305 return DDI_BUF_TRANS_SELECT(2);
2864 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: 3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
2865 return DDI_BUF_EMP_400MV_9_5DB_HSW; 3307 return DDI_BUF_TRANS_SELECT(3);
2866 3308
2867 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2868 return DDI_BUF_EMP_600MV_0DB_HSW; 3310 return DDI_BUF_TRANS_SELECT(4);
2869 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2870 return DDI_BUF_EMP_600MV_3_5DB_HSW; 3312 return DDI_BUF_TRANS_SELECT(5);
2871 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
2872 return DDI_BUF_EMP_600MV_6DB_HSW; 3314 return DDI_BUF_TRANS_SELECT(6);
2873 3315
2874 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
2875 return DDI_BUF_EMP_800MV_0DB_HSW; 3317 return DDI_BUF_TRANS_SELECT(7);
2876 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
2877 return DDI_BUF_EMP_800MV_3_5DB_HSW; 3319 return DDI_BUF_TRANS_SELECT(8);
2878 default: 3320 default:
2879 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 3321 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2880 "0x%x\n", signal_levels); 3322 "0x%x\n", signal_levels);
2881 return DDI_BUF_EMP_400MV_0DB_HSW; 3323 return DDI_BUF_TRANS_SELECT(0);
2882 } 3324 }
2883} 3325}
2884 3326
@@ -2925,74 +3367,10 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
2925 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3367 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2926 struct drm_device *dev = intel_dig_port->base.base.dev; 3368 struct drm_device *dev = intel_dig_port->base.base.dev;
2927 struct drm_i915_private *dev_priv = dev->dev_private; 3369 struct drm_i915_private *dev_priv = dev->dev_private;
2928 enum port port = intel_dig_port->port;
2929 uint8_t buf[sizeof(intel_dp->train_set) + 1]; 3370 uint8_t buf[sizeof(intel_dp->train_set) + 1];
2930 int ret, len; 3371 int ret, len;
2931 3372
2932 if (HAS_DDI(dev)) { 3373 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
2933 uint32_t temp = I915_READ(DP_TP_CTL(port));
2934
2935 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2936 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2937 else
2938 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2939
2940 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2941 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2942 case DP_TRAINING_PATTERN_DISABLE:
2943 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2944
2945 break;
2946 case DP_TRAINING_PATTERN_1:
2947 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2948 break;
2949 case DP_TRAINING_PATTERN_2:
2950 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2951 break;
2952 case DP_TRAINING_PATTERN_3:
2953 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2954 break;
2955 }
2956 I915_WRITE(DP_TP_CTL(port), temp);
2957
2958 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2959 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2960
2961 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2962 case DP_TRAINING_PATTERN_DISABLE:
2963 *DP |= DP_LINK_TRAIN_OFF_CPT;
2964 break;
2965 case DP_TRAINING_PATTERN_1:
2966 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2967 break;
2968 case DP_TRAINING_PATTERN_2:
2969 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2970 break;
2971 case DP_TRAINING_PATTERN_3:
2972 DRM_ERROR("DP training pattern 3 not supported\n");
2973 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2974 break;
2975 }
2976
2977 } else {
2978 *DP &= ~DP_LINK_TRAIN_MASK;
2979
2980 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2981 case DP_TRAINING_PATTERN_DISABLE:
2982 *DP |= DP_LINK_TRAIN_OFF;
2983 break;
2984 case DP_TRAINING_PATTERN_1:
2985 *DP |= DP_LINK_TRAIN_PAT_1;
2986 break;
2987 case DP_TRAINING_PATTERN_2:
2988 *DP |= DP_LINK_TRAIN_PAT_2;
2989 break;
2990 case DP_TRAINING_PATTERN_3:
2991 DRM_ERROR("DP training pattern 3 not supported\n");
2992 *DP |= DP_LINK_TRAIN_PAT_2;
2993 break;
2994 }
2995 }
2996 3374
2997 I915_WRITE(intel_dp->output_reg, *DP); 3375 I915_WRITE(intel_dp->output_reg, *DP);
2998 POSTING_READ(intel_dp->output_reg); 3376 POSTING_READ(intel_dp->output_reg);
@@ -3276,7 +3654,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
3276 DP &= ~DP_LINK_TRAIN_MASK_CPT; 3654 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3277 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 3655 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
3278 } else { 3656 } else {
3279 DP &= ~DP_LINK_TRAIN_MASK; 3657 if (IS_CHERRYVIEW(dev))
3658 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3659 else
3660 DP &= ~DP_LINK_TRAIN_MASK;
3280 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 3661 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
3281 } 3662 }
3282 POSTING_READ(intel_dp->output_reg); 3663 POSTING_READ(intel_dp->output_reg);
@@ -3322,15 +3703,11 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3322 struct drm_device *dev = dig_port->base.base.dev; 3703 struct drm_device *dev = dig_port->base.base.dev;
3323 struct drm_i915_private *dev_priv = dev->dev_private; 3704 struct drm_i915_private *dev_priv = dev->dev_private;
3324 3705
3325 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
3326
3327 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, 3706 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3328 sizeof(intel_dp->dpcd)) < 0) 3707 sizeof(intel_dp->dpcd)) < 0)
3329 return false; /* aux transfer failed */ 3708 return false; /* aux transfer failed */
3330 3709
3331 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), 3710 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3332 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
3333 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
3334 3711
3335 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 3712 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3336 return false; /* DPCD not present */ 3713 return false; /* DPCD not present */
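
The %*ph conversion above drops the hand-rolled hex_dump_to_buffer() in favour of the kernel's built-in small-buffer hex specifier, which takes an int length followed by the buffer pointer and is meant for small buffers (on the order of 64 bytes). The idiom in isolation:

	u8 dpcd[15];
	/* prints e.g. "DPCD: 12 14 c4 ..." with no temporary buffer */
	DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dpcd), dpcd);
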
@@ -3351,7 +3728,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3351 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && 3728 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3352 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { 3729 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
3353 intel_dp->use_tps3 = true; 3730 intel_dp->use_tps3 = true;
3354 DRM_DEBUG_KMS("Displayport TPS3 supported"); 3731 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3355 } else 3732 } else
3356 intel_dp->use_tps3 = false; 3733 intel_dp->use_tps3 = false;
3357 3734
@@ -3388,7 +3765,7 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
3388 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 3765 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3389 buf[0], buf[1], buf[2]); 3766 buf[0], buf[1], buf[2]);
3390 3767
3391 edp_panel_vdd_off(intel_dp, false); 3768 intel_edp_panel_vdd_off(intel_dp, false);
3392} 3769}
3393 3770
3394static bool 3771static bool
@@ -3402,7 +3779,7 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
3402 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 3779 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3403 return false; 3780 return false;
3404 3781
3405 _edp_panel_vdd_on(intel_dp); 3782 intel_edp_panel_vdd_on(intel_dp);
3406 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) { 3783 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3407 if (buf[0] & DP_MST_CAP) { 3784 if (buf[0] & DP_MST_CAP) {
3408 DRM_DEBUG_KMS("Sink is MST capable\n"); 3785 DRM_DEBUG_KMS("Sink is MST capable\n");
@@ -3412,7 +3789,7 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
3412 intel_dp->is_mst = false; 3789 intel_dp->is_mst = false;
3413 } 3790 }
3414 } 3791 }
3415 edp_panel_vdd_off(intel_dp, false); 3792 intel_edp_panel_vdd_off(intel_dp, false);
3416 3793
3417 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 3794 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3418 return intel_dp->is_mst; 3795 return intel_dp->is_mst;
@@ -3427,21 +3804,21 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3427 u8 buf[1]; 3804 u8 buf[1];
3428 3805
3429 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0) 3806 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
3430 return -EAGAIN; 3807 return -EIO;
3431 3808
3432 if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) 3809 if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
3433 return -ENOTTY; 3810 return -ENOTTY;
3434 3811
3435 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 3812 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3436 DP_TEST_SINK_START) < 0) 3813 DP_TEST_SINK_START) < 0)
3437 return -EAGAIN; 3814 return -EIO;
3438 3815
3439 /* Wait 2 vblanks to be sure we will have the correct CRC value */ 3816 /* Wait 2 vblanks to be sure we will have the correct CRC value */
3440 intel_wait_for_vblank(dev, intel_crtc->pipe); 3817 intel_wait_for_vblank(dev, intel_crtc->pipe);
3441 intel_wait_for_vblank(dev, intel_crtc->pipe); 3818 intel_wait_for_vblank(dev, intel_crtc->pipe);
3442 3819
3443 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) 3820 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3444 return -EAGAIN; 3821 return -EIO;
3445 3822
3446 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0); 3823 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
3447 return 0; 3824 return 0;
@@ -3644,20 +4021,24 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
3644} 4021}
3645 4022
3646static enum drm_connector_status 4023static enum drm_connector_status
4024edp_detect(struct intel_dp *intel_dp)
4025{
4026 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4027 enum drm_connector_status status;
4028
4029 status = intel_panel_detect(dev);
4030 if (status == connector_status_unknown)
4031 status = connector_status_connected;
4032
4033 return status;
4034}
4035
4036static enum drm_connector_status
3647ironlake_dp_detect(struct intel_dp *intel_dp) 4037ironlake_dp_detect(struct intel_dp *intel_dp)
3648{ 4038{
3649 struct drm_device *dev = intel_dp_to_dev(intel_dp); 4039 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3650 struct drm_i915_private *dev_priv = dev->dev_private; 4040 struct drm_i915_private *dev_priv = dev->dev_private;
3651 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4041 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3652 enum drm_connector_status status;
3653
3654 /* Can't disconnect eDP, but you can close the lid... */
3655 if (is_edp(intel_dp)) {
3656 status = intel_panel_detect(dev);
3657 if (status == connector_status_unknown)
3658 status = connector_status_connected;
3659 return status;
3660 }
3661 4042
3662 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 4043 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
3663 return connector_status_disconnected; 4044 return connector_status_disconnected;
@@ -3733,9 +4114,9 @@ g4x_dp_detect(struct intel_dp *intel_dp)
3733} 4114}
3734 4115
3735static struct edid * 4116static struct edid *
3736intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 4117intel_dp_get_edid(struct intel_dp *intel_dp)
3737{ 4118{
3738 struct intel_connector *intel_connector = to_intel_connector(connector); 4119 struct intel_connector *intel_connector = intel_dp->attached_connector;
3739 4120
3740 /* use cached edid if we have one */ 4121 /* use cached edid if we have one */
3741 if (intel_connector->edid) { 4122 if (intel_connector->edid) {
@@ -3744,27 +4125,55 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
3744 return NULL; 4125 return NULL;
3745 4126
3746 return drm_edid_duplicate(intel_connector->edid); 4127 return drm_edid_duplicate(intel_connector->edid);
3747 } 4128 } else
4129 return drm_get_edid(&intel_connector->base,
4130 &intel_dp->aux.ddc);
4131}
4132
4133static void
4134intel_dp_set_edid(struct intel_dp *intel_dp)
4135{
4136 struct intel_connector *intel_connector = intel_dp->attached_connector;
4137 struct edid *edid;
4138
4139 edid = intel_dp_get_edid(intel_dp);
4140 intel_connector->detect_edid = edid;
3748 4141
3749 return drm_get_edid(connector, adapter); 4142 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4143 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4144 else
4145 intel_dp->has_audio = drm_detect_monitor_audio(edid);
3750} 4146}
3751 4147
3752static int 4148static void
3753intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) 4149intel_dp_unset_edid(struct intel_dp *intel_dp)
3754{ 4150{
3755 struct intel_connector *intel_connector = to_intel_connector(connector); 4151 struct intel_connector *intel_connector = intel_dp->attached_connector;
3756 4152
3757 /* use cached edid if we have one */ 4153 kfree(intel_connector->detect_edid);
3758 if (intel_connector->edid) { 4154 intel_connector->detect_edid = NULL;
3759 /* invalid edid */
3760 if (IS_ERR(intel_connector->edid))
3761 return 0;
3762 4155
3763 return intel_connector_update_modes(connector, 4156 intel_dp->has_audio = false;
3764 intel_connector->edid); 4157}
3765 } 4158
4159static enum intel_display_power_domain
4160intel_dp_power_get(struct intel_dp *dp)
4161{
4162 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4163 enum intel_display_power_domain power_domain;
4164
4165 power_domain = intel_display_port_power_domain(encoder);
4166 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4167
4168 return power_domain;
4169}
3766 4170
3767 return intel_ddc_get_modes(connector, adapter); 4171static void
4172intel_dp_power_put(struct intel_dp *dp,
4173 enum intel_display_power_domain power_domain)
4174{
4175 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4176 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
3768} 4177}
3769 4178
3770static enum drm_connector_status 4179static enum drm_connector_status
@@ -3774,33 +4183,30 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3774 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4183 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3775 struct intel_encoder *intel_encoder = &intel_dig_port->base; 4184 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3776 struct drm_device *dev = connector->dev; 4185 struct drm_device *dev = connector->dev;
3777 struct drm_i915_private *dev_priv = dev->dev_private;
3778 enum drm_connector_status status; 4186 enum drm_connector_status status;
3779 enum intel_display_power_domain power_domain; 4187 enum intel_display_power_domain power_domain;
3780 struct edid *edid = NULL;
3781 bool ret; 4188 bool ret;
3782 4189
3783 power_domain = intel_display_port_power_domain(intel_encoder);
3784 intel_display_power_get(dev_priv, power_domain);
3785
3786 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 4190 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3787 connector->base.id, connector->name); 4191 connector->base.id, connector->name);
4192 intel_dp_unset_edid(intel_dp);
3788 4193
3789 if (intel_dp->is_mst) { 4194 if (intel_dp->is_mst) {
3790 /* MST devices are disconnected from a monitor POV */ 4195 /* MST devices are disconnected from a monitor POV */
3791 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4196 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3792 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4197 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3793 status = connector_status_disconnected; 4198 return connector_status_disconnected;
3794 goto out;
3795 } 4199 }
3796 4200
3797 intel_dp->has_audio = false; 4201 power_domain = intel_dp_power_get(intel_dp);
3798 4202
3799 if (HAS_PCH_SPLIT(dev)) 4203 /* Can't disconnect eDP, but you can close the lid... */
4204 if (is_edp(intel_dp))
4205 status = edp_detect(intel_dp);
4206 else if (HAS_PCH_SPLIT(dev))
3800 status = ironlake_dp_detect(intel_dp); 4207 status = ironlake_dp_detect(intel_dp);
3801 else 4208 else
3802 status = g4x_dp_detect(intel_dp); 4209 status = g4x_dp_detect(intel_dp);
3803
3804 if (status != connector_status_connected) 4210 if (status != connector_status_connected)
3805 goto out; 4211 goto out;
3806 4212
@@ -3816,82 +4222,78 @@ intel_dp_detect(struct drm_connector *connector, bool force)
3816 goto out; 4222 goto out;
3817 } 4223 }
3818 4224
3819 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 4225 intel_dp_set_edid(intel_dp);
3820 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
3821 } else {
3822 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3823 if (edid) {
3824 intel_dp->has_audio = drm_detect_monitor_audio(edid);
3825 kfree(edid);
3826 }
3827 }
3828 4226
3829 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4227 if (intel_encoder->type != INTEL_OUTPUT_EDP)
3830 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4228 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3831 status = connector_status_connected; 4229 status = connector_status_connected;
3832 4230
3833out: 4231out:
3834 intel_display_power_put(dev_priv, power_domain); 4232 intel_dp_power_put(intel_dp, power_domain);
3835 return status; 4233 return status;
3836} 4234}
3837 4235
3838static int intel_dp_get_modes(struct drm_connector *connector) 4236static void
4237intel_dp_force(struct drm_connector *connector)
3839{ 4238{
3840 struct intel_dp *intel_dp = intel_attached_dp(connector); 4239 struct intel_dp *intel_dp = intel_attached_dp(connector);
3841 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4240 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
3842 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3843 struct intel_connector *intel_connector = to_intel_connector(connector);
3844 struct drm_device *dev = connector->dev;
3845 struct drm_i915_private *dev_priv = dev->dev_private;
3846 enum intel_display_power_domain power_domain; 4241 enum intel_display_power_domain power_domain;
3847 int ret;
3848 4242
3849 /* We should parse the EDID data and find out if it has an audio sink 4243 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3850 */ 4244 connector->base.id, connector->name);
4245 intel_dp_unset_edid(intel_dp);
3851 4246
3852 power_domain = intel_display_port_power_domain(intel_encoder); 4247 if (connector->status != connector_status_connected)
3853 intel_display_power_get(dev_priv, power_domain); 4248 return;
3854 4249
3855 ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc); 4250 power_domain = intel_dp_power_get(intel_dp);
3856 intel_display_power_put(dev_priv, power_domain); 4251
3857 if (ret) 4252 intel_dp_set_edid(intel_dp);
3858 return ret; 4253
4254 intel_dp_power_put(intel_dp, power_domain);
4255
4256 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4257 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4258}
4259
4260static int intel_dp_get_modes(struct drm_connector *connector)
4261{
4262 struct intel_connector *intel_connector = to_intel_connector(connector);
4263 struct edid *edid;
4264
4265 edid = intel_connector->detect_edid;
4266 if (edid) {
4267 int ret = intel_connector_update_modes(connector, edid);
4268 if (ret)
4269 return ret;
4270 }
3859 4271
3860 /* if eDP has no EDID, fall back to fixed mode */ 4272 /* if eDP has no EDID, fall back to fixed mode */
3861 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 4273 if (is_edp(intel_attached_dp(connector)) &&
4274 intel_connector->panel.fixed_mode) {
3862 struct drm_display_mode *mode; 4275 struct drm_display_mode *mode;
3863 mode = drm_mode_duplicate(dev, 4276
4277 mode = drm_mode_duplicate(connector->dev,
3864 intel_connector->panel.fixed_mode); 4278 intel_connector->panel.fixed_mode);
3865 if (mode) { 4279 if (mode) {
3866 drm_mode_probed_add(connector, mode); 4280 drm_mode_probed_add(connector, mode);
3867 return 1; 4281 return 1;
3868 } 4282 }
3869 } 4283 }
4284
3870 return 0; 4285 return 0;
3871} 4286}
3872 4287
3873static bool 4288static bool
3874intel_dp_detect_audio(struct drm_connector *connector) 4289intel_dp_detect_audio(struct drm_connector *connector)
3875{ 4290{
3876 struct intel_dp *intel_dp = intel_attached_dp(connector);
3877 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3878 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3879 struct drm_device *dev = connector->dev;
3880 struct drm_i915_private *dev_priv = dev->dev_private;
3881 enum intel_display_power_domain power_domain;
3882 struct edid *edid;
3883 bool has_audio = false; 4291 bool has_audio = false;
4292 struct edid *edid;
3884 4293
3885 power_domain = intel_display_port_power_domain(intel_encoder); 4294 edid = to_intel_connector(connector)->detect_edid;
3886 intel_display_power_get(dev_priv, power_domain); 4295 if (edid)
3887
3888 edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
3889 if (edid) {
3890 has_audio = drm_detect_monitor_audio(edid); 4296 has_audio = drm_detect_monitor_audio(edid);
3891 kfree(edid);
3892 }
3893
3894 intel_display_power_put(dev_priv, power_domain);
3895 4297
3896 return has_audio; 4298 return has_audio;
3897} 4299}
@@ -3989,6 +4391,8 @@ intel_dp_connector_destroy(struct drm_connector *connector)
3989{ 4391{
3990 struct intel_connector *intel_connector = to_intel_connector(connector); 4392 struct intel_connector *intel_connector = to_intel_connector(connector);
3991 4393
4394 kfree(intel_connector->detect_edid);
4395
3992 if (!IS_ERR_OR_NULL(intel_connector->edid)) 4396 if (!IS_ERR_OR_NULL(intel_connector->edid))
3993 kfree(intel_connector->edid); 4397 kfree(intel_connector->edid);
3994 4398
@@ -4005,16 +4409,20 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4005{ 4409{
4006 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 4410 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4007 struct intel_dp *intel_dp = &intel_dig_port->dp; 4411 struct intel_dp *intel_dp = &intel_dig_port->dp;
4008 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4009 4412
4010 drm_dp_aux_unregister(&intel_dp->aux); 4413 drm_dp_aux_unregister(&intel_dp->aux);
4011 intel_dp_mst_encoder_cleanup(intel_dig_port); 4414 intel_dp_mst_encoder_cleanup(intel_dig_port);
4012 drm_encoder_cleanup(encoder); 4415 drm_encoder_cleanup(encoder);
4013 if (is_edp(intel_dp)) { 4416 if (is_edp(intel_dp)) {
4014 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 4417 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4015 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 4418 /*
4419 * vdd might still be enabled due to the delayed vdd off.
4420 * Make sure vdd is actually turned off here.
4421 */
4422 pps_lock(intel_dp);
4016 edp_panel_vdd_off_sync(intel_dp); 4423 edp_panel_vdd_off_sync(intel_dp);
4017 drm_modeset_unlock(&dev->mode_config.connection_mutex); 4424 pps_unlock(intel_dp);
4425
4018 if (intel_dp->edp_notifier.notifier_call) { 4426 if (intel_dp->edp_notifier.notifier_call) {
4019 unregister_reboot_notifier(&intel_dp->edp_notifier); 4427 unregister_reboot_notifier(&intel_dp->edp_notifier);
4020 intel_dp->edp_notifier.notifier_call = NULL; 4428 intel_dp->edp_notifier.notifier_call = NULL;
@@ -4030,7 +4438,13 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4030 if (!is_edp(intel_dp)) 4438 if (!is_edp(intel_dp))
4031 return; 4439 return;
4032 4440
4441 /*
4442 * vdd might still be enabled due to the delayed vdd off.
4443 * Make sure vdd is actually turned off here.
4444 */
4445 pps_lock(intel_dp);
4033 edp_panel_vdd_off_sync(intel_dp); 4446 edp_panel_vdd_off_sync(intel_dp);
4447 pps_unlock(intel_dp);
4034} 4448}
4035 4449
4036static void intel_dp_encoder_reset(struct drm_encoder *encoder) 4450static void intel_dp_encoder_reset(struct drm_encoder *encoder)
@@ -4041,6 +4455,7 @@ static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4041static const struct drm_connector_funcs intel_dp_connector_funcs = { 4455static const struct drm_connector_funcs intel_dp_connector_funcs = {
4042 .dpms = intel_connector_dpms, 4456 .dpms = intel_connector_dpms,
4043 .detect = intel_dp_detect, 4457 .detect = intel_dp_detect,
4458 .force = intel_dp_force,
4044 .fill_modes = drm_helper_probe_single_connector_modes, 4459 .fill_modes = drm_helper_probe_single_connector_modes,
4045 .set_property = intel_dp_set_property, 4460 .set_property = intel_dp_set_property,
4046 .destroy = intel_dp_connector_destroy, 4461 .destroy = intel_dp_connector_destroy,
@@ -4076,7 +4491,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4076 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) 4491 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4077 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; 4492 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4078 4493
4079 DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, 4494 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4495 port_name(intel_dig_port->port),
4080 long_hpd ? "long" : "short"); 4496 long_hpd ? "long" : "short");
4081 4497
4082 power_domain = intel_display_port_power_domain(intel_encoder); 4498 power_domain = intel_display_port_power_domain(intel_encoder);
@@ -4216,6 +4632,8 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4216 u32 pp_on, pp_off, pp_div, pp; 4632 u32 pp_on, pp_off, pp_div, pp;
4217 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; 4633 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4218 4634
4635 lockdep_assert_held(&dev_priv->pps_mutex);
4636
4219 if (HAS_PCH_SPLIT(dev)) { 4637 if (HAS_PCH_SPLIT(dev)) {
4220 pp_ctrl_reg = PCH_PP_CONTROL; 4638 pp_ctrl_reg = PCH_PP_CONTROL;
4221 pp_on_reg = PCH_PP_ON_DELAYS; 4639 pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4315,6 +4733,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4315 u32 pp_on, pp_off, pp_div, port_sel = 0; 4733 u32 pp_on, pp_off, pp_div, port_sel = 0;
4316 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); 4734 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4317 int pp_on_reg, pp_off_reg, pp_div_reg; 4735 int pp_on_reg, pp_off_reg, pp_div_reg;
4736 enum port port = dp_to_dig_port(intel_dp)->port;
4737
4738 lockdep_assert_held(&dev_priv->pps_mutex);
4318 4739
4319 if (HAS_PCH_SPLIT(dev)) { 4740 if (HAS_PCH_SPLIT(dev)) {
4320 pp_on_reg = PCH_PP_ON_DELAYS; 4741 pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4349,12 +4770,9 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4349 /* Haswell doesn't have any port selection bits for the panel 4770 /* Haswell doesn't have any port selection bits for the panel
4350 * power sequencer any more. */ 4771 * power sequencer any more. */
4351 if (IS_VALLEYVIEW(dev)) { 4772 if (IS_VALLEYVIEW(dev)) {
4352 if (dp_to_dig_port(intel_dp)->port == PORT_B) 4773 port_sel = PANEL_PORT_SELECT_VLV(port);
4353 port_sel = PANEL_PORT_SELECT_DPB_VLV;
4354 else
4355 port_sel = PANEL_PORT_SELECT_DPC_VLV;
4356 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 4774 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
4357 if (dp_to_dig_port(intel_dp)->port == PORT_A) 4775 if (port == PORT_A)
4358 port_sel = PANEL_PORT_SELECT_DPA; 4776 port_sel = PANEL_PORT_SELECT_DPA;
4359 else 4777 else
4360 port_sel = PANEL_PORT_SELECT_DPD; 4778 port_sel = PANEL_PORT_SELECT_DPD;
@@ -4438,7 +4856,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4438 val = I915_READ(reg); 4856 val = I915_READ(reg);
4439 if (index > DRRS_HIGH_RR) { 4857 if (index > DRRS_HIGH_RR) {
4440 val |= PIPECONF_EDP_RR_MODE_SWITCH; 4858 val |= PIPECONF_EDP_RR_MODE_SWITCH;
4441 intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2); 4859 intel_dp_set_m_n(intel_crtc);
4442 } else { 4860 } else {
4443 val &= ~PIPECONF_EDP_RR_MODE_SWITCH; 4861 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
4444 } 4862 }
@@ -4478,7 +4896,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4478 } 4896 }
4479 4897
4480 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { 4898 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4481 DRM_INFO("VBT doesn't support DRRS\n"); 4899 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4482 return NULL; 4900 return NULL;
4483 } 4901 }
4484 4902
@@ -4486,7 +4904,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4486 (dev, fixed_mode, connector); 4904 (dev, fixed_mode, connector);
4487 4905
4488 if (!downclock_mode) { 4906 if (!downclock_mode) {
4489 DRM_INFO("DRRS not supported\n"); 4907 DRM_DEBUG_KMS("DRRS not supported\n");
4490 return NULL; 4908 return NULL;
4491 } 4909 }
4492 4910
@@ -4497,7 +4915,7 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4497 intel_dp->drrs_state.type = dev_priv->vbt.drrs_type; 4915 intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
4498 4916
4499 intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR; 4917 intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
4500 DRM_INFO("seamless DRRS supported for eDP panel.\n"); 4918 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4501 return downclock_mode; 4919 return downclock_mode;
4502} 4920}
4503 4921
@@ -4512,8 +4930,11 @@ void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4512 return; 4930 return;
4513 4931
4514 intel_dp = enc_to_intel_dp(&intel_encoder->base); 4932 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4933
4934 pps_lock(intel_dp);
4935
4515 if (!edp_have_panel_vdd(intel_dp)) 4936 if (!edp_have_panel_vdd(intel_dp))
4516 return; 4937 goto out;
4517 /* 4938 /*
4518 * The VDD bit needs a power domain reference, so if the bit is 4939 * The VDD bit needs a power domain reference, so if the bit is
4519 * already enabled when we boot or resume, grab this reference and 4940 * already enabled when we boot or resume, grab this reference and
@@ -4525,6 +4946,8 @@ void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4525 intel_display_power_get(dev_priv, power_domain); 4946 intel_display_power_get(dev_priv, power_domain);
4526 4947
4527 edp_panel_vdd_schedule_off(intel_dp); 4948 edp_panel_vdd_schedule_off(intel_dp);
4949 out:
4950 pps_unlock(intel_dp);
4528} 4951}
4529 4952
4530static bool intel_edp_init_connector(struct intel_dp *intel_dp, 4953static bool intel_edp_init_connector(struct intel_dp *intel_dp,
@@ -4552,7 +4975,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4552 /* Cache DPCD and EDID for edp. */ 4975 /* Cache DPCD and EDID for edp. */
4553 intel_edp_panel_vdd_on(intel_dp); 4976 intel_edp_panel_vdd_on(intel_dp);
4554 has_dpcd = intel_dp_get_dpcd(intel_dp); 4977 has_dpcd = intel_dp_get_dpcd(intel_dp);
4555 edp_panel_vdd_off(intel_dp, false); 4978 intel_edp_panel_vdd_off(intel_dp, false);
4556 4979
4557 if (has_dpcd) { 4980 if (has_dpcd) {
4558 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 4981 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -4566,7 +4989,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4566 } 4989 }
4567 4990
4568 /* We now know it's not a ghost, init power sequence regs. */ 4991 /* We now know it's not a ghost, init power sequence regs. */
4992 pps_lock(intel_dp);
4569 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); 4993 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
4994 pps_unlock(intel_dp);
4570 4995
4571 mutex_lock(&dev->mode_config.mutex); 4996 mutex_lock(&dev->mode_config.mutex);
4572 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 4997 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
@@ -4610,6 +5035,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4610 } 5035 }
4611 5036
4612 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 5037 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5038 intel_connector->panel.backlight_power = intel_edp_backlight_power;
4613 intel_panel_setup_backlight(connector); 5039 intel_panel_setup_backlight(connector);
4614 5040
4615 return true; 5041 return true;
@@ -4628,6 +5054,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4628 struct edp_power_seq power_seq = { 0 }; 5054 struct edp_power_seq power_seq = { 0 };
4629 int type; 5055 int type;
4630 5056
5057 intel_dp->pps_pipe = INVALID_PIPE;
5058
4631 /* intel_dp vfuncs */ 5059 /* intel_dp vfuncs */
4632 if (IS_VALLEYVIEW(dev)) 5060 if (IS_VALLEYVIEW(dev))
4633 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; 5061 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
@@ -4698,8 +5126,15 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4698 } 5126 }
4699 5127
4700 if (is_edp(intel_dp)) { 5128 if (is_edp(intel_dp)) {
4701 intel_dp_init_panel_power_timestamps(intel_dp); 5129 pps_lock(intel_dp);
4702 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 5130 if (IS_VALLEYVIEW(dev)) {
5131 vlv_initial_power_sequencer_setup(intel_dp);
5132 } else {
5133 intel_dp_init_panel_power_timestamps(intel_dp);
5134 intel_dp_init_panel_power_sequencer(dev, intel_dp,
5135 &power_seq);
5136 }
5137 pps_unlock(intel_dp);
4703 } 5138 }
4704 5139
4705 intel_dp_aux_init(intel_dp, intel_connector); 5140 intel_dp_aux_init(intel_dp, intel_connector);
@@ -4707,7 +5142,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4707 /* init MST on ports that can support it */ 5142 /* init MST on ports that can support it */
4708 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 5143 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4709 if (port == PORT_B || port == PORT_C || port == PORT_D) { 5144 if (port == PORT_B || port == PORT_C || port == PORT_D) {
4710 intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); 5145 intel_dp_mst_encoder_init(intel_dig_port,
5146 intel_connector->base.base.id);
4711 } 5147 }
4712 } 5148 }
4713 5149
@@ -4715,9 +5151,13 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
4715 drm_dp_aux_unregister(&intel_dp->aux); 5151 drm_dp_aux_unregister(&intel_dp->aux);
4716 if (is_edp(intel_dp)) { 5152 if (is_edp(intel_dp)) {
4717 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5153 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4718 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 5154 /*
5155 * vdd might still be enabled due to the delayed vdd off.
5156 * Make sure vdd is actually turned off here.
5157 */
5158 pps_lock(intel_dp);
4719 edp_panel_vdd_off_sync(intel_dp); 5159 edp_panel_vdd_off_sync(intel_dp);
4720 drm_modeset_unlock(&dev->mode_config.connection_mutex); 5160 pps_unlock(intel_dp);
4721 } 5161 }
4722 drm_connector_unregister(connector); 5162 drm_connector_unregister(connector);
4723 drm_connector_cleanup(connector); 5163 drm_connector_cleanup(connector);
@@ -4781,7 +5221,8 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
4781 } else { 5221 } else {
4782 intel_encoder->pre_enable = g4x_pre_enable_dp; 5222 intel_encoder->pre_enable = g4x_pre_enable_dp;
4783 intel_encoder->enable = g4x_enable_dp; 5223 intel_encoder->enable = g4x_enable_dp;
4784 intel_encoder->post_disable = g4x_post_disable_dp; 5224 if (INTEL_INFO(dev)->gen >= 5)
5225 intel_encoder->post_disable = ilk_post_disable_dp;
4785 } 5226 }
4786 5227
4787 intel_dig_port->port = port; 5228 intel_dig_port->port = port;
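The intel_dp.c rework above moves EDID handling into intel_dp_set_edid()/intel_dp_unset_edid(): .detect reads the EDID once, caches it in connector->detect_edid, and .get_modes plus the audio check reuse that cache instead of issuing another DDC transfer (with .force and connector destroy dropping it). A rough user-space sketch of that caching pattern; every name below is an illustrative stand-in, not a driver symbol:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for the driver's EDID handling. */
struct edid { unsigned char data[128]; };

struct connector {
        struct edid *detect_edid;       /* filled by detect(), freed by unset/destroy */
        bool has_audio;
};

static struct edid *read_edid_over_ddc(void)
{
        /* Pretend this is the slow DDC transfer. */
        struct edid *e = malloc(sizeof(*e));

        if (e)
                memset(e->data, 0, sizeof(e->data));
        return e;
}

static void set_edid(struct connector *c)
{
        c->detect_edid = read_edid_over_ddc();
        /* Stand-in for drm_detect_monitor_audio() on the cached EDID. */
        c->has_audio = c->detect_edid != NULL;
}

static void unset_edid(struct connector *c)
{
        free(c->detect_edid);
        c->detect_edid = NULL;
        c->has_audio = false;
}

static int detect(struct connector *c)
{
        unset_edid(c);          /* drop any stale cache first */
        set_edid(c);            /* one DDC read per detect cycle */
        return c->detect_edid ? 1 : 0;
}

static int get_modes(struct connector *c)
{
        /* Reuses the cached EDID; no second DDC transfer. */
        return c->detect_edid ? 1 : 0;
}

int main(void)
{
        struct connector c = { 0 };
        int connected = detect(&c);

        printf("connected=%d modes=%d audio=%d\n",
               connected, get_modes(&c), c.has_audio);
        unset_edid(&c);
        return 0;
}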
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b8c8bbd8e5f9..07ce04683c30 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -25,6 +25,7 @@
25#ifndef __INTEL_DRV_H__ 25#ifndef __INTEL_DRV_H__
26#define __INTEL_DRV_H__ 26#define __INTEL_DRV_H__
27 27
28#include <linux/async.h>
28#include <linux/i2c.h> 29#include <linux/i2c.h>
29#include <linux/hdmi.h> 30#include <linux/hdmi.h>
30#include <drm/i915_drm.h> 31#include <drm/i915_drm.h>
@@ -179,6 +180,8 @@ struct intel_panel {
179 bool active_low_pwm; 180 bool active_low_pwm;
180 struct backlight_device *device; 181 struct backlight_device *device;
181 } backlight; 182 } backlight;
183
184 void (*backlight_power)(struct intel_connector *, bool enable);
182}; 185};
183 186
184struct intel_connector { 187struct intel_connector {
@@ -211,6 +214,7 @@ struct intel_connector {
211 214
212 /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ 215 /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
213 struct edid *edid; 216 struct edid *edid;
217 struct edid *detect_edid;
214 218
215 /* since POLL and HPD connectors may use the same HPD line keep the native 219 /* since POLL and HPD connectors may use the same HPD line keep the native
216 state of connector->polled in case hotplug storm detection changes it */ 220 state of connector->polled in case hotplug storm detection changes it */
@@ -330,6 +334,7 @@ struct intel_crtc_config {
330 334
331 /* m2_n2 for eDP downclock */ 335 /* m2_n2 for eDP downclock */
332 struct intel_link_m_n dp_m2_n2; 336 struct intel_link_m_n dp_m2_n2;
337 bool has_drrs;
333 338
334 /* 339 /*
335 * Frequency the dpll for the port should run at. Differs from the 340
@@ -410,6 +415,7 @@ struct intel_crtc {
410 uint32_t cursor_addr; 415 uint32_t cursor_addr;
411 int16_t cursor_width, cursor_height; 416 int16_t cursor_width, cursor_height;
412 uint32_t cursor_cntl; 417 uint32_t cursor_cntl;
418 uint32_t cursor_size;
413 uint32_t cursor_base; 419 uint32_t cursor_base;
414 420
415 struct intel_plane_config plane_config; 421 struct intel_plane_config plane_config;
@@ -430,8 +436,6 @@ struct intel_crtc {
430 struct intel_pipe_wm active; 436 struct intel_pipe_wm active;
431 } wm; 437 } wm;
432 438
433 wait_queue_head_t vbl_wait;
434
435 int scanline_offset; 439 int scanline_offset;
436 struct intel_mmio_flip mmio_flip; 440 struct intel_mmio_flip mmio_flip;
437}; 441};
@@ -455,6 +459,7 @@ struct intel_plane {
455 unsigned int crtc_w, crtc_h; 459 unsigned int crtc_w, crtc_h;
456 uint32_t src_x, src_y; 460 uint32_t src_x, src_y;
457 uint32_t src_w, src_h; 461 uint32_t src_w, src_h;
462 unsigned int rotation;
458 463
459 /* Since we need to change the watermarks before/after 464 /* Since we need to change the watermarks before/after
460 * enabling/disabling the planes, we need to store the parameters here 465 * enabling/disabling the planes, we need to store the parameters here
@@ -565,6 +570,12 @@ struct intel_dp {
565 570
566 struct notifier_block edp_notifier; 571 struct notifier_block edp_notifier;
567 572
573 /*
574 * Pipe whose power sequencer is currently locked into
575 * this port. Only relevant on VLV/CHV.
576 */
577 enum pipe pps_pipe;
578
568 bool use_tps3; 579 bool use_tps3;
569 bool can_mst; /* this port supports mst */ 580 bool can_mst; /* this port supports mst */
570 bool is_mst; 581 bool is_mst;
@@ -663,6 +674,10 @@ struct intel_unpin_work {
663#define INTEL_FLIP_COMPLETE 2 674#define INTEL_FLIP_COMPLETE 2
664 u32 flip_count; 675 u32 flip_count;
665 u32 gtt_offset; 676 u32 gtt_offset;
677 struct intel_engine_cs *flip_queued_ring;
678 u32 flip_queued_seqno;
679 int flip_queued_vblank;
680 int flip_ready_vblank;
666 bool enable_stall_check; 681 bool enable_stall_check;
667}; 682};
668 683
@@ -827,7 +842,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
827enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 842enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
828 enum pipe pipe); 843 enum pipe pipe);
829void intel_wait_for_vblank(struct drm_device *dev, int pipe); 844void intel_wait_for_vblank(struct drm_device *dev, int pipe);
830void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
831int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 845int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
832void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 846void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
833 struct intel_digital_port *dport); 847 struct intel_digital_port *dport);
@@ -848,6 +862,7 @@ __intel_framebuffer_create(struct drm_device *dev,
848void intel_prepare_page_flip(struct drm_device *dev, int plane); 862void intel_prepare_page_flip(struct drm_device *dev, int plane);
849void intel_finish_page_flip(struct drm_device *dev, int pipe); 863void intel_finish_page_flip(struct drm_device *dev, int pipe);
850void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 864void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
865void intel_check_page_flip(struct drm_device *dev, int pipe);
851 866
852/* shared dpll functions */ 867/* shared dpll functions */
853struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); 868struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@@ -882,6 +897,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
882void hsw_disable_pc8(struct drm_i915_private *dev_priv); 897void hsw_disable_pc8(struct drm_i915_private *dev_priv);
883void intel_dp_get_m_n(struct intel_crtc *crtc, 898void intel_dp_get_m_n(struct intel_crtc *crtc,
884 struct intel_crtc_config *pipe_config); 899 struct intel_crtc_config *pipe_config);
900void intel_dp_set_m_n(struct intel_crtc *crtc);
885int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 901int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
886void 902void
887ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, 903ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
@@ -896,7 +912,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
896 struct intel_crtc_config *pipe_config); 912 struct intel_crtc_config *pipe_config);
897int intel_format_to_fourcc(int format); 913int intel_format_to_fourcc(int format);
898void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); 914void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
899 915void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
900 916
901/* intel_dp.c */ 917/* intel_dp.c */
902void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); 918void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -935,6 +951,7 @@ void intel_dp_mst_suspend(struct drm_device *dev);
935void intel_dp_mst_resume(struct drm_device *dev); 951void intel_dp_mst_resume(struct drm_device *dev);
936int intel_dp_max_link_bw(struct intel_dp *intel_dp); 952int intel_dp_max_link_bw(struct intel_dp *intel_dp);
937void intel_dp_hot_plug(struct intel_encoder *intel_encoder); 953void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
954void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
938/* intel_dp_mst.c */ 955/* intel_dp_mst.c */
939int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 956int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
940void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); 957void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -949,9 +966,9 @@ void intel_dvo_init(struct drm_device *dev);
949/* legacy fbdev emulation in intel_fbdev.c */ 966/* legacy fbdev emulation in intel_fbdev.c */
950#ifdef CONFIG_DRM_I915_FBDEV 967#ifdef CONFIG_DRM_I915_FBDEV
951extern int intel_fbdev_init(struct drm_device *dev); 968extern int intel_fbdev_init(struct drm_device *dev);
952extern void intel_fbdev_initial_config(struct drm_device *dev); 969extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
953extern void intel_fbdev_fini(struct drm_device *dev); 970extern void intel_fbdev_fini(struct drm_device *dev);
954extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); 971extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
955extern void intel_fbdev_output_poll_changed(struct drm_device *dev); 972extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
956extern void intel_fbdev_restore_mode(struct drm_device *dev); 973extern void intel_fbdev_restore_mode(struct drm_device *dev);
957#else 974#else
@@ -960,7 +977,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
960 return 0; 977 return 0;
961} 978}
962 979
963static inline void intel_fbdev_initial_config(struct drm_device *dev) 980static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
964{ 981{
965} 982}
966 983
@@ -968,7 +985,7 @@ static inline void intel_fbdev_fini(struct drm_device *dev)
968{ 985{
969} 986}
970 987
971static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state) 988static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
972{ 989{
973} 990}
974 991
@@ -1091,7 +1108,10 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
1091int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); 1108int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
1092void intel_flush_primary_plane(struct drm_i915_private *dev_priv, 1109void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
1093 enum plane plane); 1110 enum plane plane);
1094void intel_plane_restore(struct drm_plane *plane); 1111int intel_plane_set_property(struct drm_plane *plane,
1112 struct drm_property *prop,
1113 uint64_t val);
1114int intel_plane_restore(struct drm_plane *plane);
1095void intel_plane_disable(struct drm_plane *plane); 1115void intel_plane_disable(struct drm_plane *plane);
1096int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1116int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1097 struct drm_file *file_priv); 1117 struct drm_file *file_priv);
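The intel_panel.backlight_power hook added above lets eDP register a port-specific backlight power handler (intel_edp_backlight_power) that the generic panel code calls when present. A tiny illustration of that callback indirection, again with stand-in names only:

#include <stdbool.h>
#include <stdio.h>

struct panel {
        /* Optional, set only by connectors that need special handling. */
        void (*backlight_power)(struct panel *panel, bool enable);
};

static void edp_backlight_power(struct panel *panel, bool enable)
{
        /* In the driver this would go through the eDP power sequencer. */
        printf("eDP backlight %s\n", enable ? "on" : "off");
}

static void panel_enable_backlight(struct panel *panel)
{
        if (panel->backlight_power)
                panel->backlight_power(panel, true);
}

int main(void)
{
        struct panel edp = { .backlight_power = edp_backlight_power };
        struct panel plain = { 0 };

        panel_enable_backlight(&edp);   /* goes through the hook */
        panel_enable_backlight(&plain); /* no hook registered, nothing special */
        return 0;
}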
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 670c29a7b5dd..5bd9e09ad3c5 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -184,7 +184,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
184 184
185 /* update the hw state for DPLL */ 185 /* update the hw state for DPLL */
186 intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV | 186 intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
187 DPLL_REFA_CLK_ENABLE_VLV; 187 DPLL_REFA_CLK_ENABLE_VLV;
188 188
189 tmp = I915_READ(DSPCLK_GATE_D); 189 tmp = I915_READ(DSPCLK_GATE_D);
190 tmp |= DPOUNIT_CLOCK_GATE_DISABLE; 190 tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
@@ -259,8 +259,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
259 temp = I915_READ(MIPI_CTRL(pipe)); 259 temp = I915_READ(MIPI_CTRL(pipe));
260 temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; 260 temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
261 I915_WRITE(MIPI_CTRL(pipe), temp | 261 I915_WRITE(MIPI_CTRL(pipe), temp |
262 intel_dsi->escape_clk_div << 262 intel_dsi->escape_clk_div <<
263 ESCAPE_CLOCK_DIVIDER_SHIFT); 263 ESCAPE_CLOCK_DIVIDER_SHIFT);
264 264
265 I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP); 265 I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP);
266 266
@@ -297,7 +297,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
297 usleep_range(2000, 2500); 297 usleep_range(2000, 2500);
298 298
299 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) 299 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
300 == 0x00000), 30)) 300 == 0x00000), 30))
301 DRM_ERROR("DSI LP not going Low\n"); 301 DRM_ERROR("DSI LP not going Low\n");
302 302
303 val = I915_READ(MIPI_PORT_CTRL(pipe)); 303 val = I915_READ(MIPI_PORT_CTRL(pipe));
@@ -423,9 +423,11 @@ static u16 txclkesc(u32 divider, unsigned int us)
423} 423}
424 424
425/* return pixels in terms of txbyteclkhs */ 425/* return pixels in terms of txbyteclkhs */
426static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count) 426static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
427 u16 burst_mode_ratio)
427{ 428{
428 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count); 429 return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
430 8 * 100), lane_count);
429} 431}
430 432
431static void set_dsi_timings(struct drm_encoder *encoder, 433static void set_dsi_timings(struct drm_encoder *encoder,
@@ -451,10 +453,12 @@ static void set_dsi_timings(struct drm_encoder *encoder,
451 vbp = mode->vtotal - mode->vsync_end; 453 vbp = mode->vtotal - mode->vsync_end;
452 454
453 /* horizontal values are in terms of high speed byte clock */ 455 /* horizontal values are in terms of high speed byte clock */
454 hactive = txbyteclkhs(hactive, bpp, lane_count); 456 hactive = txbyteclkhs(hactive, bpp, lane_count,
455 hfp = txbyteclkhs(hfp, bpp, lane_count); 457 intel_dsi->burst_mode_ratio);
456 hsync = txbyteclkhs(hsync, bpp, lane_count); 458 hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
457 hbp = txbyteclkhs(hbp, bpp, lane_count); 459 hsync = txbyteclkhs(hsync, bpp, lane_count,
460 intel_dsi->burst_mode_ratio);
461 hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
458 462
459 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive); 463 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
460 I915_WRITE(MIPI_HFP_COUNT(pipe), hfp); 464 I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
@@ -541,12 +545,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
541 intel_dsi->video_mode_format == VIDEO_MODE_BURST) { 545 intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
542 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), 546 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
543 txbyteclkhs(adjusted_mode->htotal, bpp, 547 txbyteclkhs(adjusted_mode->htotal, bpp,
544 intel_dsi->lane_count) + 1); 548 intel_dsi->lane_count,
549 intel_dsi->burst_mode_ratio) + 1);
545 } else { 550 } else {
546 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), 551 I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
547 txbyteclkhs(adjusted_mode->vtotal * 552 txbyteclkhs(adjusted_mode->vtotal *
548 adjusted_mode->htotal, 553 adjusted_mode->htotal,
549 bpp, intel_dsi->lane_count) + 1); 554 bpp, intel_dsi->lane_count,
555 intel_dsi->burst_mode_ratio) + 1);
550 } 556 }
551 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout); 557 I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
552 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val); 558 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
@@ -576,7 +582,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
576 * XXX: write MIPI_STOP_STATE_STALL? 582 * XXX: write MIPI_STOP_STATE_STALL?
577 */ 583 */
578 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 584 I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe),
579 intel_dsi->hs_to_lp_count); 585 intel_dsi->hs_to_lp_count);
580 586
581 /* XXX: low power clock equivalence in terms of byte clock. the number 587 /* XXX: low power clock equivalence in terms of byte clock. the number
582 * of byte clocks occupied in one low power clock. based on txbyteclkhs 588 * of byte clocks occupied in one low power clock. based on txbyteclkhs
@@ -601,10 +607,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
601 * 64 like 1366 x 768. Enable RANDOM resolution support for such 607 * 64 like 1366 x 768. Enable RANDOM resolution support for such
602 * panels by default */ 608 * panels by default */
603 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), 609 I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
604 intel_dsi->video_frmt_cfg_bits | 610 intel_dsi->video_frmt_cfg_bits |
605 intel_dsi->video_mode_format | 611 intel_dsi->video_mode_format |
606 IP_TG_CONFIG | 612 IP_TG_CONFIG |
607 RANDOM_DPI_DISPLAY_RESOLUTION); 613 RANDOM_DPI_DISPLAY_RESOLUTION);
608} 614}
609 615
610static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder) 616static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
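txbyteclkhs() above now folds the burst-mode ratio (scaled by 100, so 100 means no burst) into the pixel-to-byte-clock conversion. A standalone check of that arithmetic using the same DIV_ROUND_UP rounding; the input values are invented for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pixels -> txbyteclkhs; burst_mode_ratio is scaled by 100 (100 == no burst). */
static unsigned int txbyteclkhs(unsigned int pixels, unsigned int bpp,
                                unsigned int lane_count,
                                unsigned int burst_mode_ratio)
{
        return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, 8 * 100),
                            lane_count);
}

int main(void)
{
        /* 1366 active pixels, RGB888, 4 lanes: no burst vs. a 125% burst ratio. */
        printf("%u %u\n",
               txbyteclkhs(1366, 24, 4, 100),
               txbyteclkhs(1366, 24, 4, 125));
        return 0;
}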
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index fd51867fd0d3..657eb5c1b9d8 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -116,6 +116,8 @@ struct intel_dsi {
116 u16 clk_hs_to_lp_count; 116 u16 clk_hs_to_lp_count;
117 117
118 u16 init_count; 118 u16 init_count;
119 u32 pclk;
120 u16 burst_mode_ratio;
119 121
120 /* all delays in ms */ 122 /* all delays in ms */
121 u16 backlight_off_delay; 123 u16 backlight_off_delay;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 7f1430ac8543..f4767fd2ebeb 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -430,7 +430,7 @@ void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
430 u32 mask; 430 u32 mask;
431 431
432 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | 432 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
433 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; 433 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
434 434
435 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100)) 435 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
436 DRM_ERROR("DPI FIFOs are not empty\n"); 436 DRM_ERROR("DPI FIFOs are not empty\n");
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 47c7584a4aa0..f6bdd44069ce 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -271,6 +271,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
271 u32 ths_prepare_ns, tclk_trail_ns; 271 u32 ths_prepare_ns, tclk_trail_ns;
272 u32 tclk_prepare_clkzero, ths_prepare_hszero; 272 u32 tclk_prepare_clkzero, ths_prepare_hszero;
273 u32 lp_to_hs_switch, hs_to_lp_switch; 273 u32 lp_to_hs_switch, hs_to_lp_switch;
274 u32 pclk, computed_ddr;
275 u16 burst_mode_ratio;
274 276
275 DRM_DEBUG_KMS("\n"); 277 DRM_DEBUG_KMS("\n");
276 278
@@ -284,8 +286,6 @@ static bool generic_init(struct intel_dsi_device *dsi)
284 else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565) 286 else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
285 bits_per_pixel = 16; 287 bits_per_pixel = 16;
286 288
287 bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
288
289 intel_dsi->operation_mode = mipi_config->is_cmd_mode; 289 intel_dsi->operation_mode = mipi_config->is_cmd_mode;
290 intel_dsi->video_mode_format = mipi_config->video_transfer_mode; 290 intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
291 intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; 291 intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@@ -297,6 +297,40 @@ static bool generic_init(struct intel_dsi_device *dsi)
297 intel_dsi->video_frmt_cfg_bits = 297 intel_dsi->video_frmt_cfg_bits =
298 mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; 298 mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
299 299
300 pclk = mode->clock;
301
302 /* Burst Mode Ratio
303 * Target ddr frequency from VBT / non burst ddr freq
304 * multiply by 100 to preserve remainder
305 */
306 if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
307 if (mipi_config->target_burst_mode_freq) {
308 computed_ddr =
309 (pclk * bits_per_pixel) / intel_dsi->lane_count;
310
311 if (mipi_config->target_burst_mode_freq <
312 computed_ddr) {
313 DRM_ERROR("Burst mode freq is less than computed\n");
314 return false;
315 }
316
317 burst_mode_ratio = DIV_ROUND_UP(
318 mipi_config->target_burst_mode_freq * 100,
319 computed_ddr);
320
321 pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
322 } else {
323 DRM_ERROR("Burst mode target is not set\n");
324 return false;
325 }
326 } else
327 burst_mode_ratio = 100;
328
329 intel_dsi->burst_mode_ratio = burst_mode_ratio;
330 intel_dsi->pclk = pclk;
331
332 bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
333
300 switch (intel_dsi->escape_clk_div) { 334 switch (intel_dsi->escape_clk_div) {
301 case 0: 335 case 0:
302 tlpx_ns = 50; 336 tlpx_ns = 50;
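generic_init() above derives the burst-mode ratio as the VBT's target DDR frequency over the non-burst DDR frequency, scaled by 100 to keep the remainder, and then scales the pixel clock up by that ratio. The same arithmetic as a standalone check, using invented example numbers rather than real VBT data:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int pclk = 148500;               /* kHz, example mode clock */
        unsigned int bits_per_pixel = 24;         /* RGB888 */
        unsigned int lane_count = 4;
        unsigned int target_burst_freq = 1000000; /* kHz, would come from the VBT */

        unsigned int computed_ddr = (pclk * bits_per_pixel) / lane_count;
        unsigned int ratio, burst_pclk;

        if (target_burst_freq < computed_ddr) {
                fprintf(stderr, "burst mode freq is less than computed\n");
                return 1;
        }

        ratio = DIV_ROUND_UP(target_burst_freq * 100, computed_ddr);
        burst_pclk = DIV_ROUND_UP(pclk * ratio, 100);

        printf("computed_ddr=%u kHz ratio=%u burst_pclk=%u kHz\n",
               computed_ddr, ratio, burst_pclk);
        return 0;
}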
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index d8bb1ea2f0da..fa7a6ca34cd6 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -134,8 +134,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
134#else 134#else
135 135
136/* Get DSI clock from pixel clock */ 136/* Get DSI clock from pixel clock */
137static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode, 137static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
138 int pixel_format, int lane_count)
139{ 138{
140 u32 dsi_clk_khz; 139 u32 dsi_clk_khz;
141 u32 bpp; 140 u32 bpp;
@@ -156,7 +155,7 @@ static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
156 155
157 /* DSI data rate = pixel clock * bits per pixel / lane count 156 /* DSI data rate = pixel clock * bits per pixel / lane count
158 pixel clock is converted from KHz to Hz */ 157 pixel clock is converted from KHz to Hz */
159 dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count); 158 dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
160 159
161 return dsi_clk_khz; 160 return dsi_clk_khz;
162} 161}
@@ -191,7 +190,7 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
191 for (m = 62; m <= 92; m++) { 190 for (m = 62; m <= 92; m++) {
192 for (p = 2; p <= 6; p++) { 191 for (p = 2; p <= 6; p++) {
193 /* Find the optimal m and p divisors 192 /* Find the optimal m and p divisors
194 with minimal error +/- the required clock */ 193 with minimal error +/- the required clock */
195 calc_dsi_clk = (m * ref_clk) / p; 194 calc_dsi_clk = (m * ref_clk) / p;
196 if (calc_dsi_clk == target_dsi_clk) { 195 if (calc_dsi_clk == target_dsi_clk) {
197 calc_m = m; 196 calc_m = m;
@@ -228,15 +227,13 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
228static void vlv_configure_dsi_pll(struct intel_encoder *encoder) 227static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
229{ 228{
230 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 229 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
231 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
232 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
233 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 230 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
234 int ret; 231 int ret;
235 struct dsi_mnp dsi_mnp; 232 struct dsi_mnp dsi_mnp;
236 u32 dsi_clk; 233 u32 dsi_clk;
237 234
238 dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format, 235 dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
239 intel_dsi->lane_count); 236 intel_dsi->lane_count);
240 237
241 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp); 238 ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
242 if (ret) { 239 if (ret) {
@@ -318,8 +315,8 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
318 } 315 }
319 316
320 WARN(bpp != pipe_bpp, 317 WARN(bpp != pipe_bpp,
321 "bpp match assertion failure (expected %d, current %d)\n", 318 "bpp match assertion failure (expected %d, current %d)\n",
322 bpp, pipe_bpp); 319 bpp, pipe_bpp);
323} 320}
324 321
325u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) 322u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
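Since the PLL code above is now handed a pixel clock that already includes the burst scaling, the DSI bit clock reduces to pclk * bpp / lane_count with round-to-nearest. A quick standalone check of that conversion (example values only; bpp depends on the DSI pixel format):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

static unsigned int dsi_clk_from_pclk(unsigned int pclk, unsigned int bpp,
                                      unsigned int lane_count)
{
        return DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
}

int main(void)
{
        /* 167805 kHz is the burst-scaled pclk from the example above. */
        printf("%u kHz\n", dsi_clk_from_pclk(167805, 24, 4));
        return 0;
}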
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 56b47d2ffaf7..e40e3df33517 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -85,7 +85,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
85 { 85 {
86 .type = INTEL_DVO_CHIP_TMDS, 86 .type = INTEL_DVO_CHIP_TMDS,
87 .name = "ns2501", 87 .name = "ns2501",
88 .dvo_reg = DVOC, 88 .dvo_reg = DVOB,
89 .slave_addr = NS2501_ADDR, 89 .slave_addr = NS2501_ADDR,
90 .dev_ops = &ns2501_ops, 90 .dev_ops = &ns2501_ops,
91 } 91 }
@@ -185,12 +185,13 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
185 u32 dvo_reg = intel_dvo->dev.dvo_reg; 185 u32 dvo_reg = intel_dvo->dev.dvo_reg;
186 u32 temp = I915_READ(dvo_reg); 186 u32 temp = I915_READ(dvo_reg);
187 187
188 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
189 I915_READ(dvo_reg);
190 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, 188 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
191 &crtc->config.requested_mode, 189 &crtc->config.requested_mode,
192 &crtc->config.adjusted_mode); 190 &crtc->config.adjusted_mode);
193 191
192 I915_WRITE(dvo_reg, temp | DVO_ENABLE);
193 I915_READ(dvo_reg);
194
194 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 195 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
195} 196}
196 197
@@ -226,10 +227,6 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
226 227
227 intel_crtc_update_dpms(crtc); 228 intel_crtc_update_dpms(crtc);
228 229
229 intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
230 &config->requested_mode,
231 &config->adjusted_mode);
232
233 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); 230 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
234 } else { 231 } else {
235 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); 232 intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f475414671d8..9b584f3fbb99 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -24,8 +24,10 @@
24 * David Airlie 24 * David Airlie
25 */ 25 */
26 26
27#include <linux/async.h>
27#include <linux/module.h> 28#include <linux/module.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/console.h>
29#include <linux/errno.h> 31#include <linux/errno.h>
30#include <linux/string.h> 32#include <linux/string.h>
31#include <linux/mm.h> 33#include <linux/mm.h>
@@ -331,24 +333,6 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
331 int num_connectors_enabled = 0; 333 int num_connectors_enabled = 0;
332 int num_connectors_detected = 0; 334 int num_connectors_detected = 0;
333 335
334 /*
335 * If the user specified any force options, just bail here
336 * and use that config.
337 */
338 for (i = 0; i < fb_helper->connector_count; i++) {
339 struct drm_fb_helper_connector *fb_conn;
340 struct drm_connector *connector;
341
342 fb_conn = fb_helper->connector_info[i];
343 connector = fb_conn->connector;
344
345 if (!enabled[i])
346 continue;
347
348 if (connector->force != DRM_FORCE_UNSPECIFIED)
349 return false;
350 }
351
352 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), 336 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
353 GFP_KERNEL); 337 GFP_KERNEL);
354 if (!save_enabled) 338 if (!save_enabled)
@@ -374,8 +358,18 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
374 continue; 358 continue;
375 } 359 }
376 360
361 if (connector->force == DRM_FORCE_OFF) {
362 DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
363 connector->name);
364 enabled[i] = false;
365 continue;
366 }
367
377 encoder = connector->encoder; 368 encoder = connector->encoder;
378 if (!encoder || WARN_ON(!encoder->crtc)) { 369 if (!encoder || WARN_ON(!encoder->crtc)) {
370 if (connector->force > DRM_FORCE_OFF)
371 goto bail;
372
379 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", 373 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
380 connector->name); 374 connector->name);
381 enabled[i] = false; 375 enabled[i] = false;
@@ -394,8 +388,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
394 for (j = 0; j < fb_helper->connector_count; j++) { 388 for (j = 0; j < fb_helper->connector_count; j++) {
395 if (crtcs[j] == new_crtc) { 389 if (crtcs[j] == new_crtc) {
396 DRM_DEBUG_KMS("fallback: cloned configuration\n"); 390 DRM_DEBUG_KMS("fallback: cloned configuration\n");
397 fallback = true; 391 goto bail;
398 goto out;
399 } 392 }
400 } 393 }
401 394
@@ -466,8 +459,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
466 fallback = true; 459 fallback = true;
467 } 460 }
468 461
469out:
470 if (fallback) { 462 if (fallback) {
463bail:
471 DRM_DEBUG_KMS("Not using firmware configuration\n"); 464 DRM_DEBUG_KMS("Not using firmware configuration\n");
472 memcpy(enabled, save_enabled, dev->mode_config.num_connector); 465 memcpy(enabled, save_enabled, dev->mode_config.num_connector);
473 kfree(save_enabled); 466 kfree(save_enabled);
@@ -636,6 +629,15 @@ out:
636 return false; 629 return false;
637} 630}
638 631
632static void intel_fbdev_suspend_worker(struct work_struct *work)
633{
634 intel_fbdev_set_suspend(container_of(work,
635 struct drm_i915_private,
636 fbdev_suspend_work)->dev,
637 FBINFO_STATE_RUNNING,
638 true);
639}
640
639int intel_fbdev_init(struct drm_device *dev) 641int intel_fbdev_init(struct drm_device *dev)
640{ 642{
641 struct intel_fbdev *ifbdev; 643 struct intel_fbdev *ifbdev;
@@ -662,14 +664,16 @@ int intel_fbdev_init(struct drm_device *dev)
662 } 664 }
663 665
664 dev_priv->fbdev = ifbdev; 666 dev_priv->fbdev = ifbdev;
667 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
668
665 drm_fb_helper_single_add_all_connectors(&ifbdev->helper); 669 drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
666 670
667 return 0; 671 return 0;
668} 672}
669 673
670void intel_fbdev_initial_config(struct drm_device *dev) 674void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
671{ 675{
672 struct drm_i915_private *dev_priv = dev->dev_private; 676 struct drm_i915_private *dev_priv = data;
673 struct intel_fbdev *ifbdev = dev_priv->fbdev; 677 struct intel_fbdev *ifbdev = dev_priv->fbdev;
674 678
675 /* Due to peculiar init order wrt to hpd handling this is separate. */ 679 /* Due to peculiar init order wrt to hpd handling this is separate. */
@@ -682,12 +686,15 @@ void intel_fbdev_fini(struct drm_device *dev)
682 if (!dev_priv->fbdev) 686 if (!dev_priv->fbdev)
683 return; 687 return;
684 688
689 flush_work(&dev_priv->fbdev_suspend_work);
690
691 async_synchronize_full();
685 intel_fbdev_destroy(dev, dev_priv->fbdev); 692 intel_fbdev_destroy(dev, dev_priv->fbdev);
686 kfree(dev_priv->fbdev); 693 kfree(dev_priv->fbdev);
687 dev_priv->fbdev = NULL; 694 dev_priv->fbdev = NULL;
688} 695}
689 696
690void intel_fbdev_set_suspend(struct drm_device *dev, int state) 697void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
691{ 698{
692 struct drm_i915_private *dev_priv = dev->dev_private; 699 struct drm_i915_private *dev_priv = dev->dev_private;
693 struct intel_fbdev *ifbdev = dev_priv->fbdev; 700 struct intel_fbdev *ifbdev = dev_priv->fbdev;
@@ -698,6 +705,33 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
698 705
699 info = ifbdev->helper.fbdev; 706 info = ifbdev->helper.fbdev;
700 707
708 if (synchronous) {
709 /* Flush any pending work to turn the console on, and then
710 * wait to turn it off. It must be synchronous as we are
711 * about to suspend or unload the driver.
712 *
713 * Note that from within the work-handler, we cannot flush
714 * ourselves, so only flush outstanding work upon suspend!
715 */
716 if (state != FBINFO_STATE_RUNNING)
717 flush_work(&dev_priv->fbdev_suspend_work);
718 console_lock();
719 } else {
720 /*
721 * The console lock can be pretty contended on resume due
722 * to all the printk activity. Try to keep it out of the hot
723 * path of resume if possible.
724 */
725 WARN_ON(state != FBINFO_STATE_RUNNING);
726 if (!console_trylock()) {
727 /* Don't block our own workqueue as this can
728 * be run in parallel with other i915.ko tasks.
729 */
730 schedule_work(&dev_priv->fbdev_suspend_work);
731 return;
732 }
733 }
734
701 /* On resume from hibernation: If the object is shmemfs backed, it has 735 /* On resume from hibernation: If the object is shmemfs backed, it has
702 * been restored from swap. If the object is stolen however, it will be 736 * been restored from swap. If the object is stolen however, it will be
703 * full of whatever garbage was left in there. 737 * full of whatever garbage was left in there.
@@ -706,6 +740,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
706 memset_io(info->screen_base, 0, info->screen_size); 740 memset_io(info->screen_base, 0, info->screen_size);
707 741
708 fb_set_suspend(info, state); 742 fb_set_suspend(info, state);
743 console_unlock();
709} 744}
710 745
711void intel_fbdev_output_poll_changed(struct drm_device *dev) 746void intel_fbdev_output_poll_changed(struct drm_device *dev)
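The synchronous/asynchronous split in intel_fbdev_set_suspend() above boils down to a trylock-or-defer pattern: the resume hot path tries to take the contended lock without blocking and, if that fails, hands the whole update to a worker that is allowed to sleep on it. A minimal userspace sketch of that pattern, using pthreads in place of console_lock()/schedule_work(); all names below are illustrative, not i915 code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_mutex = PTHREAD_MUTEX_INITIALIZER;

static void do_console_update(const char *who)
{
	printf("%s: console updated\n", who);	/* work that needs the lock */
}

static void *update_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&console_mutex);	/* the worker may block here */
	do_console_update("worker");
	pthread_mutex_unlock(&console_mutex);
	return NULL;
}

/* Hot path: never block; defer to the worker if the lock is contended. */
static int update_fast_path(pthread_t *worker)
{
	if (pthread_mutex_trylock(&console_mutex) != 0) {
		pthread_create(worker, NULL, update_worker, NULL);
		return 1;	/* deferred */
	}
	do_console_update("fast path");
	pthread_mutex_unlock(&console_mutex);
	return 0;
}

int main(void)
{
	pthread_t worker;
	int deferred;

	pthread_mutex_lock(&console_mutex);	/* simulate contention */
	deferred = update_fast_path(&worker);	/* takes the deferred path */
	pthread_mutex_unlock(&console_mutex);

	if (deferred)
		pthread_join(worker, NULL);
	return 0;
}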
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 5a9de21637b7..29ec1535992d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -869,10 +869,15 @@ static enum drm_mode_status
869intel_hdmi_mode_valid(struct drm_connector *connector, 869intel_hdmi_mode_valid(struct drm_connector *connector,
870 struct drm_display_mode *mode) 870 struct drm_display_mode *mode)
871{ 871{
872 if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), 872 int clock = mode->clock;
873 true)) 873
874 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
875 clock *= 2;
876
877 if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
878 true))
874 return MODE_CLOCK_HIGH; 879 return MODE_CLOCK_HIGH;
875 if (mode->clock < 20000) 880 if (clock < 20000)
876 return MODE_CLOCK_LOW; 881 return MODE_CLOCK_LOW;
877 882
878 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 883 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -890,7 +895,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
890 if (HAS_GMCH_DISPLAY(dev)) 895 if (HAS_GMCH_DISPLAY(dev))
891 return false; 896 return false;
892 897
893 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { 898 for_each_intel_encoder(dev, encoder) {
894 if (encoder->new_crtc != crtc) 899 if (encoder->new_crtc != crtc)
895 continue; 900 continue;
896 901
@@ -926,6 +931,10 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
926 intel_hdmi->color_range = 0; 931 intel_hdmi->color_range = 0;
927 } 932 }
928 933
934 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
935 pipe_config->pixel_multiplier = 2;
936 }
937
929 if (intel_hdmi->color_range) 938 if (intel_hdmi->color_range)
930 pipe_config->limited_color_range = true; 939 pipe_config->limited_color_range = true;
931 940
@@ -967,104 +976,117 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
967 return true; 976 return true;
968} 977}
969 978
970static enum drm_connector_status 979static void
971intel_hdmi_detect(struct drm_connector *connector, bool force) 980intel_hdmi_unset_edid(struct drm_connector *connector)
972{ 981{
973 struct drm_device *dev = connector->dev;
974 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 982 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
975 struct intel_digital_port *intel_dig_port =
976 hdmi_to_dig_port(intel_hdmi);
977 struct intel_encoder *intel_encoder = &intel_dig_port->base;
978 struct drm_i915_private *dev_priv = dev->dev_private;
979 struct edid *edid;
980 enum intel_display_power_domain power_domain;
981 enum drm_connector_status status = connector_status_disconnected;
982 983
983 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 984 intel_hdmi->has_hdmi_sink = false;
984 connector->base.id, connector->name); 985 intel_hdmi->has_audio = false;
986 intel_hdmi->rgb_quant_range_selectable = false;
987
988 kfree(to_intel_connector(connector)->detect_edid);
989 to_intel_connector(connector)->detect_edid = NULL;
990}
991
992static bool
993intel_hdmi_set_edid(struct drm_connector *connector)
994{
995 struct drm_i915_private *dev_priv = to_i915(connector->dev);
996 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
997 struct intel_encoder *intel_encoder =
998 &hdmi_to_dig_port(intel_hdmi)->base;
999 enum intel_display_power_domain power_domain;
1000 struct edid *edid;
1001 bool connected = false;
985 1002
986 power_domain = intel_display_port_power_domain(intel_encoder); 1003 power_domain = intel_display_port_power_domain(intel_encoder);
987 intel_display_power_get(dev_priv, power_domain); 1004 intel_display_power_get(dev_priv, power_domain);
988 1005
989 intel_hdmi->has_hdmi_sink = false;
990 intel_hdmi->has_audio = false;
991 intel_hdmi->rgb_quant_range_selectable = false;
992 edid = drm_get_edid(connector, 1006 edid = drm_get_edid(connector,
993 intel_gmbus_get_adapter(dev_priv, 1007 intel_gmbus_get_adapter(dev_priv,
994 intel_hdmi->ddc_bus)); 1008 intel_hdmi->ddc_bus));
995 1009
996 if (edid) { 1010 intel_display_power_put(dev_priv, power_domain);
997 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1011
998 status = connector_status_connected; 1012 to_intel_connector(connector)->detect_edid = edid;
999 if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) 1013 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
1000 intel_hdmi->has_hdmi_sink = 1014 intel_hdmi->rgb_quant_range_selectable =
1001 drm_detect_hdmi_monitor(edid); 1015 drm_rgb_quant_range_selectable(edid);
1002 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
1003 intel_hdmi->rgb_quant_range_selectable =
1004 drm_rgb_quant_range_selectable(edid);
1005 }
1006 kfree(edid);
1007 }
1008 1016
1009 if (status == connector_status_connected) { 1017 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
1010 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) 1018 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
1011 intel_hdmi->has_audio = 1019 intel_hdmi->has_audio =
1012 (intel_hdmi->force_audio == HDMI_AUDIO_ON); 1020 intel_hdmi->force_audio == HDMI_AUDIO_ON;
1013 intel_encoder->type = INTEL_OUTPUT_HDMI; 1021
1022 if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
1023 intel_hdmi->has_hdmi_sink =
1024 drm_detect_hdmi_monitor(edid);
1025
1026 connected = true;
1014 } 1027 }
1015 1028
1016 intel_display_power_put(dev_priv, power_domain); 1029 return connected;
1030}
1031
1032static enum drm_connector_status
1033intel_hdmi_detect(struct drm_connector *connector, bool force)
1034{
1035 enum drm_connector_status status;
1036
1037 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1038 connector->base.id, connector->name);
1039
1040 intel_hdmi_unset_edid(connector);
1041
1042 if (intel_hdmi_set_edid(connector)) {
1043 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1044
1045 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1046 status = connector_status_connected;
1047 } else
1048 status = connector_status_disconnected;
1017 1049
1018 return status; 1050 return status;
1019} 1051}
1020 1052
1021static int intel_hdmi_get_modes(struct drm_connector *connector) 1053static void
1054intel_hdmi_force(struct drm_connector *connector)
1022{ 1055{
1023 struct intel_encoder *intel_encoder = intel_attached_encoder(connector); 1056 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1024 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
1025 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1026 enum intel_display_power_domain power_domain;
1027 int ret;
1028 1057
1029 /* We should parse the EDID data and find out if it's an HDMI sink so 1058 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
1030 * we can send audio to it. 1059 connector->base.id, connector->name);
1031 */
1032 1060
1033 power_domain = intel_display_port_power_domain(intel_encoder); 1061 intel_hdmi_unset_edid(connector);
1034 intel_display_power_get(dev_priv, power_domain);
1035 1062
1036 ret = intel_ddc_get_modes(connector, 1063 if (connector->status != connector_status_connected)
1037 intel_gmbus_get_adapter(dev_priv, 1064 return;
1038 intel_hdmi->ddc_bus));
1039 1065
1040 intel_display_power_put(dev_priv, power_domain); 1066 intel_hdmi_set_edid(connector);
1067 hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
1068}
1041 1069
1042 return ret; 1070static int intel_hdmi_get_modes(struct drm_connector *connector)
1071{
1072 struct edid *edid;
1073
1074 edid = to_intel_connector(connector)->detect_edid;
1075 if (edid == NULL)
1076 return 0;
1077
1078 return intel_connector_update_modes(connector, edid);
1043} 1079}
1044 1080
1045static bool 1081static bool
1046intel_hdmi_detect_audio(struct drm_connector *connector) 1082intel_hdmi_detect_audio(struct drm_connector *connector)
1047{ 1083{
1048 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
1049 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
1050 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1051 enum intel_display_power_domain power_domain;
1052 struct edid *edid;
1053 bool has_audio = false; 1084 bool has_audio = false;
1085 struct edid *edid;
1054 1086
1055 power_domain = intel_display_port_power_domain(intel_encoder); 1087 edid = to_intel_connector(connector)->detect_edid;
1056 intel_display_power_get(dev_priv, power_domain); 1088 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
1057 1089 has_audio = drm_detect_monitor_audio(edid);
1058 edid = drm_get_edid(connector,
1059 intel_gmbus_get_adapter(dev_priv,
1060 intel_hdmi->ddc_bus));
1061 if (edid) {
1062 if (edid->input & DRM_EDID_INPUT_DIGITAL)
1063 has_audio = drm_detect_monitor_audio(edid);
1064 kfree(edid);
1065 }
1066
1067 intel_display_power_put(dev_priv, power_domain);
1068 1090
1069 return has_audio; 1091 return has_audio;
1070} 1092}
@@ -1265,6 +1287,8 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1265 enum pipe pipe = intel_crtc->pipe; 1287 enum pipe pipe = intel_crtc->pipe;
1266 u32 val; 1288 u32 val;
1267 1289
1290 intel_hdmi_prepare(encoder);
1291
1268 mutex_lock(&dev_priv->dpio_lock); 1292 mutex_lock(&dev_priv->dpio_lock);
1269 1293
1270 /* program left/right clock distribution */ 1294 /* program left/right clock distribution */
@@ -1434,8 +1458,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1434 1458
1435 for (i = 0; i < 4; i++) { 1459 for (i = 0; i < 4; i++) {
1436 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 1460 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1437 val &= ~DPIO_SWING_MARGIN_MASK; 1461 val &= ~DPIO_SWING_MARGIN000_MASK;
1438 val |= 102 << DPIO_SWING_MARGIN_SHIFT; 1462 val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
1439 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 1463 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
1440 } 1464 }
1441 1465
@@ -1482,6 +1506,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1482 1506
1483static void intel_hdmi_destroy(struct drm_connector *connector) 1507static void intel_hdmi_destroy(struct drm_connector *connector)
1484{ 1508{
1509 kfree(to_intel_connector(connector)->detect_edid);
1485 drm_connector_cleanup(connector); 1510 drm_connector_cleanup(connector);
1486 kfree(connector); 1511 kfree(connector);
1487} 1512}
@@ -1489,6 +1514,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
1489static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 1514static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
1490 .dpms = intel_connector_dpms, 1515 .dpms = intel_connector_dpms,
1491 .detect = intel_hdmi_detect, 1516 .detect = intel_hdmi_detect,
1517 .force = intel_hdmi_force,
1492 .fill_modes = drm_helper_probe_single_connector_modes, 1518 .fill_modes = drm_helper_probe_single_connector_modes,
1493 .set_property = intel_hdmi_set_property, 1519 .set_property = intel_hdmi_set_property,
1494 .destroy = intel_hdmi_destroy, 1520 .destroy = intel_hdmi_destroy,
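The detect()/force() rework above caches the EDID in the connector (detect_edid), so get_modes() and detect_audio() consume the cached copy instead of probing the DDC bus again. A small standalone sketch of that cache-on-detect pattern, with made-up stand-in types rather than the real DRM structures:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins; not the real drm/i915 types or calls. */
struct fake_edid { int has_audio; };

struct fake_connector {
	struct fake_edid *detect_edid;	/* cached by detect(), freed on unset */
};

static struct fake_edid *probe_ddc(void)
{
	/* Pretend we read an EDID block over the (slow) DDC bus. */
	struct fake_edid *edid = malloc(sizeof(*edid));

	if (edid)
		edid->has_audio = 1;
	return edid;
}

static void connector_unset_edid(struct fake_connector *c)
{
	free(c->detect_edid);
	c->detect_edid = NULL;
}

/* detect(): the only place that touches the DDC bus. */
static int connector_detect(struct fake_connector *c)
{
	connector_unset_edid(c);
	c->detect_edid = probe_ddc();
	return c->detect_edid != NULL;
}

/* get_modes()/detect_audio() analogue: reuse the cached EDID, no DDC traffic. */
static int connector_has_audio(const struct fake_connector *c)
{
	return c->detect_edid && c->detect_edid->has_audio;
}

int main(void)
{
	struct fake_connector c = { NULL };

	if (connector_detect(&c))
		printf("connected, audio=%d\n", connector_has_audio(&c));
	connector_unset_edid(&c);
	return 0;
}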
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
new file mode 100644
index 000000000000..bafd38b5703e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -0,0 +1,1766 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
28 *
29 */
30
31/**
32 * DOC: Logical Rings, Logical Ring Contexts and Execlists
33 *
34 * Motivation:
35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36 * These expanded contexts enable a number of new abilities, especially
37 * "Execlists" (also implemented in this file).
38 *
39 * One of the main differences with the legacy HW contexts is that logical
40 * ring contexts incorporate many more things to the context's state, like
41 * PDPs or ringbuffer control registers:
42 *
43 * The reason why PDPs are included in the context is straightforward: as
44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 45 * contained there means you don't need to do a ppgtt->switch_mm yourself,
46 * instead, the GPU will do it for you on the context switch.
47 *
 48 * But, what about the ringbuffer control registers (head, tail, etc.)?
 49 * Shouldn't a single set of those per engine command streamer be enough? This is
50 * where the name "Logical Rings" starts to make sense: by virtualizing the
51 * rings, the engine cs shifts to a new "ring buffer" with every context
52 * switch. When you want to submit a workload to the GPU you: A) choose your
53 * context, B) find its appropriate virtualized ring, C) write commands to it
54 * and then, finally, D) tell the GPU to switch to that context.
55 *
56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 57 * to a context is via a context execution list, ergo "Execlists".
58 *
59 * LRC implementation:
60 * Regarding the creation of contexts, we have:
61 *
62 * - One global default context.
63 * - One local default context for each opened fd.
64 * - One local extra context for each context create ioctl call.
65 *
66 * Now that ringbuffers belong per-context (and not per-engine, like before)
67 * and that contexts are uniquely tied to a given engine (and not reusable,
68 * like before) we need:
69 *
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
72 *
73 * The global default context starts its life with these new objects fully
74 * allocated and populated. The local default context for each opened fd is
75 * more complex, because we don't know at creation time which engine is going
76 * to use them. To handle this, we have implemented a deferred creation of LR
77 * contexts:
78 *
 79 * The local context starts its life as a hollow or blank holder that only
80 * gets populated for a given engine once we receive an execbuffer. If later
81 * on we receive another execbuffer ioctl for the same context but a different
82 * engine, we allocate/populate a new ringbuffer and context backing object and
83 * so on.
84 *
85 * Finally, regarding local contexts created using the ioctl call: as they are
86 * only allowed with the render ring, we can allocate & populate them right
87 * away (no need to defer anything, at least for now).
88 *
89 * Execlists implementation:
90 * Execlists are the new method by which, on gen8+ hardware, workloads are
91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92 * This method works as follows:
93 *
94 * When a request is committed, its commands (the BB start and any leading or
95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96 * for the appropriate context. The tail pointer in the hardware context is not
97 * updated at this time, but instead, kept by the driver in the ringbuffer
98 * structure. A structure representing this request is added to a request queue
99 * for the appropriate engine: this structure contains a copy of the context's
100 * tail after the request was written to the ring buffer and a pointer to the
101 * context itself.
102 *
103 * If the engine's request queue was empty before the request was added, the
104 * queue is processed immediately. Otherwise the queue will be processed during
105 * a context switch interrupt. In any case, elements on the queue will get sent
106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 107 * globally unique 20-bit submission ID.
108 *
109 * When execution of a request completes, the GPU updates the context status
110 * buffer with a context complete event and generates a context switch interrupt.
111 * During the interrupt handling, the driver examines the events in the buffer:
112 * for each context complete event, if the announced ID matches that on the head
113 * of the request queue, then that request is retired and removed from the queue.
114 *
115 * After processing, if any requests were retired and the queue is not empty
116 * then a new execution list can be submitted. The two requests at the front of
117 * the queue are next to be submitted but since a context may not occur twice in
118 * an execution list, if subsequent requests have the same ID as the first then
119 * the two requests must be combined. This is done simply by discarding requests
 120 * at the head of the queue until either only one request is left (in which case
121 * we use a NULL second context) or the first two requests have unique IDs.
122 *
123 * By always executing the first two requests in the queue the driver ensures
124 * that the GPU is kept as busy as possible. In the case where a single context
125 * completes but a second context is still executing, the request for this second
126 * context will be at the head of the queue when we remove the first one. This
127 * request will then be resubmitted along with a new request for a different context,
128 * which will cause the hardware to continue executing the second request and queue
129 * the new request (the GPU detects the condition of a context getting preempted
130 * with the same context and optimizes the context switch flow by not doing
131 * preemption, but just sampling the new tail pointer).
132 *
133 */
134
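The submission rule described in the comment above (coalesce head-of-queue requests for the same context, then send at most two distinct contexts to the ELSP) can be shown with a short standalone simulation; this is a simplified model of the behaviour described here, not the driver's queue handling itself:

#include <stdio.h>

struct req { int ctx_id; unsigned int tail; };

/*
 * Pick the next ELSP pair from a pending queue: requests for the same
 * context are coalesced (only the newest tail matters), and at most two
 * distinct contexts are submitted per ELSP write.
 */
static int pick_elsp_pair(const struct req *q, int n, struct req out[2])
{
	int picked = 0, i;

	for (i = 0; i < n && picked < 2; i++) {
		if (picked && out[picked - 1].ctx_id == q[i].ctx_id)
			out[picked - 1] = q[i];	/* same ctx: keep newest tail */
		else
			out[picked++] = q[i];
	}
	return picked;
}

int main(void)
{
	/* Pending queue: two requests for ctx 1, then one for ctx 2. */
	const struct req queue[] = { {1, 0x100}, {1, 0x180}, {2, 0x40} };
	struct req elsp[2];
	int i, n = pick_elsp_pair(queue, 3, elsp);

	for (i = 0; i < n; i++)
		printf("slot %d: ctx %d, tail 0x%x\n",
		       i, elsp[i].ctx_id, elsp[i].tail);
	/* Prints ctx 1 with tail 0x180 (coalesced) and ctx 2 with tail 0x40. */
	return 0;
}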
135#include <drm/drmP.h>
136#include <drm/i915_drm.h>
137#include "i915_drv.h"
138
139#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
140#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
141
142#define GEN8_LR_CONTEXT_ALIGN 4096
143
144#define RING_EXECLIST_QFULL (1 << 0x2)
145#define RING_EXECLIST1_VALID (1 << 0x3)
146#define RING_EXECLIST0_VALID (1 << 0x4)
147#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
148#define RING_EXECLIST1_ACTIVE (1 << 0x11)
149#define RING_EXECLIST0_ACTIVE (1 << 0x12)
150
151#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
152#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
153#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
154#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
155#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
156#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
157
158#define CTX_LRI_HEADER_0 0x01
159#define CTX_CONTEXT_CONTROL 0x02
160#define CTX_RING_HEAD 0x04
161#define CTX_RING_TAIL 0x06
162#define CTX_RING_BUFFER_START 0x08
163#define CTX_RING_BUFFER_CONTROL 0x0a
164#define CTX_BB_HEAD_U 0x0c
165#define CTX_BB_HEAD_L 0x0e
166#define CTX_BB_STATE 0x10
167#define CTX_SECOND_BB_HEAD_U 0x12
168#define CTX_SECOND_BB_HEAD_L 0x14
169#define CTX_SECOND_BB_STATE 0x16
170#define CTX_BB_PER_CTX_PTR 0x18
171#define CTX_RCS_INDIRECT_CTX 0x1a
172#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
173#define CTX_LRI_HEADER_1 0x21
174#define CTX_CTX_TIMESTAMP 0x22
175#define CTX_PDP3_UDW 0x24
176#define CTX_PDP3_LDW 0x26
177#define CTX_PDP2_UDW 0x28
178#define CTX_PDP2_LDW 0x2a
179#define CTX_PDP1_UDW 0x2c
180#define CTX_PDP1_LDW 0x2e
181#define CTX_PDP0_UDW 0x30
182#define CTX_PDP0_LDW 0x32
183#define CTX_LRI_HEADER_2 0x41
184#define CTX_R_PWR_CLK_STATE 0x42
185#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
186
187#define GEN8_CTX_VALID (1<<0)
188#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
189#define GEN8_CTX_FORCE_RESTORE (1<<2)
190#define GEN8_CTX_L3LLC_COHERENT (1<<5)
191#define GEN8_CTX_PRIVILEGE (1<<8)
192enum {
193 ADVANCED_CONTEXT = 0,
194 LEGACY_CONTEXT,
195 ADVANCED_AD_CONTEXT,
196 LEGACY_64B_CONTEXT
197};
198#define GEN8_CTX_MODE_SHIFT 3
199enum {
200 FAULT_AND_HANG = 0,
201 FAULT_AND_HALT, /* Debug only */
202 FAULT_AND_STREAM,
203 FAULT_AND_CONTINUE /* Unsupported */
204};
205#define GEN8_CTX_ID_SHIFT 32
206
207/**
208 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
209 * @dev: DRM device.
210 * @enable_execlists: value of i915.enable_execlists module parameter.
211 *
212 * Only certain platforms support Execlists (the prerequisites being
213 * support for Logical Ring Contexts and Aliasing PPGTT or better),
214 * and only when enabled via module parameter.
215 *
216 * Return: 1 if Execlists is supported and has to be enabled.
217 */
218int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
219{
220 WARN_ON(i915.enable_ppgtt == -1);
221
222 if (enable_execlists == 0)
223 return 0;
224
225 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
226 i915.use_mmio_flip >= 0)
227 return 1;
228
229 return 0;
230}
231
232/**
233 * intel_execlists_ctx_id() - get the Execlists Context ID
234 * @ctx_obj: Logical Ring Context backing object.
235 *
236 * Do not confuse with ctx->id! Unfortunately we have a name overload
 237 * here: the old context ID we pass to userspace as a handle so that
238 * they can refer to a context, and the new context ID we pass to the
239 * ELSP so that the GPU can inform us of the context status via
240 * interrupts.
241 *
 242 * Return: 20-bit globally unique context ID.
243 */
244u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
245{
246 u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
247
248 /* LRCA is required to be 4K aligned so the more significant 20 bits
249 * are globally unique */
250 return lrca >> 12;
251}
252
253static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
254{
255 uint64_t desc;
256 uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
257
258 WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
259
260 desc = GEN8_CTX_VALID;
261 desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
262 desc |= GEN8_CTX_L3LLC_COHERENT;
263 desc |= GEN8_CTX_PRIVILEGE;
264 desc |= lrca;
265 desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
266
267 /* TODO: WaDisableLiteRestore when we start using semaphore
268 * signalling between Command Streamers */
269 /* desc |= GEN8_CTX_FORCE_RESTORE; */
270
271 return desc;
272}
273
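For illustration, the snippet below packs the same descriptor fields for a hypothetical 4K-aligned LRCA; the macro names and values are stand-ins mirroring the bits used above, not the driver's definitions:

#include <stdio.h>
#include <stdint.h>

#define CTX_VALID		(1ULL << 0)
#define CTX_LEGACY_MODE		(1ULL << 3)	/* LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT */
#define CTX_L3LLC_COHERENT	(1ULL << 5)
#define CTX_PRIVILEGE		(1ULL << 8)
#define CTX_ID_SHIFT		32

int main(void)
{
	uint64_t lrca = 0x12345000;			/* hypothetical 4K-aligned GGTT offset */
	unsigned int ctx_id = (unsigned int)(lrca >> 12);	/* the 20-bit submission ID */
	uint64_t desc;

	desc = CTX_VALID | CTX_LEGACY_MODE | CTX_L3LLC_COHERENT | CTX_PRIVILEGE;
	desc |= lrca;					/* low bits carry the LRCA itself */
	desc |= (uint64_t)ctx_id << CTX_ID_SHIFT;	/* high dword carries the ID */

	printf("ctx_id = 0x%05x, descriptor = 0x%016llx\n",
	       ctx_id, (unsigned long long)desc);
	return 0;
}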
274static void execlists_elsp_write(struct intel_engine_cs *ring,
275 struct drm_i915_gem_object *ctx_obj0,
276 struct drm_i915_gem_object *ctx_obj1)
277{
278 struct drm_i915_private *dev_priv = ring->dev->dev_private;
279 uint64_t temp = 0;
280 uint32_t desc[4];
281 unsigned long flags;
282
283 /* XXX: You must always write both descriptors in the order below. */
284 if (ctx_obj1)
285 temp = execlists_ctx_descriptor(ctx_obj1);
286 else
287 temp = 0;
288 desc[1] = (u32)(temp >> 32);
289 desc[0] = (u32)temp;
290
291 temp = execlists_ctx_descriptor(ctx_obj0);
292 desc[3] = (u32)(temp >> 32);
293 desc[2] = (u32)temp;
294
295 /* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
296 * are in progress.
297 *
298 * The other problem is that we can't just call gen6_gt_force_wake_get()
299 * because that function calls intel_runtime_pm_get(), which might sleep.
300 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
301 */
302 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
303 if (IS_CHERRYVIEW(dev_priv->dev)) {
304 if (dev_priv->uncore.fw_rendercount++ == 0)
305 dev_priv->uncore.funcs.force_wake_get(dev_priv,
306 FORCEWAKE_RENDER);
307 if (dev_priv->uncore.fw_mediacount++ == 0)
308 dev_priv->uncore.funcs.force_wake_get(dev_priv,
309 FORCEWAKE_MEDIA);
310 } else {
311 if (dev_priv->uncore.forcewake_count++ == 0)
312 dev_priv->uncore.funcs.force_wake_get(dev_priv,
313 FORCEWAKE_ALL);
314 }
315 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
316
317 I915_WRITE(RING_ELSP(ring), desc[1]);
318 I915_WRITE(RING_ELSP(ring), desc[0]);
319 I915_WRITE(RING_ELSP(ring), desc[3]);
320 /* The context is automatically loaded after the following */
321 I915_WRITE(RING_ELSP(ring), desc[2]);
322
 323	/* ELSP is a write-only register, so use another nearby reg for posting instead */
324 POSTING_READ(RING_EXECLIST_STATUS(ring));
325
326 /* Release Force Wakeup (see the big comment above). */
327 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
328 if (IS_CHERRYVIEW(dev_priv->dev)) {
329 if (--dev_priv->uncore.fw_rendercount == 0)
330 dev_priv->uncore.funcs.force_wake_put(dev_priv,
331 FORCEWAKE_RENDER);
332 if (--dev_priv->uncore.fw_mediacount == 0)
333 dev_priv->uncore.funcs.force_wake_put(dev_priv,
334 FORCEWAKE_MEDIA);
335 } else {
336 if (--dev_priv->uncore.forcewake_count == 0)
337 dev_priv->uncore.funcs.force_wake_put(dev_priv,
338 FORCEWAKE_ALL);
339 }
340
341 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
342}
343
344static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail)
345{
346 struct page *page;
347 uint32_t *reg_state;
348
349 page = i915_gem_object_get_page(ctx_obj, 1);
350 reg_state = kmap_atomic(page);
351
352 reg_state[CTX_RING_TAIL+1] = tail;
353
354 kunmap_atomic(reg_state);
355
356 return 0;
357}
358
359static int execlists_submit_context(struct intel_engine_cs *ring,
360 struct intel_context *to0, u32 tail0,
361 struct intel_context *to1, u32 tail1)
362{
363 struct drm_i915_gem_object *ctx_obj0;
364 struct drm_i915_gem_object *ctx_obj1 = NULL;
365
366 ctx_obj0 = to0->engine[ring->id].state;
367 BUG_ON(!ctx_obj0);
368 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
369
370 execlists_ctx_write_tail(ctx_obj0, tail0);
371
372 if (to1) {
373 ctx_obj1 = to1->engine[ring->id].state;
374 BUG_ON(!ctx_obj1);
375 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
376
377 execlists_ctx_write_tail(ctx_obj1, tail1);
378 }
379
380 execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
381
382 return 0;
383}
384
385static void execlists_context_unqueue(struct intel_engine_cs *ring)
386{
387 struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
388 struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
389 struct drm_i915_private *dev_priv = ring->dev->dev_private;
390
391 assert_spin_locked(&ring->execlist_lock);
392
393 if (list_empty(&ring->execlist_queue))
394 return;
395
396 /* Try to read in pairs */
397 list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
398 execlist_link) {
399 if (!req0) {
400 req0 = cursor;
401 } else if (req0->ctx == cursor->ctx) {
402 /* Same ctx: ignore first request, as second request
403 * will update tail past first request's workload */
404 cursor->elsp_submitted = req0->elsp_submitted;
405 list_del(&req0->execlist_link);
406 queue_work(dev_priv->wq, &req0->work);
407 req0 = cursor;
408 } else {
409 req1 = cursor;
410 break;
411 }
412 }
413
414 WARN_ON(req1 && req1->elsp_submitted);
415
416 WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail,
417 req1 ? req1->ctx : NULL,
418 req1 ? req1->tail : 0));
419
420 req0->elsp_submitted++;
421 if (req1)
422 req1->elsp_submitted++;
423}
424
425static bool execlists_check_remove_request(struct intel_engine_cs *ring,
426 u32 request_id)
427{
428 struct drm_i915_private *dev_priv = ring->dev->dev_private;
429 struct intel_ctx_submit_request *head_req;
430
431 assert_spin_locked(&ring->execlist_lock);
432
433 head_req = list_first_entry_or_null(&ring->execlist_queue,
434 struct intel_ctx_submit_request,
435 execlist_link);
436
437 if (head_req != NULL) {
438 struct drm_i915_gem_object *ctx_obj =
439 head_req->ctx->engine[ring->id].state;
440 if (intel_execlists_ctx_id(ctx_obj) == request_id) {
441 WARN(head_req->elsp_submitted == 0,
442 "Never submitted head request\n");
443
444 if (--head_req->elsp_submitted <= 0) {
445 list_del(&head_req->execlist_link);
446 queue_work(dev_priv->wq, &head_req->work);
447 return true;
448 }
449 }
450 }
451
452 return false;
453}
454
455/**
456 * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
457 * @ring: Engine Command Streamer to handle.
458 *
459 * Check the unread Context Status Buffers and manage the submission of new
460 * contexts to the ELSP accordingly.
461 */
462void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
463{
464 struct drm_i915_private *dev_priv = ring->dev->dev_private;
465 u32 status_pointer;
466 u8 read_pointer;
467 u8 write_pointer;
468 u32 status;
469 u32 status_id;
470 u32 submit_contexts = 0;
471
472 status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
473
474 read_pointer = ring->next_context_status_buffer;
475 write_pointer = status_pointer & 0x07;
476 if (read_pointer > write_pointer)
477 write_pointer += 6;
478
479 spin_lock(&ring->execlist_lock);
480
481 while (read_pointer < write_pointer) {
482 read_pointer++;
483 status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
484 (read_pointer % 6) * 8);
485 status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
486 (read_pointer % 6) * 8 + 4);
487
488 if (status & GEN8_CTX_STATUS_PREEMPTED) {
489 if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
490 if (execlists_check_remove_request(ring, status_id))
491 WARN(1, "Lite Restored request removed from queue\n");
492 } else
493 WARN(1, "Preemption without Lite Restore\n");
494 }
495
496 if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
497 (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
498 if (execlists_check_remove_request(ring, status_id))
499 submit_contexts++;
500 }
501 }
502
503 if (submit_contexts != 0)
504 execlists_context_unqueue(ring);
505
506 spin_unlock(&ring->execlist_lock);
507
508 WARN(submit_contexts > 2, "More than two context complete events?\n");
509 ring->next_context_status_buffer = write_pointer % 6;
510
511 I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
512 ((u32)ring->next_context_status_buffer & 0x07) << 8);
513}
514
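The read/write pointer arithmetic above walks a six-entry circular Context Status Buffer; below is a standalone worked example of the wrap handling, using the same mod-6 scheme as the loop above (purely illustrative):

#include <stdio.h>

#define CSB_ENTRIES 6

int main(void)
{
	/* Driver has consumed up to entry 4; HW has since written entries 5 and 0. */
	unsigned int read_pointer = 4;
	unsigned int write_pointer = 0;

	/* Same wrap handling as the interrupt handler above. */
	if (read_pointer > write_pointer)
		write_pointer += CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		read_pointer++;
		printf("process CSB entry %u\n", read_pointer % CSB_ENTRIES);
	}

	printf("next_context_status_buffer = %u\n", write_pointer % CSB_ENTRIES);
	return 0;
}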
515static void execlists_free_request_task(struct work_struct *work)
516{
517 struct intel_ctx_submit_request *req =
518 container_of(work, struct intel_ctx_submit_request, work);
519 struct drm_device *dev = req->ring->dev;
520 struct drm_i915_private *dev_priv = dev->dev_private;
521
522 intel_runtime_pm_put(dev_priv);
523
524 mutex_lock(&dev->struct_mutex);
525 i915_gem_context_unreference(req->ctx);
526 mutex_unlock(&dev->struct_mutex);
527
528 kfree(req);
529}
530
531static int execlists_context_queue(struct intel_engine_cs *ring,
532 struct intel_context *to,
533 u32 tail)
534{
535 struct intel_ctx_submit_request *req = NULL, *cursor;
536 struct drm_i915_private *dev_priv = ring->dev->dev_private;
537 unsigned long flags;
538 int num_elements = 0;
539
540 req = kzalloc(sizeof(*req), GFP_KERNEL);
541 if (req == NULL)
542 return -ENOMEM;
543 req->ctx = to;
544 i915_gem_context_reference(req->ctx);
545 req->ring = ring;
546 req->tail = tail;
547 INIT_WORK(&req->work, execlists_free_request_task);
548
549 intel_runtime_pm_get(dev_priv);
550
551 spin_lock_irqsave(&ring->execlist_lock, flags);
552
553 list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
554 if (++num_elements > 2)
555 break;
556
557 if (num_elements > 2) {
558 struct intel_ctx_submit_request *tail_req;
559
560 tail_req = list_last_entry(&ring->execlist_queue,
561 struct intel_ctx_submit_request,
562 execlist_link);
563
564 if (to == tail_req->ctx) {
565 WARN(tail_req->elsp_submitted != 0,
566 "More than 2 already-submitted reqs queued\n");
567 list_del(&tail_req->execlist_link);
568 queue_work(dev_priv->wq, &tail_req->work);
569 }
570 }
571
572 list_add_tail(&req->execlist_link, &ring->execlist_queue);
573 if (num_elements == 0)
574 execlists_context_unqueue(ring);
575
576 spin_unlock_irqrestore(&ring->execlist_lock, flags);
577
578 return 0;
579}
580
581static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
582{
583 struct intel_engine_cs *ring = ringbuf->ring;
584 uint32_t flush_domains;
585 int ret;
586
587 flush_domains = 0;
588 if (ring->gpu_caches_dirty)
589 flush_domains = I915_GEM_GPU_DOMAINS;
590
591 ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
592 if (ret)
593 return ret;
594
595 ring->gpu_caches_dirty = false;
596 return 0;
597}
598
599static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
600 struct list_head *vmas)
601{
602 struct intel_engine_cs *ring = ringbuf->ring;
603 struct i915_vma *vma;
604 uint32_t flush_domains = 0;
605 bool flush_chipset = false;
606 int ret;
607
608 list_for_each_entry(vma, vmas, exec_list) {
609 struct drm_i915_gem_object *obj = vma->obj;
610
611 ret = i915_gem_object_sync(obj, ring);
612 if (ret)
613 return ret;
614
615 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
616 flush_chipset |= i915_gem_clflush_object(obj, false);
617
618 flush_domains |= obj->base.write_domain;
619 }
620
621 if (flush_domains & I915_GEM_DOMAIN_GTT)
622 wmb();
623
624 /* Unconditionally invalidate gpu caches and ensure that we do flush
625 * any residual writes from the previous batch.
626 */
627 return logical_ring_invalidate_all_caches(ringbuf);
628}
629
630/**
631 * execlists_submission() - submit a batchbuffer for execution, Execlists style
632 * @dev: DRM device.
633 * @file: DRM file.
634 * @ring: Engine Command Streamer to submit to.
635 * @ctx: Context to employ for this submission.
636 * @args: execbuffer call arguments.
637 * @vmas: list of vmas.
638 * @batch_obj: the batchbuffer to submit.
639 * @exec_start: batchbuffer start virtual address pointer.
640 * @flags: translated execbuffer call flags.
641 *
642 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
643 * away the submission details of the execbuffer ioctl call.
644 *
645 * Return: non-zero if the submission fails.
646 */
647int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
648 struct intel_engine_cs *ring,
649 struct intel_context *ctx,
650 struct drm_i915_gem_execbuffer2 *args,
651 struct list_head *vmas,
652 struct drm_i915_gem_object *batch_obj,
653 u64 exec_start, u32 flags)
654{
655 struct drm_i915_private *dev_priv = dev->dev_private;
656 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
657 int instp_mode;
658 u32 instp_mask;
659 int ret;
660
661 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
662 instp_mask = I915_EXEC_CONSTANTS_MASK;
663 switch (instp_mode) {
664 case I915_EXEC_CONSTANTS_REL_GENERAL:
665 case I915_EXEC_CONSTANTS_ABSOLUTE:
666 case I915_EXEC_CONSTANTS_REL_SURFACE:
667 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
668 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
669 return -EINVAL;
670 }
671
672 if (instp_mode != dev_priv->relative_constants_mode) {
673 if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
674 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
675 return -EINVAL;
676 }
677
678 /* The HW changed the meaning on this bit on gen6 */
679 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
680 }
681 break;
682 default:
683 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
684 return -EINVAL;
685 }
686
687 if (args->num_cliprects != 0) {
688 DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
689 return -EINVAL;
690 } else {
691 if (args->DR4 == 0xffffffff) {
692 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
693 args->DR4 = 0;
694 }
695
696 if (args->DR1 || args->DR4 || args->cliprects_ptr) {
697 DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
698 return -EINVAL;
699 }
700 }
701
702 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
703 DRM_DEBUG("sol reset is gen7 only\n");
704 return -EINVAL;
705 }
706
707 ret = execlists_move_to_gpu(ringbuf, vmas);
708 if (ret)
709 return ret;
710
711 if (ring == &dev_priv->ring[RCS] &&
712 instp_mode != dev_priv->relative_constants_mode) {
713 ret = intel_logical_ring_begin(ringbuf, 4);
714 if (ret)
715 return ret;
716
717 intel_logical_ring_emit(ringbuf, MI_NOOP);
718 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
719 intel_logical_ring_emit(ringbuf, INSTPM);
720 intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
721 intel_logical_ring_advance(ringbuf);
722
723 dev_priv->relative_constants_mode = instp_mode;
724 }
725
726 ret = ring->emit_bb_start(ringbuf, exec_start, flags);
727 if (ret)
728 return ret;
729
730 i915_gem_execbuffer_move_to_active(vmas, ring);
731 i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
732
733 return 0;
734}
735
736void intel_logical_ring_stop(struct intel_engine_cs *ring)
737{
738 struct drm_i915_private *dev_priv = ring->dev->dev_private;
739 int ret;
740
741 if (!intel_ring_initialized(ring))
742 return;
743
744 ret = intel_ring_idle(ring);
745 if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
746 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
747 ring->name, ret);
748
749 /* TODO: Is this correct with Execlists enabled? */
750 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
751 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
752 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
753 return;
754 }
755 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
756}
757
758int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
759{
760 struct intel_engine_cs *ring = ringbuf->ring;
761 int ret;
762
763 if (!ring->gpu_caches_dirty)
764 return 0;
765
766 ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
767 if (ret)
768 return ret;
769
770 ring->gpu_caches_dirty = false;
771 return 0;
772}
773
774/**
775 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
776 * @ringbuf: Logical Ringbuffer to advance.
777 *
778 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
779 * really happens during submission is that the context and current tail will be placed
780 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
781 * point, the tail *inside* the context is updated and the ELSP written to.
782 */
783void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
784{
785 struct intel_engine_cs *ring = ringbuf->ring;
786 struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
787
788 intel_logical_ring_advance(ringbuf);
789
790 if (intel_ring_stopped(ring))
791 return;
792
793 execlists_context_queue(ring, ctx, ringbuf->tail);
794}
795
796static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
797 struct intel_context *ctx)
798{
799 if (ring->outstanding_lazy_seqno)
800 return 0;
801
802 if (ring->preallocated_lazy_request == NULL) {
803 struct drm_i915_gem_request *request;
804
805 request = kmalloc(sizeof(*request), GFP_KERNEL);
806 if (request == NULL)
807 return -ENOMEM;
808
809 /* Hold a reference to the context this request belongs to
810 * (we will need it when the time comes to emit/retire the
811 * request).
812 */
813 request->ctx = ctx;
814 i915_gem_context_reference(request->ctx);
815
816 ring->preallocated_lazy_request = request;
817 }
818
819 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
820}
821
822static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
823 int bytes)
824{
825 struct intel_engine_cs *ring = ringbuf->ring;
826 struct drm_i915_gem_request *request;
827 u32 seqno = 0;
828 int ret;
829
830 if (ringbuf->last_retired_head != -1) {
831 ringbuf->head = ringbuf->last_retired_head;
832 ringbuf->last_retired_head = -1;
833
834 ringbuf->space = intel_ring_space(ringbuf);
835 if (ringbuf->space >= bytes)
836 return 0;
837 }
838
839 list_for_each_entry(request, &ring->request_list, list) {
840 if (__intel_ring_space(request->tail, ringbuf->tail,
841 ringbuf->size) >= bytes) {
842 seqno = request->seqno;
843 break;
844 }
845 }
846
847 if (seqno == 0)
848 return -ENOSPC;
849
850 ret = i915_wait_seqno(ring, seqno);
851 if (ret)
852 return ret;
853
854 i915_gem_retire_requests_ring(ring);
855 ringbuf->head = ringbuf->last_retired_head;
856 ringbuf->last_retired_head = -1;
857
858 ringbuf->space = intel_ring_space(ringbuf);
859 return 0;
860}
861
862static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
863 int bytes)
864{
865 struct intel_engine_cs *ring = ringbuf->ring;
866 struct drm_device *dev = ring->dev;
867 struct drm_i915_private *dev_priv = dev->dev_private;
868 unsigned long end;
869 int ret;
870
871 ret = logical_ring_wait_request(ringbuf, bytes);
872 if (ret != -ENOSPC)
873 return ret;
874
875 /* Force the context submission in case we have been skipping it */
876 intel_logical_ring_advance_and_submit(ringbuf);
877
878 /* With GEM the hangcheck timer should kick us out of the loop,
879 * leaving it early runs the risk of corrupting GEM state (due
880 * to running on almost untested codepaths). But on resume
881 * timers don't work yet, so prevent a complete hang in that
882 * case by choosing an insanely large timeout. */
883 end = jiffies + 60 * HZ;
884
885 do {
886 ringbuf->head = I915_READ_HEAD(ring);
887 ringbuf->space = intel_ring_space(ringbuf);
888 if (ringbuf->space >= bytes) {
889 ret = 0;
890 break;
891 }
892
893 msleep(1);
894
895 if (dev_priv->mm.interruptible && signal_pending(current)) {
896 ret = -ERESTARTSYS;
897 break;
898 }
899
900 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
901 dev_priv->mm.interruptible);
902 if (ret)
903 break;
904
905 if (time_after(jiffies, end)) {
906 ret = -EBUSY;
907 break;
908 }
909 } while (1);
910
911 return ret;
912}
913
914static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
915{
916 uint32_t __iomem *virt;
917 int rem = ringbuf->size - ringbuf->tail;
918
919 if (ringbuf->space < rem) {
920 int ret = logical_ring_wait_for_space(ringbuf, rem);
921
922 if (ret)
923 return ret;
924 }
925
926 virt = ringbuf->virtual_start + ringbuf->tail;
927 rem /= 4;
928 while (rem--)
929 iowrite32(MI_NOOP, virt++);
930
931 ringbuf->tail = 0;
932 ringbuf->space = intel_ring_space(ringbuf);
933
934 return 0;
935}
936
937static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
938{
939 int ret;
940
941 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
942 ret = logical_ring_wrap_buffer(ringbuf);
943 if (unlikely(ret))
944 return ret;
945 }
946
947 if (unlikely(ringbuf->space < bytes)) {
948 ret = logical_ring_wait_for_space(ringbuf, bytes);
949 if (unlikely(ret))
950 return ret;
951 }
952
953 return 0;
954}
955
956/**
957 * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
958 *
959 * @ringbuf: Logical ringbuffer.
960 * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
961 *
962 * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
963 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
964 * and also preallocates a request (every workload submission is still mediated through
965 * requests, same as it did with legacy ringbuffer submission).
966 *
967 * Return: non-zero if the ringbuffer is not ready to be written to.
968 */
969int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
970{
971 struct intel_engine_cs *ring = ringbuf->ring;
972 struct drm_device *dev = ring->dev;
973 struct drm_i915_private *dev_priv = dev->dev_private;
974 int ret;
975
976 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
977 dev_priv->mm.interruptible);
978 if (ret)
979 return ret;
980
981 ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
982 if (ret)
983 return ret;
984
985 /* Preallocate the olr before touching the ring */
986 ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
987 if (ret)
988 return ret;
989
990 ringbuf->space -= num_dwords * sizeof(uint32_t);
991 return 0;
992}
993
994static int gen8_init_common_ring(struct intel_engine_cs *ring)
995{
996 struct drm_device *dev = ring->dev;
997 struct drm_i915_private *dev_priv = dev->dev_private;
998
999 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1000 I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
1001
1002 I915_WRITE(RING_MODE_GEN7(ring),
1003 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1004 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1005 POSTING_READ(RING_MODE_GEN7(ring));
1006 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
1007
1008 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
1009
1010 return 0;
1011}
1012
1013static int gen8_init_render_ring(struct intel_engine_cs *ring)
1014{
1015 struct drm_device *dev = ring->dev;
1016 struct drm_i915_private *dev_priv = dev->dev_private;
1017 int ret;
1018
1019 ret = gen8_init_common_ring(ring);
1020 if (ret)
1021 return ret;
1022
1023 /* We need to disable the AsyncFlip performance optimisations in order
1024 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1025 * programmed to '1' on all products.
1026 *
1027 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1028 */
1029 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1030
1031 ret = intel_init_pipe_control(ring);
1032 if (ret)
1033 return ret;
1034
1035 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1036
1037 return ret;
1038}
1039
1040static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
1041 u64 offset, unsigned flags)
1042{
1043 bool ppgtt = !(flags & I915_DISPATCH_SECURE);
1044 int ret;
1045
1046 ret = intel_logical_ring_begin(ringbuf, 4);
1047 if (ret)
1048 return ret;
1049
1050 /* FIXME(BDW): Address space and security selectors. */
1051 intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
1052 intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
1053 intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
1054 intel_logical_ring_emit(ringbuf, MI_NOOP);
1055 intel_logical_ring_advance(ringbuf);
1056
1057 return 0;
1058}
1059
1060static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1061{
1062 struct drm_device *dev = ring->dev;
1063 struct drm_i915_private *dev_priv = dev->dev_private;
1064 unsigned long flags;
1065
1066 if (!dev->irq_enabled)
1067 return false;
1068
1069 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1070 if (ring->irq_refcount++ == 0) {
1071 I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
1072 POSTING_READ(RING_IMR(ring->mmio_base));
1073 }
1074 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1075
1076 return true;
1077}
1078
1079static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
1080{
1081 struct drm_device *dev = ring->dev;
1082 struct drm_i915_private *dev_priv = dev->dev_private;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1086 if (--ring->irq_refcount == 0) {
1087 I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
1088 POSTING_READ(RING_IMR(ring->mmio_base));
1089 }
1090 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1091}
1092
1093static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
1094 u32 invalidate_domains,
1095 u32 unused)
1096{
1097 struct intel_engine_cs *ring = ringbuf->ring;
1098 struct drm_device *dev = ring->dev;
1099 struct drm_i915_private *dev_priv = dev->dev_private;
1100 uint32_t cmd;
1101 int ret;
1102
1103 ret = intel_logical_ring_begin(ringbuf, 4);
1104 if (ret)
1105 return ret;
1106
1107 cmd = MI_FLUSH_DW + 1;
1108
1109 if (ring == &dev_priv->ring[VCS]) {
1110 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
1111 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1112 MI_FLUSH_DW_STORE_INDEX |
1113 MI_FLUSH_DW_OP_STOREDW;
1114 } else {
1115 if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
1116 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1117 MI_FLUSH_DW_OP_STOREDW;
1118 }
1119
1120 intel_logical_ring_emit(ringbuf, cmd);
1121 intel_logical_ring_emit(ringbuf,
1122 I915_GEM_HWS_SCRATCH_ADDR |
1123 MI_FLUSH_DW_USE_GTT);
1124 intel_logical_ring_emit(ringbuf, 0); /* upper addr */
1125 intel_logical_ring_emit(ringbuf, 0); /* value */
1126 intel_logical_ring_advance(ringbuf);
1127
1128 return 0;
1129}
1130
1131static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
1132 u32 invalidate_domains,
1133 u32 flush_domains)
1134{
1135 struct intel_engine_cs *ring = ringbuf->ring;
1136 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
1137 u32 flags = 0;
1138 int ret;
1139
1140 flags |= PIPE_CONTROL_CS_STALL;
1141
1142 if (flush_domains) {
1143 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1144 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1145 }
1146
1147 if (invalidate_domains) {
1148 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1149 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1150 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1151 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1152 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1153 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1154 flags |= PIPE_CONTROL_QW_WRITE;
1155 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1156 }
1157
1158 ret = intel_logical_ring_begin(ringbuf, 6);
1159 if (ret)
1160 return ret;
1161
1162 intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
1163 intel_logical_ring_emit(ringbuf, flags);
1164 intel_logical_ring_emit(ringbuf, scratch_addr);
1165 intel_logical_ring_emit(ringbuf, 0);
1166 intel_logical_ring_emit(ringbuf, 0);
1167 intel_logical_ring_emit(ringbuf, 0);
1168 intel_logical_ring_advance(ringbuf);
1169
1170 return 0;
1171}
1172
1173static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
1174{
1175 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
1176}
1177
1178static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
1179{
1180 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
1181}
1182
1183static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
1184{
1185 struct intel_engine_cs *ring = ringbuf->ring;
1186 u32 cmd;
1187 int ret;
1188
1189 ret = intel_logical_ring_begin(ringbuf, 6);
1190 if (ret)
1191 return ret;
1192
1193 cmd = MI_STORE_DWORD_IMM_GEN8;
1194 cmd |= MI_GLOBAL_GTT;
1195
1196 intel_logical_ring_emit(ringbuf, cmd);
1197 intel_logical_ring_emit(ringbuf,
1198 (ring->status_page.gfx_addr +
1199 (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
1200 intel_logical_ring_emit(ringbuf, 0);
1201 intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
1202 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1203 intel_logical_ring_emit(ringbuf, MI_NOOP);
1204 intel_logical_ring_advance_and_submit(ringbuf);
1205
1206 return 0;
1207}
1208
1209/**
1210 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1211 *
1212 * @ring: Engine Command Streamer.
1213 *
1214 */
1215void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
1216{
1217 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1218
1219 if (!intel_ring_initialized(ring))
1220 return;
1221
1222 intel_logical_ring_stop(ring);
1223 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1224 ring->preallocated_lazy_request = NULL;
1225 ring->outstanding_lazy_seqno = 0;
1226
1227 if (ring->cleanup)
1228 ring->cleanup(ring);
1229
1230 i915_cmd_parser_fini_ring(ring);
1231
1232 if (ring->status_page.obj) {
1233 kunmap(sg_page(ring->status_page.obj->pages->sgl));
1234 ring->status_page.obj = NULL;
1235 }
1236}
1237
1238static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
1239{
1240 int ret;
1241
1242 /* Intentionally left blank. */
1243 ring->buffer = NULL;
1244
1245 ring->dev = dev;
1246 INIT_LIST_HEAD(&ring->active_list);
1247 INIT_LIST_HEAD(&ring->request_list);
1248 init_waitqueue_head(&ring->irq_queue);
1249
1250 INIT_LIST_HEAD(&ring->execlist_queue);
1251 spin_lock_init(&ring->execlist_lock);
1252 ring->next_context_status_buffer = 0;
1253
1254 ret = i915_cmd_parser_init_ring(ring);
1255 if (ret)
1256 return ret;
1257
1258 if (ring->init) {
1259 ret = ring->init(ring);
1260 if (ret)
1261 return ret;
1262 }
1263
1264 ret = intel_lr_context_deferred_create(ring->default_context, ring);
1265
1266 return ret;
1267}
1268
1269static int logical_render_ring_init(struct drm_device *dev)
1270{
1271 struct drm_i915_private *dev_priv = dev->dev_private;
1272 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
1273
1274 ring->name = "render ring";
1275 ring->id = RCS;
1276 ring->mmio_base = RENDER_RING_BASE;
1277 ring->irq_enable_mask =
1278 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1279 ring->irq_keep_mask =
1280 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
1281 if (HAS_L3_DPF(dev))
1282 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1283
1284 ring->init = gen8_init_render_ring;
1285 ring->cleanup = intel_fini_pipe_control;
1286 ring->get_seqno = gen8_get_seqno;
1287 ring->set_seqno = gen8_set_seqno;
1288 ring->emit_request = gen8_emit_request;
1289 ring->emit_flush = gen8_emit_flush_render;
1290 ring->irq_get = gen8_logical_ring_get_irq;
1291 ring->irq_put = gen8_logical_ring_put_irq;
1292 ring->emit_bb_start = gen8_emit_bb_start;
1293
1294 return logical_ring_init(dev, ring);
1295}
1296
1297static int logical_bsd_ring_init(struct drm_device *dev)
1298{
1299 struct drm_i915_private *dev_priv = dev->dev_private;
1300 struct intel_engine_cs *ring = &dev_priv->ring[VCS];
1301
1302 ring->name = "bsd ring";
1303 ring->id = VCS;
1304 ring->mmio_base = GEN6_BSD_RING_BASE;
1305 ring->irq_enable_mask =
1306 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1307 ring->irq_keep_mask =
1308 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
1309
1310 ring->init = gen8_init_common_ring;
1311 ring->get_seqno = gen8_get_seqno;
1312 ring->set_seqno = gen8_set_seqno;
1313 ring->emit_request = gen8_emit_request;
1314 ring->emit_flush = gen8_emit_flush;
1315 ring->irq_get = gen8_logical_ring_get_irq;
1316 ring->irq_put = gen8_logical_ring_put_irq;
1317 ring->emit_bb_start = gen8_emit_bb_start;
1318
1319 return logical_ring_init(dev, ring);
1320}
1321
1322static int logical_bsd2_ring_init(struct drm_device *dev)
1323{
1324 struct drm_i915_private *dev_priv = dev->dev_private;
1325 struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
1326
1327 ring->name = "bds2 ring";
1328 ring->id = VCS2;
1329 ring->mmio_base = GEN8_BSD2_RING_BASE;
1330 ring->irq_enable_mask =
1331 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1332 ring->irq_keep_mask =
1333 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
1334
1335 ring->init = gen8_init_common_ring;
1336 ring->get_seqno = gen8_get_seqno;
1337 ring->set_seqno = gen8_set_seqno;
1338 ring->emit_request = gen8_emit_request;
1339 ring->emit_flush = gen8_emit_flush;
1340 ring->irq_get = gen8_logical_ring_get_irq;
1341 ring->irq_put = gen8_logical_ring_put_irq;
1342 ring->emit_bb_start = gen8_emit_bb_start;
1343
1344 return logical_ring_init(dev, ring);
1345}
1346
1347static int logical_blt_ring_init(struct drm_device *dev)
1348{
1349 struct drm_i915_private *dev_priv = dev->dev_private;
1350 struct intel_engine_cs *ring = &dev_priv->ring[BCS];
1351
1352 ring->name = "blitter ring";
1353 ring->id = BCS;
1354 ring->mmio_base = BLT_RING_BASE;
1355 ring->irq_enable_mask =
1356 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1357 ring->irq_keep_mask =
1358 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1359
1360 ring->init = gen8_init_common_ring;
1361 ring->get_seqno = gen8_get_seqno;
1362 ring->set_seqno = gen8_set_seqno;
1363 ring->emit_request = gen8_emit_request;
1364 ring->emit_flush = gen8_emit_flush;
1365 ring->irq_get = gen8_logical_ring_get_irq;
1366 ring->irq_put = gen8_logical_ring_put_irq;
1367 ring->emit_bb_start = gen8_emit_bb_start;
1368
1369 return logical_ring_init(dev, ring);
1370}
1371
1372static int logical_vebox_ring_init(struct drm_device *dev)
1373{
1374 struct drm_i915_private *dev_priv = dev->dev_private;
1375 struct intel_engine_cs *ring = &dev_priv->ring[VECS];
1376
1377 ring->name = "video enhancement ring";
1378 ring->id = VECS;
1379 ring->mmio_base = VEBOX_RING_BASE;
1380 ring->irq_enable_mask =
1381 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1382 ring->irq_keep_mask =
1383 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
1384
1385 ring->init = gen8_init_common_ring;
1386 ring->get_seqno = gen8_get_seqno;
1387 ring->set_seqno = gen8_set_seqno;
1388 ring->emit_request = gen8_emit_request;
1389 ring->emit_flush = gen8_emit_flush;
1390 ring->irq_get = gen8_logical_ring_get_irq;
1391 ring->irq_put = gen8_logical_ring_put_irq;
1392 ring->emit_bb_start = gen8_emit_bb_start;
1393
1394 return logical_ring_init(dev, ring);
1395}
1396
1397/**
1398 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
1399 * @dev: DRM device.
1400 *
1401 * This function inits the engines for an Execlists submission style (the equivalent in the
1402 * legacy ringbuffer submission world would be i915_gem_init_rings). It does so only for
1403 * those engines that are present in the hardware.
1404 *
1405 * Return: non-zero if the initialization failed.
1406 */
1407int intel_logical_rings_init(struct drm_device *dev)
1408{
1409 struct drm_i915_private *dev_priv = dev->dev_private;
1410 int ret;
1411
1412 ret = logical_render_ring_init(dev);
1413 if (ret)
1414 return ret;
1415
1416 if (HAS_BSD(dev)) {
1417 ret = logical_bsd_ring_init(dev);
1418 if (ret)
1419 goto cleanup_render_ring;
1420 }
1421
1422 if (HAS_BLT(dev)) {
1423 ret = logical_blt_ring_init(dev);
1424 if (ret)
1425 goto cleanup_bsd_ring;
1426 }
1427
1428 if (HAS_VEBOX(dev)) {
1429 ret = logical_vebox_ring_init(dev);
1430 if (ret)
1431 goto cleanup_blt_ring;
1432 }
1433
1434 if (HAS_BSD2(dev)) {
1435 ret = logical_bsd2_ring_init(dev);
1436 if (ret)
1437 goto cleanup_vebox_ring;
1438 }
1439
1440 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
1441 if (ret)
1442 goto cleanup_bsd2_ring;
1443
1444 return 0;
1445
1446cleanup_bsd2_ring:
1447 intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
1448cleanup_vebox_ring:
1449 intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
1450cleanup_blt_ring:
1451 intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
1452cleanup_bsd_ring:
1453 intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
1454cleanup_render_ring:
1455 intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
1456
1457 return ret;
1458}
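
/*
 * Illustrative sketch only (not part of this patch): how a caller could
 * choose between the Execlists init above and the legacy path. The
 * i915.enable_execlists module parameter and i915_gem_init_rings() are
 * assumptions about the surrounding driver code; only
 * intel_sanitize_enable_execlists() and intel_logical_rings_init() are
 * taken from this series.
 */
static int example_gt_init_rings(struct drm_device *dev)
{
	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
						i915.enable_execlists);

	if (i915.enable_execlists)
		return intel_logical_rings_init(dev);	/* LRC/Execlists */

	return i915_gem_init_rings(dev);		/* legacy ringbuffers */
}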
1459
1460int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
1461 struct intel_context *ctx)
1462{
1463 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
1464 struct render_state so;
1465 struct drm_i915_file_private *file_priv = ctx->file_priv;
1466 struct drm_file *file = file_priv ? file_priv->file : NULL;
1467 int ret;
1468
1469 ret = i915_gem_render_state_prepare(ring, &so);
1470 if (ret)
1471 return ret;
1472
1473 if (so.rodata == NULL)
1474 return 0;
1475
1476 ret = ring->emit_bb_start(ringbuf,
1477 so.ggtt_offset,
1478 I915_DISPATCH_SECURE);
1479 if (ret)
1480 goto out;
1481
1482 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
1483
1484 ret = __i915_add_request(ring, file, so.obj, NULL);
1485	/* __i915_add_request moves object to inactive if it
1486 * fails */
1487out:
1488 i915_gem_render_state_fini(&so);
1489 return ret;
1490}
1491
1492static int
1493populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
1494 struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
1495{
1496 struct drm_device *dev = ring->dev;
1497 struct drm_i915_private *dev_priv = dev->dev_private;
1498 struct drm_i915_gem_object *ring_obj = ringbuf->obj;
1499 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
1500 struct page *page;
1501 uint32_t *reg_state;
1502 int ret;
1503
1504 if (!ppgtt)
1505 ppgtt = dev_priv->mm.aliasing_ppgtt;
1506
1507 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
1508 if (ret) {
1509 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
1510 return ret;
1511 }
1512
1513 ret = i915_gem_object_get_pages(ctx_obj);
1514 if (ret) {
1515 DRM_DEBUG_DRIVER("Could not get object pages\n");
1516 return ret;
1517 }
1518
1519 i915_gem_object_pin_pages(ctx_obj);
1520
1521 /* The second page of the context object contains some fields which must
1522 * be set up prior to the first execution. */
1523 page = i915_gem_object_get_page(ctx_obj, 1);
1524 reg_state = kmap_atomic(page);
1525
1526 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
1527 * commands followed by (reg, value) pairs. The values we are setting here are
1528 * only for the first context restore: on a subsequent save, the GPU will
1529 * recreate this batchbuffer with new values (including all the missing
1530 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
1531 if (ring->id == RCS)
1532 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
1533 else
1534 reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
1535 reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
1536 reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
1537 reg_state[CTX_CONTEXT_CONTROL+1] =
1538 _MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
1539 reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
1540 reg_state[CTX_RING_HEAD+1] = 0;
1541 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
1542 reg_state[CTX_RING_TAIL+1] = 0;
1543 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
1544 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
1545 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
1546 reg_state[CTX_RING_BUFFER_CONTROL+1] =
1547 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
1548 reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
1549 reg_state[CTX_BB_HEAD_U+1] = 0;
1550 reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
1551 reg_state[CTX_BB_HEAD_L+1] = 0;
1552 reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
1553 reg_state[CTX_BB_STATE+1] = (1<<5);
1554 reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
1555 reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
1556 reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
1557 reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
1558 reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
1559 reg_state[CTX_SECOND_BB_STATE+1] = 0;
1560 if (ring->id == RCS) {
1561 /* TODO: according to BSpec, the register state context
1562 * for CHV does not have these. OTOH, these registers do
1563 * exist in CHV. I'm waiting for a clarification */
1564 reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
1565 reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
1566 reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
1567 reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
1568 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
1569 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
1570 }
1571 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
1572 reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
1573 reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
1574 reg_state[CTX_CTX_TIMESTAMP+1] = 0;
1575 reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
1576 reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
1577 reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
1578 reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
1579 reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
1580 reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
1581 reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
1582 reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
1583 reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
1584 reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
1585 reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
1586 reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
1587 reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
1588 reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
1589 reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
1590 reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
1591 if (ring->id == RCS) {
1592 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
1593 reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
1594 reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
1595 }
1596
1597 kunmap_atomic(reg_state);
1598
1599 ctx_obj->dirty = 1;
1600 set_page_dirty(page);
1601 i915_gem_object_unpin_pages(ctx_obj);
1602
1603 return 0;
1604}
1605
1606/**
1607 * intel_lr_context_free() - free the LRC specific bits of a context
1608 * @ctx: the LR context to free.
1609 *
1610 * The real context freeing is done in i915_gem_context_free: this only
1611 * takes care of the bits that are LRC related: the per-engine backing
1612 * objects and the logical ringbuffer.
1613 */
1614void intel_lr_context_free(struct intel_context *ctx)
1615{
1616 int i;
1617
1618 for (i = 0; i < I915_NUM_RINGS; i++) {
1619 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1620 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
1621
1622 if (ctx_obj) {
1623 intel_destroy_ringbuffer_obj(ringbuf);
1624 kfree(ringbuf);
1625 i915_gem_object_ggtt_unpin(ctx_obj);
1626 drm_gem_object_unreference(&ctx_obj->base);
1627 }
1628 }
1629}
1630
1631static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
1632{
1633 int ret = 0;
1634
1635 WARN_ON(INTEL_INFO(ring->dev)->gen != 8);
1636
1637 switch (ring->id) {
1638 case RCS:
1639 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
1640 break;
1641 case VCS:
1642 case BCS:
1643 case VECS:
1644 case VCS2:
1645 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
1646 break;
1647 }
1648
1649 return ret;
1650}
1651
1652/**
1653 * intel_lr_context_deferred_create() - create the LRC specific bits of a context
1654 * @ctx: LR context to create.
1655 * @ring: engine to be used with the context.
1656 *
1657 * This function can be called more than once, with different engines, if we plan
1658 * to use the context with them. The context backing objects and the ringbuffers
1659 * (especially the ringbuffer backing objects) suck a lot of memory up, and that's why
1660 * the creation is a deferred call: it's better to make sure first that we need to use
1661 * a given ring with the context.
1662 *
1663 * Return: non-zero on error.
1664 */
1665int intel_lr_context_deferred_create(struct intel_context *ctx,
1666 struct intel_engine_cs *ring)
1667{
1668 struct drm_device *dev = ring->dev;
1669 struct drm_i915_gem_object *ctx_obj;
1670 uint32_t context_size;
1671 struct intel_ringbuffer *ringbuf;
1672 int ret;
1673
1674 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
1675 if (ctx->engine[ring->id].state)
1676 return 0;
1677
1678 context_size = round_up(get_lr_context_size(ring), 4096);
1679
1680 ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
1681 if (IS_ERR(ctx_obj)) {
1682 ret = PTR_ERR(ctx_obj);
1683 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
1684 return ret;
1685 }
1686
1687 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
1688 if (ret) {
1689 DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret);
1690 drm_gem_object_unreference(&ctx_obj->base);
1691 return ret;
1692 }
1693
1694 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1695 if (!ringbuf) {
1696 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
1697 ring->name);
1698 i915_gem_object_ggtt_unpin(ctx_obj);
1699 drm_gem_object_unreference(&ctx_obj->base);
1700 ret = -ENOMEM;
1701 return ret;
1702 }
1703
1704 ringbuf->ring = ring;
1705 ringbuf->FIXME_lrc_ctx = ctx;
1706
1707 ringbuf->size = 32 * PAGE_SIZE;
1708 ringbuf->effective_size = ringbuf->size;
1709 ringbuf->head = 0;
1710 ringbuf->tail = 0;
1711 ringbuf->space = ringbuf->size;
1712 ringbuf->last_retired_head = -1;
1713
1714 /* TODO: For now we put this in the mappable region so that we can reuse
1715 * the existing ringbuffer code which ioremaps it. When we start
1716 * creating many contexts, this will no longer work and we must switch
1717 * to a kmapish interface.
1718 */
1719 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1720 if (ret) {
1721 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
1722 ring->name, ret);
1723 goto error;
1724 }
1725
1726 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
1727 if (ret) {
1728 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
1729 intel_destroy_ringbuffer_obj(ringbuf);
1730 goto error;
1731 }
1732
1733 ctx->engine[ring->id].ringbuf = ringbuf;
1734 ctx->engine[ring->id].state = ctx_obj;
1735
1736 if (ctx == ring->default_context) {
1737 /* The status page is offset 0 from the default context object
1738 * in LRC mode. */
1739 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
1740 ring->status_page.page_addr =
1741 kmap(sg_page(ctx_obj->pages->sgl));
1742 if (ring->status_page.page_addr == NULL)
1743 return -ENOMEM;
1744 ring->status_page.obj = ctx_obj;
1745 }
1746
1747 if (ring->id == RCS && !ctx->rcs_initialized) {
1748 ret = intel_lr_context_render_state_init(ring, ctx);
1749 if (ret) {
1750 DRM_ERROR("Init render state failed: %d\n", ret);
1751 ctx->engine[ring->id].ringbuf = NULL;
1752 ctx->engine[ring->id].state = NULL;
1753 intel_destroy_ringbuffer_obj(ringbuf);
1754 goto error;
1755 }
1756 ctx->rcs_initialized = true;
1757 }
1758
1759 return 0;
1760
1761error:
1762 kfree(ringbuf);
1763 i915_gem_object_ggtt_unpin(ctx_obj);
1764 drm_gem_object_unreference(&ctx_obj->base);
1765 return ret;
1766}
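
/*
 * Illustrative call-site sketch (hypothetical helper, not part of this
 * patch): since creation is deferred, a submission path is expected to
 * make sure the per-engine LRC bits exist for the engine it is about to
 * use, mirroring the early-out check at the top of
 * intel_lr_context_deferred_create() itself.
 */
static int example_prepare_lr_context(struct intel_context *ctx,
				      struct intel_engine_cs *ring)
{
	/* Only pay for the backing object and the ringbuffer once we really
	 * intend to run this context on this engine. */
	if (ctx->engine[ring->id].state == NULL)
		return intel_lr_context_deferred_create(ctx, ring);

	return 0;
}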
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
new file mode 100644
index 000000000000..33c3b4bf28c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef _INTEL_LRC_H_
25#define _INTEL_LRC_H_
26
27/* Execlists regs */
28#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
29#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
30#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
31#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
32#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
33
34/* Logical Rings */
35void intel_logical_ring_stop(struct intel_engine_cs *ring);
36void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
37int intel_logical_rings_init(struct drm_device *dev);
38
39int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
40void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
41/**
42 * intel_logical_ring_advance() - advance the ringbuffer tail
43 * @ringbuf: Ringbuffer to advance.
44 *
45 * The tail is only updated in our logical ringbuffer struct.
46 */
47static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
48{
49 ringbuf->tail &= ringbuf->size - 1;
50}
51/**
52 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
53 * @ringbuf: Ringbuffer to write to.
54 * @data: DWORD to write.
55 */
56static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
57 u32 data)
58{
59 iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
60 ringbuf->tail += 4;
61}
62int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
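/*
 * Typical usage sketch for the three emit helpers above (illustrative
 * only; the real command stream and dword count depend on the emitter):
 *
 *	ret = intel_logical_ring_begin(ringbuf, 4);
 *	if (ret)
 *		return ret;
 *
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(ringbuf, MI_NOOP);
 *	intel_logical_ring_advance(ringbuf);
 *
 * intel_logical_ring_advance() only moves the software tail;
 * intel_logical_ring_advance_and_submit() is the variant that also tells
 * the hardware about the new work.
 */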
63
64/* Logical Ring Contexts */
65int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
66 struct intel_context *ctx);
67void intel_lr_context_free(struct intel_context *ctx);
68int intel_lr_context_deferred_create(struct intel_context *ctx,
69 struct intel_engine_cs *ring);
70
71/* Execlists */
72int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
73int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
74 struct intel_engine_cs *ring,
75 struct intel_context *ctx,
76 struct drm_i915_gem_execbuffer2 *args,
77 struct list_head *vmas,
78 struct drm_i915_gem_object *batch_obj,
79 u64 exec_start, u32 flags);
80u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
81
82/**
83 * struct intel_ctx_submit_request - queued context submission request
84 * @ctx: Context to submit to the ELSP.
85 * @ring: Engine to submit it to.
86 * @tail: how far into the context's ringbuffer this request goes.
87 * @execlist_link: link in the submission queue.
88 * @work: workqueue for processing this request in a bottom half.
89 * @elsp_submitted: no. of times this request has been sent to the ELSP.
90 *
91 * The ELSP only accepts two elements at a time, so we queue context/tail
92 * pairs on a given queue (ring->execlist_queue) until the hardware is
93 * available. The queue serves a double purpose: we also use it to keep track
94 * of the up to 2 contexts currently in the hardware (usually one in execution
95 * and the other queued up by the GPU): We only remove elements from the head
96 * of the queue when the hardware informs us that an element has been
97 * completed.
98 *
99 * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
100 */
101struct intel_ctx_submit_request {
102 struct intel_context *ctx;
103 struct intel_engine_cs *ring;
104 u32 tail;
105
106 struct list_head execlist_link;
107 struct work_struct work;
108
109 int elsp_submitted;
110};
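/*
 * Rough submission sketch (illustrative pseudo-code; only the field and
 * function names that appear above are real, the rest stands in for logic
 * that lives in intel_lrc.c):
 *
 *	spin_lock_irqsave(&ring->execlist_lock, flags);
 *	list_add_tail(&req->execlist_link, &ring->execlist_queue);
 *	if (fewer than 2 requests are currently in the hardware)
 *		write up to 2 context/tail pairs to the ELSP;
 *	spin_unlock_irqrestore(&ring->execlist_lock, flags);
 *
 * Completion runs the other way: intel_execlists_handle_ctx_events() pops
 * finished requests off the head of ring->execlist_queue as context-switch
 * interrupts arrive.
 */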
111
112void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
113
114#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index fdf40267249c..a6bd1422e38f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -823,8 +823,7 @@ bool intel_is_dual_link_lvds(struct drm_device *dev)
823 struct intel_encoder *encoder; 823 struct intel_encoder *encoder;
824 struct intel_lvds_encoder *lvds_encoder; 824 struct intel_lvds_encoder *lvds_encoder;
825 825
826 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 826 for_each_intel_encoder(dev, encoder) {
827 base.head) {
828 if (encoder->type == INTEL_OUTPUT_LVDS) { 827 if (encoder->type == INTEL_OUTPUT_LVDS) {
829 lvds_encoder = to_lvds_encoder(&encoder->base); 828 lvds_encoder = to_lvds_encoder(&encoder->base);
830 829
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 8e374449c6b5..18784470a760 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -751,6 +751,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
751 751
752 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 752 spin_lock_irqsave(&dev_priv->backlight_lock, flags);
753 753
754 if (panel->backlight.device)
755 panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
754 panel->backlight.enabled = false; 756 panel->backlight.enabled = false;
755 dev_priv->display.disable_backlight(connector); 757 dev_priv->display.disable_backlight(connector);
756 758
@@ -957,6 +959,8 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
957 959
958 dev_priv->display.enable_backlight(connector); 960 dev_priv->display.enable_backlight(connector);
959 panel->backlight.enabled = true; 961 panel->backlight.enabled = true;
962 if (panel->backlight.device)
963 panel->backlight.device->props.power = FB_BLANK_UNBLANK;
960 964
961 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 965 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
962} 966}
@@ -965,6 +969,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
965static int intel_backlight_device_update_status(struct backlight_device *bd) 969static int intel_backlight_device_update_status(struct backlight_device *bd)
966{ 970{
967 struct intel_connector *connector = bl_get_data(bd); 971 struct intel_connector *connector = bl_get_data(bd);
972 struct intel_panel *panel = &connector->panel;
968 struct drm_device *dev = connector->base.dev; 973 struct drm_device *dev = connector->base.dev;
969 974
970 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 975 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
@@ -972,6 +977,23 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
972 bd->props.brightness, bd->props.max_brightness); 977 bd->props.brightness, bd->props.max_brightness);
973 intel_panel_set_backlight(connector, bd->props.brightness, 978 intel_panel_set_backlight(connector, bd->props.brightness,
974 bd->props.max_brightness); 979 bd->props.max_brightness);
980
981 /*
982 * Allow flipping bl_power as a sub-state of enabled. Sadly the
                                                                         983 * backlight class device does not make it easy to differentiate
984 * between callbacks for brightness and bl_power, so our backlight_power
985 * callback needs to take this into account.
986 */
987 if (panel->backlight.enabled) {
988 if (panel->backlight_power) {
989 bool enable = bd->props.power == FB_BLANK_UNBLANK &&
990 bd->props.brightness != 0;
991 panel->backlight_power(connector, enable);
992 }
993 } else {
994 bd->props.power = FB_BLANK_POWERDOWN;
995 }
996
975 drm_modeset_unlock(&dev->mode_config.connection_mutex); 997 drm_modeset_unlock(&dev->mode_config.connection_mutex);
976 return 0; 998 return 0;
977} 999}
@@ -1023,6 +1045,11 @@ static int intel_backlight_device_register(struct intel_connector *connector)
1023 panel->backlight.level, 1045 panel->backlight.level,
1024 props.max_brightness); 1046 props.max_brightness);
1025 1047
1048 if (panel->backlight.enabled)
1049 props.power = FB_BLANK_UNBLANK;
1050 else
1051 props.power = FB_BLANK_POWERDOWN;
1052
1026 /* 1053 /*
1027 * Note: using the same name independent of the connector prevents 1054 * Note: using the same name independent of the connector prevents
1028 * registration of multiple backlight devices in the driver. 1055 * registration of multiple backlight devices in the driver.
@@ -1203,7 +1230,7 @@ static int vlv_setup_backlight(struct intel_connector *connector)
1203 enum pipe pipe; 1230 enum pipe pipe;
1204 u32 ctl, ctl2, val; 1231 u32 ctl, ctl2, val;
1205 1232
1206 for_each_pipe(pipe) { 1233 for_each_pipe(dev_priv, pipe) {
1207 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); 1234 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
1208 1235
1209 /* Skip if the modulation freq is already set */ 1236 /* Skip if the modulation freq is already set */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 40c12295c0bd..c27b6140bfd1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -309,6 +309,9 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
309 309
310 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 310 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
311 311
312 if (dev_priv->fbc.false_color)
313 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
314
312 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 315 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
313 316
314 if (IS_IVYBRIDGE(dev)) { 317 if (IS_IVYBRIDGE(dev)) {
@@ -342,6 +345,16 @@ bool intel_fbc_enabled(struct drm_device *dev)
342 return dev_priv->display.fbc_enabled(dev); 345 return dev_priv->display.fbc_enabled(dev);
343} 346}
344 347
348void gen8_fbc_sw_flush(struct drm_device *dev, u32 value)
349{
350 struct drm_i915_private *dev_priv = dev->dev_private;
351
352 if (!IS_GEN8(dev))
353 return;
354
355 I915_WRITE(MSG_FBC_REND_STATE, value);
356}
357
345static void intel_fbc_work_fn(struct work_struct *__work) 358static void intel_fbc_work_fn(struct work_struct *__work)
346{ 359{
347 struct intel_fbc_work *work = 360 struct intel_fbc_work *work =
@@ -578,6 +591,12 @@ void intel_update_fbc(struct drm_device *dev)
578 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); 591 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
579 goto out_disable; 592 goto out_disable;
580 } 593 }
594 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
595 to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
596 if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
597 DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
598 goto out_disable;
599 }
581 600
582 /* If the kernel debugger is active, always disable compression */ 601 /* If the kernel debugger is active, always disable compression */
583 if (in_dbg_master()) 602 if (in_dbg_master())
@@ -853,7 +872,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
853 * A value of 5us seems to be a good balance; safe for very low end 872 * A value of 5us seems to be a good balance; safe for very low end
854 * platforms but not overly aggressive on lower latency configs. 873 * platforms but not overly aggressive on lower latency configs.
855 */ 874 */
856static const int latency_ns = 5000; 875static const int pessimal_latency_ns = 5000;
857 876
858static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 877static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
859{ 878{
@@ -982,13 +1001,20 @@ static const struct intel_watermark_params i915_wm_info = {
982 .guard_size = 2, 1001 .guard_size = 2,
983 .cacheline_size = I915_FIFO_LINE_SIZE, 1002 .cacheline_size = I915_FIFO_LINE_SIZE,
984}; 1003};
985static const struct intel_watermark_params i830_wm_info = { 1004static const struct intel_watermark_params i830_a_wm_info = {
986 .fifo_size = I855GM_FIFO_SIZE, 1005 .fifo_size = I855GM_FIFO_SIZE,
987 .max_wm = I915_MAX_WM, 1006 .max_wm = I915_MAX_WM,
988 .default_wm = 1, 1007 .default_wm = 1,
989 .guard_size = 2, 1008 .guard_size = 2,
990 .cacheline_size = I830_FIFO_LINE_SIZE, 1009 .cacheline_size = I830_FIFO_LINE_SIZE,
991}; 1010};
1011static const struct intel_watermark_params i830_bc_wm_info = {
1012 .fifo_size = I855GM_FIFO_SIZE,
1013 .max_wm = I915_MAX_WM/2,
1014 .default_wm = 1,
1015 .guard_size = 2,
1016 .cacheline_size = I830_FIFO_LINE_SIZE,
1017};
992static const struct intel_watermark_params i845_wm_info = { 1018static const struct intel_watermark_params i845_wm_info = {
993 .fifo_size = I830_FIFO_SIZE, 1019 .fifo_size = I830_FIFO_SIZE,
994 .max_wm = I915_MAX_WM, 1020 .max_wm = I915_MAX_WM,
@@ -1044,6 +1070,17 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
1044 wm_size = wm->max_wm; 1070 wm_size = wm->max_wm;
1045 if (wm_size <= 0) 1071 if (wm_size <= 0)
1046 wm_size = wm->default_wm; 1072 wm_size = wm->default_wm;
1073
1074 /*
1075 * Bspec seems to indicate that the value shouldn't be lower than
1076 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
                                                                        1077 * Let's go for 8 which is the burst size since certain platforms
1078 * already use a hardcoded 8 (which is what the spec says should be
1079 * done).
1080 */
1081 if (wm_size <= 8)
1082 wm_size = 8;
1083
1047 return wm_size; 1084 return wm_size;
1048} 1085}
1049 1086
@@ -1268,33 +1305,27 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1268 display, cursor); 1305 display, cursor);
1269} 1306}
1270 1307
1271static bool vlv_compute_drain_latency(struct drm_device *dev, 1308static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1272 int plane, 1309 int pixel_size,
1273 int *plane_prec_mult, 1310 int *prec_mult,
1274 int *plane_dl, 1311 int *drain_latency)
1275 int *cursor_prec_mult,
1276 int *cursor_dl)
1277{ 1312{
1278 struct drm_crtc *crtc;
1279 int clock, pixel_size;
1280 int entries; 1313 int entries;
1314 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1281 1315
1282 crtc = intel_get_crtc_for_plane(dev, plane); 1316 if (WARN(clock == 0, "Pixel clock is zero!\n"))
1283 if (!intel_crtc_active(crtc))
1284 return false; 1317 return false;
1285 1318
1286 clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; 1319 if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
1287 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ 1320 return false;
1288 1321
1289 entries = (clock / 1000) * pixel_size; 1322 entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
1290 *plane_prec_mult = (entries > 128) ? 1323 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
1291 DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32; 1324 DRAIN_LATENCY_PRECISION_32;
1292 *plane_dl = (64 * (*plane_prec_mult) * 4) / entries; 1325 *drain_latency = (64 * (*prec_mult) * 4) / entries;
1293 1326
1294 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */ 1327 if (*drain_latency > DRAIN_LATENCY_MASK)
1295 *cursor_prec_mult = (entries > 128) ? 1328 *drain_latency = DRAIN_LATENCY_MASK;
1296 DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
1297 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
1298 1329
1299 return true; 1330 return true;
1300} 1331}
@@ -1307,39 +1338,48 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1307 * latency value. 1338 * latency value.
1308 */ 1339 */
1309 1340
1310static void vlv_update_drain_latency(struct drm_device *dev) 1341static void vlv_update_drain_latency(struct drm_crtc *crtc)
1311{ 1342{
1312 struct drm_i915_private *dev_priv = dev->dev_private; 1343 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1313 int planea_prec, planea_dl, planeb_prec, planeb_dl; 1344 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1314 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl; 1345 int pixel_size;
1315 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is 1346 int drain_latency;
1316 either 16 or 32 */ 1347 enum pipe pipe = intel_crtc->pipe;
1348 int plane_prec, prec_mult, plane_dl;
1349
1350 plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 |
1351 DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 |
1352 (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
1317 1353
1318 /* For plane A, Cursor A */ 1354 if (!intel_crtc_active(crtc)) {
1319 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl, 1355 I915_WRITE(VLV_DDL(pipe), plane_dl);
1320 &cursor_prec_mult, &cursora_dl)) { 1356 return;
1321 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? 1357 }
1322 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
1323 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1324 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
1325 1358
1326 I915_WRITE(VLV_DDL1, cursora_prec | 1359 /* Primary plane Drain Latency */
1327 (cursora_dl << DDL_CURSORA_SHIFT) | 1360 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
1328 planea_prec | planea_dl); 1361 if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1362 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
1363 DDL_PLANE_PRECISION_64 :
1364 DDL_PLANE_PRECISION_32;
1365 plane_dl |= plane_prec | drain_latency;
1329 } 1366 }
1330 1367
1331 /* For plane B, Cursor B */ 1368 /* Cursor Drain Latency
1332 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl, 1369 * BPP is always 4 for cursor
1333 &cursor_prec_mult, &cursorb_dl)) { 1370 */
1334 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ? 1371 pixel_size = 4;
1335 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
1336 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1337 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
1338 1372
1339 I915_WRITE(VLV_DDL2, cursorb_prec | 1373 /* Program cursor DL only if it is enabled */
1340 (cursorb_dl << DDL_CURSORB_SHIFT) | 1374 if (intel_crtc->cursor_base &&
1341 planeb_prec | planeb_dl); 1375 vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1376 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
1377 DDL_CURSOR_PRECISION_64 :
1378 DDL_CURSOR_PRECISION_32;
1379 plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
1342 } 1380 }
1381
1382 I915_WRITE(VLV_DDL(pipe), plane_dl);
1343} 1383}
1344 1384
1345#define single_plane_enabled(mask) is_power_of_2(mask) 1385#define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1355,20 +1395,92 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1355 unsigned int enabled = 0; 1395 unsigned int enabled = 0;
1356 bool cxsr_enabled; 1396 bool cxsr_enabled;
1357 1397
1358 vlv_update_drain_latency(dev); 1398 vlv_update_drain_latency(crtc);
1399
1400 if (g4x_compute_wm0(dev, PIPE_A,
1401 &valleyview_wm_info, pessimal_latency_ns,
1402 &valleyview_cursor_wm_info, pessimal_latency_ns,
1403 &planea_wm, &cursora_wm))
1404 enabled |= 1 << PIPE_A;
1405
1406 if (g4x_compute_wm0(dev, PIPE_B,
1407 &valleyview_wm_info, pessimal_latency_ns,
1408 &valleyview_cursor_wm_info, pessimal_latency_ns,
1409 &planeb_wm, &cursorb_wm))
1410 enabled |= 1 << PIPE_B;
1411
1412 if (single_plane_enabled(enabled) &&
1413 g4x_compute_srwm(dev, ffs(enabled) - 1,
1414 sr_latency_ns,
1415 &valleyview_wm_info,
1416 &valleyview_cursor_wm_info,
1417 &plane_sr, &ignore_cursor_sr) &&
1418 g4x_compute_srwm(dev, ffs(enabled) - 1,
1419 2*sr_latency_ns,
1420 &valleyview_wm_info,
1421 &valleyview_cursor_wm_info,
1422 &ignore_plane_sr, &cursor_sr)) {
1423 cxsr_enabled = true;
1424 } else {
1425 cxsr_enabled = false;
1426 intel_set_memory_cxsr(dev_priv, false);
1427 plane_sr = cursor_sr = 0;
1428 }
1429
1430 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1431 "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1432 planea_wm, cursora_wm,
1433 planeb_wm, cursorb_wm,
1434 plane_sr, cursor_sr);
1435
1436 I915_WRITE(DSPFW1,
1437 (plane_sr << DSPFW_SR_SHIFT) |
1438 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1439 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1440 (planea_wm << DSPFW_PLANEA_SHIFT));
1441 I915_WRITE(DSPFW2,
1442 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1443 (cursora_wm << DSPFW_CURSORA_SHIFT));
1444 I915_WRITE(DSPFW3,
1445 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1446 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1447
1448 if (cxsr_enabled)
1449 intel_set_memory_cxsr(dev_priv, true);
1450}
1451
1452static void cherryview_update_wm(struct drm_crtc *crtc)
1453{
1454 struct drm_device *dev = crtc->dev;
1455 static const int sr_latency_ns = 12000;
1456 struct drm_i915_private *dev_priv = dev->dev_private;
1457 int planea_wm, planeb_wm, planec_wm;
1458 int cursora_wm, cursorb_wm, cursorc_wm;
1459 int plane_sr, cursor_sr;
1460 int ignore_plane_sr, ignore_cursor_sr;
1461 unsigned int enabled = 0;
1462 bool cxsr_enabled;
1463
1464 vlv_update_drain_latency(crtc);
1359 1465
1360 if (g4x_compute_wm0(dev, PIPE_A, 1466 if (g4x_compute_wm0(dev, PIPE_A,
1361 &valleyview_wm_info, latency_ns, 1467 &valleyview_wm_info, pessimal_latency_ns,
1362 &valleyview_cursor_wm_info, latency_ns, 1468 &valleyview_cursor_wm_info, pessimal_latency_ns,
1363 &planea_wm, &cursora_wm)) 1469 &planea_wm, &cursora_wm))
1364 enabled |= 1 << PIPE_A; 1470 enabled |= 1 << PIPE_A;
1365 1471
1366 if (g4x_compute_wm0(dev, PIPE_B, 1472 if (g4x_compute_wm0(dev, PIPE_B,
1367 &valleyview_wm_info, latency_ns, 1473 &valleyview_wm_info, pessimal_latency_ns,
1368 &valleyview_cursor_wm_info, latency_ns, 1474 &valleyview_cursor_wm_info, pessimal_latency_ns,
1369 &planeb_wm, &cursorb_wm)) 1475 &planeb_wm, &cursorb_wm))
1370 enabled |= 1 << PIPE_B; 1476 enabled |= 1 << PIPE_B;
1371 1477
1478 if (g4x_compute_wm0(dev, PIPE_C,
1479 &valleyview_wm_info, pessimal_latency_ns,
1480 &valleyview_cursor_wm_info, pessimal_latency_ns,
1481 &planec_wm, &cursorc_wm))
1482 enabled |= 1 << PIPE_C;
1483
1372 if (single_plane_enabled(enabled) && 1484 if (single_plane_enabled(enabled) &&
1373 g4x_compute_srwm(dev, ffs(enabled) - 1, 1485 g4x_compute_srwm(dev, ffs(enabled) - 1,
1374 sr_latency_ns, 1486 sr_latency_ns,
@@ -1387,27 +1499,66 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
1387 plane_sr = cursor_sr = 0; 1499 plane_sr = cursor_sr = 0;
1388 } 1500 }
1389 1501
1390 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1502 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1503 "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
1504 "SR: plane=%d, cursor=%d\n",
1391 planea_wm, cursora_wm, 1505 planea_wm, cursora_wm,
1392 planeb_wm, cursorb_wm, 1506 planeb_wm, cursorb_wm,
1507 planec_wm, cursorc_wm,
1393 plane_sr, cursor_sr); 1508 plane_sr, cursor_sr);
1394 1509
1395 I915_WRITE(DSPFW1, 1510 I915_WRITE(DSPFW1,
1396 (plane_sr << DSPFW_SR_SHIFT) | 1511 (plane_sr << DSPFW_SR_SHIFT) |
1397 (cursorb_wm << DSPFW_CURSORB_SHIFT) | 1512 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1398 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1513 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1399 planea_wm); 1514 (planea_wm << DSPFW_PLANEA_SHIFT));
1400 I915_WRITE(DSPFW2, 1515 I915_WRITE(DSPFW2,
1401 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | 1516 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1402 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1517 (cursora_wm << DSPFW_CURSORA_SHIFT));
1403 I915_WRITE(DSPFW3, 1518 I915_WRITE(DSPFW3,
1404 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | 1519 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1405 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1520 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1521 I915_WRITE(DSPFW9_CHV,
1522 (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
1523 DSPFW_CURSORC_MASK)) |
1524 (planec_wm << DSPFW_PLANEC_SHIFT) |
1525 (cursorc_wm << DSPFW_CURSORC_SHIFT));
1406 1526
1407 if (cxsr_enabled) 1527 if (cxsr_enabled)
1408 intel_set_memory_cxsr(dev_priv, true); 1528 intel_set_memory_cxsr(dev_priv, true);
1409} 1529}
1410 1530
1531static void valleyview_update_sprite_wm(struct drm_plane *plane,
1532 struct drm_crtc *crtc,
1533 uint32_t sprite_width,
1534 uint32_t sprite_height,
1535 int pixel_size,
1536 bool enabled, bool scaled)
1537{
1538 struct drm_device *dev = crtc->dev;
1539 struct drm_i915_private *dev_priv = dev->dev_private;
1540 int pipe = to_intel_plane(plane)->pipe;
1541 int sprite = to_intel_plane(plane)->plane;
1542 int drain_latency;
1543 int plane_prec;
1544 int sprite_dl;
1545 int prec_mult;
1546
1547 sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) |
1548 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
1549
1550 if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
1551 &drain_latency)) {
1552 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ?
1553 DDL_SPRITE_PRECISION_64(sprite) :
1554 DDL_SPRITE_PRECISION_32(sprite);
1555 sprite_dl |= plane_prec |
1556 (drain_latency << DDL_SPRITE_SHIFT(sprite));
1557 }
1558
1559 I915_WRITE(VLV_DDL(pipe), sprite_dl);
1560}
1561
1411static void g4x_update_wm(struct drm_crtc *crtc) 1562static void g4x_update_wm(struct drm_crtc *crtc)
1412{ 1563{
1413 struct drm_device *dev = crtc->dev; 1564 struct drm_device *dev = crtc->dev;
@@ -1419,14 +1570,14 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1419 bool cxsr_enabled; 1570 bool cxsr_enabled;
1420 1571
1421 if (g4x_compute_wm0(dev, PIPE_A, 1572 if (g4x_compute_wm0(dev, PIPE_A,
1422 &g4x_wm_info, latency_ns, 1573 &g4x_wm_info, pessimal_latency_ns,
1423 &g4x_cursor_wm_info, latency_ns, 1574 &g4x_cursor_wm_info, pessimal_latency_ns,
1424 &planea_wm, &cursora_wm)) 1575 &planea_wm, &cursora_wm))
1425 enabled |= 1 << PIPE_A; 1576 enabled |= 1 << PIPE_A;
1426 1577
1427 if (g4x_compute_wm0(dev, PIPE_B, 1578 if (g4x_compute_wm0(dev, PIPE_B,
1428 &g4x_wm_info, latency_ns, 1579 &g4x_wm_info, pessimal_latency_ns,
1429 &g4x_cursor_wm_info, latency_ns, 1580 &g4x_cursor_wm_info, pessimal_latency_ns,
1430 &planeb_wm, &cursorb_wm)) 1581 &planeb_wm, &cursorb_wm))
1431 enabled |= 1 << PIPE_B; 1582 enabled |= 1 << PIPE_B;
1432 1583
@@ -1443,7 +1594,8 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1443 plane_sr = cursor_sr = 0; 1594 plane_sr = cursor_sr = 0;
1444 } 1595 }
1445 1596
1446 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1597 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
1598 "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1447 planea_wm, cursora_wm, 1599 planea_wm, cursora_wm,
1448 planeb_wm, cursorb_wm, 1600 planeb_wm, cursorb_wm,
1449 plane_sr, cursor_sr); 1601 plane_sr, cursor_sr);
@@ -1452,7 +1604,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1452 (plane_sr << DSPFW_SR_SHIFT) | 1604 (plane_sr << DSPFW_SR_SHIFT) |
1453 (cursorb_wm << DSPFW_CURSORB_SHIFT) | 1605 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1454 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1606 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1455 planea_wm); 1607 (planea_wm << DSPFW_PLANEA_SHIFT));
1456 I915_WRITE(DSPFW2, 1608 I915_WRITE(DSPFW2,
1457 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | 1609 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1458 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1610 (cursora_wm << DSPFW_CURSORA_SHIFT));
@@ -1526,8 +1678,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1526 1678
1527 /* 965 has limitations... */ 1679 /* 965 has limitations... */
1528 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | 1680 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1529 (8 << 16) | (8 << 8) | (8 << 0)); 1681 (8 << DSPFW_CURSORB_SHIFT) |
1530 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 1682 (8 << DSPFW_PLANEB_SHIFT) |
1683 (8 << DSPFW_PLANEA_SHIFT));
1684 I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
1685 (8 << DSPFW_PLANEC_SHIFT_OLD));
1531 /* update cursor SR watermark */ 1686 /* update cursor SR watermark */
1532 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1687 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1533 1688
@@ -1552,7 +1707,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1552 else if (!IS_GEN2(dev)) 1707 else if (!IS_GEN2(dev))
1553 wm_info = &i915_wm_info; 1708 wm_info = &i915_wm_info;
1554 else 1709 else
1555 wm_info = &i830_wm_info; 1710 wm_info = &i830_a_wm_info;
1556 1711
1557 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1712 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1558 crtc = intel_get_crtc_for_plane(dev, 0); 1713 crtc = intel_get_crtc_for_plane(dev, 0);
@@ -1565,10 +1720,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1565 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1720 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1566 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1721 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1567 wm_info, fifo_size, cpp, 1722 wm_info, fifo_size, cpp,
1568 latency_ns); 1723 pessimal_latency_ns);
1569 enabled = crtc; 1724 enabled = crtc;
1570 } else 1725 } else {
1571 planea_wm = fifo_size - wm_info->guard_size; 1726 planea_wm = fifo_size - wm_info->guard_size;
1727 if (planea_wm > (long)wm_info->max_wm)
1728 planea_wm = wm_info->max_wm;
1729 }
1730
1731 if (IS_GEN2(dev))
1732 wm_info = &i830_bc_wm_info;
1572 1733
1573 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1734 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1574 crtc = intel_get_crtc_for_plane(dev, 1); 1735 crtc = intel_get_crtc_for_plane(dev, 1);
@@ -1581,13 +1742,16 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1581 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode; 1742 adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
1582 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1743 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1583 wm_info, fifo_size, cpp, 1744 wm_info, fifo_size, cpp,
1584 latency_ns); 1745 pessimal_latency_ns);
1585 if (enabled == NULL) 1746 if (enabled == NULL)
1586 enabled = crtc; 1747 enabled = crtc;
1587 else 1748 else
1588 enabled = NULL; 1749 enabled = NULL;
1589 } else 1750 } else {
1590 planeb_wm = fifo_size - wm_info->guard_size; 1751 planeb_wm = fifo_size - wm_info->guard_size;
1752 if (planeb_wm > (long)wm_info->max_wm)
1753 planeb_wm = wm_info->max_wm;
1754 }
1591 1755
1592 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1756 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1593 1757
@@ -1674,7 +1838,7 @@ static void i845_update_wm(struct drm_crtc *unused_crtc)
1674 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1838 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
1675 &i845_wm_info, 1839 &i845_wm_info,
1676 dev_priv->display.get_fifo_size(dev, 0), 1840 dev_priv->display.get_fifo_size(dev, 0),
1677 4, latency_ns); 1841 4, pessimal_latency_ns);
1678 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1842 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1679 fwater_lo |= (3<<8) | planea_wm; 1843 fwater_lo |= (3<<8) | planea_wm;
1680 1844
@@ -2527,7 +2691,7 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2527#define WM_DIRTY_FBC (1 << 24) 2691#define WM_DIRTY_FBC (1 << 24)
2528#define WM_DIRTY_DDB (1 << 25) 2692#define WM_DIRTY_DDB (1 << 25)
2529 2693
2530static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, 2694static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2531 const struct ilk_wm_values *old, 2695 const struct ilk_wm_values *old,
2532 const struct ilk_wm_values *new) 2696 const struct ilk_wm_values *new)
2533{ 2697{
@@ -2535,7 +2699,7 @@ static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2535 enum pipe pipe; 2699 enum pipe pipe;
2536 int wm_lp; 2700 int wm_lp;
2537 2701
2538 for_each_pipe(pipe) { 2702 for_each_pipe(dev_priv, pipe) {
2539 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { 2703 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2540 dirty |= WM_DIRTY_LINETIME(pipe); 2704 dirty |= WM_DIRTY_LINETIME(pipe);
2541 /* Must disable LP1+ watermarks too */ 2705 /* Must disable LP1+ watermarks too */
@@ -2621,7 +2785,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2621 unsigned int dirty; 2785 unsigned int dirty;
2622 uint32_t val; 2786 uint32_t val;
2623 2787
2624 dirty = ilk_compute_wm_dirty(dev, previous, results); 2788 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2625 if (!dirty) 2789 if (!dirty)
2626 return; 2790 return;
2627 2791
@@ -3327,13 +3491,18 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3327 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3491 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3328 WARN_ON(val < dev_priv->rps.min_freq_softlimit); 3492 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3329 3493
3330 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n", 3494 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
3331 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 3495 "Odd GPU freq value\n"))
3332 dev_priv->rps.cur_freq, 3496 val &= ~1;
3333 vlv_gpu_freq(dev_priv, val), val); 3497
3498 if (val != dev_priv->rps.cur_freq) {
3499 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3500 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3501 dev_priv->rps.cur_freq,
3502 vlv_gpu_freq(dev_priv, val), val);
3334 3503
3335 if (val != dev_priv->rps.cur_freq)
3336 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3504 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3505 }
3337 3506
3338 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 3507 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3339 3508
@@ -3406,8 +3575,14 @@ static void valleyview_disable_rps(struct drm_device *dev)
3406{ 3575{
3407 struct drm_i915_private *dev_priv = dev->dev_private; 3576 struct drm_i915_private *dev_priv = dev->dev_private;
3408 3577
                                                                        3578 /* we're doing forcewake before disabling RC6,
                                                                        3579 * this is what the BIOS expects when going into suspend */
3580 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3581
3409 I915_WRITE(GEN6_RC_CONTROL, 0); 3582 I915_WRITE(GEN6_RC_CONTROL, 0);
3410 3583
3584 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3585
3411 gen6_disable_rps_interrupts(dev); 3586 gen6_disable_rps_interrupts(dev);
3412} 3587}
3413 3588
@@ -3598,7 +3773,6 @@ static void gen6_enable_rps(struct drm_device *dev)
3598 struct drm_i915_private *dev_priv = dev->dev_private; 3773 struct drm_i915_private *dev_priv = dev->dev_private;
3599 struct intel_engine_cs *ring; 3774 struct intel_engine_cs *ring;
3600 u32 rp_state_cap; 3775 u32 rp_state_cap;
3601 u32 gt_perf_status;
3602 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 3776 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3603 u32 gtfifodbg; 3777 u32 gtfifodbg;
3604 int rc6_mode; 3778 int rc6_mode;
@@ -3623,7 +3797,6 @@ static void gen6_enable_rps(struct drm_device *dev)
3623 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3797 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3624 3798
3625 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 3799 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3626 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3627 3800
3628 parse_rp_state_cap(dev_priv, rp_state_cap); 3801 parse_rp_state_cap(dev_priv, rp_state_cap);
3629 3802
@@ -3965,11 +4138,27 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
3965static void valleyview_init_gt_powersave(struct drm_device *dev) 4138static void valleyview_init_gt_powersave(struct drm_device *dev)
3966{ 4139{
3967 struct drm_i915_private *dev_priv = dev->dev_private; 4140 struct drm_i915_private *dev_priv = dev->dev_private;
4141 u32 val;
3968 4142
3969 valleyview_setup_pctx(dev); 4143 valleyview_setup_pctx(dev);
3970 4144
3971 mutex_lock(&dev_priv->rps.hw_lock); 4145 mutex_lock(&dev_priv->rps.hw_lock);
3972 4146
4147 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4148 switch ((val >> 6) & 3) {
4149 case 0:
4150 case 1:
4151 dev_priv->mem_freq = 800;
4152 break;
4153 case 2:
4154 dev_priv->mem_freq = 1066;
4155 break;
4156 case 3:
4157 dev_priv->mem_freq = 1333;
4158 break;
4159 }
4160 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4161
3973 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 4162 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3974 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 4163 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3975 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 4164 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
@@ -4004,11 +4193,38 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
4004static void cherryview_init_gt_powersave(struct drm_device *dev) 4193static void cherryview_init_gt_powersave(struct drm_device *dev)
4005{ 4194{
4006 struct drm_i915_private *dev_priv = dev->dev_private; 4195 struct drm_i915_private *dev_priv = dev->dev_private;
4196 u32 val;
4007 4197
4008 cherryview_setup_pctx(dev); 4198 cherryview_setup_pctx(dev);
4009 4199
4010 mutex_lock(&dev_priv->rps.hw_lock); 4200 mutex_lock(&dev_priv->rps.hw_lock);
4011 4201
4202 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
4203 switch ((val >> 2) & 0x7) {
4204 case 0:
4205 case 1:
4206 dev_priv->rps.cz_freq = 200;
4207 dev_priv->mem_freq = 1600;
4208 break;
4209 case 2:
4210 dev_priv->rps.cz_freq = 267;
4211 dev_priv->mem_freq = 1600;
4212 break;
4213 case 3:
4214 dev_priv->rps.cz_freq = 333;
4215 dev_priv->mem_freq = 2000;
4216 break;
4217 case 4:
4218 dev_priv->rps.cz_freq = 320;
4219 dev_priv->mem_freq = 1600;
4220 break;
4221 case 5:
4222 dev_priv->rps.cz_freq = 400;
4223 dev_priv->mem_freq = 1600;
4224 break;
4225 }
4226 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
4227
4012 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 4228 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
4013 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 4229 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
4014 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 4230 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
@@ -4030,6 +4246,12 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
4030 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq), 4246 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
4031 dev_priv->rps.min_freq); 4247 dev_priv->rps.min_freq);
4032 4248
4249 WARN_ONCE((dev_priv->rps.max_freq |
4250 dev_priv->rps.efficient_freq |
4251 dev_priv->rps.rp1_freq |
4252 dev_priv->rps.min_freq) & 1,
4253 "Odd GPU freq values\n");
4254
4033 /* Preserve min/max settings in case of re-init */ 4255 /* Preserve min/max settings in case of re-init */
4034 if (dev_priv->rps.max_freq_softlimit == 0) 4256 if (dev_priv->rps.max_freq_softlimit == 0)
4035 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4257 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
@@ -5088,7 +5310,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
5088 struct drm_i915_private *dev_priv = dev->dev_private; 5310 struct drm_i915_private *dev_priv = dev->dev_private;
5089 int pipe; 5311 int pipe;
5090 5312
5091 for_each_pipe(pipe) { 5313 for_each_pipe(dev_priv, pipe) {
5092 I915_WRITE(DSPCNTR(pipe), 5314 I915_WRITE(DSPCNTR(pipe),
5093 I915_READ(DSPCNTR(pipe)) | 5315 I915_READ(DSPCNTR(pipe)) |
5094 DISPPLANE_TRICKLE_FEED_DISABLE); 5316 DISPPLANE_TRICKLE_FEED_DISABLE);
@@ -5203,7 +5425,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
5203 /* The below fixes the weird display corruption, a few pixels shifted 5425 /* The below fixes the weird display corruption, a few pixels shifted
5204 * downward, on (only) LVDS of some HP laptops with IVY. 5426 * downward, on (only) LVDS of some HP laptops with IVY.
5205 */ 5427 */
5206 for_each_pipe(pipe) { 5428 for_each_pipe(dev_priv, pipe) {
5207 val = I915_READ(TRANS_CHICKEN2(pipe)); 5429 val = I915_READ(TRANS_CHICKEN2(pipe));
5208 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 5430 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5209 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 5431 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
@@ -5215,7 +5437,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
5215 I915_WRITE(TRANS_CHICKEN2(pipe), val); 5437 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5216 } 5438 }
5217 /* WADP0ClockGatingDisable */ 5439 /* WADP0ClockGatingDisable */
5218 for_each_pipe(pipe) { 5440 for_each_pipe(dev_priv, pipe) {
5219 I915_WRITE(TRANS_CHICKEN1(pipe), 5441 I915_WRITE(TRANS_CHICKEN1(pipe),
5220 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 5442 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5221 } 5443 }
@@ -5383,7 +5605,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
5383 } 5605 }
5384} 5606}
5385 5607
5386static void gen8_init_clock_gating(struct drm_device *dev) 5608static void broadwell_init_clock_gating(struct drm_device *dev)
5387{ 5609{
5388 struct drm_i915_private *dev_priv = dev->dev_private; 5610 struct drm_i915_private *dev_priv = dev->dev_private;
5389 enum pipe pipe; 5611 enum pipe pipe;
@@ -5395,37 +5617,12 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5395 /* FIXME(BDW): Check all the w/a, some might only apply to 5617 /* FIXME(BDW): Check all the w/a, some might only apply to
5396 * pre-production hw. */ 5618 * pre-production hw. */
5397 5619
5398 /* WaDisablePartialInstShootdown:bdw */
5399 I915_WRITE(GEN8_ROW_CHICKEN,
5400 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5401 5620
5402 /* WaDisableThreadStallDopClockGating:bdw */
5403 /* FIXME: Unclear whether we really need this on production bdw. */
5404 I915_WRITE(GEN8_ROW_CHICKEN,
5405 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5406
5407 /*
5408 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
5409 * pre-production hardware
5410 */
5411 I915_WRITE(HALF_SLICE_CHICKEN3,
5412 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
5413 I915_WRITE(HALF_SLICE_CHICKEN3,
5414 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5415 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE)); 5621 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5416 5622
5417 I915_WRITE(_3D_CHICKEN3, 5623 I915_WRITE(_3D_CHICKEN3,
5418 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2))); 5624 _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
5419 5625
5420 I915_WRITE(COMMON_SLICE_CHICKEN2,
5421 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
5422
5423 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5424 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5425
5426 /* WaDisableDopClockGating:bdw May not be needed for production */
5427 I915_WRITE(GEN7_ROW_CHICKEN2,
5428 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5429 5626
5430 /* WaSwitchSolVfFArbitrationPriority:bdw */ 5627 /* WaSwitchSolVfFArbitrationPriority:bdw */
5431 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 5628 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5435,37 +5632,18 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5435 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 5632 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5436 5633
5437 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 5634 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
5438 for_each_pipe(pipe) { 5635 for_each_pipe(dev_priv, pipe) {
5439 I915_WRITE(CHICKEN_PIPESL_1(pipe), 5636 I915_WRITE(CHICKEN_PIPESL_1(pipe),
5440 I915_READ(CHICKEN_PIPESL_1(pipe)) | 5637 I915_READ(CHICKEN_PIPESL_1(pipe)) |
5441 BDW_DPRS_MASK_VBLANK_SRD); 5638 BDW_DPRS_MASK_VBLANK_SRD);
5442 } 5639 }
5443 5640
5444 /* Use Force Non-Coherent whenever executing a 3D context. This is a
5445 * workaround for for a possible hang in the unlikely event a TLB
5446 * invalidation occurs during a PSD flush.
5447 */
5448 I915_WRITE(HDC_CHICKEN0,
5449 I915_READ(HDC_CHICKEN0) |
5450 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
5451
5452 /* WaVSRefCountFullforceMissDisable:bdw */ 5641 /* WaVSRefCountFullforceMissDisable:bdw */
5453 /* WaDSRefCountFullforceMissDisable:bdw */ 5642 /* WaDSRefCountFullforceMissDisable:bdw */
5454 I915_WRITE(GEN7_FF_THREAD_MODE, 5643 I915_WRITE(GEN7_FF_THREAD_MODE,
5455 I915_READ(GEN7_FF_THREAD_MODE) & 5644 I915_READ(GEN7_FF_THREAD_MODE) &
5456 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 5645 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5457 5646
5458 /*
5459 * BSpec recommends 8x4 when MSAA is used,
5460 * however in practice 16x4 seems fastest.
5461 *
5462 * Note that PS/WM thread counts depend on the WIZ hashing
5463 * disable bit, which we don't touch here, but it's good
5464 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5465 */
5466 I915_WRITE(GEN7_GT_MODE,
5467 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5468
5469 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 5647 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5470 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 5648 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5471 5649
@@ -5473,9 +5651,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
5473 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 5651 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5474 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 5652 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5475 5653
5476 /* Wa4x4STCOptimizationDisable:bdw */ 5654 lpt_init_clock_gating(dev);
5477 I915_WRITE(CACHE_MODE_1,
5478 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
5479} 5655}
5480 5656
5481static void haswell_init_clock_gating(struct drm_device *dev) 5657static void haswell_init_clock_gating(struct drm_device *dev)
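Most of the clock-gating changes above go through _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() writes. A minimal sketch of that register convention, assuming the usual i915 layout in which the upper 16 bits of the written value select which of the lower 16 bits may change (so no read-modify-write cycle is needed); MASKED_BIT_ENABLE, MASKED_BIT_DISABLE and apply_masked_write are illustrative names, not the driver's:

	#include <stdint.h>

	#define MASKED_BIT_ENABLE(bit)  (((uint32_t)(bit) << 16) | (uint32_t)(bit))
	#define MASKED_BIT_DISABLE(bit) ((uint32_t)(bit) << 16)

	/* What the hardware effectively does with one of these writes. */
	static uint32_t apply_masked_write(uint32_t old_reg, uint32_t write_val)
	{
		uint32_t mask = write_val >> 16;	/* bits allowed to change */
		uint32_t val  = write_val & mask;	/* their new values */

		return (old_reg & ~mask) | val;
	}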
@@ -5631,24 +5807,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
5631static void valleyview_init_clock_gating(struct drm_device *dev) 5807static void valleyview_init_clock_gating(struct drm_device *dev)
5632{ 5808{
5633 struct drm_i915_private *dev_priv = dev->dev_private; 5809 struct drm_i915_private *dev_priv = dev->dev_private;
5634 u32 val;
5635
5636 mutex_lock(&dev_priv->rps.hw_lock);
5637 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5638 mutex_unlock(&dev_priv->rps.hw_lock);
5639 switch ((val >> 6) & 3) {
5640 case 0:
5641 case 1:
5642 dev_priv->mem_freq = 800;
5643 break;
5644 case 2:
5645 dev_priv->mem_freq = 1066;
5646 break;
5647 case 3:
5648 dev_priv->mem_freq = 1333;
5649 break;
5650 }
5651 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5652 5810
5653 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5811 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5654 5812
@@ -5724,48 +5882,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5724static void cherryview_init_clock_gating(struct drm_device *dev) 5882static void cherryview_init_clock_gating(struct drm_device *dev)
5725{ 5883{
5726 struct drm_i915_private *dev_priv = dev->dev_private; 5884 struct drm_i915_private *dev_priv = dev->dev_private;
5727 u32 val;
5728
5729 mutex_lock(&dev_priv->rps.hw_lock);
5730 val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
5731 mutex_unlock(&dev_priv->rps.hw_lock);
5732 switch ((val >> 2) & 0x7) {
5733 case 0:
5734 case 1:
5735 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
5736 dev_priv->mem_freq = 1600;
5737 break;
5738 case 2:
5739 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
5740 dev_priv->mem_freq = 1600;
5741 break;
5742 case 3:
5743 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
5744 dev_priv->mem_freq = 2000;
5745 break;
5746 case 4:
5747 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
5748 dev_priv->mem_freq = 1600;
5749 break;
5750 case 5:
5751 dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
5752 dev_priv->mem_freq = 1600;
5753 break;
5754 }
5755 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5756 5885
5757 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 5886 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5758 5887
5759 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 5888 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5760 5889
5761 /* WaDisablePartialInstShootdown:chv */
5762 I915_WRITE(GEN8_ROW_CHICKEN,
5763 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5764
5765 /* WaDisableThreadStallDopClockGating:chv */
5766 I915_WRITE(GEN8_ROW_CHICKEN,
5767 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5768
5769 /* WaVSRefCountFullforceMissDisable:chv */ 5890 /* WaVSRefCountFullforceMissDisable:chv */
5770 /* WaDSRefCountFullforceMissDisable:chv */ 5891 /* WaDSRefCountFullforceMissDisable:chv */
5771 I915_WRITE(GEN7_FF_THREAD_MODE, 5892 I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -5784,10 +5905,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
5784 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 5905 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5785 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 5906 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5786 5907
5787 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
5788 I915_WRITE(HALF_SLICE_CHICKEN3,
5789 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5790
5791 /* WaDisableGunitClockGating:chv (pre-production hw) */ 5908 /* WaDisableGunitClockGating:chv (pre-production hw) */
5792 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) | 5909 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
5793 GINT_DIS); 5910 GINT_DIS);
@@ -5797,8 +5914,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
5797 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE)); 5914 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
5798 5915
5799 /* WaDisableDopClockGating:chv (pre-production hw) */ 5916 /* WaDisableDopClockGating:chv (pre-production hw) */
5800 I915_WRITE(GEN7_ROW_CHICKEN2,
5801 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5802 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 5917 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5803 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); 5918 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
5804} 5919}
@@ -5883,6 +5998,9 @@ static void gen3_init_clock_gating(struct drm_device *dev)
5883 5998
5884 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 5999 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5885 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 6000 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
6001
6002 I915_WRITE(MI_ARB_STATE,
6003 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5886} 6004}
5887 6005
5888static void i85x_init_clock_gating(struct drm_device *dev) 6006static void i85x_init_clock_gating(struct drm_device *dev)
@@ -5894,6 +6012,9 @@ static void i85x_init_clock_gating(struct drm_device *dev)
5894 /* interrupts should cause a wake up from C3 */ 6012 /* interrupts should cause a wake up from C3 */
5895 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 6013 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
5896 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 6014 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
6015
6016 I915_WRITE(MEM_MODE,
6017 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
5897} 6018}
5898 6019
5899static void i830_init_clock_gating(struct drm_device *dev) 6020static void i830_init_clock_gating(struct drm_device *dev)
@@ -5901,6 +6022,10 @@ static void i830_init_clock_gating(struct drm_device *dev)
5901 struct drm_i915_private *dev_priv = dev->dev_private; 6022 struct drm_i915_private *dev_priv = dev->dev_private;
5902 6023
5903 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 6024 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
6025
6026 I915_WRITE(MEM_MODE,
6027 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
6028 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
5904} 6029}
5905 6030
5906void intel_init_clock_gating(struct drm_device *dev) 6031void intel_init_clock_gating(struct drm_device *dev)
@@ -6203,6 +6328,8 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6203 spin_unlock_irq(&dev_priv->irq_lock); 6328 spin_unlock_irq(&dev_priv->irq_lock);
6204 6329
6205 vlv_set_power_well(dev_priv, power_well, false); 6330 vlv_set_power_well(dev_priv, power_well, false);
6331
6332 vlv_power_sequencer_reset(dev_priv);
6206} 6333}
6207 6334
6208static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, 6335static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
@@ -6238,12 +6365,11 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6238static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 6365static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6239 struct i915_power_well *power_well) 6366 struct i915_power_well *power_well)
6240{ 6367{
6241 struct drm_device *dev = dev_priv->dev;
6242 enum pipe pipe; 6368 enum pipe pipe;
6243 6369
6244 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC); 6370 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6245 6371
6246 for_each_pipe(pipe) 6372 for_each_pipe(dev_priv, pipe)
6247 assert_pll_disabled(dev_priv, pipe); 6373 assert_pll_disabled(dev_priv, pipe);
6248 6374
6249 /* Assert common reset */ 6375 /* Assert common reset */
@@ -6252,6 +6378,153 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6252 vlv_set_power_well(dev_priv, power_well, false); 6378 vlv_set_power_well(dev_priv, power_well, false);
6253} 6379}
6254 6380
6381static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6382 struct i915_power_well *power_well)
6383{
6384 enum dpio_phy phy;
6385
6386 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6387 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6388
6389 /*
6390 * Enable the CRI clock source so we can get at the
6391 * display and the reference clock for VGA
6392 * hotplug / manual detection.
6393 */
6394 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6395 phy = DPIO_PHY0;
6396 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6397 DPLL_REFA_CLK_ENABLE_VLV);
6398 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6399 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6400 } else {
6401 phy = DPIO_PHY1;
6402 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
6403 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6404 }
6405 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6406 vlv_set_power_well(dev_priv, power_well, true);
6407
6408 /* Poll for phypwrgood signal */
6409 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
6410 DRM_ERROR("Display PHY %d is not power up\n", phy);
6411
6412 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
6413 PHY_COM_LANE_RESET_DEASSERT(phy));
6414}
6415
6416static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6417 struct i915_power_well *power_well)
6418{
6419 enum dpio_phy phy;
6420
6421 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6422 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6423
6424 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6425 phy = DPIO_PHY0;
6426 assert_pll_disabled(dev_priv, PIPE_A);
6427 assert_pll_disabled(dev_priv, PIPE_B);
6428 } else {
6429 phy = DPIO_PHY1;
6430 assert_pll_disabled(dev_priv, PIPE_C);
6431 }
6432
6433 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
6434 ~PHY_COM_LANE_RESET_DEASSERT(phy));
6435
6436 vlv_set_power_well(dev_priv, power_well, false);
6437}
6438
6439static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
6440 struct i915_power_well *power_well)
6441{
6442 enum pipe pipe = power_well->data;
6443 bool enabled;
6444 u32 state, ctrl;
6445
6446 mutex_lock(&dev_priv->rps.hw_lock);
6447
6448 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
6449 /*
6450 * We only ever set the power-on and power-gate states, anything
6451 * else is unexpected.
6452 */
6453 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
6454 enabled = state == DP_SSS_PWR_ON(pipe);
6455
6456 /*
6457 * A transient state at this point would mean some unexpected party
6458 * is poking at the power controls too.
6459 */
6460 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
6461 WARN_ON(ctrl << 16 != state);
6462
6463 mutex_unlock(&dev_priv->rps.hw_lock);
6464
6465 return enabled;
6466}
6467
6468static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
6469 struct i915_power_well *power_well,
6470 bool enable)
6471{
6472 enum pipe pipe = power_well->data;
6473 u32 state;
6474 u32 ctrl;
6475
6476 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
6477
6478 mutex_lock(&dev_priv->rps.hw_lock);
6479
6480#define COND \
6481 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
6482
6483 if (COND)
6484 goto out;
6485
6486 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6487 ctrl &= ~DP_SSC_MASK(pipe);
6488 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
6489 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
6490
6491 if (wait_for(COND, 100))
6492 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6493 state,
6494 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
6495
6496#undef COND
6497
6498out:
6499 mutex_unlock(&dev_priv->rps.hw_lock);
6500}
6501
6502static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
6503 struct i915_power_well *power_well)
6504{
6505 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
6506}
6507
6508static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
6509 struct i915_power_well *power_well)
6510{
6511 WARN_ON_ONCE(power_well->data != PIPE_A &&
6512 power_well->data != PIPE_B &&
6513 power_well->data != PIPE_C);
6514
6515 chv_set_pipe_power_well(dev_priv, power_well, true);
6516}
6517
6518static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
6519 struct i915_power_well *power_well)
6520{
6521 WARN_ON_ONCE(power_well->data != PIPE_A &&
6522 power_well->data != PIPE_B &&
6523 power_well->data != PIPE_C);
6524
6525 chv_set_pipe_power_well(dev_priv, power_well, false);
6526}
6527
6255static void check_power_well_state(struct drm_i915_private *dev_priv, 6528static void check_power_well_state(struct drm_i915_private *dev_priv,
6256 struct i915_power_well *power_well) 6529 struct i915_power_well *power_well)
6257{ 6530{
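A small stand-alone sketch of the consistency check inside chv_pipe_power_well_enabled() above; the assumption, taken from the WARN_ON() itself, is that each SSC control field sits 16 bits below the matching SSS status field, so a settled Punit reproduces the control bits when they are shifted up by 16. Names are illustrative, not the driver's:

	#include <stdbool.h>
	#include <stdint.h>

	static bool punit_pipe_state_settled(uint32_t dspfreq,
					     uint32_t ctrl_mask,
					     uint32_t status_mask)
	{
		uint32_t ctrl   = dspfreq & ctrl_mask;		/* DP_SSC_MASK(pipe) bits */
		uint32_t status = dspfreq & status_mask;	/* DP_SSS_MASK(pipe) bits */

		/* A transient state means someone else is poking the power controls. */
		return (ctrl << 16) == status;
	}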
@@ -6443,6 +6716,39 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6443 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \ 6716 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6444 BIT(POWER_DOMAIN_INIT)) 6717 BIT(POWER_DOMAIN_INIT))
6445 6718
6719#define CHV_PIPE_A_POWER_DOMAINS ( \
6720 BIT(POWER_DOMAIN_PIPE_A) | \
6721 BIT(POWER_DOMAIN_INIT))
6722
6723#define CHV_PIPE_B_POWER_DOMAINS ( \
6724 BIT(POWER_DOMAIN_PIPE_B) | \
6725 BIT(POWER_DOMAIN_INIT))
6726
6727#define CHV_PIPE_C_POWER_DOMAINS ( \
6728 BIT(POWER_DOMAIN_PIPE_C) | \
6729 BIT(POWER_DOMAIN_INIT))
6730
6731#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
6732 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6733 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6734 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6735 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6736 BIT(POWER_DOMAIN_INIT))
6737
6738#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
6739 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6740 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6741 BIT(POWER_DOMAIN_INIT))
6742
6743#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
6744 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6745 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6746 BIT(POWER_DOMAIN_INIT))
6747
6748#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
6749 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6750 BIT(POWER_DOMAIN_INIT))
6751
6446static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 6752static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6447 .sync_hw = i9xx_always_on_power_well_noop, 6753 .sync_hw = i9xx_always_on_power_well_noop,
6448 .enable = i9xx_always_on_power_well_noop, 6754 .enable = i9xx_always_on_power_well_noop,
@@ -6450,6 +6756,20 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6450 .is_enabled = i9xx_always_on_power_well_enabled, 6756 .is_enabled = i9xx_always_on_power_well_enabled,
6451}; 6757};
6452 6758
6759static const struct i915_power_well_ops chv_pipe_power_well_ops = {
6760 .sync_hw = chv_pipe_power_well_sync_hw,
6761 .enable = chv_pipe_power_well_enable,
6762 .disable = chv_pipe_power_well_disable,
6763 .is_enabled = chv_pipe_power_well_enabled,
6764};
6765
6766static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
6767 .sync_hw = vlv_power_well_sync_hw,
6768 .enable = chv_dpio_cmn_power_well_enable,
6769 .disable = chv_dpio_cmn_power_well_disable,
6770 .is_enabled = vlv_power_well_enabled,
6771};
6772
6453static struct i915_power_well i9xx_always_on_power_well[] = { 6773static struct i915_power_well i9xx_always_on_power_well[] = {
6454 { 6774 {
6455 .name = "always-on", 6775 .name = "always-on",
@@ -6572,6 +6892,107 @@ static struct i915_power_well vlv_power_wells[] = {
6572 }, 6892 },
6573}; 6893};
6574 6894
6895static struct i915_power_well chv_power_wells[] = {
6896 {
6897 .name = "always-on",
6898 .always_on = 1,
6899 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6900 .ops = &i9xx_always_on_power_well_ops,
6901 },
6902#if 0
6903 {
6904 .name = "display",
6905 .domains = VLV_DISPLAY_POWER_DOMAINS,
6906 .data = PUNIT_POWER_WELL_DISP2D,
6907 .ops = &vlv_display_power_well_ops,
6908 },
6909 {
6910 .name = "pipe-a",
6911 .domains = CHV_PIPE_A_POWER_DOMAINS,
6912 .data = PIPE_A,
6913 .ops = &chv_pipe_power_well_ops,
6914 },
6915 {
6916 .name = "pipe-b",
6917 .domains = CHV_PIPE_B_POWER_DOMAINS,
6918 .data = PIPE_B,
6919 .ops = &chv_pipe_power_well_ops,
6920 },
6921 {
6922 .name = "pipe-c",
6923 .domains = CHV_PIPE_C_POWER_DOMAINS,
6924 .data = PIPE_C,
6925 .ops = &chv_pipe_power_well_ops,
6926 },
6927#endif
6928 {
6929 .name = "dpio-common-bc",
6930 /*
6931 * XXX: cmnreset for one PHY seems to disturb the other.
6932 * As a workaround keep both powered on at the same
6933 * time for now.
6934 */
6935 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6936 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6937 .ops = &chv_dpio_cmn_power_well_ops,
6938 },
6939 {
6940 .name = "dpio-common-d",
6941 /*
6942 * XXX: cmnreset for one PHY seems to disturb the other.
6943 * As a workaround keep both powered on at the same
6944 * time for now.
6945 */
6946 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6947 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
6948 .ops = &chv_dpio_cmn_power_well_ops,
6949 },
6950#if 0
6951 {
6952 .name = "dpio-tx-b-01",
6953 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6954 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6955 .ops = &vlv_dpio_power_well_ops,
6956 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6957 },
6958 {
6959 .name = "dpio-tx-b-23",
6960 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6961 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6962 .ops = &vlv_dpio_power_well_ops,
6963 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6964 },
6965 {
6966 .name = "dpio-tx-c-01",
6967 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6968 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6969 .ops = &vlv_dpio_power_well_ops,
6970 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6971 },
6972 {
6973 .name = "dpio-tx-c-23",
6974 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6975 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6976 .ops = &vlv_dpio_power_well_ops,
6977 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6978 },
6979 {
6980 .name = "dpio-tx-d-01",
6981 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6982 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6983 .ops = &vlv_dpio_power_well_ops,
6984 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
6985 },
6986 {
6987 .name = "dpio-tx-d-23",
6988 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6989 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6990 .ops = &vlv_dpio_power_well_ops,
6991 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
6992 },
6993#endif
6994};
6995
6575static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv, 6996static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6576 enum punit_power_well power_well_id) 6997 enum punit_power_well power_well_id)
6577{ 6998{
@@ -6608,6 +7029,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
6608 } else if (IS_BROADWELL(dev_priv->dev)) { 7029 } else if (IS_BROADWELL(dev_priv->dev)) {
6609 set_power_wells(power_domains, bdw_power_wells); 7030 set_power_wells(power_domains, bdw_power_wells);
6610 hsw_pwr = power_domains; 7031 hsw_pwr = power_domains;
7032 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
7033 set_power_wells(power_domains, chv_power_wells);
6611 } else if (IS_VALLEYVIEW(dev_priv->dev)) { 7034 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
6612 set_power_wells(power_domains, vlv_power_wells); 7035 set_power_wells(power_domains, vlv_power_wells);
6613 } else { 7036 } else {
@@ -6833,13 +7256,15 @@ void intel_init_pm(struct drm_device *dev)
6833 else if (IS_HASWELL(dev)) 7256 else if (IS_HASWELL(dev))
6834 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 7257 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6835 else if (INTEL_INFO(dev)->gen == 8) 7258 else if (INTEL_INFO(dev)->gen == 8)
6836 dev_priv->display.init_clock_gating = gen8_init_clock_gating; 7259 dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
6837 } else if (IS_CHERRYVIEW(dev)) { 7260 } else if (IS_CHERRYVIEW(dev)) {
6838 dev_priv->display.update_wm = valleyview_update_wm; 7261 dev_priv->display.update_wm = cherryview_update_wm;
7262 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
6839 dev_priv->display.init_clock_gating = 7263 dev_priv->display.init_clock_gating =
6840 cherryview_init_clock_gating; 7264 cherryview_init_clock_gating;
6841 } else if (IS_VALLEYVIEW(dev)) { 7265 } else if (IS_VALLEYVIEW(dev)) {
6842 dev_priv->display.update_wm = valleyview_update_wm; 7266 dev_priv->display.update_wm = valleyview_update_wm;
7267 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
6843 dev_priv->display.init_clock_gating = 7268 dev_priv->display.init_clock_gating =
6844 valleyview_init_clock_gating; 7269 valleyview_init_clock_gating;
6845 } else if (IS_PINEVIEW(dev)) { 7270 } else if (IS_PINEVIEW(dev)) {
@@ -7025,6 +7450,7 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7025 return -1; 7450 return -1;
7026 } 7451 }
7027 7452
7453 /* CHV needs even values */
7028 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2); 7454 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
7029 7455
7030 return opcode; 7456 return opcode;
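The new comment in chv_freq_opcode() is terse; the shape of the calculation is easier to see in isolation. A sketch assuming the kernel's DIV_ROUND_CLOSEST() behaviour for non-negative operands (freq_to_even_opcode is an illustrative name): round-to-nearest division by the CZ clock, then doubling, so the returned opcode is always even as CHV requires.

	#define DIV_ROUND_CLOSEST(x, divisor) (((x) + (divisor) / 2) / (divisor))

	static int freq_to_even_opcode(int val, int mul, int cz_freq)
	{
		/* round-to-nearest division, then double so the result stays even */
		return DIV_ROUND_CLOSEST(val * 2 * mul, cz_freq) * 2;
	}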
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index fd4f66231d30..6c792d3a9c9c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -24,13 +24,7 @@
24#ifndef _INTEL_RENDERSTATE_H 24#ifndef _INTEL_RENDERSTATE_H
25#define _INTEL_RENDERSTATE_H 25#define _INTEL_RENDERSTATE_H
26 26
27#include <linux/types.h> 27#include "i915_drv.h"
28
29struct intel_renderstate_rodata {
30 const u32 *reloc;
31 const u32 *batch;
32 const u32 batch_items;
33};
34 28
35extern const struct intel_renderstate_rodata gen6_null_state; 29extern const struct intel_renderstate_rodata gen6_null_state;
36extern const struct intel_renderstate_rodata gen7_null_state; 30extern const struct intel_renderstate_rodata gen7_null_state;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 47a126a0493f..0a80e419b589 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,14 +33,24 @@
33#include "i915_trace.h" 33#include "i915_trace.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35 35
36/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, 36bool
37 * but keeps the logic simple. Indeed, the whole purpose of this macro is just 37intel_ring_initialized(struct intel_engine_cs *ring)
38 * to give some inclination as to some of the magic values used in the various 38{
39 * workarounds! 39 struct drm_device *dev = ring->dev;
40 */ 40
41#define CACHELINE_BYTES 64 41 if (!dev)
42 return false;
43
44 if (i915.enable_execlists) {
45 struct intel_context *dctx = ring->default_context;
46 struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
47
48 return ringbuf->obj;
49 } else
50 return ring->buffer && ring->buffer->obj;
51}
42 52
43static inline int __ring_space(int head, int tail, int size) 53int __intel_ring_space(int head, int tail, int size)
44{ 54{
45 int space = head - (tail + I915_RING_FREE_SPACE); 55 int space = head - (tail + I915_RING_FREE_SPACE);
46 if (space < 0) 56 if (space < 0)
@@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size)
48 return space; 58 return space;
49} 59}
50 60
51static inline int ring_space(struct intel_ringbuffer *ringbuf) 61int intel_ring_space(struct intel_ringbuffer *ringbuf)
52{ 62{
53 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); 63 return __intel_ring_space(ringbuf->head & HEAD_ADDR,
64 ringbuf->tail, ringbuf->size);
54} 65}
55 66
56static bool intel_ring_stopped(struct intel_engine_cs *ring) 67bool intel_ring_stopped(struct intel_engine_cs *ring)
57{ 68{
58 struct drm_i915_private *dev_priv = ring->dev->dev_private; 69 struct drm_i915_private *dev_priv = ring->dev->dev_private;
59 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); 70 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
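The newly exported __intel_ring_space()/intel_ring_space() pair above is the classic circular-buffer free-space calculation. A stand-alone version (reserved corresponds to I915_RING_FREE_SPACE; ring_free_space is an illustrative name):

	static int ring_free_space(int head, int tail, int size, int reserved)
	{
		/* gap between consumer (head) and producer (tail), minus slack
		 * so the tail can never catch up with the head */
		int space = head - (tail + reserved);

		if (space < 0)		/* tail is ahead of head: wrap around */
			space += size;
		return space;
	}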
@@ -433,7 +444,14 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
433 return ret; 444 return ret;
434 } 445 }
435 446
436 return gen8_emit_pipe_control(ring, flags, scratch_addr); 447 ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
448 if (ret)
449 return ret;
450
451 if (!invalidate_domains && flush_domains)
452 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
453
454 return 0;
437} 455}
438 456
439static void ring_write_tail(struct intel_engine_cs *ring, 457static void ring_write_tail(struct intel_engine_cs *ring,
@@ -476,9 +494,14 @@ static bool stop_ring(struct intel_engine_cs *ring)
476 494
477 if (!IS_GEN2(ring->dev)) { 495 if (!IS_GEN2(ring->dev)) {
478 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 496 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
479 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 497 if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
480 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); 498 DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
481 return false; 499 /* Sometimes we observe that the idle flag is not
500 * set even though the ring is empty. So double
501 * check before giving up.
502 */
503 if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
504 return false;
482 } 505 }
483 } 506 }
484 507
@@ -540,6 +563,14 @@ static int init_ring_common(struct intel_engine_cs *ring)
540 * also enforces ordering), otherwise the hw might lose the new ring 563 * also enforces ordering), otherwise the hw might lose the new ring
541 * register values. */ 564 * register values. */
542 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); 565 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
566
567 /* WaClearRingBufHeadRegAtInit:ctg,elk */
568 if (I915_READ_HEAD(ring))
569 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
570 ring->name, I915_READ_HEAD(ring));
571 I915_WRITE_HEAD(ring, 0);
572 (void)I915_READ_HEAD(ring);
573
543 I915_WRITE_CTL(ring, 574 I915_WRITE_CTL(ring,
544 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) 575 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
545 | RING_VALID); 576 | RING_VALID);
@@ -563,7 +594,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
563 else { 594 else {
564 ringbuf->head = I915_READ_HEAD(ring); 595 ringbuf->head = I915_READ_HEAD(ring);
565 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 596 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
566 ringbuf->space = ring_space(ringbuf); 597 ringbuf->space = intel_ring_space(ringbuf);
567 ringbuf->last_retired_head = -1; 598 ringbuf->last_retired_head = -1;
568 } 599 }
569 600
@@ -575,8 +606,25 @@ out:
575 return ret; 606 return ret;
576} 607}
577 608
578static int 609void
579init_pipe_control(struct intel_engine_cs *ring) 610intel_fini_pipe_control(struct intel_engine_cs *ring)
611{
612 struct drm_device *dev = ring->dev;
613
614 if (ring->scratch.obj == NULL)
615 return;
616
617 if (INTEL_INFO(dev)->gen >= 5) {
618 kunmap(sg_page(ring->scratch.obj->pages->sgl));
619 i915_gem_object_ggtt_unpin(ring->scratch.obj);
620 }
621
622 drm_gem_object_unreference(&ring->scratch.obj->base);
623 ring->scratch.obj = NULL;
624}
625
626int
627intel_init_pipe_control(struct intel_engine_cs *ring)
580{ 628{
581 int ret; 629 int ret;
582 630
@@ -617,6 +665,135 @@ err:
617 return ret; 665 return ret;
618} 666}
619 667
668static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
669 u32 addr, u32 value)
670{
671 struct drm_device *dev = ring->dev;
672 struct drm_i915_private *dev_priv = dev->dev_private;
673
674 if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
675 return;
676
677 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
678 intel_ring_emit(ring, addr);
679 intel_ring_emit(ring, value);
680
681 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
682 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
683 /* value is updated with the status of remaining bits of this
684 * register when it is read from debugfs file
685 */
686 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
687 dev_priv->num_wa_regs++;
688
689 return;
690}
691
692static int bdw_init_workarounds(struct intel_engine_cs *ring)
693{
694 int ret;
695 struct drm_device *dev = ring->dev;
696 struct drm_i915_private *dev_priv = dev->dev_private;
697
698 /*
 699 * The workarounds applied in this function are part of the register state
 700 * context, so they need to be re-applied following a gpu reset,
 701 * suspend/resume, or a module reload.
702 */
703 dev_priv->num_wa_regs = 0;
704 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
705
706 /*
707 * update the number of dwords required based on the
708 * actual number of workarounds applied
709 */
710 ret = intel_ring_begin(ring, 18);
711 if (ret)
712 return ret;
713
714 /* WaDisablePartialInstShootdown:bdw */
715 /* WaDisableThreadStallDopClockGating:bdw */
716 /* FIXME: Unclear whether we really need this on production bdw. */
717 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
718 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
719 | STALL_DOP_GATING_DISABLE));
720
721 /* WaDisableDopClockGating:bdw May not be needed for production */
722 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
723 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
724
725 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
726 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
727
728 /* Use Force Non-Coherent whenever executing a 3D context. This is a
 729 * workaround for a possible hang in the unlikely event a TLB
730 * invalidation occurs during a PSD flush.
731 */
732 intel_ring_emit_wa(ring, HDC_CHICKEN0,
733 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
734
735 /* Wa4x4STCOptimizationDisable:bdw */
736 intel_ring_emit_wa(ring, CACHE_MODE_1,
737 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
738
739 /*
740 * BSpec recommends 8x4 when MSAA is used,
741 * however in practice 16x4 seems fastest.
742 *
743 * Note that PS/WM thread counts depend on the WIZ hashing
744 * disable bit, which we don't touch here, but it's good
745 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
746 */
747 intel_ring_emit_wa(ring, GEN7_GT_MODE,
748 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
749
750 intel_ring_advance(ring);
751
752 DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
753 dev_priv->num_wa_regs);
754
755 return 0;
756}
757
758static int chv_init_workarounds(struct intel_engine_cs *ring)
759{
760 int ret;
761 struct drm_device *dev = ring->dev;
762 struct drm_i915_private *dev_priv = dev->dev_private;
763
764 /*
 765 * The workarounds applied in this function are part of the register state
 766 * context, so they need to be re-applied following a gpu reset,
 767 * suspend/resume, or a module reload.
768 */
769 dev_priv->num_wa_regs = 0;
770 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
771
772 ret = intel_ring_begin(ring, 12);
773 if (ret)
774 return ret;
775
776 /* WaDisablePartialInstShootdown:chv */
777 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
778 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
779
780 /* WaDisableThreadStallDopClockGating:chv */
781 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
782 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
783
784 /* WaDisableDopClockGating:chv (pre-production hw) */
785 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
786 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
787
788 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
789 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
790 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
791
792 intel_ring_advance(ring);
793
794 return 0;
795}
796
620static int init_render_ring(struct intel_engine_cs *ring) 797static int init_render_ring(struct intel_engine_cs *ring)
621{ 798{
622 struct drm_device *dev = ring->dev; 799 struct drm_device *dev = ring->dev;
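Each intel_ring_emit_wa() call above emits three dwords (an MI_LOAD_REGISTER_IMM(1) header, the register offset and the value), which is where the intel_ring_begin() sizes come from: 18 dwords for the six BDW writes, 12 for the four CHV writes. A trivial helper just to make that budget explicit; it is not part of the driver:

	static int wa_ring_dwords(int num_wa_writes)
	{
		return num_wa_writes * 3;	/* LRI header + register offset + value */
	}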
@@ -651,7 +828,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
651 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 828 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
652 829
653 if (INTEL_INFO(dev)->gen >= 5) { 830 if (INTEL_INFO(dev)->gen >= 5) {
654 ret = init_pipe_control(ring); 831 ret = intel_init_pipe_control(ring);
655 if (ret) 832 if (ret)
656 return ret; 833 return ret;
657 } 834 }
@@ -686,16 +863,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
686 dev_priv->semaphore_obj = NULL; 863 dev_priv->semaphore_obj = NULL;
687 } 864 }
688 865
689 if (ring->scratch.obj == NULL) 866 intel_fini_pipe_control(ring);
690 return;
691
692 if (INTEL_INFO(dev)->gen >= 5) {
693 kunmap(sg_page(ring->scratch.obj->pages->sgl));
694 i915_gem_object_ggtt_unpin(ring->scratch.obj);
695 }
696
697 drm_gem_object_unreference(&ring->scratch.obj->base);
698 ring->scratch.obj = NULL;
699} 867}
700 868
701static int gen8_rcs_signal(struct intel_engine_cs *signaller, 869static int gen8_rcs_signal(struct intel_engine_cs *signaller,
@@ -1526,7 +1694,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
1526 return 0; 1694 return 0;
1527} 1695}
1528 1696
1529static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1697void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1530{ 1698{
1531 if (!ringbuf->obj) 1699 if (!ringbuf->obj)
1532 return; 1700 return;
@@ -1537,8 +1705,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1537 ringbuf->obj = NULL; 1705 ringbuf->obj = NULL;
1538} 1706}
1539 1707
1540static int intel_alloc_ringbuffer_obj(struct drm_device *dev, 1708int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1541 struct intel_ringbuffer *ringbuf) 1709 struct intel_ringbuffer *ringbuf)
1542{ 1710{
1543 struct drm_i915_private *dev_priv = to_i915(dev); 1711 struct drm_i915_private *dev_priv = to_i915(dev);
1544 struct drm_i915_gem_object *obj; 1712 struct drm_i915_gem_object *obj;
@@ -1600,7 +1768,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1600 ring->dev = dev; 1768 ring->dev = dev;
1601 INIT_LIST_HEAD(&ring->active_list); 1769 INIT_LIST_HEAD(&ring->active_list);
1602 INIT_LIST_HEAD(&ring->request_list); 1770 INIT_LIST_HEAD(&ring->request_list);
1771 INIT_LIST_HEAD(&ring->execlist_queue);
1603 ringbuf->size = 32 * PAGE_SIZE; 1772 ringbuf->size = 32 * PAGE_SIZE;
1773 ringbuf->ring = ring;
1604 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 1774 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
1605 1775
1606 init_waitqueue_head(&ring->irq_queue); 1776 init_waitqueue_head(&ring->irq_queue);
@@ -1683,13 +1853,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1683 ringbuf->head = ringbuf->last_retired_head; 1853 ringbuf->head = ringbuf->last_retired_head;
1684 ringbuf->last_retired_head = -1; 1854 ringbuf->last_retired_head = -1;
1685 1855
1686 ringbuf->space = ring_space(ringbuf); 1856 ringbuf->space = intel_ring_space(ringbuf);
1687 if (ringbuf->space >= n) 1857 if (ringbuf->space >= n)
1688 return 0; 1858 return 0;
1689 } 1859 }
1690 1860
1691 list_for_each_entry(request, &ring->request_list, list) { 1861 list_for_each_entry(request, &ring->request_list, list) {
1692 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) { 1862 if (__intel_ring_space(request->tail, ringbuf->tail,
1863 ringbuf->size) >= n) {
1693 seqno = request->seqno; 1864 seqno = request->seqno;
1694 break; 1865 break;
1695 } 1866 }
@@ -1706,7 +1877,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1706 ringbuf->head = ringbuf->last_retired_head; 1877 ringbuf->head = ringbuf->last_retired_head;
1707 ringbuf->last_retired_head = -1; 1878 ringbuf->last_retired_head = -1;
1708 1879
1709 ringbuf->space = ring_space(ringbuf); 1880 ringbuf->space = intel_ring_space(ringbuf);
1710 return 0; 1881 return 0;
1711} 1882}
1712 1883
@@ -1735,7 +1906,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1735 trace_i915_ring_wait_begin(ring); 1906 trace_i915_ring_wait_begin(ring);
1736 do { 1907 do {
1737 ringbuf->head = I915_READ_HEAD(ring); 1908 ringbuf->head = I915_READ_HEAD(ring);
1738 ringbuf->space = ring_space(ringbuf); 1909 ringbuf->space = intel_ring_space(ringbuf);
1739 if (ringbuf->space >= n) { 1910 if (ringbuf->space >= n) {
1740 ret = 0; 1911 ret = 0;
1741 break; 1912 break;
@@ -1787,7 +1958,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1787 iowrite32(MI_NOOP, virt++); 1958 iowrite32(MI_NOOP, virt++);
1788 1959
1789 ringbuf->tail = 0; 1960 ringbuf->tail = 0;
1790 ringbuf->space = ring_space(ringbuf); 1961 ringbuf->space = intel_ring_space(ringbuf);
1791 1962
1792 return 0; 1963 return 0;
1793} 1964}
@@ -1992,9 +2163,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
1992 u64 offset, u32 len, 2163 u64 offset, u32 len,
1993 unsigned flags) 2164 unsigned flags)
1994{ 2165{
1995 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2166 bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
1996 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
1997 !(flags & I915_DISPATCH_SECURE);
1998 int ret; 2167 int ret;
1999 2168
2000 ret = intel_ring_begin(ring, 4); 2169 ret = intel_ring_begin(ring, 4);
@@ -2023,8 +2192,9 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
2023 return ret; 2192 return ret;
2024 2193
2025 intel_ring_emit(ring, 2194 intel_ring_emit(ring,
2026 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | 2195 MI_BATCH_BUFFER_START |
2027 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); 2196 (flags & I915_DISPATCH_SECURE ?
2197 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
2028 /* bit0-7 is the length on GEN6+ */ 2198 /* bit0-7 is the length on GEN6+ */
2029 intel_ring_emit(ring, offset); 2199 intel_ring_emit(ring, offset);
2030 intel_ring_advance(ring); 2200 intel_ring_advance(ring);
@@ -2123,6 +2293,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2123 dev_priv->semaphore_obj = obj; 2293 dev_priv->semaphore_obj = obj;
2124 } 2294 }
2125 } 2295 }
2296 if (IS_CHERRYVIEW(dev))
2297 ring->init_context = chv_init_workarounds;
2298 else
2299 ring->init_context = bdw_init_workarounds;
2126 ring->add_request = gen6_add_request; 2300 ring->add_request = gen6_add_request;
2127 ring->flush = gen8_render_ring_flush; 2301 ring->flush = gen8_render_ring_flush;
2128 ring->irq_get = gen8_ring_get_irq; 2302 ring->irq_get = gen8_ring_get_irq;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 70525d0c2c74..96479c89f4bd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,13 @@
5 5
6#define I915_CMD_HASH_ORDER 9 6#define I915_CMD_HASH_ORDER 9
7 7
8/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
9 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
10 * to give some inclination as to some of the magic values used in the various
11 * workarounds!
12 */
13#define CACHELINE_BYTES 64
14
8/* 15/*
9 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use" 16 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
10 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use" 17 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@@ -90,6 +97,15 @@ struct intel_ringbuffer {
90 struct drm_i915_gem_object *obj; 97 struct drm_i915_gem_object *obj;
91 void __iomem *virtual_start; 98 void __iomem *virtual_start;
92 99
100 struct intel_engine_cs *ring;
101
102 /*
103 * FIXME: This backpointer is an artifact of the history of how the
104 * execlist patches came into being. It will get removed once the basic
105 * code has landed.
106 */
107 struct intel_context *FIXME_lrc_ctx;
108
93 u32 head; 109 u32 head;
94 u32 tail; 110 u32 tail;
95 int space; 111 int space;
@@ -132,6 +148,8 @@ struct intel_engine_cs {
132 148
133 int (*init)(struct intel_engine_cs *ring); 149 int (*init)(struct intel_engine_cs *ring);
134 150
151 int (*init_context)(struct intel_engine_cs *ring);
152
135 void (*write_tail)(struct intel_engine_cs *ring, 153 void (*write_tail)(struct intel_engine_cs *ring,
136 u32 value); 154 u32 value);
137 int __must_check (*flush)(struct intel_engine_cs *ring, 155 int __must_check (*flush)(struct intel_engine_cs *ring,
@@ -214,6 +232,18 @@ struct intel_engine_cs {
214 unsigned int num_dwords); 232 unsigned int num_dwords);
215 } semaphore; 233 } semaphore;
216 234
235 /* Execlists */
236 spinlock_t execlist_lock;
237 struct list_head execlist_queue;
238 u8 next_context_status_buffer;
239 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
240 int (*emit_request)(struct intel_ringbuffer *ringbuf);
241 int (*emit_flush)(struct intel_ringbuffer *ringbuf,
242 u32 invalidate_domains,
243 u32 flush_domains);
244 int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
245 u64 offset, unsigned flags);
246
217 /** 247 /**
218 * List of objects currently involved in rendering from the 248 * List of objects currently involved in rendering from the
219 * ringbuffer. 249 * ringbuffer.
@@ -287,11 +317,7 @@ struct intel_engine_cs {
287 u32 (*get_cmd_length_mask)(u32 cmd_header); 317 u32 (*get_cmd_length_mask)(u32 cmd_header);
288}; 318};
289 319
290static inline bool 320bool intel_ring_initialized(struct intel_engine_cs *ring);
291intel_ring_initialized(struct intel_engine_cs *ring)
292{
293 return ring->buffer && ring->buffer->obj;
294}
295 321
296static inline unsigned 322static inline unsigned
297intel_ring_flag(struct intel_engine_cs *ring) 323intel_ring_flag(struct intel_engine_cs *ring)
@@ -355,6 +381,10 @@ intel_write_status_page(struct intel_engine_cs *ring,
355#define I915_GEM_HWS_SCRATCH_INDEX 0x30 381#define I915_GEM_HWS_SCRATCH_INDEX 0x30
356#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 382#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
357 383
384void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
385int intel_alloc_ringbuffer_obj(struct drm_device *dev,
386 struct intel_ringbuffer *ringbuf);
387
358void intel_stop_ring_buffer(struct intel_engine_cs *ring); 388void intel_stop_ring_buffer(struct intel_engine_cs *ring);
359void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); 389void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
360 390
@@ -372,6 +402,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
372 struct intel_ringbuffer *ringbuf = ring->buffer; 402 struct intel_ringbuffer *ringbuf = ring->buffer;
373 ringbuf->tail &= ringbuf->size - 1; 403 ringbuf->tail &= ringbuf->size - 1;
374} 404}
405int __intel_ring_space(int head, int tail, int size);
406int intel_ring_space(struct intel_ringbuffer *ringbuf);
407bool intel_ring_stopped(struct intel_engine_cs *ring);
375void __intel_ring_advance(struct intel_engine_cs *ring); 408void __intel_ring_advance(struct intel_engine_cs *ring);
376 409
377int __must_check intel_ring_idle(struct intel_engine_cs *ring); 410int __must_check intel_ring_idle(struct intel_engine_cs *ring);
@@ -379,6 +412,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
379int intel_ring_flush_all_caches(struct intel_engine_cs *ring); 412int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
380int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring); 413int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
381 414
415void intel_fini_pipe_control(struct intel_engine_cs *ring);
416int intel_init_pipe_control(struct intel_engine_cs *ring);
417
382int intel_init_render_ring_buffer(struct drm_device *dev); 418int intel_init_render_ring_buffer(struct drm_device *dev);
383int intel_init_bsd_ring_buffer(struct drm_device *dev); 419int intel_init_bsd_ring_buffer(struct drm_device *dev);
384int intel_init_bsd2_ring_buffer(struct drm_device *dev); 420int intel_init_bsd2_ring_buffer(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 168c6652cda1..07a74ef589bd 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -53,6 +53,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
53 enum pipe pipe = crtc->pipe; 53 enum pipe pipe = crtc->pipe;
54 long timeout = msecs_to_jiffies_timeout(1); 54 long timeout = msecs_to_jiffies_timeout(1);
55 int scanline, min, max, vblank_start; 55 int scanline, min, max, vblank_start;
56 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
56 DEFINE_WAIT(wait); 57 DEFINE_WAIT(wait);
57 58
58 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); 59 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
@@ -81,7 +82,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
81 * other CPUs can see the task state update by the time we 82 * other CPUs can see the task state update by the time we
82 * read the scanline. 83 * read the scanline.
83 */ 84 */
84 prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE); 85 prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
85 86
86 scanline = intel_get_crtc_scanline(crtc); 87 scanline = intel_get_crtc_scanline(crtc);
87 if (scanline < min || scanline > max) 88 if (scanline < min || scanline > max)
@@ -100,7 +101,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
100 local_irq_disable(); 101 local_irq_disable();
101 } 102 }
102 103
103 finish_wait(&crtc->vbl_wait, &wait); 104 finish_wait(wq, &wait);
104 105
105 drm_vblank_put(dev, pipe); 106 drm_vblank_put(dev, pipe);
106 107
@@ -163,6 +164,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
163 sprctl &= ~SP_PIXFORMAT_MASK; 164 sprctl &= ~SP_PIXFORMAT_MASK;
164 sprctl &= ~SP_YUV_BYTE_ORDER_MASK; 165 sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
165 sprctl &= ~SP_TILED; 166 sprctl &= ~SP_TILED;
167 sprctl &= ~SP_ROTATE_180;
166 168
167 switch (fb->pixel_format) { 169 switch (fb->pixel_format) {
168 case DRM_FORMAT_YUYV: 170 case DRM_FORMAT_YUYV:
@@ -235,6 +237,14 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
235 fb->pitches[0]); 237 fb->pitches[0]);
236 linear_offset -= sprsurf_offset; 238 linear_offset -= sprsurf_offset;
237 239
240 if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
241 sprctl |= SP_ROTATE_180;
242
243 x += src_w;
244 y += src_h;
245 linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
246 }
247
238 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); 248 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
239 249
240 intel_update_primary_plane(intel_crtc); 250 intel_update_primary_plane(intel_crtc);
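The same 180-degree fixup recurs in the ivb and ilk plane-update paths below: the x/y origin and the linear offset are bumped to the far corner of the source rectangle, since a rotated scanout reads the buffer back from there. A stand-alone form of the offset adjustment (stride is fb->pitches[0], cpp the bytes per pixel; the function name is illustrative):

	static unsigned int rotate_180_linear_offset(unsigned int linear_offset,
						     unsigned int src_w,
						     unsigned int src_h,
						     unsigned int stride,
						     unsigned int cpp)
	{
		/* move the base to just past the bottom-right pixel of the source rect */
		return linear_offset + src_h * stride + src_w * cpp;
	}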
@@ -364,6 +374,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
364 sprctl &= ~SPRITE_RGB_ORDER_RGBX; 374 sprctl &= ~SPRITE_RGB_ORDER_RGBX;
365 sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; 375 sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
366 sprctl &= ~SPRITE_TILED; 376 sprctl &= ~SPRITE_TILED;
377 sprctl &= ~SPRITE_ROTATE_180;
367 378
368 switch (fb->pixel_format) { 379 switch (fb->pixel_format) {
369 case DRM_FORMAT_XBGR8888: 380 case DRM_FORMAT_XBGR8888:
@@ -426,6 +437,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
426 pixel_size, fb->pitches[0]); 437 pixel_size, fb->pitches[0]);
427 linear_offset -= sprsurf_offset; 438 linear_offset -= sprsurf_offset;
428 439
440 if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
441 sprctl |= SPRITE_ROTATE_180;
442
 443 /* HSW and BDW do this automagically in hardware */
444 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
445 x += src_w;
446 y += src_h;
447 linear_offset += src_h * fb->pitches[0] +
448 src_w * pixel_size;
449 }
450 }
451
429 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); 452 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
430 453
431 intel_update_primary_plane(intel_crtc); 454 intel_update_primary_plane(intel_crtc);
@@ -571,6 +594,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
571 dvscntr &= ~DVS_RGB_ORDER_XBGR; 594 dvscntr &= ~DVS_RGB_ORDER_XBGR;
572 dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; 595 dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
573 dvscntr &= ~DVS_TILED; 596 dvscntr &= ~DVS_TILED;
597 dvscntr &= ~DVS_ROTATE_180;
574 598
575 switch (fb->pixel_format) { 599 switch (fb->pixel_format) {
576 case DRM_FORMAT_XBGR8888: 600 case DRM_FORMAT_XBGR8888:
@@ -628,6 +652,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
628 pixel_size, fb->pitches[0]); 652 pixel_size, fb->pitches[0]);
629 linear_offset -= dvssurf_offset; 653 linear_offset -= dvssurf_offset;
630 654
655 if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
656 dvscntr |= DVS_ROTATE_180;
657
658 x += src_w;
659 y += src_h;
660 linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
661 }
662
631 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); 663 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
632 664
633 intel_update_primary_plane(intel_crtc); 665 intel_update_primary_plane(intel_crtc);
@@ -895,6 +927,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
895 max_scale = intel_plane->max_downscale << 16; 927 max_scale = intel_plane->max_downscale << 16;
896 min_scale = intel_plane->can_scale ? 1 : (1 << 16); 928 min_scale = intel_plane->can_scale ? 1 : (1 << 16);
897 929
930 drm_rect_rotate(&src, fb->width << 16, fb->height << 16,
931 intel_plane->rotation);
932
898 hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale); 933 hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
899 BUG_ON(hscale < 0); 934 BUG_ON(hscale < 0);
900 935
@@ -933,6 +968,9 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
933 drm_rect_width(&dst) * hscale - drm_rect_width(&src), 968 drm_rect_width(&dst) * hscale - drm_rect_width(&src),
934 drm_rect_height(&dst) * vscale - drm_rect_height(&src)); 969 drm_rect_height(&dst) * vscale - drm_rect_height(&src));
935 970
971 drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16,
972 intel_plane->rotation);
973
936 /* sanity check to make sure the src viewport wasn't enlarged */ 974 /* sanity check to make sure the src viewport wasn't enlarged */
937 WARN_ON(src.x1 < (int) src_x || 975 WARN_ON(src.x1 < (int) src_x ||
938 src.y1 < (int) src_y || 976 src.y1 < (int) src_y ||
@@ -1180,18 +1218,45 @@ out_unlock:
1180 return ret; 1218 return ret;
1181} 1219}
1182 1220
1183void intel_plane_restore(struct drm_plane *plane) 1221int intel_plane_set_property(struct drm_plane *plane,
1222 struct drm_property *prop,
1223 uint64_t val)
1224{
1225 struct drm_device *dev = plane->dev;
1226 struct intel_plane *intel_plane = to_intel_plane(plane);
1227 uint64_t old_val;
1228 int ret = -ENOENT;
1229
1230 if (prop == dev->mode_config.rotation_property) {
1231 /* exactly one rotation angle please */
1232 if (hweight32(val & 0xf) != 1)
1233 return -EINVAL;
1234
1235 if (intel_plane->rotation == val)
1236 return 0;
1237
1238 old_val = intel_plane->rotation;
1239 intel_plane->rotation = val;
1240 ret = intel_plane_restore(plane);
1241 if (ret)
1242 intel_plane->rotation = old_val;
1243 }
1244
1245 return ret;
1246}
1247
1248int intel_plane_restore(struct drm_plane *plane)
1184{ 1249{
1185 struct intel_plane *intel_plane = to_intel_plane(plane); 1250 struct intel_plane *intel_plane = to_intel_plane(plane);
1186 1251
1187 if (!plane->crtc || !plane->fb) 1252 if (!plane->crtc || !plane->fb)
1188 return; 1253 return 0;
1189 1254
1190 intel_update_plane(plane, plane->crtc, plane->fb, 1255 return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
1191 intel_plane->crtc_x, intel_plane->crtc_y, 1256 intel_plane->crtc_x, intel_plane->crtc_y,
1192 intel_plane->crtc_w, intel_plane->crtc_h, 1257 intel_plane->crtc_w, intel_plane->crtc_h,
1193 intel_plane->src_x, intel_plane->src_y, 1258 intel_plane->src_x, intel_plane->src_y,
1194 intel_plane->src_w, intel_plane->src_h); 1259 intel_plane->src_w, intel_plane->src_h);
1195} 1260}
1196 1261
1197void intel_plane_disable(struct drm_plane *plane) 1262void intel_plane_disable(struct drm_plane *plane)
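The hweight32() test in intel_plane_set_property() above simply demands that exactly one of the four rotation-angle bits is set. An equivalent check without the kernel helper (one_rotation_angle is an illustrative name):

	#include <stdbool.h>
	#include <stdint.h>

	static bool one_rotation_angle(uint32_t val)
	{
		uint32_t angles = val & 0xf;	/* DRM_ROTATE_0/90/180/270 bits */

		/* a non-zero power of two has exactly one bit set */
		return angles != 0 && (angles & (angles - 1)) == 0;
	}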
@@ -1206,6 +1271,7 @@ static const struct drm_plane_funcs intel_plane_funcs = {
1206 .update_plane = intel_update_plane, 1271 .update_plane = intel_update_plane,
1207 .disable_plane = intel_disable_plane, 1272 .disable_plane = intel_disable_plane,
1208 .destroy = intel_destroy_plane, 1273 .destroy = intel_destroy_plane,
1274 .set_property = intel_plane_set_property,
1209}; 1275};
1210 1276
1211static uint32_t ilk_plane_formats[] = { 1277static uint32_t ilk_plane_formats[] = {
@@ -1310,13 +1376,28 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1310 1376
1311 intel_plane->pipe = pipe; 1377 intel_plane->pipe = pipe;
1312 intel_plane->plane = plane; 1378 intel_plane->plane = plane;
1379 intel_plane->rotation = BIT(DRM_ROTATE_0);
1313 possible_crtcs = (1 << pipe); 1380 possible_crtcs = (1 << pipe);
1314 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, 1381 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1315 &intel_plane_funcs, 1382 &intel_plane_funcs,
1316 plane_formats, num_plane_formats, 1383 plane_formats, num_plane_formats,
1317 false); 1384 DRM_PLANE_TYPE_OVERLAY);
1318 if (ret) 1385 if (ret) {
1319 kfree(intel_plane); 1386 kfree(intel_plane);
1387 goto out;
1388 }
1389
1390 if (!dev->mode_config.rotation_property)
1391 dev->mode_config.rotation_property =
1392 drm_mode_create_rotation_property(dev,
1393 BIT(DRM_ROTATE_0) |
1394 BIT(DRM_ROTATE_180));
1395
1396 if (dev->mode_config.rotation_property)
1397 drm_object_attach_property(&intel_plane->base.base,
1398 dev->mode_config.rotation_property,
1399 intel_plane->rotation);
1320 1400
1401 out:
1321 return ret; 1402 return ret;
1322} 1403}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index e81bc3bdc533..918b76163965 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -101,7 +101,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
101{ 101{
102 u32 forcewake_ack; 102 u32 forcewake_ack;
103 103
104 if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev)) 104 if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
105 forcewake_ack = FORCEWAKE_ACK_HSW; 105 forcewake_ack = FORCEWAKE_ACK_HSW;
106 else 106 else
107 forcewake_ack = FORCEWAKE_MT_ACK; 107 forcewake_ack = FORCEWAKE_MT_ACK;
@@ -334,7 +334,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
334 else if (IS_GEN6(dev) || IS_GEN7(dev)) 334 else if (IS_GEN6(dev) || IS_GEN7(dev))
335 __gen6_gt_force_wake_reset(dev_priv); 335 __gen6_gt_force_wake_reset(dev_priv);
336 336
337 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) 337 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
338 __gen7_gt_force_wake_mt_reset(dev_priv); 338 __gen7_gt_force_wake_mt_reset(dev_priv);
339 339
340 if (restore) { /* If reset with a user forcewake, try to restore */ 340 if (restore) { /* If reset with a user forcewake, try to restore */
@@ -838,7 +838,7 @@ void intel_uncore_init(struct drm_device *dev)
838 if (IS_VALLEYVIEW(dev)) { 838 if (IS_VALLEYVIEW(dev)) {
839 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; 839 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
840 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; 840 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
841 } else if (IS_HASWELL(dev) || IS_GEN8(dev)) { 841 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
842 dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get; 842 dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
843 dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put; 843 dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
844 } else if (IS_IVYBRIDGE(dev)) { 844 } else if (IS_IVYBRIDGE(dev)) {